[Identifier cross-reference: occurrences of `n` in the Linux kernel sources, as emitted by a source-code cross-referencer. Each entry in the original listing gives a line number, a file path, and the source line containing `n`; the hits span Documentation/scheduler/sched-pelt.c and the arch/alpha, arch/arc, arch/arm, arch/arm64, arch/c6x, and arch/csky trees (boot and library code, cmpxchg/uaccess/string/delay headers, ptrace, perf, KVM, MM, and platform register headers).]
const void *from, unsigned long n); n 387 arch/csky/include/asm/uaccess.h unsigned long raw_copy_to_user(void *to, const void *from, unsigned long n); n 389 arch/csky/include/asm/uaccess.h unsigned long clear_user(void *to, unsigned long n); n 390 arch/csky/include/asm/uaccess.h unsigned long __clear_user(void __user *to, unsigned long n); n 400 arch/csky/include/asm/uaccess.h long strnlen_user(const char *src, long n); n 168 arch/csky/kernel/ptrace.c .n = sizeof(struct pt_regs) / sizeof(u32), n 176 arch/csky/kernel/ptrace.c .n = sizeof(struct user_fp) / sizeof(u32), n 188 arch/csky/kernel/ptrace.c .n = ARRAY_SIZE(csky_regsets), n 8 arch/csky/lib/usercopy.c unsigned long n) n 10 arch/csky/lib/usercopy.c ___copy_from_user(to, from, n); n 11 arch/csky/lib/usercopy.c return n; n 16 arch/csky/lib/usercopy.c unsigned long n) n 18 arch/csky/lib/usercopy.c ___copy_to_user(to, from, n); n 19 arch/csky/lib/usercopy.c return n; n 129 arch/csky/lib/usercopy.c long strnlen_user(const char *s, long n) n 157 arch/csky/lib/usercopy.c : "=r"(n), "=r"(s), "=r"(res), "=r"(tmp) n 158 arch/csky/lib/usercopy.c : "0"(n), "1"(s), "2"(n) n 233 arch/csky/lib/usercopy.c clear_user(void __user *to, unsigned long n) n 235 arch/csky/lib/usercopy.c if (access_ok(to, n)) n 236 arch/csky/lib/usercopy.c __do_clear_user(to, n); n 237 arch/csky/lib/usercopy.c return n; n 253 arch/csky/lib/usercopy.c __clear_user(void __user *to, unsigned long n) n 255 arch/csky/lib/usercopy.c __do_clear_user(to, n); n 256 arch/csky/lib/usercopy.c return n; n 24 arch/h8300/boot/compressed/misc.c #define memzero(s, n) memset((s), (0), (n)) n 44 arch/h8300/boot/compressed/misc.c void *memset(void *s, int c, size_t n) n 49 arch/h8300/boot/compressed/misc.c for (i = 0; i < n; i++) n 54 arch/h8300/boot/compressed/misc.c void *memcpy(void *dest, const void *src, size_t n) n 59 arch/h8300/boot/compressed/misc.c for (i = 0; i < n; i++) n 53 arch/h8300/include/asm/cmpxchg.h #define cmpxchg_local(ptr, o, n) \ n 56 arch/h8300/include/asm/cmpxchg.h (unsigned long)(n), \ n 58 arch/h8300/include/asm/cmpxchg.h #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) n 22 arch/h8300/kernel/h8300_ksyms.c asmlinkage long strncpy_from_user(void *to, void *from, size_t n); n 139 arch/h8300/kernel/ptrace.c .n = ELF_NGREG, n 151 arch/h8300/kernel/ptrace.c .n = ARRAY_SIZE(h8300_regsets), n 14 arch/h8300/kernel/sim-console.c static void sim_write(struct console *con, const char *s, unsigned n) n 17 arch/h8300/kernel/sim-console.c register const unsigned _len __asm__("er2") = n; n 17 arch/hexagon/include/asm/switch_to.h #define switch_to(p, n, r) do {\ n 18 arch/hexagon/include/asm/switch_to.h r = __switch_to((p), (n), (r));\ n 52 arch/hexagon/include/asm/uaccess.h unsigned long n); n 54 arch/hexagon/include/asm/uaccess.h unsigned long n); n 61 arch/hexagon/include/asm/uaccess.h #define __strncpy_from_user(dst, src, n) hexagon_strncpy_from_user(dst, src, n) n 66 arch/hexagon/include/asm/uaccess.h extern long __strnlen_user(const char __user *src, long n); n 69 arch/hexagon/include/asm/uaccess.h long n); n 75 arch/hexagon/include/asm/uaccess.h long n) n 77 arch/hexagon/include/asm/uaccess.h long res = __strnlen_user(src, n); n 82 arch/hexagon/include/asm/uaccess.h if (res > n) { n 83 arch/hexagon/include/asm/uaccess.h long left = raw_copy_from_user(dst, src, n); n 85 arch/hexagon/include/asm/uaccess.h memset(dst + (n - left), 0, left); n 86 arch/hexagon/include/asm/uaccess.h return n; n 159 arch/hexagon/kernel/ptrace.c .n = ELF_NGREG, n 173 
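
The uaccess helpers listed above (arm64 __clear_user(), the csky raw_copy_*_user() and clear_user() wrappers) conventionally return the number of bytes that could not be processed, with 0 meaning complete success. The following is a hedged, userspace-only sketch of that contract; toy_clear_user() and its 'limit' fault simulation are hypothetical stand-ins for the real exception-fixup machinery.

#include <stddef.h>
#include <string.h>

/* Pretend everything past 'limit' would fault (hypothetical stand-in). */
static size_t toy_clear_user(char *to, size_t n, size_t limit)
{
	size_t ok = (n <= limit) ? n : limit;

	memset(to, 0, ok);
	return n - ok;		/* bytes left untouched; 0 means success */
}

int main(void)
{
	char buf[8];

	return toy_clear_user(buf, sizeof(buf), sizeof(buf)) ? 1 : 0;
}

A caller treats any nonzero return as a partial failure, which is why the wrappers above simply hand the helper's return value back.
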
arch/hexagon/kernel/ptrace.c .n = ARRAY_SIZE(hexagon_regsets) n 434 arch/ia64/hp/common/sba_iommu.c #define RESMAP_MASK(n) ~(~0UL << (n)) n 1845 arch/ia64/hp/common/sba_iommu.c loff_t n = *pos; n 1848 arch/ia64/hp/common/sba_iommu.c if (!n--) n 123 arch/ia64/include/asm/asmmacro.h # define TEXT_ALIGN(n) .align n n 125 arch/ia64/include/asm/asmmacro.h # define TEXT_ALIGN(n) n 274 arch/ia64/include/asm/io.h extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n); n 275 arch/ia64/include/asm/io.h extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n); n 276 arch/ia64/include/asm/io.h extern void memset_io(volatile void __iomem *s, int c, long n); n 40 arch/ia64/include/asm/kexec.h struct rsvd_region *rsvd_regions, int n); n 25 arch/ia64/include/asm/kregs.h #define _IA64_KR_PREFIX(n) _IA64_KR_PASTE(ar.k, n) n 26 arch/ia64/include/asm/kregs.h #define IA64_KR(n) _IA64_KR_PREFIX(IA64_KR_##n) n 48 arch/ia64/include/asm/meminit.h #define GRANULEROUNDDOWN(n) ((n) & ~(IA64_GRANULE_SIZE-1)) n 49 arch/ia64/include/asm/meminit.h #define GRANULEROUNDUP(n) (((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1)) n 637 arch/ia64/include/asm/processor.h ia64_rotr (__u64 w, __u64 n) n 639 arch/ia64/include/asm/processor.h return (w >> n) | (w << (64 - n)); n 642 arch/ia64/include/asm/processor.h #define ia64_rotl(w,n) ia64_rotr((w), (64) - (n)) n 97 arch/ia64/include/asm/uaccess.h # define __get_user_size(val, addr, n, err) \ n 101 arch/ia64/include/asm/uaccess.h asm ("\n[1:]\tld"#n" %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n" \ n 114 arch/ia64/include/asm/uaccess.h # define __put_user_size(val, addr, n, err) \ n 117 arch/ia64/include/asm/uaccess.h asm volatile ("\n[1:]\tst"#n" %1=%r2%P1\t// %0 gets overwritten by exception handler\n" \ n 126 arch/ia64/include/asm/uaccess.h # define __get_user_size(val, addr, n, err) \ n 128 arch/ia64/include/asm/uaccess.h __ld_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE); \ n 132 arch/ia64/include/asm/uaccess.h # define __put_user_size(val, addr, n, err) \ n 134 arch/ia64/include/asm/uaccess.h __st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, \ n 218 arch/ia64/include/asm/uaccess.h #define __clear_user(to, n) __do_clear_user(to, n) n 220 arch/ia64/include/asm/uaccess.h #define clear_user(to, n) \ n 222 arch/ia64/include/asm/uaccess.h unsigned long __cu_len = (n); \ n 235 arch/ia64/include/asm/uaccess.h #define strncpy_from_user(to, from, n) \ n 240 arch/ia64/include/asm/uaccess.h __sfu_ret = __strncpy_from_user((to), __sfu_from, (n)); \ n 199 arch/ia64/include/asm/unwind.h unw_set_gr (struct unw_frame_info *i, int n, unsigned long v, char nat) n 201 arch/ia64/include/asm/unwind.h return unw_access_gr(i, n, &v, &nat, 1); n 205 arch/ia64/include/asm/unwind.h unw_set_br (struct unw_frame_info *i, int n, unsigned long v) n 207 arch/ia64/include/asm/unwind.h return unw_access_br(i, n, &v, 1); n 211 arch/ia64/include/asm/unwind.h unw_set_fr (struct unw_frame_info *i, int n, struct ia64_fpreg v) n 213 arch/ia64/include/asm/unwind.h return unw_access_fr(i, n, &v, 1); n 217 arch/ia64/include/asm/unwind.h unw_set_ar (struct unw_frame_info *i, int n, unsigned long v) n 219 arch/ia64/include/asm/unwind.h return unw_access_ar(i, n, &v, 1); n 228 arch/ia64/include/asm/unwind.h #define unw_get_gr(i,n,v,nat) unw_access_gr(i,n,v,nat,0) n 229 arch/ia64/include/asm/unwind.h #define unw_get_br(i,n,v) unw_access_br(i,n,v,0) n 230 arch/ia64/include/asm/unwind.h #define unw_get_fr(i,n,v) unw_access_fr(i,n,v,0) n 231 
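
GRANULEROUNDDOWN()/GRANULEROUNDUP() and RESMAP_MASK() in the ia64 entries above are standard power-of-two mask arithmetic. A small sketch with illustrative names (ROUND_DOWN/ROUND_UP/LOW_MASK), assuming the size is a power of two and the bit count stays below the word width:

#include <assert.h>

/* Sketch: same arithmetic as GRANULEROUNDDOWN/GRANULEROUNDUP/RESMAP_MASK above. */
#define LOW_MASK(bits)		(~(~0UL << (bits)))	/* valid for 0 < bits < 64 */
#define ROUND_DOWN(x, sz)	((x) & ~((sz) - 1UL))
#define ROUND_UP(x, sz)		(((x) + (sz) - 1UL) & ~((sz) - 1UL))

int main(void)
{
	assert(ROUND_DOWN(0x12345UL, 0x1000UL) == 0x12000UL);
	assert(ROUND_UP(0x12345UL, 0x1000UL) == 0x13000UL);
	assert(LOW_MASK(5) == 0x1fUL);
	return 0;
}
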
arch/ia64/include/asm/unwind.h #define unw_get_ar(i,n,v) unw_access_ar(i,n,v,0) n 122 arch/ia64/include/asm/uv/uv_hub.h #define UV_NASID_TO_PNODE(n) (((n) >> 1) & uv_hub_info->pnode_mask) n 115 arch/ia64/include/uapi/asm/cmpxchg.h #define cmpxchg_acq(ptr, o, n) \ n 116 arch/ia64/include/uapi/asm/cmpxchg.h ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr))) n 117 arch/ia64/include/uapi/asm/cmpxchg.h #define cmpxchg_rel(ptr, o, n) \ n 118 arch/ia64/include/uapi/asm/cmpxchg.h ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr))) n 130 arch/ia64/include/uapi/asm/cmpxchg.h #define cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n)) n 131 arch/ia64/include/uapi/asm/cmpxchg.h #define cmpxchg64(ptr, o, n) cmpxchg_acq((ptr), (o), (n)) n 42 arch/ia64/include/uapi/asm/intrinsics.h #define IA64_FETCHADD(tmp,v,n,sz,sem) \ n 46 arch/ia64/include/uapi/asm/intrinsics.h tmp = ia64_fetchadd4_##sem((unsigned int *) v, n); \ n 50 arch/ia64/include/uapi/asm/intrinsics.h tmp = ia64_fetchadd8_##sem((unsigned long *) v, n); \ n 1284 arch/ia64/kernel/efi.c kdump_find_rsvd_region (unsigned long size, struct rsvd_region *r, int n) n 1303 arch/ia64/kernel/efi.c for (i = 0; i < n; i++) { n 1308 arch/ia64/kernel/efi.c if (i < n-1 && n 57 arch/ia64/kernel/mca_drv.h #define peidx_cache_check_idx(p, n) (n) n 58 arch/ia64/kernel/mca_drv.h #define peidx_tlb_check_idx(p, n) (peidx_cache_check_idx(p, peidx_cache_check_num(p)) + n) n 59 arch/ia64/kernel/mca_drv.h #define peidx_bus_check_idx(p, n) (peidx_tlb_check_idx(p, peidx_tlb_check_num(p)) + n) n 60 arch/ia64/kernel/mca_drv.h #define peidx_reg_file_check_idx(p, n) (peidx_bus_check_idx(p, peidx_bus_check_num(p)) + n) n 61 arch/ia64/kernel/mca_drv.h #define peidx_ms_check_idx(p, n) (peidx_reg_file_check_idx(p, peidx_reg_file_check_num(p)) + n) n 63 arch/ia64/kernel/mca_drv.h #define peidx_mod_error_info(p, name, n) \ n 64 arch/ia64/kernel/mca_drv.h ({ int __idx = peidx_##name##_idx(p, n); \ n 66 arch/ia64/kernel/mca_drv.h if (peidx_##name##_num(p) > n) /*BUG*/ \ n 70 arch/ia64/kernel/mca_drv.h #define peidx_cache_check(p, n) peidx_mod_error_info(p, cache_check, n) n 71 arch/ia64/kernel/mca_drv.h #define peidx_tlb_check(p, n) peidx_mod_error_info(p, tlb_check, n) n 72 arch/ia64/kernel/mca_drv.h #define peidx_bus_check(p, n) peidx_mod_error_info(p, bus_check, n) n 73 arch/ia64/kernel/mca_drv.h #define peidx_reg_file_check(p, n) peidx_mod_error_info(p, reg_file_check, n) n 74 arch/ia64/kernel/mca_drv.h #define peidx_ms_check(p, n) peidx_mod_error_info(p, ms_check, n) n 76 arch/ia64/kernel/mca_drv.h #define peidx_check_info(proc, name, n) \ n 78 arch/ia64/kernel/mca_drv.h sal_log_mod_error_info_t *__info = peidx_mod_error_info(proc, name, n);\ n 792 arch/ia64/kernel/module.c unsigned int i, n = sechdrs[relsec].sh_size / sizeof(Elf64_Rela); n 798 arch/ia64/kernel/module.c relsec, n, sechdrs[relsec].sh_info); n 828 arch/ia64/kernel/module.c for (i = 0; i < n; i++) { n 135 arch/ia64/kernel/perfmon.c #define CTX_USED_IBR(ctx,n) (ctx)->ctx_used_ibrs[(n)>>6] |= 1UL<< ((n) % 64) n 136 arch/ia64/kernel/perfmon.c #define CTX_USED_DBR(ctx,n) (ctx)->ctx_used_dbrs[(n)>>6] |= 1UL<< ((n) % 64) n 6479 arch/ia64/kernel/perfmon.c unsigned int n, n_counters, i; n 6496 arch/ia64/kernel/perfmon.c n = 0; n 6500 arch/ia64/kernel/perfmon.c n++; n 6502 arch/ia64/kernel/perfmon.c pmu_conf->num_pmcs = n; n 6504 arch/ia64/kernel/perfmon.c n = 0; n_counters = 0; n 6508 arch/ia64/kernel/perfmon.c n++; n 6511 arch/ia64/kernel/perfmon.c pmu_conf->num_pmds = n; n 2114 arch/ia64/kernel/ptrace.c .n = ELF_NGREG, n 2121 
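
The cmpxchg_acq()/cmpxchg() entries above all share one contract: the primitive returns the value previously held in *ptr, so the operation succeeded exactly when that return equals the expected old value. Here is a sketch of the same contract written with C11 atomics rather than the kernel macros; toy_cmpxchg() and toy_atomic_inc() are illustrative only.

#include <stdatomic.h>

/* Returns the value that was in *ptr; success <=> return value == old. */
static long toy_cmpxchg(_Atomic long *ptr, long old, long new)
{
	atomic_compare_exchange_strong(ptr, &old, new);
	return old;	/* on failure this is the value actually observed */
}

/* The usual retry loop built on that contract. */
static void toy_atomic_inc(_Atomic long *ptr)
{
	long cur, seen;

	do {
		cur = atomic_load(ptr);
		seen = toy_cmpxchg(ptr, cur, cur + 1);
	} while (seen != cur);
}

int main(void)
{
	_Atomic long v = 41;

	toy_atomic_inc(&v);
	return atomic_load(&v) == 42 ? 0 : 1;
}
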
arch/ia64/kernel/ptrace.c .n = ELF_NFPREG, n 2130 arch/ia64/kernel/ptrace.c .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets) n 2140 arch/ia64/kernel/ptrace.c unsigned int n; n 2162 arch/ia64/kernel/ptrace.c count = min_t(int, args->n, cfm & 0x7f); n 2174 arch/ia64/kernel/ptrace.c while (i < args->n) { n 2186 arch/ia64/kernel/ptrace.c .n = 6, n 288 arch/ia64/kernel/salinfo.c int i, n, cpu = -1; n 299 arch/ia64/kernel/salinfo.c n = data->cpu_check; n 301 arch/ia64/kernel/salinfo.c if (cpumask_test_cpu(n, &data->cpu_event)) { n 302 arch/ia64/kernel/salinfo.c if (!cpu_online(n)) { n 303 arch/ia64/kernel/salinfo.c cpumask_clear_cpu(n, &data->cpu_event); n 306 arch/ia64/kernel/salinfo.c cpu = n; n 309 arch/ia64/kernel/salinfo.c if (++n == nr_cpu_ids) n 310 arch/ia64/kernel/salinfo.c n = 0; n 276 arch/ia64/kernel/setup.c static void __init setup_crashkernel(unsigned long total, int *n) n 285 arch/ia64/kernel/setup.c sort_regions(rsvd_region, *n); n 286 arch/ia64/kernel/setup.c *n = merge_regions(rsvd_region, *n); n 288 arch/ia64/kernel/setup.c rsvd_region, *n); n 306 arch/ia64/kernel/setup.c rsvd_region[*n].start = n 308 arch/ia64/kernel/setup.c rsvd_region[*n].end = n 310 arch/ia64/kernel/setup.c (*n)++; n 323 arch/ia64/kernel/setup.c static inline void __init setup_crashkernel(unsigned long total, int *n) n 337 arch/ia64/kernel/setup.c int n = 0; n 343 arch/ia64/kernel/setup.c rsvd_region[n].start = (unsigned long) ia64_boot_param; n 344 arch/ia64/kernel/setup.c rsvd_region[n].end = rsvd_region[n].start + sizeof(*ia64_boot_param); n 345 arch/ia64/kernel/setup.c n++; n 347 arch/ia64/kernel/setup.c rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap); n 348 arch/ia64/kernel/setup.c rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->efi_memmap_size; n 349 arch/ia64/kernel/setup.c n++; n 351 arch/ia64/kernel/setup.c rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line); n 352 arch/ia64/kernel/setup.c rsvd_region[n].end = (rsvd_region[n].start n 354 arch/ia64/kernel/setup.c n++; n 356 arch/ia64/kernel/setup.c rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START); n 357 arch/ia64/kernel/setup.c rsvd_region[n].end = (unsigned long) ia64_imva(_end); n 358 arch/ia64/kernel/setup.c n++; n 362 arch/ia64/kernel/setup.c rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start); n 363 arch/ia64/kernel/setup.c rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->initrd_size; n 364 arch/ia64/kernel/setup.c n++; n 369 arch/ia64/kernel/setup.c if (reserve_elfcorehdr(&rsvd_region[n].start, n 370 arch/ia64/kernel/setup.c &rsvd_region[n].end) == 0) n 371 arch/ia64/kernel/setup.c n++; n 374 arch/ia64/kernel/setup.c total_memory = efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end); n 375 arch/ia64/kernel/setup.c n++; n 377 arch/ia64/kernel/setup.c setup_crashkernel(total_memory, &n); n 380 arch/ia64/kernel/setup.c rsvd_region[n].start = ~0UL; n 381 arch/ia64/kernel/setup.c rsvd_region[n].end = ~0UL; n 382 arch/ia64/kernel/setup.c n++; n 384 arch/ia64/kernel/setup.c num_rsvd_regions = n; n 385 arch/ia64/kernel/setup.c BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n); n 391 arch/ia64/kernel/setup.c for (n = 0; n < num_rsvd_regions - 1; n++) { n 392 arch/ia64/kernel/setup.c struct rsvd_region *region = &rsvd_region[n]; n 60 arch/ia64/kernel/unwind.c # define UNW_DEBUG_ON(n) unw_debug_level >= n n 62 arch/ia64/kernel/unwind.c # define UNW_DPRINT(n, ...) 
if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__) n 66 arch/ia64/kernel/unwind.c # define UNW_DEBUG_ON(n) 0 n 67 arch/ia64/kernel/unwind.c # define UNW_DPRINT(n, ...) n 100 arch/ia64/mm/discontig.c int cpu, n = 0; n 104 arch/ia64/mm/discontig.c n++; n 106 arch/ia64/mm/discontig.c return n; n 334 arch/ia64/mm/init.c # define POW2(n) (1ULL << (n)) n 118 arch/m68k/amiga/chipram.c unsigned long n = atomic_read(&chipavail); n 120 arch/m68k/amiga/chipram.c pr_debug("amiga_chip_avail : %lu bytes\n", n); n 121 arch/m68k/amiga/chipram.c return n; n 52 arch/m68k/amiga/platform.c unsigned int n; n 57 arch/m68k/amiga/platform.c n = AMIGAHW_PRESENT(ZORRO3) ? 4 : 2; n 59 arch/m68k/amiga/platform.c zorro_resources, n); n 44 arch/m68k/emu/natfeat.c size_t n; n 46 arch/m68k/emu/natfeat.c n = strlcpy(name_copy, feature_name, sizeof(name_copy)); n 47 arch/m68k/emu/natfeat.c if (n >= sizeof(name_copy)) n 58 arch/m68k/emu/natfeat.c int n; n 61 arch/m68k/emu/natfeat.c n = vsnprintf(buf, 256, fmt, ap); n 165 arch/m68k/include/asm/atomic.h #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) n 83 arch/m68k/include/asm/cmpxchg.h #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) n 121 arch/m68k/include/asm/cmpxchg.h #define cmpxchg(ptr, o, n) \ n 123 arch/m68k/include/asm/cmpxchg.h (unsigned long)(n), sizeof(*(ptr)))) n 124 arch/m68k/include/asm/cmpxchg.h #define cmpxchg_local(ptr, o, n) \ n 126 arch/m68k/include/asm/cmpxchg.h (unsigned long)(n), sizeof(*(ptr)))) n 128 arch/m68k/include/asm/cmpxchg.h #define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) n 136 arch/m68k/include/asm/cmpxchg.h #define cmpxchg_local(ptr, o, n) \ n 138 arch/m68k/include/asm/cmpxchg.h (unsigned long)(n), sizeof(*(ptr)))) n 75 arch/m68k/include/asm/delay.h #define __const_udelay(n) (__xdelay((n) * 4295)) n 91 arch/m68k/include/asm/delay.h #define udelay(n) (__builtin_constant_p(n) ? \ n 92 arch/m68k/include/asm/delay.h ((n) > 20000 ? __bad_udelay() : __const_udelay(n)) : __udelay(n)) n 123 arch/m68k/include/asm/delay.h #define ndelay(n) ndelay(n) n 13 arch/m68k/include/asm/div64.h #define do_div(n, base) ({ \ n 21 arch/m68k/include/asm/div64.h __n.n64 = (n); \ n 30 arch/m68k/include/asm/div64.h (n) = __n.n64; \ n 50 arch/m68k/include/asm/ide.h #define __ide_mm_insw(port, addr, n) raw_insw((u16 *)port, addr, n) n 51 arch/m68k/include/asm/ide.h #define __ide_mm_insl(port, addr, n) raw_insl((u32 *)port, addr, n) n 54 arch/m68k/include/asm/ide.h #define __ide_mm_outsw(port, addr, n) raw_outsw((u16 *)port, addr, n) n 55 arch/m68k/include/asm/ide.h #define __ide_mm_outsl(port, addr, n) raw_outsl((u32 *)port, addr, n) n 59 arch/m68k/include/asm/ide.h #define __ide_mm_insw(port, addr, n) io_insw((unsigned int)port, addr, n) n 60 arch/m68k/include/asm/ide.h #define __ide_mm_insl(port, addr, n) io_insl((unsigned int)port, addr, n) n 61 arch/m68k/include/asm/ide.h #define __ide_mm_outsw(port, addr, n) io_outsw((unsigned int)port, addr, n) n 62 arch/m68k/include/asm/ide.h #define __ide_mm_outsl(port, addr, n) io_outsl((unsigned int)port, addr, n) n 16 arch/m68k/include/asm/linkage.h #define asmlinkage_protect(n, ret, args...) 
\ n 17 arch/m68k/include/asm/linkage.h __asmlinkage_protect##n(ret, ##args) n 25 arch/m68k/include/asm/string.h static inline char *strncpy(char *dest, const char *src, size_t n) n 36 arch/m68k/include/asm/string.h : "+a" (dest), "+a" (src), "+d" (n) n 64 arch/m68k/include/asm/string.h #define memcmp(d, s, n) __builtin_memcmp(d, s, n) n 68 arch/m68k/include/asm/string.h #define memset(d, c, n) __builtin_memset(d, c, n) n 72 arch/m68k/include/asm/string.h #define memcpy(d, s, n) __builtin_memcpy(d, s, n) n 182 arch/m68k/include/asm/uaccess_mm.h unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n); n 183 arch/m68k/include/asm/uaccess_mm.h unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n); n 235 arch/m68k/include/asm/uaccess_mm.h __constant_copy_from_user(void *to, const void __user *from, unsigned long n) n 239 arch/m68k/include/asm/uaccess_mm.h switch (n) { n 275 arch/m68k/include/asm/uaccess_mm.h return __generic_copy_from_user(to, from, n); n 281 arch/m68k/include/asm/uaccess_mm.h #define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \ n 309 arch/m68k/include/asm/uaccess_mm.h "5: moveq.l #"#n",%0\n" \ n 316 arch/m68k/include/asm/uaccess_mm.h __constant_copy_to_user(void __user *to, const void *from, unsigned long n) n 320 arch/m68k/include/asm/uaccess_mm.h switch (n) { n 356 arch/m68k/include/asm/uaccess_mm.h return __generic_copy_to_user(to, from, n); n 363 arch/m68k/include/asm/uaccess_mm.h raw_copy_from_user(void *to, const void __user *from, unsigned long n) n 365 arch/m68k/include/asm/uaccess_mm.h if (__builtin_constant_p(n)) n 366 arch/m68k/include/asm/uaccess_mm.h return __constant_copy_from_user(to, from, n); n 367 arch/m68k/include/asm/uaccess_mm.h return __generic_copy_from_user(to, from, n); n 371 arch/m68k/include/asm/uaccess_mm.h raw_copy_to_user(void __user *to, const void *from, unsigned long n) n 373 arch/m68k/include/asm/uaccess_mm.h if (__builtin_constant_p(n)) n 374 arch/m68k/include/asm/uaccess_mm.h return __constant_copy_to_user(to, from, n); n 375 arch/m68k/include/asm/uaccess_mm.h return __generic_copy_to_user(to, from, n); n 384 arch/m68k/include/asm/uaccess_mm.h extern __must_check long strnlen_user(const char __user *str, long n); n 386 arch/m68k/include/asm/uaccess_mm.h unsigned long __clear_user(void __user *to, unsigned long n); n 106 arch/m68k/include/asm/uaccess_no.h raw_copy_from_user(void *to, const void __user *from, unsigned long n) n 108 arch/m68k/include/asm/uaccess_no.h memcpy(to, (__force const void *)from, n); n 113 arch/m68k/include/asm/uaccess_no.h raw_copy_to_user(void __user *to, const void *from, unsigned long n) n 115 arch/m68k/include/asm/uaccess_no.h memcpy((__force void *)to, from, n); n 140 arch/m68k/include/asm/uaccess_no.h static inline long strnlen_user(const char *src, long n) n 150 arch/m68k/include/asm/uaccess_no.h __clear_user(void *to, unsigned long n) n 152 arch/m68k/include/asm/uaccess_no.h memset(to, 0, n); n 156 arch/m68k/include/asm/uaccess_no.h #define clear_user(to,n) __clear_user(to,n) n 40 arch/m68k/kernel/bootinfo_proc.c uint16_t n = be16_to_cpu(bi->size); n 41 arch/m68k/kernel/bootinfo_proc.c size += n; n 42 arch/m68k/kernel/bootinfo_proc.c bi = (struct bi_record *)((unsigned long)bi + n); n 18 arch/m68k/kernel/early_printk.c asmlinkage void __init debug_cons_nputs(const char *s, unsigned n); n 21 arch/m68k/kernel/early_printk.c const char *s, unsigned n) n 26 arch/m68k/kernel/early_printk.c mvme16x_cons_write(c, s, n); n 28 
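
The m68k __constant_copy_*_user() and c6x raw_copy_*_user() routines above specialize on __builtin_constant_p(n): small constant sizes take a fixed-size path, everything else falls through to the generic copier. A hedged sketch of that dispatch shape follows (GCC-style builtin as in the listed code, memcpy standing in for the size-specific asm; toy_copy() is not a kernel interface).

#include <string.h>
#include <stdint.h>

/* Sketch of constant-size dispatch; toy_copy() is a hypothetical stand-in. */
static inline void toy_copy(void *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		switch (n) {
		case 1: memcpy(to, from, 1); return;
		case 2: memcpy(to, from, 2); return;
		case 4: memcpy(to, from, 4); return;
		case 8: memcpy(to, from, 8); return;
		}
	}
	memcpy(to, from, n);	/* generic fallback */
}

int main(void)
{
	uint32_t a = 0x12345678, b = 0;

	toy_copy(&b, &a, sizeof(a));	/* constant size: takes a fast case */
	return b == a ? 0 : 1;
}
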
arch/m68k/kernel/early_printk.c debug_cons_nputs(s, n); n 93 arch/m68k/kernel/uboot.c int len, n; n 95 arch/m68k/kernel/uboot.c n = strnlen(commandp, size); n 96 arch/m68k/kernel/uboot.c commandp += n; n 97 arch/m68k/kernel/uboot.c len = size - n; n 10 arch/m68k/lib/memcpy.c void *memcpy(void *to, const void *from, size_t n) n 15 arch/m68k/lib/memcpy.c if (!n) n 23 arch/m68k/lib/memcpy.c n--; n 29 arch/m68k/lib/memcpy.c for (; n; n--) n 34 arch/m68k/lib/memcpy.c if (n > 2 && (long)to & 2) { n 40 arch/m68k/lib/memcpy.c n -= 2; n 42 arch/m68k/lib/memcpy.c temp = n >> 2; n 75 arch/m68k/lib/memcpy.c if (n & 2) { n 82 arch/m68k/lib/memcpy.c if (n & 1) { n 10 arch/m68k/lib/memmove.c void *memmove(void *dest, const void *src, size_t n) n 15 arch/m68k/lib/memmove.c if (!n) n 25 arch/m68k/lib/memmove.c n--; n 27 arch/m68k/lib/memmove.c if (n > 2 && (long)dest & 2) { n 33 arch/m68k/lib/memmove.c n -= 2; n 35 arch/m68k/lib/memmove.c temp = n >> 2; n 46 arch/m68k/lib/memmove.c if (n & 2) { n 53 arch/m68k/lib/memmove.c if (n & 1) { n 59 arch/m68k/lib/memmove.c dest = (char *)dest + n; n 60 arch/m68k/lib/memmove.c src = (const char *)src + n; n 67 arch/m68k/lib/memmove.c n--; n 69 arch/m68k/lib/memmove.c if (n > 2 && (long)dest & 2) { n 75 arch/m68k/lib/memmove.c n -= 2; n 77 arch/m68k/lib/memmove.c temp = n >> 2; n 88 arch/m68k/lib/memmove.c if (n & 2) { n 95 arch/m68k/lib/memmove.c if (n & 1) { n 11 arch/m68k/lib/uaccess.c unsigned long n) n 50 arch/m68k/lib/uaccess.c : "0" (n / 4), "d" (n & 3)); n 57 arch/m68k/lib/uaccess.c unsigned long n) n 94 arch/m68k/lib/uaccess.c : "0" (n / 4), "d" (n & 3)); n 104 arch/m68k/lib/uaccess.c unsigned long __clear_user(void __user *to, unsigned long n) n 138 arch/m68k/lib/uaccess.c : "d" (0), "0" (n / 4), "d" (n & 3)); n 99 arch/microblaze/include/asm/cpuinfo.h static inline unsigned int fcpu(struct device_node *cpu, char *n) n 103 arch/microblaze/include/asm/cpuinfo.h of_property_read_u32(cpu, n, &val); n 64 arch/microblaze/include/asm/delay.h #define udelay(n) \ n 66 arch/microblaze/include/asm/delay.h if (__builtin_constant_p(n)) { \ n 67 arch/microblaze/include/asm/delay.h if ((n) / __MAX_UDELAY >= 1) \ n 70 arch/microblaze/include/asm/delay.h __udelay((n) * (19 * HZ)); \ n 72 arch/microblaze/include/asm/delay.h __udelay((n) * (19 * HZ)); \ n 76 arch/microblaze/include/asm/delay.h #define ndelay(n) \ n 78 arch/microblaze/include/asm/delay.h if (__builtin_constant_p(n)) { \ n 79 arch/microblaze/include/asm/delay.h if ((n) / __MAX_NDELAY >= 1) \ n 82 arch/microblaze/include/asm/delay.h __udelay((n) * HZ); \ n 84 arch/microblaze/include/asm/delay.h __udelay((n) * HZ); \ n 52 arch/microblaze/include/asm/mmu.h unsigned long n:1; /* No-execute */ n 46 arch/microblaze/include/asm/syscall.h unsigned int n) n 48 arch/microblaze/include/asm/syscall.h switch (n) { n 62 arch/microblaze/include/asm/syscall.h unsigned int n, n 65 arch/microblaze/include/asm/syscall.h switch (n) { n 88 arch/microblaze/include/asm/syscall.h unsigned int n = 6; n 90 arch/microblaze/include/asm/syscall.h while (n--) n 99 arch/microblaze/include/asm/syscall.h unsigned int n = 6; n 101 arch/microblaze/include/asm/syscall.h while (n--) n 99 arch/microblaze/include/asm/uaccess.h unsigned long n) n 111 arch/microblaze/include/asm/uaccess.h : "=r"(n), "=r"(to) \ n 112 arch/microblaze/include/asm/uaccess.h : "0"(n), "1"(to) n 114 arch/microblaze/include/asm/uaccess.h return n; n 118 arch/microblaze/include/asm/uaccess.h unsigned long n) n 121 arch/microblaze/include/asm/uaccess.h if 
(unlikely(!access_ok(to, n))) n 122 arch/microblaze/include/asm/uaccess.h return n; n 124 arch/microblaze/include/asm/uaccess.h return __clear_user(to, n); n 314 arch/microblaze/include/asm/uaccess.h raw_copy_from_user(void *to, const void __user *from, unsigned long n) n 316 arch/microblaze/include/asm/uaccess.h return __copy_tofrom_user((__force void __user *)to, from, n); n 320 arch/microblaze/include/asm/uaccess.h raw_copy_to_user(void __user *to, const void *from, unsigned long n) n 322 arch/microblaze/include/asm/uaccess.h return __copy_tofrom_user(to, (__force const void __user *)from, n); n 347 arch/microblaze/include/asm/uaccess.h static inline long strnlen_user(const char __user *src, long n) n 351 arch/microblaze/include/asm/uaccess.h return __strnlen_user(src, n); n 61 arch/microblaze/include/uapi/asm/ptrace.h #define PT_GPR(n) ((n) * sizeof(microblaze_reg_t)) n 35 arch/microblaze/lib/memset.c void *memset(void *v_src, int c, __kernel_size_t n) n 43 arch/microblaze/lib/memset.c while (n--) n 49 arch/microblaze/lib/memset.c void *memset(void *v_src, int c, __kernel_size_t n) n 65 arch/microblaze/lib/memset.c if (likely(n >= 4)) { n 71 arch/microblaze/lib/memset.c --n; n 74 arch/microblaze/lib/memset.c --n; n 77 arch/microblaze/lib/memset.c --n; n 83 arch/microblaze/lib/memset.c for (; n >= 4; n -= 4) n 90 arch/microblaze/lib/memset.c while (n--) n 147 arch/mips/ar7/prom.c int i, n; n 154 arch/mips/ar7/prom.c n = PSP_ENV_SIZE / sizeof(struct psp_env_chunk); n 155 arch/mips/ar7/prom.c while (i < n) { n 156 arch/mips/ar7/prom.c if ((chunks[i].num == 0xff) || ((i + chunks[i].len) > n)) n 10 arch/mips/boot/compressed/string.c void *memcpy(void *dest, const void *src, size_t n) n 16 arch/mips/boot/compressed/string.c for (i = 0; i < n; i++) n 21 arch/mips/boot/compressed/string.c void *memset(void *s, int c, size_t n) n 26 arch/mips/boot/compressed/string.c for (i = 0; i < n; i++) n 116 arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c int n = bits; n 117 arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c if (n > 32) n 118 arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c n = 32; n 119 arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c cvmx_helper_qlm_jtag_shift(qlm, n, 0); n 120 arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c bits -= n; n 561 arch/mips/cavium-octeon/octeon-platform.c static void __init octeon_fdt_set_mac_addr(int n, u64 *pmac) n 569 arch/mips/cavium-octeon/octeon-platform.c old_mac = fdt_getprop(initial_boot_params, n, "local-mac-address", n 581 arch/mips/cavium-octeon/octeon-platform.c r = fdt_setprop_inplace(initial_boot_params, n, "local-mac-address", n 21 arch/mips/fw/cfe/cfe_api.c #define XPTR_FROM_NATIVE(n) ((cfe_xptr_t) (intptr_t) (n)) n 332 arch/mips/include/asm/asmmacro.h .macro copy_s_w ws, n n 341 arch/mips/include/asm/asmmacro.h .macro copy_s_d ws, n n 350 arch/mips/include/asm/asmmacro.h .macro insert_w wd, n n 359 arch/mips/include/asm/asmmacro.h .macro insert_d wd, n n 472 arch/mips/include/asm/asmmacro.h .macro copy_s_w ws, n n 481 arch/mips/include/asm/asmmacro.h .macro copy_s_d ws, n n 490 arch/mips/include/asm/asmmacro.h .macro insert_w wd, n n 499 arch/mips/include/asm/asmmacro.h .macro insert_d wd, n n 230 arch/mips/include/asm/atomic.h #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) n 429 arch/mips/include/asm/atomic.h #define atomic64_cmpxchg(v, o, n) \ n 430 arch/mips/include/asm/atomic.h ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) n 201 arch/mips/include/asm/cmpxchg.h #define 
cmpxchg64_local(ptr, o, n) \ n 204 arch/mips/include/asm/cmpxchg.h cmpxchg_local((ptr), (o), (n)); \ n 207 arch/mips/include/asm/cmpxchg.h #define cmpxchg64(ptr, o, n) \ n 210 arch/mips/include/asm/cmpxchg.h cmpxchg((ptr), (o), (n)); \ n 215 arch/mips/include/asm/cmpxchg.h # define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) n 288 arch/mips/include/asm/cmpxchg.h # define cmpxchg64(ptr, o, n) ({ \ n 290 arch/mips/include/asm/cmpxchg.h unsigned long long __new = (__typeof__(*(ptr)))(n); \ n 311 arch/mips/include/asm/cmpxchg.h # define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) n 671 arch/mips/include/asm/cpu-features.h #define cpu_guest_has_kscr(n) (cpu_data[0].guest.kscratch_mask & (1u << (n))) n 139 arch/mips/include/asm/cpu-info.h unsigned long n; n 93 arch/mips/include/asm/dec/interrupts.h #define DEC_CPU_IRQ_NR(n) ((n) + DEC_CPU_IRQ_BASE) n 94 arch/mips/include/asm/dec/interrupts.h #define DEC_CPU_IRQ_MASK(n) (1 << ((n) + CAUSEB_IP)) n 69 arch/mips/include/asm/dec/ioasic_ints.h #define IO_IRQ_NR(n) ((n) + IO_IRQ_BASE) n 70 arch/mips/include/asm/dec/ioasic_ints.h #define IO_IRQ_MASK(n) (1 << (n)) n 78 arch/mips/include/asm/dec/kn02.h #define KN02_IRQ_NR(n) ((n) + KN02_IRQ_BASE) n 79 arch/mips/include/asm/dec/kn02.h #define KN02_IRQ_MASK(n) (1 << (n)) n 61 arch/mips/include/asm/dec/kn05.h #define KN4K_MB_INT_IRQ_N(n) (1<<(n)) /* Individual status bits. */ n 77 arch/mips/include/asm/dec/kn05.h #define KN4K_MB_CSR_MSK_N(n) (1<<((n)+16)) /* Individual mask bits. */ n 22 arch/mips/include/asm/div64.h #define __div64_32(n, base) \ n 97 arch/mips/include/asm/emma/emma2rh.h #define EMMA2RH_IRQ_INT(n) (EMMA2RH_IRQ_BASE + (n)) n 21 arch/mips/include/asm/emma/markeins.h #define EMMA2RH_SW_IRQ_INT(n) (EMMA2RH_SW_IRQ_BASE + (n)) n 15 arch/mips/include/asm/gt64120.h #define MSK(n) ((1 << (n)) - 1) n 15 arch/mips/include/asm/hpet.h #define HPET_Tn_CFG(n) (0x100 + 0x20 * n) n 16 arch/mips/include/asm/hpet.h #define HPET_Tn_CMP(n) (0x108 + 0x20 * n) n 17 arch/mips/include/asm/hpet.h #define HPET_Tn_ROUTE(n) (0x110 + 0x20 * n) n 18 arch/mips/include/asm/kvm_para.h register unsigned long n asm("v0"); n 21 arch/mips/include/asm/kvm_para.h n = num; n 24 arch/mips/include/asm/kvm_para.h : "=r" (r) : "r" (n) : "memory" n 33 arch/mips/include/asm/kvm_para.h register unsigned long n asm("v0"); n 37 arch/mips/include/asm/kvm_para.h n = num; n 41 arch/mips/include/asm/kvm_para.h : "=r" (r) : "r" (n), "r" (a0) : "memory" n 50 arch/mips/include/asm/kvm_para.h register unsigned long n asm("v0"); n 55 arch/mips/include/asm/kvm_para.h n = num; n 60 arch/mips/include/asm/kvm_para.h : "=r" (r) : "r" (n), "r" (a0), "r" (a1) : "memory" n 69 arch/mips/include/asm/kvm_para.h register unsigned long n asm("v0"); n 75 arch/mips/include/asm/kvm_para.h n = num; n 81 arch/mips/include/asm/kvm_para.h : "=r" (r) : "r" (n), "r" (a0), "r" (a1), "r" (a2) : "memory" n 124 arch/mips/include/asm/local.h #define local_cmpxchg(l, o, n) \ n 125 arch/mips/include/asm/local.h ((long)cmpxchg_local(&((l)->a.counter), (o), (n))) n 126 arch/mips/include/asm/local.h #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n))) n 906 arch/mips/include/asm/mach-ath79/ar71xx_regs.h #define AR71XX_SPI_IOC_CS(n) BIT(16 + (n)) n 205 arch/mips/include/asm/mach-au1x00/au1xxx_psc.h #define PSC_I2SCFG_WS(n) ((n & 0xFF) << 16) n 126 arch/mips/include/asm/mach-generic/floppy.h static inline unsigned long fd_drive_type(unsigned long n) n 128 arch/mips/include/asm/mach-generic/floppy.h if (n == 0) n 25 
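
HPET_Tn_CFG()/HPET_Tn_CMP()/HPET_Tn_ROUTE() above are plain base-plus-stride register addressing: timer n lives 0x20 bytes after timer n-1. A sketch that mirrors those offsets (constants copied from the listing; the program only prints offsets and touches no hardware):

#include <stdio.h>

/* Sketch: per-timer register strides mirroring the HPET_Tn_*(n) macros above. */
#define HPET_TN_CFG(n)		(0x100 + 0x20 * (n))
#define HPET_TN_CMP(n)		(0x108 + 0x20 * (n))
#define HPET_TN_ROUTE(n)	(0x110 + 0x20 * (n))

int main(void)
{
	for (int n = 0; n < 3; n++)
		printf("timer %d: cfg=%#x cmp=%#x route=%#x\n",
		       n, HPET_TN_CFG(n), HPET_TN_CMP(n), HPET_TN_ROUTE(n));
	return 0;
}
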
arch/mips/include/asm/mach-ip27/mmzone.h #define NODE_DATA(n) (&__node_data[(n)]->pglist) n 26 arch/mips/include/asm/mach-ip27/mmzone.h #define hub_data(n) (&__node_data[(n)]->hub) n 123 arch/mips/include/asm/mach-jazz/floppy.h static inline unsigned long fd_drive_type(unsigned long n) n 128 arch/mips/include/asm/mach-jazz/floppy.h if (n == 0) n 32 arch/mips/include/asm/mach-loongson32/irq.h #define LS1X_IRQ(n, x) (LS1X_IRQ_BASE + (n << 5) + (x)) n 113 arch/mips/include/asm/mach-loongson32/regs-mux.h #define LS1X_CBUS_REG(n, x) \ n 114 arch/mips/include/asm/mach-loongson32/regs-mux.h ((void __iomem *)KSEG1ADDR(LS1X_CBUS_BASE + (n * 0x04) + (x))) n 116 arch/mips/include/asm/mach-loongson32/regs-mux.h #define LS1X_CBUS_FIRST(n) LS1X_CBUS_REG(n, 0x00) n 117 arch/mips/include/asm/mach-loongson32/regs-mux.h #define LS1X_CBUS_SECOND(n) LS1X_CBUS_REG(n, 0x10) n 118 arch/mips/include/asm/mach-loongson32/regs-mux.h #define LS1X_CBUS_THIRD(n) LS1X_CBUS_REG(n, 0x20) n 119 arch/mips/include/asm/mach-loongson32/regs-mux.h #define LS1X_CBUS_FOURTHT(n) LS1X_CBUS_REG(n, 0x30) n 120 arch/mips/include/asm/mach-loongson32/regs-mux.h #define LS1X_CBUS_FIFTHT(n) LS1X_CBUS_REG(n, 0x40) n 19 arch/mips/include/asm/mach-loongson64/irq.h #define LOONGSON_HT1_INT_VECTOR(n) \ n 20 arch/mips/include/asm/mach-loongson64/irq.h LOONGSON3_REG32(LOONGSON_HT1_INT_VECTOR_BASE, 4 * (n)) n 21 arch/mips/include/asm/mach-loongson64/irq.h #define LOONGSON_HT1_INTN_EN(n) \ n 22 arch/mips/include/asm/mach-loongson64/irq.h LOONGSON3_REG32(LOONGSON_HT1_INT_EN_BASE, 4 * (n)) n 31 arch/mips/include/asm/mach-loongson64/irq.h #define LOONGSON_INT_ROUTER_ENTRY(n) \ n 32 arch/mips/include/asm/mach-loongson64/irq.h LOONGSON3_REG8(LOONGSON3_REG_BASE, LOONGSON_INT_ROUTER_OFFSET + n) n 34 arch/mips/include/asm/mach-loongson64/irq.h #define LOONGSON_INT_ROUTER_HT1(n) LOONGSON_INT_ROUTER_ENTRY(n + 0x18) n 44 arch/mips/include/asm/mach-loongson64/mmzone.h #define NODE_DATA(n) (&__node_data[(n)]->pglist) n 45 arch/mips/include/asm/mach-loongson64/mmzone.h #define hub_data(n) (&__node_data[(n)]->hub) n 65 arch/mips/include/asm/mach-netlogic/multi-node.h #define nlm_node_present(n) ((n) >= 0 && (n) < NLM_NR_NODES && \ n 66 arch/mips/include/asm/mach-netlogic/multi-node.h nlm_get_node(n)->coremask != 0) n 621 arch/mips/include/asm/mach-pmcs-msp71xx/msp_regs.h #define DDRC_CFG(n) (n) n 622 arch/mips/include/asm/mach-pmcs-msp71xx/msp_regs.h #define DDRC_DEBUG(n) (0x04 + n) n 623 arch/mips/include/asm/mach-pmcs-msp71xx/msp_regs.h #define DDRC_CTL(n) (0x40 + n) n 1218 arch/mips/include/asm/mipsregs.h #define _IFC_REG(n) \ n 1219 arch/mips/include/asm/mipsregs.h ".ifc \\r, $" #n "\n\t" \ n 1220 arch/mips/include/asm/mipsregs.h "\\var = " #n "\n\t" \ n 115 arch/mips/include/asm/netlogic/xlp-hal/iomap.h #define XLP9XX_HDR_OFFSET(n, d, f) \ n 116 arch/mips/include/asm/netlogic/xlp-hal/iomap.h XLP_IO_PCI_OFFSET(xlp9xx_get_socbus(n), d, f) n 78 arch/mips/include/asm/netlogic/xlp-hal/pcibus.h #define PCIE_9XX_MSIX_STATUSX(n) (n + 0x286) n 80 arch/mips/include/asm/netlogic/xlp-hal/pcibus.h #define PCIE_9XX_MSIX_VECX(n) (n + 0x296) n 92 arch/mips/include/asm/netlogic/xlp-hal/pcibus.h #define MSI_LINK_ADDR(n, l) (MSI_ADDR_BASE + \ n 93 arch/mips/include/asm/netlogic/xlp-hal/pcibus.h (PCIE_NLINKS * (n) + (l)) * MSI_ADDR_SZ) n 95 arch/mips/include/asm/netlogic/xlp-hal/pcibus.h #define MSIX_LINK_ADDR(n, l) (MSIX_ADDR_BASE + \ n 96 arch/mips/include/asm/netlogic/xlp-hal/pcibus.h (PCIE_NLINKS * (n) + (l)) * MSI_ADDR_SZ) n 292 arch/mips/include/asm/nile4.h #define 
nile4_to_irq(n) ((n)+NUM_I8259_INTERRUPTS) n 293 arch/mips/include/asm/nile4.h #define irq_to_nile4(n) ((n)-NUM_I8259_INTERRUPTS) n 51 arch/mips/include/asm/octeon/cvmx-coremask.h int n, i; n 53 arch/mips/include/asm/octeon/cvmx-coremask.h n = core % CVMX_COREMASK_ELTSZ; n 56 arch/mips/include/asm/octeon/cvmx-coremask.h return (pcm->coremask_bitmap[i] & ((u64)1 << n)) != 0; n 82 arch/mips/include/asm/octeon/cvmx-coremask.h int n, i; n 84 arch/mips/include/asm/octeon/cvmx-coremask.h n = core % CVMX_COREMASK_ELTSZ; n 86 arch/mips/include/asm/octeon/cvmx-coremask.h pcm->coremask_bitmap[i] &= ~(1ull << n); n 138 arch/mips/include/asm/octeon/cvmx-gpio-defs.h uint64_t n:32; n 140 arch/mips/include/asm/octeon/cvmx-gpio-defs.h uint64_t n:32; n 256 arch/mips/include/asm/pci/bridge.h #define b_devio(n) b_devio_raw[((n)<2)?(n*2):(n+2)] n 421 arch/mips/include/asm/pci/bridge.h #define BRIDGE_CTRL_RST(n) ((n) << 24) n 429 arch/mips/include/asm/pci/bridge.h #define BRIDGE_CTRL_SSRAM_SIZE(n) ((n) << 17) n 436 arch/mips/include/asm/pci/bridge.h #define BRIDGE_CTRL_LLP_XBAR_CRD(n) ((n) << 12) n 441 arch/mips/include/asm/pci/bridge.h #define BRIDGE_CTRL_MAX_TRANS(n) ((n) << 4) n 443 arch/mips/include/asm/pci/bridge.h #define BRIDGE_CTRL_WIDGET_ID(n) ((n) << 0) n 620 arch/mips/include/asm/pci/bridge.h #define BRIDGE_INT_DEV_SHFT(n) ((n)*3) n 621 arch/mips/include/asm/pci/bridge.h #define BRIDGE_INT_DEV_MASK(n) (0x7 << BRIDGE_INT_DEV_SHFT(n)) n 41 arch/mips/include/asm/pmon.h #define pmon_cpustart(n, f, sp, gp) debug_vectors->_s.cpustart(n, f, sp, gp) n 112 arch/mips/include/asm/ptrace.h unsigned int n) n 116 arch/mips/include/asm/ptrace.h addr += n; n 23 arch/mips/include/asm/setup.h extern void *set_vi_handler(int n, vi_handler_t addr); n 25 arch/mips/include/asm/setup.h extern void *set_except_vector(int n, void *addr); n 147 arch/mips/include/asm/sibyte/bcm1480_int.h #define _BCM1480_INT_MASK(w, n) _SB_MAKEMASK(w, ((n) & 0x3F)) n 148 arch/mips/include/asm/sibyte/bcm1480_int.h #define _BCM1480_INT_MASK1(n) _SB_MAKEMASK1(((n) & 0x3F)) n 149 arch/mips/include/asm/sibyte/bcm1480_int.h #define _BCM1480_INT_OFFSET(n) (((n) & 0x40) << 6) n 449 arch/mips/include/asm/sibyte/bcm1480_regs.h #define A_BCM1480_SCD_PERF_CNT(n) (A_SCD_PERF_CNT_0+(n*BCM1480_SCD_PERF_CNT_SPACING)) n 215 arch/mips/include/asm/sibyte/sb1250_defs.h #define _SB_MAKEMASK1(n) (_SB_MAKE64(1) << _SB_MAKE64(n)) n 216 arch/mips/include/asm/sibyte/sb1250_defs.h #define _SB_MAKEMASK1_32(n) (_SB_MAKE32(1) << _SB_MAKE32(n)) n 222 arch/mips/include/asm/sibyte/sb1250_defs.h #define _SB_MAKEMASK(v, n) (_SB_MAKE64((_SB_MAKE64(1)<<(v))-1) << _SB_MAKE64(n)) n 223 arch/mips/include/asm/sibyte/sb1250_defs.h #define _SB_MAKEMASK_32(v, n) (_SB_MAKE32((_SB_MAKE32(1)<<(v))-1) << _SB_MAKE32(n)) n 229 arch/mips/include/asm/sibyte/sb1250_defs.h #define _SB_MAKEVALUE(v, n) (_SB_MAKE64(v) << _SB_MAKE64(n)) n 230 arch/mips/include/asm/sibyte/sb1250_defs.h #define _SB_MAKEVALUE_32(v, n) (_SB_MAKE32(v) << _SB_MAKE32(n)) n 232 arch/mips/include/asm/sibyte/sb1250_defs.h #define _SB_GETVALUE(v, n, m) ((_SB_MAKE64(v) & _SB_MAKE64(m)) >> _SB_MAKE64(n)) n 233 arch/mips/include/asm/sibyte/sb1250_defs.h #define _SB_GETVALUE_32(v, n, m) ((_SB_MAKE32(v) & _SB_MAKE32(m)) >> _SB_MAKE32(n)) n 358 arch/mips/include/asm/sibyte/sb1250_genbus.h #define S_GPIO_INTR_TYPEX(n) (((n)/2)*2) n 359 arch/mips/include/asm/sibyte/sb1250_genbus.h #define M_GPIO_INTR_TYPEX(n) _SB_MAKEMASK(2, S_GPIO_INTR_TYPEX(n)) n 360 arch/mips/include/asm/sibyte/sb1250_genbus.h #define V_GPIO_INTR_TYPEX(n, x) 
_SB_MAKEVALUE(x, S_GPIO_INTR_TYPEX(n)) n 361 arch/mips/include/asm/sibyte/sb1250_genbus.h #define G_GPIO_INTR_TYPEX(n, x) _SB_GETVALUE(x, S_GPIO_INTR_TYPEX(n), M_GPIO_INTR_TYPEX(n)) n 414 arch/mips/include/asm/sibyte/sb1250_genbus.h #define S_GPIO_INTR_ATYPEX(n) (((n)/2)*2) n 415 arch/mips/include/asm/sibyte/sb1250_genbus.h #define M_GPIO_INTR_ATYPEX(n) _SB_MAKEMASK(2, S_GPIO_INTR_ATYPEX(n)) n 416 arch/mips/include/asm/sibyte/sb1250_genbus.h #define V_GPIO_INTR_ATYPEX(n, x) _SB_MAKEVALUE(x, S_GPIO_INTR_ATYPEX(n)) n 417 arch/mips/include/asm/sibyte/sb1250_genbus.h #define G_GPIO_INTR_ATYPEX(n, x) _SB_GETVALUE(x, S_GPIO_INTR_ATYPEX(n), M_GPIO_INTR_ATYPEX(n)) n 689 arch/mips/include/asm/sibyte/sb1250_regs.h #define A_ADDR_TRAP_UP(n) (A_ADDR_TRAP_UP_0 + ((n) * ADDR_TRAP_SPACING)) n 690 arch/mips/include/asm/sibyte/sb1250_regs.h #define A_ADDR_TRAP_DOWN(n) (A_ADDR_TRAP_DOWN_0 + ((n) * ADDR_TRAP_SPACING)) n 691 arch/mips/include/asm/sibyte/sb1250_regs.h #define A_ADDR_TRAP_CFG(n) (A_ADDR_TRAP_CFG_0 + ((n) * ADDR_TRAP_SPACING)) n 744 arch/mips/include/asm/sibyte/sb1250_regs.h #define A_SCD_PERF_CNT(n) (A_SCD_PERF_CNT_0+(n*SCD_PERF_CNT_SPACING)) n 793 arch/mips/include/asm/sibyte/sb1250_regs.h #define A_SCD_TRACE_EVENT(n) (((n) & 4) ? \ n 794 arch/mips/include/asm/sibyte/sb1250_regs.h (A_SCD_TRACE_EVENT_4 + (((n) & 3) * TRACE_REGISTER_SPACING)) : \ n 795 arch/mips/include/asm/sibyte/sb1250_regs.h (A_SCD_TRACE_EVENT_0 + ((n) * TRACE_REGISTER_SPACING))) n 796 arch/mips/include/asm/sibyte/sb1250_regs.h #define A_SCD_TRACE_SEQUENCE(n) (((n) & 4) ? \ n 797 arch/mips/include/asm/sibyte/sb1250_regs.h (A_SCD_TRACE_SEQUENCE_4 + (((n) & 3) * TRACE_REGISTER_SPACING)) : \ n 798 arch/mips/include/asm/sibyte/sb1250_regs.h (A_SCD_TRACE_SEQUENCE_0 + ((n) * TRACE_REGISTER_SPACING))) n 408 arch/mips/include/asm/sn/klconfig.h #define IS_MIO_IOC3(l, n) (IS_MIO_PRESENT(l) && (n > 2)) n 35 arch/mips/include/asm/sn/mapped_kernel.h #define MAPPED_KERN_RO_PHYSBASE(n) (hub_data(n)->kern_vars.kv_ro_baseaddr) n 36 arch/mips/include/asm/sn/mapped_kernel.h #define MAPPED_KERN_RW_PHYSBASE(n) (hub_data(n)->kern_vars.kv_rw_baseaddr) n 58 arch/mips/include/asm/syscall.h struct task_struct *task, struct pt_regs *regs, unsigned int n) n 62 arch/mips/include/asm/syscall.h switch (n) { n 64 arch/mips/include/asm/syscall.h *arg = regs->regs[4 + n]; n 70 arch/mips/include/asm/syscall.h get_user(*arg, (int *)usp + n); n 78 arch/mips/include/asm/syscall.h get_user(*arg, (int *)usp + n); n 81 arch/mips/include/asm/syscall.h *arg = regs->regs[4 + n]; n 129 arch/mips/include/asm/syscall.h unsigned int n = 6; n 135 arch/mips/include/asm/syscall.h while (n--) n 15 arch/mips/include/asm/txx9/generic.h #define TXX9_CE(n) (unsigned long)(txx9_ce_res[(n)].start) n 96 arch/mips/include/asm/txx9/jmr3927.h #define jmr3927_led_set(n/*0-16*/) jmr3927_ioc_reg_out(~(n), JMR3927_IOC_LED_ADDR) n 98 arch/mips/include/asm/txx9/jmr3927.h #define jmr3927_led_and_set(n/*0-16*/) jmr3927_ioc_reg_out((~(n)) & jmr3927_ioc_reg_in(JMR3927_IOC_LED_ADDR), JMR3927_IOC_LED_ADDR) n 108 arch/mips/include/asm/txx9/rbtx4938.h #define RBTX4938_IRQ_IRC_INT(n) (RBTX4938_IRQ_IRC + TX4938_IR_INT(n)) n 109 arch/mips/include/asm/txx9/rbtx4938.h #define RBTX4938_IRQ_IRC_SIO(n) (RBTX4938_IRQ_IRC + TX4938_IR_SIO(n)) n 110 arch/mips/include/asm/txx9/rbtx4938.h #define RBTX4938_IRQ_IRC_DMA(ch, n) (RBTX4938_IRQ_IRC + TX4938_IR_DMA(ch, n)) n 114 arch/mips/include/asm/txx9/rbtx4938.h #define RBTX4938_IRQ_IRC_TMR(n) (RBTX4938_IRQ_IRC + TX4938_IR_TMR(n)) n 58 
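
The sibyte GPIO macros above compose three helpers: _SB_MAKEMASK() builds a field mask from a width and shift, _SB_MAKEVALUE() positions a value, and _SB_GETVALUE() extracts it back. A userspace sketch of the same mask-and-shift trio under illustrative names (MAKE_MASK/MAKE_VALUE/GET_VALUE):

#include <assert.h>
#include <stdint.h>

/* Sketch only: same shape as _SB_MAKEMASK/_SB_MAKEVALUE/_SB_GETVALUE above. */
#define MAKE_MASK(width, shift)	((((uint64_t)1 << (width)) - 1) << (shift))
#define MAKE_VALUE(val, shift)	((uint64_t)(val) << (shift))
#define GET_VALUE(reg, width, shift) \
	(((reg) & MAKE_MASK(width, shift)) >> (shift))

int main(void)
{
	uint64_t reg = 0;

	reg |= MAKE_VALUE(0x2, 4);		/* write a 2-bit field at bit 4 */
	assert(MAKE_MASK(2, 4) == 0x30);
	assert(GET_VALUE(reg, 2, 4) == 0x2);
	return 0;
}
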
arch/mips/include/asm/txx9/tx4927.h #define TX4927_IR_INT(n) (2 + (n)) n 60 arch/mips/include/asm/txx9/tx4927.h #define TX4927_IR_SIO(n) (8 + (n)) n 62 arch/mips/include/asm/txx9/tx4927.h #define TX4927_IR_DMA(n) (10 + (n)) n 67 arch/mips/include/asm/txx9/tx4927.h #define TX4927_IR_TMR(n) (17 + (n)) n 69 arch/mips/include/asm/txx9/tx4938.h #define TX4938_IR_INT(n) (2 + (n)) n 71 arch/mips/include/asm/txx9/tx4938.h #define TX4938_IR_SIO(n) (8 + (n)) n 73 arch/mips/include/asm/txx9/tx4938.h #define TX4938_IR_DMA(ch, n) ((ch ? 27 : 10) + (n)) /* 10-13, 27-30 */ n 78 arch/mips/include/asm/txx9/tx4938.h #define TX4938_IR_TMR(n) (17 + (n)) n 182 arch/mips/include/asm/txx9/tx4939.h #define TX4939_IR_INT(n) (3 + (n)) n 184 arch/mips/include/asm/txx9/tx4939.h #define TX4939_IR_ETH(n) ((n) ? 43 : 6) n 188 arch/mips/include/asm/txx9/tx4939.h #define TX4939_IR_SIO(n) ((n) ? 43 + (n) : 9) /* 9,44-46 */ n 190 arch/mips/include/asm/txx9/tx4939.h #define TX4939_IR_DMA(ch, n) (((ch) ? 22 : 10) + (n)) /* 10-13,22-25 */ n 194 arch/mips/include/asm/txx9/tx4939.h #define TX4939_IR_TMR(n) (((n) >= 3 ? 45 : 16) + (n)) /* 16-18,48-50 */ n 196 arch/mips/include/asm/txx9/tx4939.h #define TX4939_IR_ATA(n) (19 + (n)) n 373 arch/mips/include/asm/txx9/tx4939.h #define TX4939_CRYPTO_CSR_INDXBST(n) ((n) << 20) n 378 arch/mips/include/asm/txx9/tx4939.h #define TX4939_CRYPTO_CSR_INDXAST(n) ((n) << 13) n 519 arch/mips/include/asm/uaccess.h #define __invoke_copy_from(func, to, from, n) \ n 527 arch/mips/include/asm/uaccess.h __cu_len_r = (n); \ n 542 arch/mips/include/asm/uaccess.h #define __invoke_copy_to(func, to, from, n) \ n 550 arch/mips/include/asm/uaccess.h __cu_len_r = (n); \ n 560 arch/mips/include/asm/uaccess.h #define __invoke_copy_from_kernel(to, from, n) \ n 561 arch/mips/include/asm/uaccess.h __invoke_copy_from(__copy_user, to, from, n) n 563 arch/mips/include/asm/uaccess.h #define __invoke_copy_to_kernel(to, from, n) \ n 564 arch/mips/include/asm/uaccess.h __invoke_copy_to(__copy_user, to, from, n) n 566 arch/mips/include/asm/uaccess.h #define ___invoke_copy_in_kernel(to, from, n) \ n 567 arch/mips/include/asm/uaccess.h __invoke_copy_from(__copy_user, to, from, n) n 570 arch/mips/include/asm/uaccess.h #define __invoke_copy_from_user(to, from, n) \ n 571 arch/mips/include/asm/uaccess.h __invoke_copy_from(__copy_user, to, from, n) n 573 arch/mips/include/asm/uaccess.h #define __invoke_copy_to_user(to, from, n) \ n 574 arch/mips/include/asm/uaccess.h __invoke_copy_to(__copy_user, to, from, n) n 576 arch/mips/include/asm/uaccess.h #define ___invoke_copy_in_user(to, from, n) \ n 577 arch/mips/include/asm/uaccess.h __invoke_copy_from(__copy_user, to, from, n) n 593 arch/mips/include/asm/uaccess.h #define __invoke_copy_from_user(to, from, n) \ n 594 arch/mips/include/asm/uaccess.h __invoke_copy_from(__copy_from_user_eva, to, from, n) n 596 arch/mips/include/asm/uaccess.h #define __invoke_copy_to_user(to, from, n) \ n 597 arch/mips/include/asm/uaccess.h __invoke_copy_to(__copy_to_user_eva, to, from, n) n 599 arch/mips/include/asm/uaccess.h #define ___invoke_copy_in_user(to, from, n) \ n 600 arch/mips/include/asm/uaccess.h __invoke_copy_from(__copy_in_user_eva, to, from, n) n 605 arch/mips/include/asm/uaccess.h raw_copy_to_user(void __user *to, const void *from, unsigned long n) n 608 arch/mips/include/asm/uaccess.h return __invoke_copy_to_kernel(to, from, n); n 610 arch/mips/include/asm/uaccess.h return __invoke_copy_to_user(to, from, n); n 614 arch/mips/include/asm/uaccess.h raw_copy_from_user(void *to, const void 
__user *from, unsigned long n) n 617 arch/mips/include/asm/uaccess.h return __invoke_copy_from_kernel(to, from, n); n 619 arch/mips/include/asm/uaccess.h return __invoke_copy_from_user(to, from, n); n 626 arch/mips/include/asm/uaccess.h raw_copy_in_user(void __user*to, const void __user *from, unsigned long n) n 629 arch/mips/include/asm/uaccess.h return ___invoke_copy_in_kernel(to, from, n); n 631 arch/mips/include/asm/uaccess.h return ___invoke_copy_in_user(to, from, n); n 686 arch/mips/include/asm/uaccess.h #define clear_user(addr,n) \ n 689 arch/mips/include/asm/uaccess.h unsigned long __cl_size = (n); \ n 747 arch/mips/include/asm/uaccess.h extern long __strnlen_kernel_asm(const char __user *s, long n); n 748 arch/mips/include/asm/uaccess.h extern long __strnlen_user_asm(const char __user *s, long n); n 763 arch/mips/include/asm/uaccess.h static inline long strnlen_user(const char __user *s, long n) n 775 arch/mips/include/asm/uaccess.h : "r" (s), "r" (n) n 784 arch/mips/include/asm/uaccess.h : "r" (s), "r" (n) n 130 arch/mips/include/uapi/asm/kvm.h #define KVM_REG_MIPS_CP0_MAAR(n) (KVM_REG_MIPS_MAAR | \ n 131 arch/mips/include/uapi/asm/kvm.h KVM_REG_SIZE_U64 | (n)) n 184 arch/mips/include/uapi/asm/kvm.h #define KVM_REG_MIPS_FPR_32(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U32 | (n)) n 185 arch/mips/include/uapi/asm/kvm.h #define KVM_REG_MIPS_FPR_64(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U64 | (n)) n 186 arch/mips/include/uapi/asm/kvm.h #define KVM_REG_MIPS_VEC_128(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U128 | (n)) n 17 arch/mips/kernel/early_printk.c static void early_console_write(struct console *con, const char *s, unsigned n) n 19 arch/mips/kernel/early_printk.c while (n-- && *s) { n 127 arch/mips/kernel/irq-msc01.c int n = imp->im_irq; n 131 arch/mips/kernel/irq-msc01.c irq_set_chip_and_handler_name(irqbase + n, n 136 arch/mips/kernel/irq-msc01.c MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT); n 138 arch/mips/kernel/irq-msc01.c MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl); n 141 arch/mips/kernel/irq-msc01.c irq_set_chip_and_handler_name(irqbase + n, n 146 arch/mips/kernel/irq-msc01.c MSCIC_WRITE(MSC01_IC_SUP+n*8, 0); n 148 arch/mips/kernel/irq-msc01.c MSCIC_WRITE(MSC01_IC_SUP+n*8, imp->im_lvl); n 82 arch/mips/kernel/module.c struct mips_hi16 *n; n 95 arch/mips/kernel/module.c n = kmalloc(sizeof *n, GFP_KERNEL); n 96 arch/mips/kernel/module.c if (!n) n 99 arch/mips/kernel/module.c n->addr = (Elf_Addr *)location; n 100 arch/mips/kernel/module.c n->value = v; n 101 arch/mips/kernel/module.c n->next = me->arch.r_mips_hi16_list; n 102 arch/mips/kernel/module.c me->arch.r_mips_hi16_list = n; n 1387 arch/mips/kernel/perf_event_mipsxx.c int n, handled = IRQ_NONE; n 1408 arch/mips/kernel/perf_event_mipsxx.c for (n = counters - 1; n >= 0; n--) { n 1409 arch/mips/kernel/perf_event_mipsxx.c if (!test_bit(n, cpuc->used_mask)) n 1412 arch/mips/kernel/perf_event_mipsxx.c counter = mipspmu.read_counter(n); n 1416 arch/mips/kernel/perf_event_mipsxx.c handle_associated_event(cpuc, n, &data, regs); n 39 arch/mips/kernel/proc.c unsigned long n = (unsigned long) v - 1; n 40 arch/mips/kernel/proc.c unsigned int version = cpu_data[n].processor_id; n 41 arch/mips/kernel/proc.c unsigned int fp_vers = cpu_data[n].fpu_id; n 46 arch/mips/kernel/proc.c if (!cpu_online(n)) n 53 arch/mips/kernel/proc.c if (n == 0) { n 60 arch/mips/kernel/proc.c seq_printf(m, "processor\t\t: %ld\n", n); n 62 arch/mips/kernel/proc.c cpu_data[n].options & MIPS_CPU_FPU ? 
" FPU V%d.%d" : ""); n 63 arch/mips/kernel/proc.c seq_printf(m, fmt, __cpu_name[n], n 67 arch/mips/kernel/proc.c cpu_data[n].udelay_val / (500000/HZ), n 68 arch/mips/kernel/proc.c (cpu_data[n].udelay_val / (5000/HZ)) % 100); n 72 arch/mips/kernel/proc.c seq_printf(m, "tlb_entries\t\t: %d\n", cpu_data[n].tlbsize); n 79 arch/mips/kernel/proc.c cpu_data[n].watch_reg_count); n 80 arch/mips/kernel/proc.c for (i = 0; i < cpu_data[n].watch_reg_count; i++) n 82 arch/mips/kernel/proc.c cpu_data[n].watch_reg_masks[i]); n 138 arch/mips/kernel/proc.c cpu_data[n].srsets); n 140 arch/mips/kernel/proc.c hweight8(cpu_data[n].kscratch_mask)); n 141 arch/mips/kernel/proc.c seq_printf(m, "package\t\t\t: %d\n", cpu_data[n].package); n 142 arch/mips/kernel/proc.c seq_printf(m, "core\t\t\t: %d\n", cpu_core(&cpu_data[n])); n 146 arch/mips/kernel/proc.c seq_printf(m, "VPE\t\t\t: %d\n", cpu_vpe_id(&cpu_data[n])); n 148 arch/mips/kernel/proc.c seq_printf(m, "VP\t\t\t: %d\n", cpu_vpe_id(&cpu_data[n])); n 157 arch/mips/kernel/proc.c proc_cpuinfo_notifier_args.n = n; n 1019 arch/mips/kernel/ptrace.c .n = ELF_NGREG, n 1027 arch/mips/kernel/ptrace.c .n = NUM_DSP_REGS + 1, n 1037 arch/mips/kernel/ptrace.c .n = ELF_NFPREG, n 1045 arch/mips/kernel/ptrace.c .n = 1, n 1055 arch/mips/kernel/ptrace.c .n = NUM_FPU_REGS + 1, n 1069 arch/mips/kernel/ptrace.c .n = ARRAY_SIZE(mips_regsets), n 1079 arch/mips/kernel/ptrace.c .n = ELF_NGREG, n 1087 arch/mips/kernel/ptrace.c .n = NUM_DSP_REGS + 1, n 1097 arch/mips/kernel/ptrace.c .n = 1, n 1105 arch/mips/kernel/ptrace.c .n = ELF_NFPREG, n 1115 arch/mips/kernel/ptrace.c .n = NUM_FPU_REGS + 1, n 1129 arch/mips/kernel/ptrace.c .n = ARRAY_SIZE(mips64_regsets), n 1140 arch/mips/kernel/ptrace.c .n = ARRAY_SIZE(mips64_regsets), n 1930 arch/mips/kernel/traps.c void __init *set_except_vector(int n, void *addr) n 1946 arch/mips/kernel/traps.c old_handler = xchg(&exception_handlers[n], handler); n 1948 arch/mips/kernel/traps.c if (n == 0 && cpu_has_divec) { n 1975 arch/mips/kernel/traps.c static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) n 1978 arch/mips/kernel/traps.c unsigned long old_handler = vi_handlers[n]; n 1990 arch/mips/kernel/traps.c vi_handlers[n] = handler; n 1992 arch/mips/kernel/traps.c b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING); n 1999 arch/mips/kernel/traps.c board_bind_eic_interrupt(n, srs); n 2003 arch/mips/kernel/traps.c change_c0_srsmap(0xf << n*4, srs << n*4); n 2072 arch/mips/kernel/traps.c void *set_vi_handler(int n, vi_handler_t addr) n 2074 arch/mips/kernel/traps.c return set_vi_srs_handler(n, addr, 0); n 170 arch/mips/kernel/vpe-cmp.c struct vpe *v, *n; n 177 arch/mips/kernel/vpe-cmp.c list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) n 510 arch/mips/kernel/vpe-mt.c struct vpe *v, *n; n 517 arch/mips/kernel/vpe-mt.c list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) { n 314 arch/mips/kernel/vpe.c struct mips_hi16 *n; n 321 arch/mips/kernel/vpe.c n = kmalloc(sizeof(*n), GFP_KERNEL); n 322 arch/mips/kernel/vpe.c if (!n) n 325 arch/mips/kernel/vpe.c n->addr = location; n 326 arch/mips/kernel/vpe.c n->value = v; n 327 arch/mips/kernel/vpe.c n->next = mips_hi16_list; n 328 arch/mips/kernel/vpe.c mips_hi16_list = n; n 485 arch/mips/kernel/vpe.c unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); n 496 arch/mips/kernel/vpe.c for (i = 1; i < n; i++) { n 540 arch/mips/kernel/vpe.c unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); n 542 arch/mips/kernel/vpe.c pr_debug("dump_elfsymbols: n %d\n", n); n 543 
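
The R_MIPS_HI16 handling above (arch/mips/kernel/module.c and vpe.c) queues each HI16 relocation on a push-front list so a later LO16 can be paired with it. Below is a minimal sketch of that list-building step; malloc() stands in for kmalloc(GFP_KERNEL) and the struct and field names are illustrative.

#include <stdlib.h>

struct hi16 {
	unsigned long *addr;	/* location of the HI16 instruction   */
	unsigned long value;	/* symbol value it was relocated with */
	struct hi16 *next;
};

/* Push-front, mirroring the pattern in the listing. */
static int push_hi16(struct hi16 **head, unsigned long *addr, unsigned long value)
{
	struct hi16 *n = malloc(sizeof(*n));

	if (!n)
		return -1;	/* stand-in for the kernel's error return */
	n->addr = addr;
	n->value = value;
	n->next = *head;
	*head = n;
	return 0;
}

int main(void)
{
	struct hi16 *list = NULL;
	unsigned long insn = 0;
	int rc = push_hi16(&list, &insn, 0x1234);

	free(list);
	return rc ? 1 : 0;
}
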
arch/mips/kernel/vpe.c for (i = 1; i < n; i++) { n 555 arch/mips/kernel/vpe.c unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); n 557 arch/mips/kernel/vpe.c for (i = 1; i < n; i++) { n 942 arch/mips/kvm/mips.c unsigned n; n 947 arch/mips/kvm/mips.c n = reg_list.n; n 948 arch/mips/kvm/mips.c reg_list.n = kvm_mips_num_regs(vcpu); n 952 arch/mips/kvm/mips.c if (n < reg_list.n) n 23 arch/mips/lasat/prom.c #define PROM_JUMP_TABLE_ENTRY(n) (*((u32 *)(RESET_VECTOR + 0x20) + n)) n 13 arch/mips/loongson32/common/irq.c #define LS1X_INTC_REG(n, x) \ n 14 arch/mips/loongson32/common/irq.c ((void __iomem *)KSEG1ADDR(LS1X_INTC_BASE + (n * 0x18) + (x))) n 16 arch/mips/loongson32/common/irq.c #define LS1X_INTC_INTISR(n) LS1X_INTC_REG(n, 0x0) n 17 arch/mips/loongson32/common/irq.c #define LS1X_INTC_INTIEN(n) LS1X_INTC_REG(n, 0x4) n 18 arch/mips/loongson32/common/irq.c #define LS1X_INTC_INTSET(n) LS1X_INTC_REG(n, 0x8) n 19 arch/mips/loongson32/common/irq.c #define LS1X_INTC_INTCLR(n) LS1X_INTC_REG(n, 0xc) n 20 arch/mips/loongson32/common/irq.c #define LS1X_INTC_INTPOL(n) LS1X_INTC_REG(n, 0x10) n 21 arch/mips/loongson32/common/irq.c #define LS1X_INTC_INTEDGE(n) LS1X_INTC_REG(n, 0x14) n 26 arch/mips/loongson32/common/irq.c unsigned int n = (d->irq - LS1X_IRQ_BASE) >> 5; n 28 arch/mips/loongson32/common/irq.c __raw_writel(__raw_readl(LS1X_INTC_INTCLR(n)) n 29 arch/mips/loongson32/common/irq.c | (1 << bit), LS1X_INTC_INTCLR(n)); n 35 arch/mips/loongson32/common/irq.c unsigned int n = (d->irq - LS1X_IRQ_BASE) >> 5; n 37 arch/mips/loongson32/common/irq.c __raw_writel(__raw_readl(LS1X_INTC_INTIEN(n)) n 38 arch/mips/loongson32/common/irq.c & ~(1 << bit), LS1X_INTC_INTIEN(n)); n 44 arch/mips/loongson32/common/irq.c unsigned int n = (d->irq - LS1X_IRQ_BASE) >> 5; n 46 arch/mips/loongson32/common/irq.c __raw_writel(__raw_readl(LS1X_INTC_INTIEN(n)) n 47 arch/mips/loongson32/common/irq.c & ~(1 << bit), LS1X_INTC_INTIEN(n)); n 48 arch/mips/loongson32/common/irq.c __raw_writel(__raw_readl(LS1X_INTC_INTCLR(n)) n 49 arch/mips/loongson32/common/irq.c | (1 << bit), LS1X_INTC_INTCLR(n)); n 55 arch/mips/loongson32/common/irq.c unsigned int n = (d->irq - LS1X_IRQ_BASE) >> 5; n 57 arch/mips/loongson32/common/irq.c __raw_writel(__raw_readl(LS1X_INTC_INTIEN(n)) n 58 arch/mips/loongson32/common/irq.c | (1 << bit), LS1X_INTC_INTIEN(n)); n 64 arch/mips/loongson32/common/irq.c unsigned int n = (d->irq - LS1X_IRQ_BASE) >> 5; n 68 arch/mips/loongson32/common/irq.c __raw_writel(__raw_readl(LS1X_INTC_INTPOL(n)) n 69 arch/mips/loongson32/common/irq.c | (1 << bit), LS1X_INTC_INTPOL(n)); n 70 arch/mips/loongson32/common/irq.c __raw_writel(__raw_readl(LS1X_INTC_INTEDGE(n)) n 71 arch/mips/loongson32/common/irq.c & ~(1 << bit), LS1X_INTC_INTEDGE(n)); n 74 arch/mips/loongson32/common/irq.c __raw_writel(__raw_readl(LS1X_INTC_INTPOL(n)) n 75 arch/mips/loongson32/common/irq.c & ~(1 << bit), LS1X_INTC_INTPOL(n)); n 76 arch/mips/loongson32/common/irq.c __raw_writel(__raw_readl(LS1X_INTC_INTEDGE(n)) n 77 arch/mips/loongson32/common/irq.c & ~(1 << bit), LS1X_INTC_INTEDGE(n)); n 80 arch/mips/loongson32/common/irq.c __raw_writel(__raw_readl(LS1X_INTC_INTPOL(n)) n 81 arch/mips/loongson32/common/irq.c | (1 << bit), LS1X_INTC_INTPOL(n)); n 82 arch/mips/loongson32/common/irq.c __raw_writel(__raw_readl(LS1X_INTC_INTEDGE(n)) n 83 arch/mips/loongson32/common/irq.c | (1 << bit), LS1X_INTC_INTEDGE(n)); n 86 arch/mips/loongson32/common/irq.c __raw_writel(__raw_readl(LS1X_INTC_INTPOL(n)) n 87 arch/mips/loongson32/common/irq.c & ~(1 << bit), 
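The LS1X_INTC_REG(n, x) macro above locates bank n of the Loongson-1 interrupt controller at base + n*0x18 plus a small per-register offset, and the mask/ack helpers pick the bank as (irq - LS1X_IRQ_BASE) >> 5 before read-modify-writing a single bit. Below is a sketch of the same bank/bit arithmetic against a plain array; the simulated register file, the helper names, and the assumption that the bit index is the low five bits of the same difference are all mine, and on hardware these would be volatile KSEG1 MMIO accesses.

#include <stdint.h>
#include <stdio.h>

#define NBANKS      4
#define BANK_STRIDE 0x18          /* bytes between banks, as in LS1X_INTC_REG() */
#define R_INTIEN    0x04          /* interrupt enable register offset */
#define R_INTCLR    0x0c          /* write 1 to clear pending         */

static uint32_t regs[NBANKS * BANK_STRIDE / 4];   /* simulated register file */

static uint32_t rd(unsigned int bank, unsigned int off)
{
	return regs[(bank * BANK_STRIDE + off) / 4];
}

static void wr(unsigned int bank, unsigned int off, uint32_t v)
{
	regs[(bank * BANK_STRIDE + off) / 4] = v;
}

/* Each bank covers 32 interrupt lines: bank = diff >> 5, bit = diff & 31. */
static void irq_unmask(unsigned int irq, unsigned int irq_base)
{
	unsigned int bank = (irq - irq_base) >> 5;
	unsigned int bit  = (irq - irq_base) & 0x1f;

	wr(bank, R_INTIEN, rd(bank, R_INTIEN) | (1u << bit));
}

static void irq_ack(unsigned int irq, unsigned int irq_base)
{
	unsigned int bank = (irq - irq_base) >> 5;
	unsigned int bit  = (irq - irq_base) & 0x1f;

	wr(bank, R_INTCLR, rd(bank, R_INTCLR) | (1u << bit));
}

int main(void)
{
	irq_unmask(40, 8);            /* irq 40, base 8 -> bank 1, bit 0 */
	irq_ack(40, 8);
	printf("bank1 IEN=%08x CLR=%08x\n", rd(1, R_INTIEN), rd(1, R_INTCLR));
	return 0;
}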
LS1X_INTC_INTPOL(n)); n 88 arch/mips/loongson32/common/irq.c __raw_writel(__raw_readl(LS1X_INTC_INTEDGE(n)) n 89 arch/mips/loongson32/common/irq.c | (1 << bit), LS1X_INTC_INTEDGE(n)); n 92 arch/mips/loongson32/common/irq.c __raw_writel(__raw_readl(LS1X_INTC_INTPOL(n)) n 93 arch/mips/loongson32/common/irq.c & ~(1 << bit), LS1X_INTC_INTPOL(n)); n 94 arch/mips/loongson32/common/irq.c __raw_writel(__raw_readl(LS1X_INTC_INTEDGE(n)) n 95 arch/mips/loongson32/common/irq.c | (1 << bit), LS1X_INTC_INTEDGE(n)); n 115 arch/mips/loongson32/common/irq.c static void ls1x_irq_dispatch(int n) n 120 arch/mips/loongson32/common/irq.c int_status = __raw_readl(LS1X_INTC_INTISR(n)) & n 121 arch/mips/loongson32/common/irq.c __raw_readl(LS1X_INTC_INTIEN(n)); n 124 arch/mips/loongson32/common/irq.c irq = LS1X_IRQ(n, __ffs(int_status)); n 160 arch/mips/loongson32/common/irq.c int n; n 165 arch/mips/loongson32/common/irq.c for (n = 0; n < INTN; n++) { n 166 arch/mips/loongson32/common/irq.c __raw_writel(0x0, LS1X_INTC_INTIEN(n)); n 167 arch/mips/loongson32/common/irq.c __raw_writel(0xffffffff, LS1X_INTC_INTCLR(n)); n 168 arch/mips/loongson32/common/irq.c __raw_writel(0xffffffff, LS1X_INTC_INTPOL(n)); n 170 arch/mips/loongson32/common/irq.c __raw_writel(n ? 0x0 : 0xe000, LS1X_INTC_INTEDGE(n)); n 174 arch/mips/loongson32/common/irq.c for (n = base; n < NR_IRQS; n++) { n 175 arch/mips/loongson32/common/irq.c irq_set_chip_and_handler(n, &ls1x_irq_chip, n 84 arch/mips/loongson64/common/cs5536/cs5536_isa.c void pci_isa_write_bar(int n, u32 value) n 90 arch/mips/loongson64/common/cs5536/cs5536_isa.c lo |= soft_bar_flag[n]; n 95 arch/mips/loongson64/common/cs5536/cs5536_isa.c lo &= bar_space_range[n]; n 96 arch/mips/loongson64/common/cs5536/cs5536_isa.c _wrmsr(divil_msr_reg[n], hi, lo); n 100 arch/mips/loongson64/common/cs5536/cs5536_isa.c ((bar_space_len[n] - 4) << 12) | 0x01; n 102 arch/mips/loongson64/common/cs5536/cs5536_isa.c _wrmsr(sb_msr_reg[n], hi, lo); n 110 arch/mips/loongson64/common/cs5536/cs5536_isa.c u32 pci_isa_read_bar(int n) n 116 arch/mips/loongson64/common/cs5536/cs5536_isa.c if (lo & soft_bar_flag[n]) { n 117 arch/mips/loongson64/common/cs5536/cs5536_isa.c conf_data = bar_space_range[n] | PCI_BASE_ADDRESS_SPACE_IO; n 118 arch/mips/loongson64/common/cs5536/cs5536_isa.c lo &= ~soft_bar_flag[n]; n 121 arch/mips/loongson64/common/cs5536/cs5536_isa.c _rdmsr(divil_msr_reg[n], &hi, &lo); n 122 arch/mips/loongson64/common/cs5536/cs5536_isa.c conf_data = lo & bar_space_range[n]; n 176 arch/mips/math-emu/ieee754.h static inline int ieee754_cxtest(unsigned int n) n 178 arch/mips/math-emu/ieee754.h return (ieee754_csr.cx & n); n 198 arch/mips/math-emu/ieee754.h static inline int ieee754_sxtest(unsigned int n) n 200 arch/mips/math-emu/ieee754.h return (ieee754_csr.sx & n); n 28 arch/mips/mm/sc-ip22.c #define SC_INDEX(n) ((n) & CI_MASK) n 78 arch/mips/mti-malta/malta-amon.c struct vpe_notifications *n; n 83 arch/mips/mti-malta/malta-amon.c list_for_each_entry(n, &v->notify, list) n 84 arch/mips/mti-malta/malta-amon.c n->start(VPE_MODULE_MINOR); n 257 arch/mips/netlogic/xlp/ahci-init-xlp2.c int n; n 313 arch/mips/netlogic/xlp/ahci-init-xlp2.c n = 10000; n 319 arch/mips/netlogic/xlp/ahci-init-xlp2.c } while (--n > 0); n 471 arch/mips/netlogic/xlp/nlm_hal.c int i, n, rv; n 491 arch/mips/netlogic/xlp/nlm_hal.c n = (val >> 1) & 0x3; n 492 arch/mips/netlogic/xlp/nlm_hal.c if (n != node) n 82 arch/mips/netlogic/xlp/setup.c int i, n; n 84 arch/mips/netlogic/xlp/setup.c n = nlm_get_dram_map(-1, map, ARRAY_SIZE(map)); /* -1 : 
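ls1x_irq_dispatch() above masks the raw status register with the enable register and services the lowest set bit via __ffs(). A small sketch of that selection step, with __builtin_ctz standing in for __ffs and the two registers passed in as plain values; pick_irq() and the -1 "spurious" return are conventions invented for the example.

#include <stdint.h>
#include <stdio.h>

/* Only sources that are both pending and enabled are considered; the
 * lowest such bit is turned into an interrupt number. */
static int pick_irq(uint32_t isr, uint32_t ien, int irq_base)
{
	uint32_t pending = isr & ien;

	if (!pending)
		return -1;               /* nothing enabled is pending */
	return irq_base + __builtin_ctz(pending);
}

int main(void)
{
	/* bits 3 and 7 pending, but only bit 7 enabled -> irq_base + 7 */
	printf("%d\n", pick_irq((1u << 3) | (1u << 7), 1u << 7, 8));
	printf("%d\n", pick_irq(1u << 3, 1u << 7, 8));   /* -> -1 */
	return 0;
}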
all nodes */ n 85 arch/mips/netlogic/xlp/setup.c for (i = 0; i < n; i += 2) { n 114 arch/mips/netlogic/xlp/wakeup.c int core, n, cpu, ncores; n 116 arch/mips/netlogic/xlp/wakeup.c for (n = 0; n < NLM_NR_NODES; n++) { n 117 arch/mips/netlogic/xlp/wakeup.c if (n != 0) { n 120 arch/mips/netlogic/xlp/wakeup.c int b = xlp9xx_get_socbus(n); n 121 arch/mips/netlogic/xlp/wakeup.c pr_info("Node %d SoC PCI bus %d.\n", n, b); n 125 arch/mips/netlogic/xlp/wakeup.c syspcibase = nlm_get_sys_pcibase(n); n 129 arch/mips/netlogic/xlp/wakeup.c nlm_node_init(n); n 133 arch/mips/netlogic/xlp/wakeup.c nodep = nlm_get_node(n); n 136 arch/mips/netlogic/xlp/wakeup.c fusebase = nlm_get_fuse_regbase(n); n 170 arch/mips/netlogic/xlp/wakeup.c pr_info("Node %d - SYS/FUSE coremask %x\n", n, syscoremask); n 174 arch/mips/netlogic/xlp/wakeup.c if (n == 0 && core == 0) n 182 arch/mips/netlogic/xlp/wakeup.c cpu = (n * ncores + core) * NLM_THREADS_PER_CORE; n 187 arch/mips/netlogic/xlp/wakeup.c if (!xlp_wakeup_core(nodep->sysbase, n, core)) n 195 arch/mips/netlogic/xlp/wakeup.c pr_err("Node %d : timeout core %d\n", n, core); n 84 arch/mips/netlogic/xlr/fmn-config.c int bkt, n, total_credits, ncores; n 89 arch/mips/netlogic/xlr/fmn-config.c for (n = 0; n < ncores; n++) n 90 arch/mips/netlogic/xlr/fmn-config.c total_credits += cfg->cpu[n].credit_config[bkt]; n 122 arch/mips/netlogic/xlr/fmn-config.c int i, j, num_core, n, credits_per_cpu; n 130 arch/mips/netlogic/xlr/fmn-config.c n = num_core; n 132 arch/mips/netlogic/xlr/fmn-config.c n = 4; n 138 arch/mips/netlogic/xlr/fmn-config.c credits_per_cpu = size / n; n 107 arch/mips/netlogic/xlr/platform.c #define USB_PLATFORM_DEV(n, i, irq) \ n 109 arch/mips/netlogic/xlr/platform.c .name = n, \ n 82 arch/mips/oprofile/op_model_mipsxx.c #define __define_perf_accessors(r, n, np) \ n 84 arch/mips/oprofile/op_model_mipsxx.c static inline unsigned int r_c0_ ## r ## n(void) \ n 90 arch/mips/oprofile/op_model_mipsxx.c return read_c0_ ## r ## n(); \ n 99 arch/mips/oprofile/op_model_mipsxx.c static inline void w_c0_ ## r ## n(unsigned int value) \ n 105 arch/mips/oprofile/op_model_mipsxx.c write_c0_ ## r ## n(value); \ n 247 arch/mips/oprofile/op_model_mipsxx.c #define HANDLE_COUNTER(n) \ n 249 arch/mips/oprofile/op_model_mipsxx.c case n + 1: \ n 250 arch/mips/oprofile/op_model_mipsxx.c control = r_c0_perfctrl ## n(); \ n 251 arch/mips/oprofile/op_model_mipsxx.c counter = r_c0_perfcntr ## n(); \ n 254 arch/mips/oprofile/op_model_mipsxx.c oprofile_add_sample(get_irq_regs(), n); \ n 255 arch/mips/oprofile/op_model_mipsxx.c w_c0_perfcntr ## n(reg.counter[n]); \ n 292 arch/mips/pci/pci-xlp.c int link, n; n 303 arch/mips/pci/pci-xlp.c for (n = 0; n < NLM_NR_NODES; n++) { n 304 arch/mips/pci/pci-xlp.c if (!nlm_node_present(n)) n 308 arch/mips/pci/pci-xlp.c pciebase = nlm_get_pcie_base(n, link); n 311 arch/mips/pci/pci-xlp.c xlp_config_pci_bswap(n, link); n 312 arch/mips/pci/pci-xlp.c xlp_init_node_msi_irqs(n, link); n 319 arch/mips/pci/pci-xlp.c pr_info("XLP PCIe: Link %d-%d initialized.\n", n, link); n 15 arch/mips/sgi-ip27/ip27-nmi.c #define NODE_NUM_CPUS(n) CNODE_NUM_CPUS(n) n 17 arch/mips/sgi-ip27/ip27-nmi.c #define NODE_NUM_CPUS(n) CPUS_PER_NODE n 223 arch/mips/sgi-ip27/ip27-nmi.c for (n=0; n < CNODE_NUM_CPUS(node); cpu++, n++) { n 16 arch/nds32/include/asm/string.h extern void *memzero(void *ptr, __kernel_size_t n); n 262 arch/nds32/include/asm/uaccess.h extern unsigned long __arch_clear_user(void __user * addr, unsigned long n); n 265 arch/nds32/include/asm/uaccess.h extern __must_check 
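The __define_perf_accessors() and HANDLE_COUNTER() hits above generate one accessor pair and one switch case per counter number by token pasting. Here is a standalone sketch of the same preprocessor trick; the accessor names and the array standing in for the per-counter registers are invented for the example.

#include <stdio.h>

static unsigned int perfcntr[4];   /* pretend per-counter registers */

/* Generate r_/w_ accessor pairs by token pasting, in the spirit of
 * __define_perf_accessors(). */
#define DEFINE_ACCESSORS(idx)						\
static unsigned int r_perfcntr##idx(void) { return perfcntr[idx]; }	\
static void w_perfcntr##idx(unsigned int v) { perfcntr[idx] = v; }

DEFINE_ACCESSORS(0)
DEFINE_ACCESSORS(1)

/* A HANDLE_COUNTER()-style case body that selects the accessor pair
 * for a given counter number at compile time. */
#define HANDLE_COUNTER(idx, val)					\
	case idx:							\
		w_perfcntr##idx(val);					\
		break;

static void reset_counter(int n, unsigned int val)
{
	switch (n) {
	HANDLE_COUNTER(0, val)
	HANDLE_COUNTER(1, val)
	default:
		break;
	}
}

int main(void)
{
	reset_counter(1, 42);
	printf("cntr0=%u cntr1=%u\n", r_perfcntr0(), r_perfcntr1());
	return 0;
}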
long strnlen_user(const char __user * str, long n); n 267 arch/nds32/include/asm/uaccess.h unsigned long n); n 269 arch/nds32/include/asm/uaccess.h unsigned long n); n 276 arch/nds32/include/asm/uaccess.h static inline unsigned long clear_user(void __user * to, unsigned long n) n 278 arch/nds32/include/asm/uaccess.h if (access_ok(to, n)) n 279 arch/nds32/include/asm/uaccess.h n = __arch_clear_user(to, n); n 280 arch/nds32/include/asm/uaccess.h return n; n 283 arch/nds32/include/asm/uaccess.h static inline unsigned long __clear_user(void __user * to, unsigned long n) n 285 arch/nds32/include/asm/uaccess.h return __arch_clear_user(to, n); n 41 arch/nds32/kernel/ptrace.c .n = sizeof(struct user_pt_regs) / sizeof(u32), n 52 arch/nds32/kernel/ptrace.c .n = ARRAY_SIZE(nds32_regsets) n 200 arch/nds32/kernel/traps.c int bad_syscall(int n, struct pt_regs *regs) n 209 arch/nds32/kernel/traps.c die_if_kernel("Oops - bad syscall", regs, n); n 27 arch/nios2/boot/compressed/misc.c #define memzero(s, n) memset((s), 0, (n)) n 94 arch/nios2/boot/compressed/misc.c void *memset(void *s, int c, size_t n) n 99 arch/nios2/boot/compressed/misc.c for (i = 0; i < n; i++) n 136 arch/nios2/boot/compressed/misc.c unsigned n; n 141 arch/nios2/boot/compressed/misc.c for (n = 0; n < outcnt; n++) { n 51 arch/nios2/include/asm/uaccess.h unsigned long n) n 62 arch/nios2/include/asm/uaccess.h : "=r" (n), "=r" (to) n 63 arch/nios2/include/asm/uaccess.h : "0" (n), "1" (to) n 66 arch/nios2/include/asm/uaccess.h return n; n 70 arch/nios2/include/asm/uaccess.h unsigned long n) n 72 arch/nios2/include/asm/uaccess.h if (!access_ok(to, n)) n 73 arch/nios2/include/asm/uaccess.h return n; n 74 arch/nios2/include/asm/uaccess.h return __clear_user(to, n); n 78 arch/nios2/include/asm/uaccess.h raw_copy_from_user(void *to, const void __user *from, unsigned long n); n 80 arch/nios2/include/asm/uaccess.h raw_copy_to_user(void __user *to, const void *from, unsigned long n); n 87 arch/nios2/include/asm/uaccess.h extern __must_check long strnlen_user(const char __user *s, long n); n 22 arch/nios2/kernel/cpuinfo.c static inline u32 fcpu(struct device_node *cpu, const char *n) n 26 arch/nios2/kernel/cpuinfo.c of_property_read_u32(cpu, n, &val); n 121 arch/nios2/kernel/ptrace.c .n = NUM_PTRACE_REG, n 134 arch/nios2/kernel/ptrace.c .n = ARRAY_SIZE(nios2_regsets) n 135 arch/openrisc/include/asm/cmpxchg.h #define cmpxchg(ptr, o, n) \ n 139 arch/openrisc/include/asm/cmpxchg.h (unsigned long)(n), \ n 201 arch/openrisc/include/asm/spr_defs.h #define MATCHPOINTS_TO_NDP(n) (1 == n ? SPR_DCFGR_NDP1 : \ n 202 arch/openrisc/include/asm/spr_defs.h 2 == n ? SPR_DCFGR_NDP2 : \ n 203 arch/openrisc/include/asm/spr_defs.h 3 == n ? SPR_DCFGR_NDP3 : \ n 204 arch/openrisc/include/asm/spr_defs.h 4 == n ? SPR_DCFGR_NDP4 : \ n 205 arch/openrisc/include/asm/spr_defs.h 5 == n ? SPR_DCFGR_NDP5 : \ n 206 arch/openrisc/include/asm/spr_defs.h 6 == n ? SPR_DCFGR_NDP6 : \ n 207 arch/openrisc/include/asm/spr_defs.h 7 == n ? 
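The nds32 and nios2 clear_user() wrappers above follow the usual uaccess convention: validate the range first, then call an arch helper that returns how many bytes could not be cleared (0 on full success). A userspace model of that contract follows, assuming a fake wrap-around check and a helper that always succeeds; access_ok() and the exception-table fixups of the real thing are out of scope here.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static unsigned long fake_arch_clear(void *to, unsigned long n)
{
	memset(to, 0, n);     /* pretend every byte was writable */
	return 0;             /* bytes NOT cleared */
}

static unsigned long fake_clear_user(void *to, unsigned long n)
{
	uintptr_t start = (uintptr_t)to;

	/* Stand-in for access_ok(): reject ranges that wrap around. */
	if (start + n < start)
		return n;                   /* nothing cleared */
	return fake_arch_clear(to, n);
}

int main(void)
{
	char buf[16] = "xxxxxxxxxxxxxxx";

	printf("ok case, left over: %lu\n", fake_clear_user(buf, sizeof(buf)));
	printf("first byte now: %d\n", buf[0]);
	printf("bad case, left over: %lu\n",
	       fake_clear_user(buf, (unsigned long)-1));  /* range wraps */
	return 0;
}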
SPR_DCFGR_NDP7 : SPR_DCFGR_NDP8) n 6 arch/openrisc/include/asm/string.h extern void *memset(void *s, int c, __kernel_size_t n); n 9 arch/openrisc/include/asm/string.h extern void *memcpy(void *dest, __const void *src, __kernel_size_t n); n 266 arch/openrisc/include/asm/uaccess.h extern __must_check long strnlen_user(const char __user *str, long n); n 115 arch/openrisc/kernel/ptrace.c .n = ELF_NGREG, n 127 arch/openrisc/kernel/ptrace.c .n = ARRAY_SIZE(or1k_regsets), n 26 arch/openrisc/lib/memcpy.c void *memcpy(void *dest, __const void *src, __kernel_size_t n) n 35 arch/openrisc/lib/memcpy.c for (i = n >> 5; i > 0; i--) { n 46 arch/openrisc/lib/memcpy.c if (n & 1 << 4) { n 53 arch/openrisc/lib/memcpy.c if (n & 1 << 3) { n 58 arch/openrisc/lib/memcpy.c if (n & 1 << 2) n 68 arch/openrisc/lib/memcpy.c for (i = n >> 3; i > 0; i--) { n 79 arch/openrisc/lib/memcpy.c if (n & 1 << 2) { n 87 arch/openrisc/lib/memcpy.c if (n & 1 << 1) { n 92 arch/openrisc/lib/memcpy.c if (n & 1) n 102 arch/openrisc/lib/memcpy.c void *memcpy(void *dest, __const void *src, __kernel_size_t n) n 109 arch/openrisc/lib/memcpy.c for (; n >= 4; n -= 4) n 117 arch/openrisc/lib/memcpy.c for (; n >= 1; n -= 1) n 20 arch/parisc/boot/compressed/misc.c #define memzero(s, n) memset((s), 0, (n)) n 65 arch/parisc/boot/compressed/misc.c void *memmove(void *dest, const void *src, size_t n) n 71 arch/parisc/boot/compressed/misc.c while (n--) n 74 arch/parisc/boot/compressed/misc.c d += n; n 75 arch/parisc/boot/compressed/misc.c s += n; n 76 arch/parisc/boot/compressed/misc.c while (n--) n 111 arch/parisc/include/asm/assembly.h addib,UV,n -1,1,. n 112 arch/parisc/include/asm/assembly.h addib,NUV,n -1,1,.+8 n 77 arch/parisc/include/asm/atomic.h #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) n 222 arch/parisc/include/asm/atomic.h #define atomic64_cmpxchg(v, o, n) \ n 223 arch/parisc/include/asm/atomic.h ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) n 79 arch/parisc/include/asm/cmpxchg.h #define cmpxchg(ptr, o, n) \ n 82 arch/parisc/include/asm/cmpxchg.h __typeof__(*(ptr)) _n_ = (n); \ n 107 arch/parisc/include/asm/cmpxchg.h #define cmpxchg_local(ptr, o, n) \ n 109 arch/parisc/include/asm/cmpxchg.h (unsigned long)(n), sizeof(*(ptr)))) n 111 arch/parisc/include/asm/cmpxchg.h #define cmpxchg64_local(ptr, o, n) \ n 114 arch/parisc/include/asm/cmpxchg.h cmpxchg_local((ptr), (o), (n)); \ n 117 arch/parisc/include/asm/cmpxchg.h #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) n 120 arch/parisc/include/asm/cmpxchg.h #define cmpxchg64(ptr, o, n) __cmpxchg_u64(ptr, o, n) n 26 arch/parisc/include/asm/ftrace.h #define ftrace_return_address(n) return_address(n) n 60 arch/parisc/include/asm/page.h #define __pmd_val_set(x,n) (x).pmd = (n) n 61 arch/parisc/include/asm/page.h #define __pgd_val_set(x,n) (x).pgd = (n) n 82 arch/parisc/include/asm/page.h #define __pmd_val_set(x,n) (x) = (n) n 83 arch/parisc/include/asm/page.h #define __pgd_val_set(x,n) (x) = (n) n 77 arch/parisc/include/asm/psw.h unsigned int n:1; n 50 arch/parisc/include/asm/ptrace.h unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n); n 21 arch/parisc/kernel/kexec.c unsigned long n) n 24 arch/parisc/kernel/kexec.c n, n 25 arch/parisc/kernel/kexec.c kimage->segment[n].mem, n 26 arch/parisc/kernel/kexec.c kimage->segment[n].mem + kimage->segment[n].memsz, n 27 arch/parisc/kernel/kexec.c (unsigned long)kimage->segment[n].memsz, n 28 arch/parisc/kernel/kexec.c (unsigned long)kimage->segment[n].memsz / 
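The second openrisc memcpy() above copies 32-bit words while at least four bytes remain and finishes byte by byte. The sketch below mirrors that loop shape; it assumes both buffers are 4-byte aligned (hence the _Alignas) and, like the kernel, relies on relaxed aliasing rules (the kernel builds with -fno-strict-aliasing), so it is an illustration rather than a portable memcpy replacement.

#include <stdint.h>
#include <stdio.h>

static void *word_memcpy(void *dest, const void *src, size_t n)
{
	uint32_t *d4 = dest;
	const uint32_t *s4 = src;

	for (; n >= 4; n -= 4)          /* word loop */
		*d4++ = *s4++;

	unsigned char *d1 = (unsigned char *)d4;
	const unsigned char *s1 = (const unsigned char *)s4;

	for (; n; n--)                  /* byte tail */
		*d1++ = *s1++;
	return dest;
}

int main(void)
{
	_Alignas(uint32_t) char src[] = "hello, word copy";
	_Alignas(uint32_t) char dst[sizeof(src)];

	word_memcpy(dst, src, sizeof(src));
	printf("%s\n", dst);
	return 0;
}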
PAGE_SIZE); n 207 arch/parisc/kernel/module.c static inline unsigned long count_gots(const Elf_Rela *rela, unsigned long n) n 212 arch/parisc/kernel/module.c static inline unsigned long count_fdescs(const Elf_Rela *rela, unsigned long n) n 217 arch/parisc/kernel/module.c static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n) n 221 arch/parisc/kernel/module.c for (; n > 0; n--, rela++) n 233 arch/parisc/kernel/module.c static inline unsigned long count_gots(const Elf_Rela *rela, unsigned long n) n 237 arch/parisc/kernel/module.c for (; n > 0; n--, rela++) n 250 arch/parisc/kernel/module.c static inline unsigned long count_fdescs(const Elf_Rela *rela, unsigned long n) n 254 arch/parisc/kernel/module.c for (; n > 0; n--, rela++) n 265 arch/parisc/kernel/module.c static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n) n 269 arch/parisc/kernel/module.c for (; n > 0; n--, rela++) n 78 arch/parisc/kernel/ptrace.c if (pa_psw(task)->n) { n 83 arch/parisc/kernel/ptrace.c pa_psw(task)->n = 0; n 590 arch/parisc/kernel/ptrace.c .core_note_type = NT_PRSTATUS, .n = ELF_NGREG, n 595 arch/parisc/kernel/ptrace.c .core_note_type = NT_PRFPREG, .n = ELF_NFPREG, n 603 arch/parisc/kernel/ptrace.c .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets) n 674 arch/parisc/kernel/ptrace.c .core_note_type = NT_PRSTATUS, .n = ELF_NGREG, n 679 arch/parisc/kernel/ptrace.c .core_note_type = NT_PRFPREG, .n = ELF_NFPREG, n 687 arch/parisc/kernel/ptrace.c .regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets) n 825 arch/parisc/kernel/ptrace.c unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) n 829 arch/parisc/kernel/ptrace.c addr -= n; n 111 arch/parisc/kernel/unaligned.c #define IM(i,n) (((i)>>1&((1<<(n-1))-1))|((i)&1?((0-1L)<<(n-1)):0)) n 117 arch/parisc/lib/iomap.c static void ioport_write8r(void __iomem *addr, const void *s, unsigned long n) n 119 arch/parisc/lib/iomap.c outsb(ADDR2PORT(addr), s, n); n 122 arch/parisc/lib/iomap.c static void ioport_write16r(void __iomem *addr, const void *s, unsigned long n) n 124 arch/parisc/lib/iomap.c outsw(ADDR2PORT(addr), s, n); n 127 arch/parisc/lib/iomap.c static void ioport_write32r(void __iomem *addr, const void *s, unsigned long n) n 129 arch/parisc/lib/iomap.c outsl(ADDR2PORT(addr), s, n); n 247 arch/parisc/lib/iomap.c static void iomem_write8r(void __iomem *addr, const void *s, unsigned long n) n 249 arch/parisc/lib/iomap.c while (n--) { n 255 arch/parisc/lib/iomap.c static void iomem_write16r(void __iomem *addr, const void *s, unsigned long n) n 257 arch/parisc/lib/iomap.c while (n--) { n 263 arch/parisc/lib/iomap.c static void iomem_write32r(void __iomem *addr, const void *s, unsigned long n) n 265 arch/parisc/lib/iomap.c while (n--) { n 117 arch/powerpc/boot/addnote.c int fd, n, i; n 134 arch/powerpc/boot/addnote.c n = read(fd, buf, sizeof(buf)); n 135 arch/powerpc/boot/addnote.c if (n < 0) { n 148 arch/powerpc/boot/addnote.c if (n < E_HSIZE) n 156 arch/powerpc/boot/addnote.c if (ph + (np + 2) * ps + nnote + nnote2 > n) n 227 arch/powerpc/boot/addnote.c i = write(fd, buf, n); n 232 arch/powerpc/boot/addnote.c if (i < n) { n 60 arch/powerpc/boot/dcr.h #define EBC_BXCR(n) (n) n 79 arch/powerpc/boot/dcr.h #define EBC_BXAP(n) (0x10+(n)) n 353 arch/powerpc/boot/devtree.c int n; n 355 arch/powerpc/boot/devtree.c n = getprop(node, "virtual-reg", addr, nres * 4); n 356 arch/powerpc/boot/devtree.c if (n > 0) n 357 arch/powerpc/boot/devtree.c return n / 4; n 359 arch/powerpc/boot/devtree.c for (n = 0; 
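IM(i, n) in the parisc unaligned handler above decodes an n-bit immediate whose sign sits in the lowest bit of the field (PA-RISC low sign extension). The same computation as a function, with the all-ones fill written via an unsigned shift to sidestep the negative left shift the macro itself performs:

#include <stdio.h>

static long im(long i, int n)
{
	long magnitude = (i >> 1) & ((1L << (n - 1)) - 1);       /* bits n-1..1 of the field */
	long sign_fill = (i & 1) ? (long)(~0UL << (n - 1)) : 0;  /* low bit is the sign      */

	return magnitude | sign_fill;
}

int main(void)
{
	printf("im(0x06, 5) = %ld\n", im(0x06, 5));   /* sign clear ->   3 */
	printf("im(0x03, 5) = %ld\n", im(0x03, 5));   /* sign set   -> -15 */
	return 0;
}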
n < nres; n++) { n 360 arch/powerpc/boot/devtree.c if (!dt_xlate_reg(node, n, &xaddr, NULL)) n 363 arch/powerpc/boot/devtree.c addr[n] = (void *)xaddr; n 366 arch/powerpc/boot/devtree.c return n; n 200 arch/powerpc/boot/main.c int n; n 203 arch/powerpc/boot/main.c n = getprop(chosen, "linux,cmdline-timeout", &v, sizeof(v)); n 204 arch/powerpc/boot/main.c if (n == sizeof(v)) n 58 arch/powerpc/boot/ns16550.c int n; n 64 arch/powerpc/boot/ns16550.c n = getprop(devp, "reg-offset", ®_offset, sizeof(reg_offset)); n 65 arch/powerpc/boot/ns16550.c if (n == sizeof(reg_offset)) n 68 arch/powerpc/boot/ns16550.c n = getprop(devp, "reg-shift", ®_shift, sizeof(reg_shift)); n 69 arch/powerpc/boot/ns16550.c if (n != sizeof(reg_shift)) n 85 arch/powerpc/boot/opal.c int n = getprop(devp, "reg", &opal_con_id, sizeof(u32)); n 86 arch/powerpc/boot/opal.c if (n != sizeof(u32)) n 258 arch/powerpc/boot/ops.h int __ilog2_u32(u32 n) n 261 arch/powerpc/boot/ops.h asm ("cntlzw %0,%1" : "=r" (bit) : "r" (n)); n 32 arch/powerpc/boot/stdio.c # define do_div(n, base) ({ \ n 35 arch/powerpc/boot/stdio.c __rem = ((unsigned long long)(n)) % __base; \ n 36 arch/powerpc/boot/stdio.c (n) = ((unsigned long long)(n)) / __base; \ n 48 arch/powerpc/boot/stdio.c # define do_div(n,base) ({ \ n 51 arch/powerpc/boot/stdio.c (void)(((typeof((n)) *)0) == ((unsigned long long *)0)); \ n 52 arch/powerpc/boot/stdio.c if (((n) >> 32) == 0) { \ n 53 arch/powerpc/boot/stdio.c __rem = (unsigned int)(n) % __base; \ n 54 arch/powerpc/boot/stdio.c (n) = (unsigned int)(n) / __base; \ n 56 arch/powerpc/boot/stdio.c __rem = __div64_32(&(n), __base); \ n 346 arch/powerpc/boot/stdio.c int n; n 349 arch/powerpc/boot/stdio.c n = vsprintf(sprint_buf, fmt, args); n 352 arch/powerpc/boot/stdio.c console_ops.write(sprint_buf, n); n 353 arch/powerpc/boot/stdio.c return n; n 7 arch/powerpc/boot/string.h extern char *strncpy(char *dest, const char *src, size_t n); n 12 arch/powerpc/boot/string.h extern int strncmp(const char *s1, const char *s2, size_t n); n 16 arch/powerpc/boot/string.h extern void *memset(void *s, int c, size_t n); n 17 arch/powerpc/boot/string.h extern void *memmove(void *dest, const void *src, unsigned long n); n 18 arch/powerpc/boot/string.h extern void *memcpy(void *dest, const void *src, unsigned long n); n 19 arch/powerpc/boot/string.h extern void *memchr(const void *s, int c, size_t n); n 20 arch/powerpc/boot/string.h extern int memcmp(const void *s1, const void *s2, size_t n); n 62 arch/powerpc/boot/uartlite.c int n; n 65 arch/powerpc/boot/uartlite.c n = getprop(devp, "virtual-reg", ®_base, sizeof(reg_base)); n 66 arch/powerpc/boot/uartlite.c if (n != sizeof(reg_base)) { n 30 arch/powerpc/boot/virtex.c int n; n 35 arch/powerpc/boot/virtex.c n = getprop(devp, "reg-offset", ®_offset, sizeof(reg_offset)); n 36 arch/powerpc/boot/virtex.c if (n == sizeof(reg_offset)) n 39 arch/powerpc/boot/virtex.c n = getprop(devp, "reg-shift", ®_shift, sizeof(reg_shift)); n 40 arch/powerpc/boot/virtex.c if (n != sizeof(reg_shift)) n 43 arch/powerpc/boot/virtex.c n = getprop(devp, "current-speed", (void *)&spd, sizeof(spd)); n 44 arch/powerpc/boot/virtex.c if (n != sizeof(spd)) n 48 arch/powerpc/boot/virtex.c n = getprop(devp, "clock-frequency", (void *)&clk, sizeof(clk)); n 49 arch/powerpc/boot/virtex.c if (n != sizeof(clk)) n 95 arch/powerpc/include/asm/asm-prototypes.h ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp); n 195 arch/powerpc/include/asm/atomic.h #define atomic_cmpxchg(v, o, n) 
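do_div(n, base) in the boot stdio code above divides n in place while the expression itself yields the remainder, which is why callers can write rem = do_div(n, 10). Below is a sketch of that contract using a GCC/Clang statement expression, as the original macro does; the __-prefixed temporaries and the _sketch name are mine.

#include <stdio.h>

#define do_div_sketch(n, base) ({				\
	unsigned long long __n = (n);				\
	unsigned int __base = (base);				\
	unsigned int __rem = (unsigned int)(__n % __base);	\
	(n) = __n / __base;					\
	__rem;							\
})

int main(void)
{
	unsigned long long n = 1234567;
	unsigned int rem = do_div_sketch(n, 1000);

	printf("quotient=%llu remainder=%u\n", n, rem);   /* 1234 and 567 */
	return 0;
}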
(cmpxchg(&((v)->counter), (o), (n))) n 196 arch/powerpc/include/asm/atomic.h #define atomic_cmpxchg_relaxed(v, o, n) \ n 197 arch/powerpc/include/asm/atomic.h cmpxchg_relaxed(&((v)->counter), (o), (n)) n 198 arch/powerpc/include/asm/atomic.h #define atomic_cmpxchg_acquire(v, o, n) \ n 199 arch/powerpc/include/asm/atomic.h cmpxchg_acquire(&((v)->counter), (o), (n)) n 487 arch/powerpc/include/asm/atomic.h #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) n 488 arch/powerpc/include/asm/atomic.h #define atomic64_cmpxchg_relaxed(v, o, n) \ n 489 arch/powerpc/include/asm/atomic.h cmpxchg_relaxed(&((v)->counter), (o), (n)) n 490 arch/powerpc/include/asm/atomic.h #define atomic64_cmpxchg_acquire(v, o, n) \ n 491 arch/powerpc/include/asm/atomic.h cmpxchg_acquire(&((v)->counter), (o), (n)) n 481 arch/powerpc/include/asm/cmpxchg.h #define cmpxchg(ptr, o, n) \ n 484 arch/powerpc/include/asm/cmpxchg.h __typeof__(*(ptr)) _n_ = (n); \ n 490 arch/powerpc/include/asm/cmpxchg.h #define cmpxchg_local(ptr, o, n) \ n 493 arch/powerpc/include/asm/cmpxchg.h __typeof__(*(ptr)) _n_ = (n); \ n 498 arch/powerpc/include/asm/cmpxchg.h #define cmpxchg_relaxed(ptr, o, n) \ n 501 arch/powerpc/include/asm/cmpxchg.h __typeof__(*(ptr)) _n_ = (n); \ n 507 arch/powerpc/include/asm/cmpxchg.h #define cmpxchg_acquire(ptr, o, n) \ n 510 arch/powerpc/include/asm/cmpxchg.h __typeof__(*(ptr)) _n_ = (n); \ n 516 arch/powerpc/include/asm/cmpxchg.h #define cmpxchg64(ptr, o, n) \ n 519 arch/powerpc/include/asm/cmpxchg.h cmpxchg((ptr), (o), (n)); \ n 521 arch/powerpc/include/asm/cmpxchg.h #define cmpxchg64_local(ptr, o, n) \ n 524 arch/powerpc/include/asm/cmpxchg.h cmpxchg_local((ptr), (o), (n)); \ n 526 arch/powerpc/include/asm/cmpxchg.h #define cmpxchg64_relaxed(ptr, o, n) \ n 529 arch/powerpc/include/asm/cmpxchg.h cmpxchg_relaxed((ptr), (o), (n)); \ n 531 arch/powerpc/include/asm/cmpxchg.h #define cmpxchg64_acquire(ptr, o, n) \ n 534 arch/powerpc/include/asm/cmpxchg.h cmpxchg_acquire((ptr), (o), (n)); \ n 538 arch/powerpc/include/asm/cmpxchg.h #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) n 506 arch/powerpc/include/asm/cpm1.h #define TM_CMD_NUM(n) (((n)&0xF)<<16) /* Timer Number */ n 30 arch/powerpc/include/asm/delay.h #define mdelay(n) udelay((n) * 1000) n 441 arch/powerpc/include/asm/eeh.h unsigned long n) n 443 arch/powerpc/include/asm/eeh.h _memcpy_fromio(dest, src, n); n 448 arch/powerpc/include/asm/eeh.h if (n >= 4 && EEH_POSSIBLE_ERROR(*((u32 *)(dest + n - 4)), u32)) n 56 arch/powerpc/include/asm/io-defs.h DEF_PCI_AC_NORET(memset_io, (PCI_IO_ADDR a, int c, unsigned long n), n 57 arch/powerpc/include/asm/io-defs.h (a, c, n), mem, a) n 58 arch/powerpc/include/asm/io-defs.h DEF_PCI_AC_NORET(memcpy_fromio, (void *d, const PCI_IO_ADDR s, unsigned long n), n 59 arch/powerpc/include/asm/io-defs.h (d, s, n), mem, s) n 60 arch/powerpc/include/asm/io-defs.h DEF_PCI_AC_NORET(memcpy_toio, (PCI_IO_ADDR d, const void *s, unsigned long n), n 61 arch/powerpc/include/asm/io-defs.h (d, s, n), mem, d) n 221 arch/powerpc/include/asm/io.h extern void _memset_io(volatile void __iomem *addr, int c, unsigned long n); n 223 arch/powerpc/include/asm/io.h unsigned long n); n 225 arch/powerpc/include/asm/io.h unsigned long n); n 530 arch/powerpc/include/asm/io.h #define __do_readsb(a, b, n) eeh_readsb(PCI_FIX_ADDR(a), (b), (n)) n 531 arch/powerpc/include/asm/io.h #define __do_readsw(a, b, n) eeh_readsw(PCI_FIX_ADDR(a), (b), (n)) n 532 arch/powerpc/include/asm/io.h #define __do_readsl(a, b, n) 
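The cmpxchg() family above shares one return convention: the macro yields the value that was actually found in *ptr, so comparing the result with the expected old value tells the caller whether the swap happened. A C11-atomics sketch of that convention follows; cmpxchg_sketch() is not the kernel implementation, only an illustration of the contract.

#include <stdio.h>
#include <stdatomic.h>

static int cmpxchg_sketch(_Atomic int *ptr, int oldval, int newval)
{
	int expected = oldval;

	/* On failure, atomic_compare_exchange_strong() writes the value it
	 * found into 'expected', which is exactly what we want to return. */
	atomic_compare_exchange_strong(ptr, &expected, newval);
	return expected;
}

int main(void)
{
	_Atomic int v = 5;

	printf("%d\n", cmpxchg_sketch(&v, 5, 7));   /* 5: swap succeeded */
	printf("%d\n", atomic_load(&v));            /* 7                 */
	printf("%d\n", cmpxchg_sketch(&v, 5, 9));   /* 7: no swap        */
	printf("%d\n", atomic_load(&v));            /* still 7           */
	return 0;
}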
eeh_readsl(PCI_FIX_ADDR(a), (b), (n)) n 534 arch/powerpc/include/asm/io.h #define __do_readsb(a, b, n) _insb(PCI_FIX_ADDR(a), (b), (n)) n 535 arch/powerpc/include/asm/io.h #define __do_readsw(a, b, n) _insw(PCI_FIX_ADDR(a), (b), (n)) n 536 arch/powerpc/include/asm/io.h #define __do_readsl(a, b, n) _insl(PCI_FIX_ADDR(a), (b), (n)) n 538 arch/powerpc/include/asm/io.h #define __do_writesb(a, b, n) _outsb(PCI_FIX_ADDR(a),(b),(n)) n 539 arch/powerpc/include/asm/io.h #define __do_writesw(a, b, n) _outsw(PCI_FIX_ADDR(a),(b),(n)) n 540 arch/powerpc/include/asm/io.h #define __do_writesl(a, b, n) _outsl(PCI_FIX_ADDR(a),(b),(n)) n 542 arch/powerpc/include/asm/io.h #define __do_insb(p, b, n) readsb((PCI_IO_ADDR)_IO_BASE+(p), (b), (n)) n 543 arch/powerpc/include/asm/io.h #define __do_insw(p, b, n) readsw((PCI_IO_ADDR)_IO_BASE+(p), (b), (n)) n 544 arch/powerpc/include/asm/io.h #define __do_insl(p, b, n) readsl((PCI_IO_ADDR)_IO_BASE+(p), (b), (n)) n 545 arch/powerpc/include/asm/io.h #define __do_outsb(p, b, n) writesb((PCI_IO_ADDR)_IO_BASE+(p),(b),(n)) n 546 arch/powerpc/include/asm/io.h #define __do_outsw(p, b, n) writesw((PCI_IO_ADDR)_IO_BASE+(p),(b),(n)) n 547 arch/powerpc/include/asm/io.h #define __do_outsl(p, b, n) writesl((PCI_IO_ADDR)_IO_BASE+(p),(b),(n)) n 549 arch/powerpc/include/asm/io.h #define __do_memset_io(addr, c, n) \ n 550 arch/powerpc/include/asm/io.h _memset_io(PCI_FIX_ADDR(addr), c, n) n 551 arch/powerpc/include/asm/io.h #define __do_memcpy_toio(dst, src, n) \ n 552 arch/powerpc/include/asm/io.h _memcpy_toio(PCI_FIX_ADDR(dst), src, n) n 555 arch/powerpc/include/asm/io.h #define __do_memcpy_fromio(dst, src, n) \ n 556 arch/powerpc/include/asm/io.h eeh_memcpy_fromio(dst, PCI_FIX_ADDR(src), n) n 558 arch/powerpc/include/asm/io.h #define __do_memcpy_fromio(dst, src, n) \ n 559 arch/powerpc/include/asm/io.h _memcpy_fromio(dst,PCI_FIX_ADDR(src),n) n 182 arch/powerpc/include/asm/kvm_book3s.h unsigned long n); n 184 arch/powerpc/include/asm/kvm_book3s.h void *to, unsigned long n); n 186 arch/powerpc/include/asm/kvm_book3s.h void *from, unsigned long n); n 79 arch/powerpc/include/asm/local.h static __inline__ long local_cmpxchg(local_t *l, long o, long n) n 87 arch/powerpc/include/asm/local.h l->v = n; n 93 arch/powerpc/include/asm/local.h static __inline__ long local_xchg(local_t *l, long n) n 100 arch/powerpc/include/asm/local.h l->v = n; n 85 arch/powerpc/include/asm/perf_event_server.h extern unsigned long int read_bhrb(int n); n 570 arch/powerpc/include/asm/ppc-opcode.h #define PPC_MFBHRBE(r, n) stringify_in_c(.long PPC_INST_BHRBE | \ n 572 arch/powerpc/include/asm/ppc-opcode.h (((n) & 0x3ff) << 11)) n 77 arch/powerpc/include/asm/ppc_asm.h #define SAVE_GPR(n, base) std n,GPR0+8*(n)(base) n 78 arch/powerpc/include/asm/ppc_asm.h #define REST_GPR(n, base) ld n,GPR0+8*(n)(base) n 82 arch/powerpc/include/asm/ppc_asm.h #define SAVE_GPR(n, base) stw n,GPR0+4*(n)(base) n 83 arch/powerpc/include/asm/ppc_asm.h #define REST_GPR(n, base) lwz n,GPR0+4*(n)(base) n 88 arch/powerpc/include/asm/ppc_asm.h #define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base) n 89 arch/powerpc/include/asm/ppc_asm.h #define SAVE_4GPRS(n, base) SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base) n 90 arch/powerpc/include/asm/ppc_asm.h #define SAVE_8GPRS(n, base) SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base) n 91 arch/powerpc/include/asm/ppc_asm.h #define SAVE_10GPRS(n, base) SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base) n 92 arch/powerpc/include/asm/ppc_asm.h #define REST_2GPRS(n, base) REST_GPR(n, base); REST_GPR(n+1, base) n 
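The SAVE_GPR/SAVE_2GPRS/SAVE_4GPRS/... entries around here unroll register save sequences by doubling: each macro emits the next smaller one twice, bumping the start index for the second copy. The same pattern in plain C, with an array store standing in for the real store instruction (the names and the pretend register file are mine):

#include <stdio.h>

#define NREGS 8
static long regs[NREGS];          /* pretend register file */
static long save_area[NREGS];     /* pretend stack frame   */

/* One "instruction": save register n into the frame. */
#define SAVE(n)        (save_area[n] = regs[n])

/* Doubling chain: each level emits the previous one twice. */
#define SAVE_2(n)      SAVE(n); SAVE((n) + 1)
#define SAVE_4(n)      SAVE_2(n); SAVE_2((n) + 2)
#define SAVE_8(n)      SAVE_4(n); SAVE_4((n) + 4)

int main(void)
{
	for (int i = 0; i < NREGS; i++)
		regs[i] = 100 + i;

	SAVE_8(0);        /* expands to eight straight-line assignments */

	for (int i = 0; i < NREGS; i++)
		printf("save_area[%d] = %ld\n", i, save_area[i]);
	return 0;
}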
93 arch/powerpc/include/asm/ppc_asm.h #define REST_4GPRS(n, base) REST_2GPRS(n, base); REST_2GPRS(n+2, base) n 94 arch/powerpc/include/asm/ppc_asm.h #define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base) n 95 arch/powerpc/include/asm/ppc_asm.h #define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base) n 97 arch/powerpc/include/asm/ppc_asm.h #define SAVE_FPR(n, base) stfd n,8*TS_FPRWIDTH*(n)(base) n 98 arch/powerpc/include/asm/ppc_asm.h #define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base) n 99 arch/powerpc/include/asm/ppc_asm.h #define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base) n 100 arch/powerpc/include/asm/ppc_asm.h #define SAVE_8FPRS(n, base) SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base) n 101 arch/powerpc/include/asm/ppc_asm.h #define SAVE_16FPRS(n, base) SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base) n 102 arch/powerpc/include/asm/ppc_asm.h #define SAVE_32FPRS(n, base) SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base) n 103 arch/powerpc/include/asm/ppc_asm.h #define REST_FPR(n, base) lfd n,8*TS_FPRWIDTH*(n)(base) n 104 arch/powerpc/include/asm/ppc_asm.h #define REST_2FPRS(n, base) REST_FPR(n, base); REST_FPR(n+1, base) n 105 arch/powerpc/include/asm/ppc_asm.h #define REST_4FPRS(n, base) REST_2FPRS(n, base); REST_2FPRS(n+2, base) n 106 arch/powerpc/include/asm/ppc_asm.h #define REST_8FPRS(n, base) REST_4FPRS(n, base); REST_4FPRS(n+4, base) n 107 arch/powerpc/include/asm/ppc_asm.h #define REST_16FPRS(n, base) REST_8FPRS(n, base); REST_8FPRS(n+8, base) n 108 arch/powerpc/include/asm/ppc_asm.h #define REST_32FPRS(n, base) REST_16FPRS(n, base); REST_16FPRS(n+16, base) n 110 arch/powerpc/include/asm/ppc_asm.h #define SAVE_VR(n,b,base) li b,16*(n); stvx n,base,b n 111 arch/powerpc/include/asm/ppc_asm.h #define SAVE_2VRS(n,b,base) SAVE_VR(n,b,base); SAVE_VR(n+1,b,base) n 112 arch/powerpc/include/asm/ppc_asm.h #define SAVE_4VRS(n,b,base) SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base) n 113 arch/powerpc/include/asm/ppc_asm.h #define SAVE_8VRS(n,b,base) SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base) n 114 arch/powerpc/include/asm/ppc_asm.h #define SAVE_16VRS(n,b,base) SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base) n 115 arch/powerpc/include/asm/ppc_asm.h #define SAVE_32VRS(n,b,base) SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base) n 116 arch/powerpc/include/asm/ppc_asm.h #define REST_VR(n,b,base) li b,16*(n); lvx n,base,b n 117 arch/powerpc/include/asm/ppc_asm.h #define REST_2VRS(n,b,base) REST_VR(n,b,base); REST_VR(n+1,b,base) n 118 arch/powerpc/include/asm/ppc_asm.h #define REST_4VRS(n,b,base) REST_2VRS(n,b,base); REST_2VRS(n+2,b,base) n 119 arch/powerpc/include/asm/ppc_asm.h #define REST_8VRS(n,b,base) REST_4VRS(n,b,base); REST_4VRS(n+4,b,base) n 120 arch/powerpc/include/asm/ppc_asm.h #define REST_16VRS(n,b,base) REST_8VRS(n,b,base); REST_8VRS(n+8,b,base) n 121 arch/powerpc/include/asm/ppc_asm.h #define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base) n 124 arch/powerpc/include/asm/ppc_asm.h #define STXVD2X_ROT(n,b,base) STXVD2X(n,b,base) n 125 arch/powerpc/include/asm/ppc_asm.h #define LXVD2X_ROT(n,b,base) LXVD2X(n,b,base) n 127 arch/powerpc/include/asm/ppc_asm.h #define STXVD2X_ROT(n,b,base) XXSWAPD(n,n); \ n 128 arch/powerpc/include/asm/ppc_asm.h STXVD2X(n,b,base); \ n 129 arch/powerpc/include/asm/ppc_asm.h XXSWAPD(n,n) n 131 arch/powerpc/include/asm/ppc_asm.h #define LXVD2X_ROT(n,b,base) LXVD2X(n,b,base); \ n 132 arch/powerpc/include/asm/ppc_asm.h XXSWAPD(n,n) n 135 arch/powerpc/include/asm/ppc_asm.h #define SAVE_VSR(n,b,base) li 
b,16*(n); STXVD2X_ROT(n,R##base,R##b) n 136 arch/powerpc/include/asm/ppc_asm.h #define SAVE_2VSRS(n,b,base) SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base) n 137 arch/powerpc/include/asm/ppc_asm.h #define SAVE_4VSRS(n,b,base) SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base) n 138 arch/powerpc/include/asm/ppc_asm.h #define SAVE_8VSRS(n,b,base) SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base) n 139 arch/powerpc/include/asm/ppc_asm.h #define SAVE_16VSRS(n,b,base) SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base) n 140 arch/powerpc/include/asm/ppc_asm.h #define SAVE_32VSRS(n,b,base) SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base) n 141 arch/powerpc/include/asm/ppc_asm.h #define REST_VSR(n,b,base) li b,16*(n); LXVD2X_ROT(n,R##base,R##b) n 142 arch/powerpc/include/asm/ppc_asm.h #define REST_2VSRS(n,b,base) REST_VSR(n,b,base); REST_VSR(n+1,b,base) n 143 arch/powerpc/include/asm/ppc_asm.h #define REST_4VSRS(n,b,base) REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base) n 144 arch/powerpc/include/asm/ppc_asm.h #define REST_8VSRS(n,b,base) REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base) n 145 arch/powerpc/include/asm/ppc_asm.h #define REST_16VSRS(n,b,base) REST_8VSRS(n,b,base); REST_8VSRS(n+8,b,base) n 146 arch/powerpc/include/asm/ppc_asm.h #define REST_32VSRS(n,b,base) REST_16VSRS(n,b,base); REST_16VSRS(n+16,b,base) n 152 arch/powerpc/include/asm/ppc_asm.h #define SAVE_EVR(n,s,b,o) evmergehi s,s,n; stw s,o+4*(n)(b) n 153 arch/powerpc/include/asm/ppc_asm.h #define SAVE_2EVRS(n,s,b,o) SAVE_EVR(n,s,b,o); SAVE_EVR(n+1,s,b,o) n 154 arch/powerpc/include/asm/ppc_asm.h #define SAVE_4EVRS(n,s,b,o) SAVE_2EVRS(n,s,b,o); SAVE_2EVRS(n+2,s,b,o) n 155 arch/powerpc/include/asm/ppc_asm.h #define SAVE_8EVRS(n,s,b,o) SAVE_4EVRS(n,s,b,o); SAVE_4EVRS(n+4,s,b,o) n 156 arch/powerpc/include/asm/ppc_asm.h #define SAVE_16EVRS(n,s,b,o) SAVE_8EVRS(n,s,b,o); SAVE_8EVRS(n+8,s,b,o) n 157 arch/powerpc/include/asm/ppc_asm.h #define SAVE_32EVRS(n,s,b,o) SAVE_16EVRS(n,s,b,o); SAVE_16EVRS(n+16,s,b,o) n 158 arch/powerpc/include/asm/ppc_asm.h #define REST_EVR(n,s,b,o) lwz s,o+4*(n)(b); evmergelo n,s,n n 159 arch/powerpc/include/asm/ppc_asm.h #define REST_2EVRS(n,s,b,o) REST_EVR(n,s,b,o); REST_EVR(n+1,s,b,o) n 160 arch/powerpc/include/asm/ppc_asm.h #define REST_4EVRS(n,s,b,o) REST_2EVRS(n,s,b,o); REST_2EVRS(n+2,s,b,o) n 161 arch/powerpc/include/asm/ppc_asm.h #define REST_8EVRS(n,s,b,o) REST_4EVRS(n,s,b,o); REST_4EVRS(n+4,s,b,o) n 162 arch/powerpc/include/asm/ppc_asm.h #define REST_16EVRS(n,s,b,o) REST_8EVRS(n,s,b,o); REST_8EVRS(n+8,s,b,o) n 163 arch/powerpc/include/asm/ppc_asm.h #define REST_32EVRS(n,s,b,o) REST_16EVRS(n,s,b,o); REST_16EVRS(n+16,s,b,o) n 179 arch/powerpc/include/asm/ppc_asm.h #define __VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE)) n 180 arch/powerpc/include/asm/ppc_asm.h #define VCPU_GPR(n) __VCPU_GPR(__REG_##n) n 243 arch/powerpc/include/asm/ppc_asm.h #define _ENTRY(n) \ n 244 arch/powerpc/include/asm/ppc_asm.h .globl n; \ n 245 arch/powerpc/include/asm/ppc_asm.h n: n 247 arch/powerpc/include/asm/ppc_asm.h #define _GLOBAL(n) \ n 248 arch/powerpc/include/asm/ppc_asm.h .stabs __stringify(n:F-1),N_FUN,0,0,n;\ n 249 arch/powerpc/include/asm/ppc_asm.h .globl n; \ n 250 arch/powerpc/include/asm/ppc_asm.h n: n 266 arch/powerpc/include/asm/ptrace.h unsigned int n) n 269 arch/powerpc/include/asm/ptrace.h addr += n; n 285 arch/powerpc/include/asm/spu.h int spu_switch_event_register(struct notifier_block * n); n 286 arch/powerpc/include/asm/spu.h int spu_switch_event_unregister(struct notifier_block * n); n 93 arch/powerpc/include/asm/sstep.h #define SIZE(n) ((n) 
<< 12) n 34 arch/powerpc/include/asm/string.h void *__memcpy(void *to, const void *from, __kernel_size_t n); n 35 arch/powerpc/include/asm/string.h void *__memmove(void *to, const void *from, __kernel_size_t n); n 44 arch/powerpc/include/asm/string.h #define memset(s, c, n) __memset(s, c, n) n 63 arch/powerpc/include/asm/string.h static inline void *memset16(uint16_t *p, uint16_t v, __kernel_size_t n) n 65 arch/powerpc/include/asm/string.h return __memset16(p, v, n * 2); n 68 arch/powerpc/include/asm/string.h static inline void *memset32(uint32_t *p, uint32_t v, __kernel_size_t n) n 70 arch/powerpc/include/asm/string.h return __memset32(p, v, n * 4); n 73 arch/powerpc/include/asm/string.h static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n) n 75 arch/powerpc/include/asm/string.h return __memset64(p, v, n * 8); n 78 arch/powerpc/include/asm/syscall.h unsigned int n = 6; n 84 arch/powerpc/include/asm/syscall.h while (n--) { n 85 arch/powerpc/include/asm/syscall.h if (n == 0) n 88 arch/powerpc/include/asm/syscall.h val = regs->gpr[3 + n]; n 90 arch/powerpc/include/asm/syscall.h args[n] = val & mask; n 311 arch/powerpc/include/asm/uaccess.h raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) n 316 arch/powerpc/include/asm/uaccess.h allow_read_write_user(to, from, n); n 317 arch/powerpc/include/asm/uaccess.h ret = __copy_tofrom_user(to, from, n); n 318 arch/powerpc/include/asm/uaccess.h prevent_read_write_user(to, from, n); n 324 arch/powerpc/include/asm/uaccess.h const void __user *from, unsigned long n) n 327 arch/powerpc/include/asm/uaccess.h if (__builtin_constant_p(n) && (n <= 8)) { n 330 arch/powerpc/include/asm/uaccess.h switch (n) { n 353 arch/powerpc/include/asm/uaccess.h allow_read_from_user(from, n); n 354 arch/powerpc/include/asm/uaccess.h ret = __copy_tofrom_user((__force void __user *)to, from, n); n 355 arch/powerpc/include/asm/uaccess.h prevent_read_from_user(from, n); n 360 arch/powerpc/include/asm/uaccess.h const void *from, unsigned long n) n 363 arch/powerpc/include/asm/uaccess.h if (__builtin_constant_p(n) && (n <= 8)) { n 366 arch/powerpc/include/asm/uaccess.h switch (n) { n 384 arch/powerpc/include/asm/uaccess.h allow_write_to_user(to, n); n 385 arch/powerpc/include/asm/uaccess.h ret = __copy_tofrom_user(to, (__force const void __user *)from, n); n 386 arch/powerpc/include/asm/uaccess.h prevent_write_to_user(to, n); n 391 arch/powerpc/include/asm/uaccess.h copy_to_user_mcsafe(void __user *to, const void *from, unsigned long n) n 393 arch/powerpc/include/asm/uaccess.h if (likely(check_copy_size(from, n, true))) { n 394 arch/powerpc/include/asm/uaccess.h if (access_ok(to, n)) { n 395 arch/powerpc/include/asm/uaccess.h allow_write_to_user(to, n); n 396 arch/powerpc/include/asm/uaccess.h n = memcpy_mcsafe((void *)to, from, n); n 397 arch/powerpc/include/asm/uaccess.h prevent_write_to_user(to, n); n 401 arch/powerpc/include/asm/uaccess.h return n; n 424 arch/powerpc/include/asm/uaccess.h extern __must_check long strnlen_user(const char __user *str, long n); n 19 arch/powerpc/include/asm/udbg.h extern int udbg_write(const char *s, int n); n 38 arch/powerpc/include/asm/vga.h static inline void scr_memsetw(u16 *s, u16 v, unsigned int n) n 40 arch/powerpc/include/asm/vga.h memset16(s, cpu_to_le16(v), n / 2); n 531 arch/powerpc/include/uapi/asm/kvm.h #define KVM_REG_PPC_FPR(n) (KVM_REG_PPC_FPR0 + (n)) n 536 arch/powerpc/include/uapi/asm/kvm.h #define KVM_REG_PPC_VR(n) (KVM_REG_PPC_VR0 + (n)) n 542 arch/powerpc/include/uapi/asm/kvm.h 
#define KVM_REG_PPC_VSR(n) (KVM_REG_PPC_VSR0 + (n)) n 649 arch/powerpc/include/uapi/asm/kvm.h #define KVM_REG_PPC_TM_GPR(n) (KVM_REG_PPC_TM_GPR0 + (n)) n 653 arch/powerpc/include/uapi/asm/kvm.h #define KVM_REG_PPC_TM_VSR(n) (KVM_REG_PPC_TM_VSR0 + (n)) n 268 arch/powerpc/include/uapi/asm/ptrace.h #define PPC_BREAKPOINT_CONDITION_BE(n) \ n 269 arch/powerpc/include/uapi/asm/ptrace.h (1<<((n)+PPC_BREAKPOINT_CONDITION_BE_SHIFT)) n 173 arch/powerpc/kernel/eeh.c int n = 0, l = 0; n 181 arch/powerpc/kernel/eeh.c n += scnprintf(buf+n, len-n, "%04x:%02x:%02x.%01x\n", n 189 arch/powerpc/kernel/eeh.c n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg); n 193 arch/powerpc/kernel/eeh.c n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg); n 199 arch/powerpc/kernel/eeh.c n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg); n 203 arch/powerpc/kernel/eeh.c n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg); n 211 arch/powerpc/kernel/eeh.c n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg); n 215 arch/powerpc/kernel/eeh.c n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg); n 222 arch/powerpc/kernel/eeh.c n += scnprintf(buf+n, len-n, "pci-e cap10:\n"); n 227 arch/powerpc/kernel/eeh.c n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg); n 249 arch/powerpc/kernel/eeh.c n += scnprintf(buf+n, len-n, "pci-e AER:\n"); n 254 arch/powerpc/kernel/eeh.c n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg); n 272 arch/powerpc/kernel/eeh.c return n; n 1150 arch/powerpc/kernel/eeh.c struct pci_dn *n; n 1155 arch/powerpc/kernel/eeh.c list_for_each_entry(n, &pdn->child_list, list) n 1156 arch/powerpc/kernel/eeh.c eeh_add_device_tree_early(n); n 56 arch/powerpc/kernel/eeh_cache.c struct rb_node *n = pci_io_addr_cache_root.rb_root.rb_node; n 58 arch/powerpc/kernel/eeh_cache.c while (n) { n 60 arch/powerpc/kernel/eeh_cache.c piar = rb_entry(n, struct pci_io_addr_range, rb_node); n 63 arch/powerpc/kernel/eeh_cache.c n = n->rb_left; n 65 arch/powerpc/kernel/eeh_cache.c n = n->rb_right; n 100 arch/powerpc/kernel/eeh_cache.c struct rb_node *n; n 103 arch/powerpc/kernel/eeh_cache.c n = rb_first(&cache->rb_root); n 104 arch/powerpc/kernel/eeh_cache.c while (n) { n 106 arch/powerpc/kernel/eeh_cache.c piar = rb_entry(n, struct pci_io_addr_range, rb_node); n 111 arch/powerpc/kernel/eeh_cache.c n = rb_next(n); n 223 arch/powerpc/kernel/eeh_cache.c struct rb_node *n; n 226 arch/powerpc/kernel/eeh_cache.c n = rb_first(&pci_io_addr_cache_root.rb_root); n 227 arch/powerpc/kernel/eeh_cache.c while (n) { n 229 arch/powerpc/kernel/eeh_cache.c piar = rb_entry(n, struct pci_io_addr_range, rb_node); n 234 arch/powerpc/kernel/eeh_cache.c rb_erase(n, &pci_io_addr_cache_root.rb_root); n 238 arch/powerpc/kernel/eeh_cache.c n = rb_next(n); n 274 arch/powerpc/kernel/eeh_cache.c struct rb_node *n; n 277 arch/powerpc/kernel/eeh_cache.c for (n = rb_first(&pci_io_addr_cache_root.rb_root); n; n = rb_next(n)) { n 278 arch/powerpc/kernel/eeh_cache.c piar = rb_entry(n, struct pci_io_addr_range, rb_node); n 156 arch/powerpc/kernel/head_32.h #define START_EXCEPTION(n, label) \ n 157 arch/powerpc/kernel/head_32.h . = n; \ n 158 arch/powerpc/kernel/head_32.h DO_KVM n; \ n 162 arch/powerpc/kernel/head_32.h #define START_EXCEPTION(n, label) \ n 163 arch/powerpc/kernel/head_32.h . 
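The eeh.c lines above accumulate a diagnostic dump with repeated n += scnprintf(buf + n, len - n, ...), which stays in bounds because scnprintf() returns the number of characters actually stored, never more than the space offered. A userspace sketch with a small scnprintf-like wrapper over vsnprintf; scn() and the sample register values are invented for the example.

#include <stdarg.h>
#include <stdio.h>

/* Like snprintf(), but return the number of characters actually stored
 * (excluding the NUL), so "pos += scn(buf + pos, size - pos, ...)" can
 * never run past the end of the buffer. */
static int scn(char *buf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int i;

	if (size == 0)
		return 0;
	va_start(ap, fmt);
	i = vsnprintf(buf, size, fmt, ap);
	va_end(ap);
	if (i < 0)
		return 0;
	return (size_t)i < size ? i : (int)(size - 1);
}

int main(void)
{
	char buf[64];
	int n = 0;

	/* Accumulation pattern: each call appends after the previous one
	 * and shrinks the space left. */
	n += scn(buf + n, sizeof(buf) - n, "dev/vend:%08x\n", 0x12348086);
	n += scn(buf + n, sizeof(buf) - n, "cmd/stat:%x\n", 0x0406);
	n += scn(buf + n, sizeof(buf) - n, "sec stat:%x\n", 0x2000);

	printf("%d bytes:\n%s", n, buf);
	return 0;
}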
= n; \ n 168 arch/powerpc/kernel/head_32.h #define EXCEPTION(n, label, hdlr, xfer) \ n 169 arch/powerpc/kernel/head_32.h START_EXCEPTION(n, label) \ n 172 arch/powerpc/kernel/head_32.h xfer(n, hdlr) n 182 arch/powerpc/kernel/head_32.h #define EXC_XFER_STD(n, hdlr) \ n 183 arch/powerpc/kernel/head_32.h EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, transfer_to_handler_full, \ n 186 arch/powerpc/kernel/head_32.h #define EXC_XFER_LITE(n, hdlr) \ n 187 arch/powerpc/kernel/head_32.h EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, transfer_to_handler, \ n 306 arch/powerpc/kernel/head_booke.h #define EXCEPTION(n, intno, label, hdlr, xfer) \ n 310 arch/powerpc/kernel/head_booke.h xfer(n, hdlr) n 312 arch/powerpc/kernel/head_booke.h #define CRITICAL_EXCEPTION(n, intno, label, hdlr) \ n 316 arch/powerpc/kernel/head_booke.h EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ n 319 arch/powerpc/kernel/head_booke.h #define MCHECK_EXCEPTION(n, label, hdlr) \ n 325 arch/powerpc/kernel/head_booke.h EXC_XFER_TEMPLATE(hdlr, n+4, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ n 337 arch/powerpc/kernel/head_booke.h #define EXC_XFER_STD(n, hdlr) \ n 338 arch/powerpc/kernel/head_booke.h EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, transfer_to_handler_full, \ n 341 arch/powerpc/kernel/head_booke.h #define EXC_XFER_LITE(n, hdlr) \ n 342 arch/powerpc/kernel/head_booke.h EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, transfer_to_handler, \ n 123 arch/powerpc/kernel/io.c _memset_io(volatile void __iomem *addr, int c, unsigned long n) n 131 arch/powerpc/kernel/io.c while(n && !IO_CHECK_ALIGN(p, 4)) { n 134 arch/powerpc/kernel/io.c n--; n 136 arch/powerpc/kernel/io.c while(n >= 4) { n 139 arch/powerpc/kernel/io.c n -= 4; n 141 arch/powerpc/kernel/io.c while(n) { n 144 arch/powerpc/kernel/io.c n--; n 151 arch/powerpc/kernel/io.c unsigned long n) n 156 arch/powerpc/kernel/io.c while(n && (!IO_CHECK_ALIGN(vsrc, 4) || !IO_CHECK_ALIGN(dest, 4))) { n 161 arch/powerpc/kernel/io.c n--; n 163 arch/powerpc/kernel/io.c while(n >= 4) { n 168 arch/powerpc/kernel/io.c n -= 4; n 170 arch/powerpc/kernel/io.c while(n) { n 175 arch/powerpc/kernel/io.c n--; n 181 arch/powerpc/kernel/io.c void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n) n 186 arch/powerpc/kernel/io.c while(n && (!IO_CHECK_ALIGN(vdest, 4) || !IO_CHECK_ALIGN(src, 4))) { n 190 arch/powerpc/kernel/io.c n--; n 192 arch/powerpc/kernel/io.c while(n >= 4) { n 196 arch/powerpc/kernel/io.c n-=4; n 198 arch/powerpc/kernel/io.c while(n) { n 202 arch/powerpc/kernel/io.c n--; n 170 arch/powerpc/kernel/iommu.c unsigned long n, end, start; n 246 arch/powerpc/kernel/iommu.c n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset, n 248 arch/powerpc/kernel/iommu.c if (n == -1) { n 272 arch/powerpc/kernel/iommu.c end = n + npages; n 290 arch/powerpc/kernel/iommu.c return n; n 330 arch/powerpc/kernel/mce.c int n = 0; n 519 arch/powerpc/kernel/mce.c n = sprintf(dar_str, "DAR: %016llx ", ea); n 521 arch/powerpc/kernel/mce.c sprintf(dar_str + n, "paddr: %016llx ", pa); n 1258 arch/powerpc/kernel/process.c int n = NR_INSN_TO_PRINT; n 1276 arch/powerpc/kernel/process.c while (n) { n 1281 arch/powerpc/kernel/process.c for (i = 0; i < 8 && n; i++, n--, pc += sizeof(int)) { n 479 arch/powerpc/kernel/prom_init.c int n = 0; n 500 arch/powerpc/kernel/prom_init.c ++n; n 510 arch/powerpc/kernel/prom_init.c switch (n) { n 526 arch/powerpc/kernel/prom_init.c switch (n) { n 542 arch/powerpc/kernel/prom_init.c switch (n) { n 843 arch/powerpc/kernel/prom_init.c #define 
NUM_VECTORS(n) ((n) - 1) n 849 arch/powerpc/kernel/prom_init.c #define VECTOR_LENGTH(n) (1 + (n) - 2) n 536 arch/powerpc/kernel/ptrace.c return target->thread.used_vr ? regset->n : 0; n 653 arch/powerpc/kernel/ptrace.c return target->thread.used_vsr ? regset->n : 0; n 742 arch/powerpc/kernel/ptrace.c return target->thread.used_spe ? regset->n : 0; n 810 arch/powerpc/kernel/ptrace.c return regset->n; n 975 arch/powerpc/kernel/ptrace.c return regset->n; n 1094 arch/powerpc/kernel/ptrace.c return regset->n; n 1240 arch/powerpc/kernel/ptrace.c return target->thread.used_vsr ? regset->n : 0; n 1357 arch/powerpc/kernel/ptrace.c return regset->n; n 1479 arch/powerpc/kernel/ptrace.c return regset->n; n 1527 arch/powerpc/kernel/ptrace.c return regset->n; n 1576 arch/powerpc/kernel/ptrace.c return regset->n; n 1679 arch/powerpc/kernel/ptrace.c return regset->n; n 1741 arch/powerpc/kernel/ptrace.c return regset->n; n 1813 arch/powerpc/kernel/ptrace.c return regset->n; n 1901 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PRSTATUS, .n = ELF_NGREG, n 1906 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PRFPREG, .n = ELF_NFPREG, n 1912 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_VMX, .n = 34, n 1919 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_VSX, .n = 32, n 1926 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_SPE, .n = 35, n 1933 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG, n 1938 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG, n 1943 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX, n 1948 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX, n 1953 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG, n 1958 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_TM_CTAR, .n = 1, n 1963 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_TM_CPPR, .n = 1, n 1968 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_TM_CDSCR, .n = 1, n 1975 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_PPR, .n = 1, n 1980 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_DSCR, .n = 1, n 1987 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_TAR, .n = 1, n 1992 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_EBB, .n = ELF_NEBB, n 1997 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_PMU, .n = ELF_NPMU, n 2004 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_PKEY, .n = ELF_NPKEY, n 2013 arch/powerpc/kernel/ptrace.c .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets) n 2194 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PRSTATUS, .n = ELF_NGREG, n 2199 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PRFPREG, .n = ELF_NFPREG, n 2205 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_VMX, .n = 34, n 2212 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_SPE, .n = 35, n 2219 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG, n 2225 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG, n 2230 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX, n 2235 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX, n 2240 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG, n 2245 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_TM_CTAR, .n = 1, n 2250 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_TM_CPPR, .n = 1, n 2255 arch/powerpc/kernel/ptrace.c .core_note_type = 
NT_PPC_TM_CDSCR, .n = 1, n 2262 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_PPR, .n = 1, n 2267 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_DSCR, .n = 1, n 2274 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_TAR, .n = 1, n 2279 arch/powerpc/kernel/ptrace.c .core_note_type = NT_PPC_EBB, .n = ELF_NEBB, n 2288 arch/powerpc/kernel/ptrace.c .regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets) n 468 arch/powerpc/kernel/rtas_flash.c int n; n 471 arch/powerpc/kernel/rtas_flash.c n = sprintf(msg, "%d\n", args_buf->update_results); n 474 arch/powerpc/kernel/rtas_flash.c n += snprintf(msg + n, msglen - n, "%s\n", n 477 arch/powerpc/kernel/rtas_flash.c n = sprintf(msg, "%d\n", args_buf->status); n 479 arch/powerpc/kernel/rtas_flash.c return n; n 113 arch/powerpc/kernel/rtasd.c int i,j,n = 0; n 132 arch/powerpc/kernel/rtasd.c n = sprintf(buffer, "RTAS %d:", i/perline); n 136 arch/powerpc/kernel/rtasd.c n += sprintf(buffer+n, " "); n 138 arch/powerpc/kernel/rtasd.c n += sprintf(buffer+n, "%02x", (unsigned char)buf[i]); n 82 arch/powerpc/kernel/syscalls.c ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp) n 84 arch/powerpc/kernel/syscalls.c if ( (unsigned long)n >= 4096 ) n 86 arch/powerpc/kernel/syscalls.c unsigned long __user *buffer = (unsigned long __user *)n; n 88 arch/powerpc/kernel/syscalls.c || __get_user(n, buffer) n 95 arch/powerpc/kernel/syscalls.c return sys_select(n, inp, outp, exp, tvp); n 100 arch/powerpc/kernel/udbg.c int udbg_write(const char *s, int n) n 102 arch/powerpc/kernel/udbg.c int remain = n; n 117 arch/powerpc/kernel/udbg.c return n - remain; n 142 arch/powerpc/kernel/udbg.c unsigned int n) n 144 arch/powerpc/kernel/udbg.c udbg_write(s, n); n 904 arch/powerpc/kvm/book3s_64_mmu_hv.c unsigned long n; n 914 arch/powerpc/kvm/book3s_64_mmu_hv.c for (n = memslot->npages; n; --n, ++gfn) { n 1052 arch/powerpc/kvm/book3s_64_mmu_hv.c unsigned long n; n 1114 arch/powerpc/kvm/book3s_64_mmu_hv.c n = kvmppc_actual_pgsz(v, r); n 1115 arch/powerpc/kvm/book3s_64_mmu_hv.c n = (n + PAGE_SIZE - 1) >> PAGE_SHIFT; n 1116 arch/powerpc/kvm/book3s_64_mmu_hv.c if (n > npages_dirty) n 1117 arch/powerpc/kvm/book3s_64_mmu_hv.c npages_dirty = n; n 2058 arch/powerpc/kvm/book3s_64_mmu_hv.c unsigned long i, n; n 2072 arch/powerpc/kvm/book3s_64_mmu_hv.c n = p->chars_left; n 2073 arch/powerpc/kvm/book3s_64_mmu_hv.c if (n > len) n 2074 arch/powerpc/kvm/book3s_64_mmu_hv.c n = len; n 2075 arch/powerpc/kvm/book3s_64_mmu_hv.c r = copy_to_user(buf, p->buf + p->buf_index, n); n 2076 arch/powerpc/kvm/book3s_64_mmu_hv.c n -= r; n 2077 arch/powerpc/kvm/book3s_64_mmu_hv.c p->chars_left -= n; n 2078 arch/powerpc/kvm/book3s_64_mmu_hv.c p->buf_index += n; n 2079 arch/powerpc/kvm/book3s_64_mmu_hv.c buf += n; n 2080 arch/powerpc/kvm/book3s_64_mmu_hv.c len -= n; n 2081 arch/powerpc/kvm/book3s_64_mmu_hv.c ret = n; n 2083 arch/powerpc/kvm/book3s_64_mmu_hv.c if (!n) n 2109 arch/powerpc/kvm/book3s_64_mmu_hv.c n = scnprintf(p->buf, sizeof(p->buf), n 2112 arch/powerpc/kvm/book3s_64_mmu_hv.c p->chars_left = n; n 2113 arch/powerpc/kvm/book3s_64_mmu_hv.c if (n > len) n 2114 arch/powerpc/kvm/book3s_64_mmu_hv.c n = len; n 2115 arch/powerpc/kvm/book3s_64_mmu_hv.c r = copy_to_user(buf, p->buf, n); n 2116 arch/powerpc/kvm/book3s_64_mmu_hv.c n -= r; n 2117 arch/powerpc/kvm/book3s_64_mmu_hv.c p->chars_left -= n; n 2118 arch/powerpc/kvm/book3s_64_mmu_hv.c p->buf_index = n; n 2119 arch/powerpc/kvm/book3s_64_mmu_hv.c buf += n; n 2120 
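The debugfs read path above leans on the copy_to_user() convention that the return value is the number of bytes left uncopied, so n -= r converts the request into the amount actually delivered before the cursor is advanced. A userspace model of that accounting, with the "fault" simulated by capping the copy; fake_copy_to_user() and FAULT_AT are stand-ins.

#include <stdio.h>
#include <string.h>

#define FAULT_AT 10   /* pretend the user page ends after 10 bytes */

static unsigned long fake_copy_to_user(char *dst, const char *src,
				       unsigned long n)
{
	unsigned long ok = n < FAULT_AT ? n : FAULT_AT;

	memcpy(dst, src, ok);
	return n - ok;                /* bytes left uncopied */
}

int main(void)
{
	const char src[] = "0123456789abcdef";
	char dst[32] = { 0 };
	unsigned long n = sizeof(src) - 1;   /* 16 bytes requested */
	unsigned long r;

	r = fake_copy_to_user(dst, src, n);
	n -= r;                              /* bytes actually delivered */
	printf("asked 16, delivered %lu, short by %lu: \"%s\"\n", n, r, dst);
	return 0;
}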
arch/powerpc/kvm/book3s_64_mmu_hv.c len -= n; n 2121 arch/powerpc/kvm/book3s_64_mmu_hv.c ret += n; n 32 arch/powerpc/kvm/book3s_64_mmu_radix.c unsigned long n) n 35 arch/powerpc/kvm/book3s_64_mmu_radix.c unsigned long quadrant, ret = n; n 41 arch/powerpc/kvm/book3s_64_mmu_radix.c __pa(to), __pa(from), n); n 66 arch/powerpc/kvm/book3s_64_mmu_radix.c ret = raw_copy_from_user(to, from, n); n 68 arch/powerpc/kvm/book3s_64_mmu_radix.c ret = raw_copy_to_user(to, from, n); n 85 arch/powerpc/kvm/book3s_64_mmu_radix.c void *to, void *from, unsigned long n) n 104 arch/powerpc/kvm/book3s_64_mmu_radix.c return __kvmhv_copy_tofrom_guest_radix(lpid, pid, eaddr, to, from, n); n 108 arch/powerpc/kvm/book3s_64_mmu_radix.c unsigned long n) n 112 arch/powerpc/kvm/book3s_64_mmu_radix.c ret = kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, to, NULL, n); n 114 arch/powerpc/kvm/book3s_64_mmu_radix.c memset(to + (n - ret), 0, ret); n 121 arch/powerpc/kvm/book3s_64_mmu_radix.c unsigned long n) n 123 arch/powerpc/kvm/book3s_64_mmu_radix.c return kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, NULL, from, n); n 1080 arch/powerpc/kvm/book3s_64_mmu_radix.c unsigned long n; n 1087 arch/powerpc/kvm/book3s_64_mmu_radix.c for (n = memslot->npages; n; --n) { n 1194 arch/powerpc/kvm/book3s_64_mmu_radix.c unsigned long n; n 1215 arch/powerpc/kvm/book3s_64_mmu_radix.c n = p->chars_left; n 1216 arch/powerpc/kvm/book3s_64_mmu_radix.c if (n > len) n 1217 arch/powerpc/kvm/book3s_64_mmu_radix.c n = len; n 1218 arch/powerpc/kvm/book3s_64_mmu_radix.c r = copy_to_user(buf, p->buf + p->buf_index, n); n 1219 arch/powerpc/kvm/book3s_64_mmu_radix.c n -= r; n 1220 arch/powerpc/kvm/book3s_64_mmu_radix.c p->chars_left -= n; n 1221 arch/powerpc/kvm/book3s_64_mmu_radix.c p->buf_index += n; n 1222 arch/powerpc/kvm/book3s_64_mmu_radix.c buf += n; n 1223 arch/powerpc/kvm/book3s_64_mmu_radix.c len -= n; n 1224 arch/powerpc/kvm/book3s_64_mmu_radix.c ret = n; n 1226 arch/powerpc/kvm/book3s_64_mmu_radix.c if (!n) n 1260 arch/powerpc/kvm/book3s_64_mmu_radix.c n = 0; n 1263 arch/powerpc/kvm/book3s_64_mmu_radix.c n = scnprintf(p->buf, sizeof(p->buf), n 1265 arch/powerpc/kvm/book3s_64_mmu_radix.c n += scnprintf(p->buf + n, sizeof(p->buf) - n, n 1310 arch/powerpc/kvm/book3s_64_mmu_radix.c n = scnprintf(p->buf, sizeof(p->buf), n 1314 arch/powerpc/kvm/book3s_64_mmu_radix.c p->chars_left = n; n 1315 arch/powerpc/kvm/book3s_64_mmu_radix.c if (n > len) n 1316 arch/powerpc/kvm/book3s_64_mmu_radix.c n = len; n 1317 arch/powerpc/kvm/book3s_64_mmu_radix.c r = copy_to_user(buf, p->buf, n); n 1318 arch/powerpc/kvm/book3s_64_mmu_radix.c n -= r; n 1319 arch/powerpc/kvm/book3s_64_mmu_radix.c p->chars_left -= n; n 1320 arch/powerpc/kvm/book3s_64_mmu_radix.c p->buf_index = n; n 1321 arch/powerpc/kvm/book3s_64_mmu_radix.c buf += n; n 1322 arch/powerpc/kvm/book3s_64_mmu_radix.c len -= n; n 1323 arch/powerpc/kvm/book3s_64_mmu_radix.c ret += n; n 2166 arch/powerpc/kvm/book3s_hv.c ssize_t n; n 2212 arch/powerpc/kvm/book3s_hv.c n = copy_to_user(buf, p->buf + pos, len); n 2213 arch/powerpc/kvm/book3s_hv.c if (n) { n 2214 arch/powerpc/kvm/book3s_hv.c if (n == len) n 2216 arch/powerpc/kvm/book3s_hv.c len -= n; n 4398 arch/powerpc/kvm/book3s_hv.c unsigned long n; n 4418 arch/powerpc/kvm/book3s_hv.c n = kvm_dirty_bitmap_bytes(memslot); n 4419 arch/powerpc/kvm/book3s_hv.c buf = memslot->dirty_bitmap + n / sizeof(long); n 4420 arch/powerpc/kvm/book3s_hv.c memset(buf, 0, n); n 4436 arch/powerpc/kvm/book3s_hv.c for (i = 0; i < n / sizeof(long); ++i) n 4449 arch/powerpc/kvm/book3s_hv.c if 
(copy_to_user(log->dirty_bitmap, buf, n)) n 482 arch/powerpc/kvm/book3s_hv_nested.c unsigned long n = kvmppc_get_gpr(vcpu, 9); n 492 arch/powerpc/kvm/book3s_hv_nested.c buf = kzalloc(n, GFP_KERNEL); n 507 arch/powerpc/kvm/book3s_hv_nested.c eaddr, buf, NULL, n); n 512 arch/powerpc/kvm/book3s_hv_nested.c rc = kvm_vcpu_write_guest(vcpu, gp_to, buf, n); n 517 arch/powerpc/kvm/book3s_hv_nested.c rc = kvm_vcpu_read_guest(vcpu, gp_from, buf, n); n 523 arch/powerpc/kvm/book3s_hv_nested.c eaddr, NULL, buf, n); n 39 arch/powerpc/kvm/book3s_hv_ras.c unsigned long i, n; n 50 arch/powerpc/kvm/book3s_hv_ras.c n = min_t(u32, be32_to_cpu(slb->persistent), SLB_MIN_SIZE); n 51 arch/powerpc/kvm/book3s_hv_ras.c if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end) n 55 arch/powerpc/kvm/book3s_hv_ras.c for (i = 0; i < n; ++i) { n 573 arch/powerpc/kvm/book3s_hv_rm_mmu.c long int i, j, k, n, found, indexes[4]; n 584 arch/powerpc/kvm/book3s_hv_rm_mmu.c n = 0; n 606 arch/powerpc/kvm/book3s_hv_rm_mmu.c if (n) n 655 arch/powerpc/kvm/book3s_hv_rm_mmu.c tlbrb[n] = compute_tlbie_rb(hp0, hp1, pte_index); n 656 arch/powerpc/kvm/book3s_hv_rm_mmu.c indexes[n] = j; n 657 arch/powerpc/kvm/book3s_hv_rm_mmu.c hptes[n] = hp; n 658 arch/powerpc/kvm/book3s_hv_rm_mmu.c revs[n] = rev; n 659 arch/powerpc/kvm/book3s_hv_rm_mmu.c ++n; n 662 arch/powerpc/kvm/book3s_hv_rm_mmu.c if (!n) n 666 arch/powerpc/kvm/book3s_hv_rm_mmu.c do_tlbies(kvm, tlbrb, n, global, true); n 669 arch/powerpc/kvm/book3s_hv_rm_mmu.c for (k = 0; k < n; ++k) { n 762 arch/powerpc/kvm/book3s_hv_rm_mmu.c int i, n = 1; n 771 arch/powerpc/kvm/book3s_hv_rm_mmu.c n = 4; n 774 arch/powerpc/kvm/book3s_hv_rm_mmu.c for (i = 0; i < n; ++i, ++pte_index) { n 1871 arch/powerpc/kvm/book3s_pr.c unsigned long n; n 1887 arch/powerpc/kvm/book3s_pr.c kvm_for_each_vcpu(n, vcpu, kvm) n 1890 arch/powerpc/kvm/book3s_pr.c n = kvm_dirty_bitmap_bytes(memslot); n 1891 arch/powerpc/kvm/book3s_pr.c memset(memslot->dirty_bitmap, 0, n); n 2018 arch/powerpc/kvm/booke.c int n, b = 0, w = 0; n 2062 arch/powerpc/kvm/booke.c for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) { n 2063 arch/powerpc/kvm/booke.c uint64_t addr = dbg->arch.bp[n].addr; n 2064 arch/powerpc/kvm/booke.c uint32_t type = dbg->arch.bp[n].type; n 1152 arch/powerpc/lib/sstep.c #define ROTATE(x, n) ((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x)) n 59 arch/powerpc/lib/test_emulate_step.c #define IGNORE_GPR(n) (0x1UL << (n)) n 85 arch/powerpc/mm/book3s32/mmu.c int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 
8 : 4; n 87 arch/powerpc/mm/book3s32/mmu.c for (b = 0; b < n; b++) { n 104 arch/powerpc/mm/book3s64/iommu_api.c unsigned long n = min(entries - entry, chunk); n 106 arch/powerpc/mm/book3s64/iommu_api.c ret = get_user_pages(ua + (entry << PAGE_SHIFT), n, n 109 arch/powerpc/mm/book3s64/iommu_api.c if (ret == n) { n 110 arch/powerpc/mm/book3s64/iommu_api.c pinned += n; n 211 arch/powerpc/mm/book3s64/slb.c int i, n; n 251 arch/powerpc/mm/book3s64/slb.c n = min_t(int, get_paca()->slb_save_cache_ptr, SLB_CACHE_ENTRIES); n 252 arch/powerpc/mm/book3s64/slb.c for (i = 0; i < n; i++) n 255 arch/powerpc/mm/book3s64/slb.c for (i = n; i < SLB_CACHE_ENTRIES; i++) n 302 arch/powerpc/mm/nohash/fsl_booke.c int n; n 306 arch/powerpc/mm/nohash/fsl_booke.c n = switch_to_as1(); n 315 arch/powerpc/mm/nohash/fsl_booke.c restore_to_as0(n, offset, __va(dt_ptr), 1); n 370 arch/powerpc/mm/numa.c static unsigned long read_n_cells(int n, const __be32 **buf) n 374 arch/powerpc/mm/numa.c while (n--) { n 1050 arch/powerpc/oprofile/op_model_cell.c static int calculate_lfsr(int n) n 1058 arch/powerpc/oprofile/op_model_cell.c if ((n >> 16) == 0) n 1060 arch/powerpc/oprofile/op_model_cell.c else if (((n - V2_16) >> 19) == 0) n 1061 arch/powerpc/oprofile/op_model_cell.c index = ((n - V2_16) >> 12) + 1; n 1062 arch/powerpc/oprofile/op_model_cell.c else if (((n - V2_16 - V2_19) >> 22) == 0) n 1063 arch/powerpc/oprofile/op_model_cell.c index = ((n - V2_16 - V2_19) >> 15 ) + 1 + 128; n 1064 arch/powerpc/oprofile/op_model_cell.c else if (((n - V2_16 - V2_19 - V2_22) >> 24) == 0) n 1065 arch/powerpc/oprofile/op_model_cell.c index = ((n - V2_16 - V2_19 - V2_22) >> 18 ) + 1 + 256; n 992 arch/powerpc/perf/core-book3s.c int i, n, first; n 1003 arch/powerpc/perf/core-book3s.c n = n_prev + n_new; n 1004 arch/powerpc/perf/core-book3s.c if (n <= 1) n 1008 arch/powerpc/perf/core-book3s.c for (i = 0; i < n; ++i) { n 1027 arch/powerpc/perf/core-book3s.c for (i = 0; i < n; ++i) n 1440 arch/powerpc/perf/core-book3s.c int n = 0; n 1444 arch/powerpc/perf/core-book3s.c if (n >= max_count) n 1446 arch/powerpc/perf/core-book3s.c ctrs[n] = group; n 1447 arch/powerpc/perf/core-book3s.c flags[n] = group->hw.event_base; n 1448 arch/powerpc/perf/core-book3s.c events[n++] = group->hw.config; n 1453 arch/powerpc/perf/core-book3s.c if (n >= max_count) n 1455 arch/powerpc/perf/core-book3s.c ctrs[n] = event; n 1456 arch/powerpc/perf/core-book3s.c flags[n] = event->hw.event_base; n 1457 arch/powerpc/perf/core-book3s.c events[n++] = event->hw.config; n 1460 arch/powerpc/perf/core-book3s.c return n; n 1699 arch/powerpc/perf/core-book3s.c long i, n; n 1712 arch/powerpc/perf/core-book3s.c n = cpuhw->n_events; n 1713 arch/powerpc/perf/core-book3s.c if (check_excludes(cpuhw->event, cpuhw->flags, 0, n)) n 1715 arch/powerpc/perf/core-book3s.c i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n); n 1719 arch/powerpc/perf/core-book3s.c for (i = cpuhw->n_txn_start; i < n; ++i) n 1737 arch/powerpc/perf/core-book3s.c int n; n 1757 arch/powerpc/perf/core-book3s.c n = ppmu->get_alternatives(ev, flags, alt); n 1759 arch/powerpc/perf/core-book3s.c return n > 0; n 1770 arch/powerpc/perf/core-book3s.c int n; n 1773 arch/powerpc/perf/core-book3s.c n = ppmu->get_alternatives(ev, flags, alt); n 1774 arch/powerpc/perf/core-book3s.c if (!n) n 1846 arch/powerpc/perf/core-book3s.c int n; n 1938 arch/powerpc/perf/core-book3s.c n = 0; n 1940 arch/powerpc/perf/core-book3s.c n = collect_events(event->group_leader, ppmu->n_counter - 1, n 1942 arch/powerpc/perf/core-book3s.c if 
(n < 0) n 1945 arch/powerpc/perf/core-book3s.c events[n] = ev; n 1946 arch/powerpc/perf/core-book3s.c ctrs[n] = event; n 1947 arch/powerpc/perf/core-book3s.c cflags[n] = flags; n 1948 arch/powerpc/perf/core-book3s.c if (check_excludes(ctrs, cflags, n, 1)) n 1952 arch/powerpc/perf/core-book3s.c err = power_check_constraints(cpuhw, events, cflags, n + 1); n 1969 arch/powerpc/perf/core-book3s.c event->hw.config = events[n]; n 1970 arch/powerpc/perf/core-book3s.c event->hw.event_base = cflags[n]; n 267 arch/powerpc/perf/core-fsl-emb.c int n = 0; n 271 arch/powerpc/perf/core-fsl-emb.c if (n >= max_count) n 273 arch/powerpc/perf/core-fsl-emb.c ctrs[n] = group; n 274 arch/powerpc/perf/core-fsl-emb.c n++; n 279 arch/powerpc/perf/core-fsl-emb.c if (n >= max_count) n 281 arch/powerpc/perf/core-fsl-emb.c ctrs[n] = event; n 282 arch/powerpc/perf/core-fsl-emb.c n++; n 285 arch/powerpc/perf/core-fsl-emb.c return n; n 486 arch/powerpc/perf/core-fsl-emb.c int n; n 528 arch/powerpc/perf/core-fsl-emb.c n = 0; n 530 arch/powerpc/perf/core-fsl-emb.c n = collect_events(event->group_leader, n 532 arch/powerpc/perf/core-fsl-emb.c if (n < 0) n 538 arch/powerpc/perf/core-fsl-emb.c for (i = 0; i < n; i++) { n 36 arch/powerpc/perf/hv-24x7.c #define DOMAIN(n, v, x, c) \ n 37 arch/powerpc/perf/hv-24x7.c case HV_PERF_DOMAIN_##n: \ n 50 arch/powerpc/perf/hv-24x7.c #define DOMAIN(n, v, x, c) \ n 51 arch/powerpc/perf/hv-24x7.c case HV_PERF_DOMAIN_##n: \ n 419 arch/powerpc/perf/hv-24x7.c char *n; n 427 arch/powerpc/perf/hv-24x7.c n = kasprintf(GFP_KERNEL, "%.*s", name_max, name); n 429 arch/powerpc/perf/hv-24x7.c n = kasprintf(GFP_KERNEL, "%.*s__%d", name_max, name, n 431 arch/powerpc/perf/hv-24x7.c if (!n) n 434 arch/powerpc/perf/hv-24x7.c a = device_str_attr_create_(n, s); n 440 arch/powerpc/perf/hv-24x7.c kfree(n); n 612 arch/powerpc/perf/hv-24x7.c struct event_uniq *pos, *n; n 614 arch/powerpc/perf/hv-24x7.c rbtree_postorder_for_each_entry_safe(pos, n, root, node) n 988 arch/powerpc/perf/hv-24x7.c int d, n, count = 0; n 996 arch/powerpc/perf/hv-24x7.c n = sprintf(page, "%d: %s\n", d, str); n 997 arch/powerpc/perf/hv-24x7.c if (n < 0) n 1000 arch/powerpc/perf/hv-24x7.c count += n; n 1001 arch/powerpc/perf/hv-24x7.c page += n; n 68 arch/powerpc/perf/power5+-pmu.c #define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) n 68 arch/powerpc/perf/power5-pmu.c #define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) n 36 arch/powerpc/perf/power6-pmu.c #define MMCR1_TTMSEL_SH(n) (MMCR1_TTM0SEL_SH - (n) * 4) n 38 arch/powerpc/perf/power6-pmu.c #define MMCR1_TTMSEL(m, n) (((m) >> MMCR1_TTMSEL_SH(n)) & MMCR1_TTMSEL_MSK) n 46 arch/powerpc/perf/power6-pmu.c #define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) n 46 arch/powerpc/perf/power7-pmu.c #define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) n 144 arch/powerpc/platforms/4xx/cpm.c const char *buf, size_t n) n 150 arch/powerpc/platforms/4xx/cpm.c p = memchr(buf, '\n', n); n 151 arch/powerpc/platforms/4xx/cpm.c len = p ? 
p - buf : n; n 156 arch/powerpc/platforms/4xx/cpm.c return n; n 21 arch/powerpc/platforms/85xx/socrates_fpga_pic.c #define FPGA_PIC_IRQMASK(n) (0x4 + 0x4 * (n)) n 60 arch/powerpc/platforms/86xx/mpc86xx_smp.c int n = 0; n 81 arch/powerpc/platforms/86xx/mpc86xx_smp.c while ((__secondary_hold_acknowledge != nr) && (n++, n < 1000)) n 89 arch/powerpc/platforms/86xx/mpc86xx_smp.c pr_debug("wait CPU #%d for %d msecs.\n", nr, n); n 83 arch/powerpc/platforms/8xx/tqm8xx_setup.c static void __init init_pins(int n, struct cpm_pin *pin) n 87 arch/powerpc/platforms/8xx/tqm8xx_setup.c for (i = 0; i < n; i++) { n 134 arch/powerpc/platforms/cell/iommu.c long n; n 140 arch/powerpc/platforms/cell/iommu.c n = min(n_ptes, 1l << 11); n 141 arch/powerpc/platforms/cell/iommu.c val = (((n /*- 1*/) << 53) & IOC_IOPT_CacheInvd_NE_Mask) n 149 arch/powerpc/platforms/cell/iommu.c n_ptes -= n; n 150 arch/powerpc/platforms/cell/iommu.c pte += n; n 63 arch/powerpc/platforms/cell/spider-pci.c unsigned long n) n 65 arch/powerpc/platforms/cell/spider-pci.c __do_memcpy_fromio(dest, src, n); n 103 arch/powerpc/platforms/cell/spu_manage.c struct device_node *n, n 112 arch/powerpc/platforms/cell/spu_manage.c prop = of_get_property(n, name, &proplen); n 267 arch/powerpc/platforms/cell/spu_manage.c unsigned int n = 0; n 278 arch/powerpc/platforms/cell/spu_manage.c n++; n 280 arch/powerpc/platforms/cell/spu_manage.c return ret ? ret : n; n 25 arch/powerpc/platforms/cell/spu_notify.c int spu_switch_event_register(struct notifier_block *n) n 28 arch/powerpc/platforms/cell/spu_notify.c ret = blocking_notifier_chain_register(&spu_switch_notifier, n); n 35 arch/powerpc/platforms/cell/spu_notify.c int spu_switch_event_unregister(struct notifier_block *n) n 37 arch/powerpc/platforms/cell/spu_notify.c return blocking_notifier_chain_unregister(&spu_switch_notifier, n); n 86 arch/powerpc/platforms/cell/spufs/coredump.c int n = iterate_fd(current->files, *fd, match_context, NULL); n 87 arch/powerpc/platforms/cell/spufs/coredump.c if (!n) n 89 arch/powerpc/platforms/cell/spufs/coredump.c *fd = n - 1; n 2361 arch/powerpc/platforms/cell/spufs/file.c static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n) n 2367 arch/powerpc/platforms/cell/spufs/file.c return snprintf(tbuf, n, "%llu.%09u %d %u %u %llu\n", n 296 arch/powerpc/platforms/cell/spufs/sched.c int node, n; n 303 arch/powerpc/platforms/cell/spufs/sched.c for (n = 0; n < MAX_NUMNODES; n++, node++) { n 562 arch/powerpc/platforms/cell/spufs/sched.c int node, n; n 586 arch/powerpc/platforms/cell/spufs/sched.c for (n = 0; n < MAX_NUMNODES; n++, node++) { n 621 arch/powerpc/platforms/cell/spufs/sched.c int node, n; n 634 arch/powerpc/platforms/cell/spufs/sched.c for (n = 0; n < MAX_NUMNODES; n++, node++) { n 206 arch/powerpc/platforms/maple/setup.c int naddr, n, i, opplen, has_isus = 0; n 263 arch/powerpc/platforms/maple/setup.c for (n = 0, i = naddr; i < opplen; i += naddr, n++) { n 265 arch/powerpc/platforms/maple/setup.c mpic_assign_isu(mpic, n, isuaddr); n 63 arch/powerpc/platforms/powernv/pci-ioda-tce.c int n = (idx & mask) >> (level * shift); n 64 arch/powerpc/platforms/powernv/pci-ioda-tce.c unsigned long oldtce, tce = be64_to_cpu(READ_ONCE(tmp[n])); n 78 arch/powerpc/platforms/powernv/pci-ioda-tce.c oldtce = be64_to_cpu(cmpxchg(&tmp[n], 0, n 267 arch/powerpc/platforms/powernv/vas.h #define VREG_SFX(n, s) __stringify(n), VAS_##n##s n 738 arch/powerpc/platforms/ps3/mm.c struct dma_chunk *c, *n; n 741 arch/powerpc/platforms/ps3/mm.c list_for_each_entry_safe(c, n, 
&r->chunk_list.head, link) { n 24 arch/powerpc/platforms/ps3/repository.c static void _dump_field(const char *hdr, u64 n, const char *func, int line) n 28 arch/powerpc/platforms/ps3/repository.c const char *const in = (const char *)&n; n 35 arch/powerpc/platforms/ps3/repository.c pr_devel("%s:%d: %s%016llx : %s\n", func, line, hdr, n, s); n 76 arch/powerpc/platforms/ps3/repository.c u64 n; n 78 arch/powerpc/platforms/ps3/repository.c strncpy((char *)&n, text, 8); n 79 arch/powerpc/platforms/ps3/repository.c return PS3_VENDOR_ID_NONE + (n >> 32) + index; n 92 arch/powerpc/platforms/ps3/repository.c u64 n = 0; n 94 arch/powerpc/platforms/ps3/repository.c memcpy((char *)&n, text, strnlen(text, sizeof(n))); n 95 arch/powerpc/platforms/ps3/repository.c return n + index; n 30 arch/powerpc/platforms/pseries/power.c const char *buf, size_t n) n 38 arch/powerpc/platforms/pseries/power.c return n; n 196 arch/powerpc/sysdev/fsl_pci.c int i, j, n, mem_log, win_idx = 3, start_idx = 1, end_idx = 4; n 255 arch/powerpc/sysdev/fsl_pci.c n = setup_one_atmu(pci, j, &hose->mem_resources[i], offset); n 257 arch/powerpc/sysdev/fsl_pci.c if (n < 0 || j >= 5) { n 261 arch/powerpc/sysdev/fsl_pci.c j += n; n 493 arch/powerpc/sysdev/mpic.c int i, irq, n; n 512 arch/powerpc/sysdev/mpic.c n = (readl(base + 4) >> 16) & 0xff; n 516 arch/powerpc/sysdev/mpic.c devfn >> 3, devfn & 0x7, pos, vdid & 0xffff, vdid >> 16, n + 1); n 518 arch/powerpc/sysdev/mpic.c for (i = 0; i <= n; i++) { n 171 arch/powerpc/xmon/nonstdio.c int rc, n; n 174 arch/powerpc/xmon/nonstdio.c n = vsnprintf(xmon_outbuf, sizeof(xmon_outbuf), format, args); n 177 arch/powerpc/xmon/nonstdio.c rc = xmon_write(xmon_outbuf, n); n 179 arch/powerpc/xmon/nonstdio.c if (n && rc == 0) { n 1726 arch/powerpc/xmon/xmon.c int n, trap; n 1749 arch/powerpc/xmon/xmon.c for (n = 0; n < 16; ++n) n 1751 arch/powerpc/xmon/xmon.c n, fp->gpr[n], n+16, fp->gpr[n+16]); n 1753 arch/powerpc/xmon/xmon.c for (n = 0; n < 7; ++n) n 1755 arch/powerpc/xmon/xmon.c n, fp->gpr[n], n+7, fp->gpr[n+7]); n 1758 arch/powerpc/xmon/xmon.c for (n = 0; n < 32; ++n) { n 1759 arch/powerpc/xmon/xmon.c printf("R%.2d = %.8lx%s", n, fp->gpr[n], n 1760 arch/powerpc/xmon/xmon.c (n & 3) == 3? 
"\n": " "); n 1761 arch/powerpc/xmon/xmon.c if (n == 12 && !FULL_REGS(fp)) { n 1819 arch/powerpc/xmon/xmon.c read_spr(int n, unsigned long *vp) n 1828 arch/powerpc/xmon/xmon.c ret = xmon_mfspr(n, *vp); n 1840 arch/powerpc/xmon/xmon.c write_spr(int n, unsigned long val) n 1851 arch/powerpc/xmon/xmon.c xmon_mtspr(n, val); n 1855 arch/powerpc/xmon/xmon.c printf("SPR 0x%03x (%4d) Faulted during write\n", n, n); n 2044 arch/powerpc/xmon/xmon.c volatile int n; n 2047 arch/powerpc/xmon/xmon.c n = 0; n 2064 arch/powerpc/xmon/xmon.c for( ; n < size; ++n) { n 2072 arch/powerpc/xmon/xmon.c n = size; n 2075 arch/powerpc/xmon/xmon.c return n; n 2081 arch/powerpc/xmon/xmon.c volatile int n; n 2084 arch/powerpc/xmon/xmon.c n = 0; n 2088 arch/powerpc/xmon/xmon.c return n; n 2107 arch/powerpc/xmon/xmon.c for ( ; n < size; ++n) { n 2115 arch/powerpc/xmon/xmon.c n = size; n 2117 arch/powerpc/xmon/xmon.c printf("*** Error writing address "REG"\n", adrs + n); n 2120 arch/powerpc/xmon/xmon.c return n; n 2215 arch/powerpc/xmon/xmon.c unsigned long n; n 2244 arch/powerpc/xmon/xmon.c n = mread(adrs, val, size); n 2250 arch/powerpc/xmon/xmon.c for (i = 0; i < n; ++i) n 2259 arch/powerpc/xmon/xmon.c if( scanhex(&n) ){ n 2261 arch/powerpc/xmon/xmon.c val[i] = n >> (i * 8); n 2274 arch/powerpc/xmon/xmon.c n = inchar(); n 2275 arch/powerpc/xmon/xmon.c if( n == '\\' ) n 2276 arch/powerpc/xmon/xmon.c n = bsesc(); n 2277 arch/powerpc/xmon/xmon.c else if( n == '\'' ) n 2280 arch/powerpc/xmon/xmon.c val[i] = n >> (i * 8); n 2343 arch/powerpc/xmon/xmon.c n = size; n 2344 arch/powerpc/xmon/xmon.c scanhex(&n); n 2345 arch/powerpc/xmon/xmon.c adrs -= n; n 2348 arch/powerpc/xmon/xmon.c n = size; n 2349 arch/powerpc/xmon/xmon.c scanhex(&n); n 2350 arch/powerpc/xmon/xmon.c adrs += n; n 2378 arch/powerpc/xmon/xmon.c long n, m, r, nr; n 2381 arch/powerpc/xmon/xmon.c for (n = ndump; n > 0;) { n 2382 arch/powerpc/xmon/xmon.c r = n < 16? n: 16; n 2391 arch/powerpc/xmon/xmon.c n -= r; n 2797 arch/powerpc/xmon/xmon.c long n, m, c, r, nr; n 2800 arch/powerpc/xmon/xmon.c for (n = ndump; n > 0;) { n 2803 arch/powerpc/xmon/xmon.c r = n < 16? 
n: 16; n 2827 arch/powerpc/xmon/xmon.c n -= r; n 3002 arch/powerpc/xmon/xmon.c unsigned n, prt; n 3005 arch/powerpc/xmon/xmon.c for( n = nb; n > 0; --n ) n 3020 arch/powerpc/xmon/xmon.c unsigned a, n; n 3036 arch/powerpc/xmon/xmon.c n = 0; n 3041 arch/powerpc/xmon/xmon.c if (++n >= 10) n 249 arch/riscv/include/asm/atomic.h c_t atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n) \ n 251 arch/riscv/include/asm/atomic.h return __xchg_relaxed(&(v->counter), n, size); \ n 254 arch/riscv/include/asm/atomic.h c_t atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n) \ n 256 arch/riscv/include/asm/atomic.h return __xchg_acquire(&(v->counter), n, size); \ n 259 arch/riscv/include/asm/atomic.h c_t atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n) \ n 261 arch/riscv/include/asm/atomic.h return __xchg_release(&(v->counter), n, size); \ n 264 arch/riscv/include/asm/atomic.h c_t atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n) \ n 266 arch/riscv/include/asm/atomic.h return __xchg(&(v->counter), n, size); \ n 270 arch/riscv/include/asm/atomic.h c_t o, c_t n) \ n 272 arch/riscv/include/asm/atomic.h return __cmpxchg_relaxed(&(v->counter), o, n, size); \ n 276 arch/riscv/include/asm/atomic.h c_t o, c_t n) \ n 278 arch/riscv/include/asm/atomic.h return __cmpxchg_acquire(&(v->counter), o, n, size); \ n 282 arch/riscv/include/asm/atomic.h c_t o, c_t n) \ n 284 arch/riscv/include/asm/atomic.h return __cmpxchg_release(&(v->counter), o, n, size); \ n 287 arch/riscv/include/asm/atomic.h c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \ n 289 arch/riscv/include/asm/atomic.h return __cmpxchg(&(v->counter), o, n, size); \ n 202 arch/riscv/include/asm/cmpxchg.h #define cmpxchg_relaxed(ptr, o, n) \ n 205 arch/riscv/include/asm/cmpxchg.h __typeof__(*(ptr)) _n_ = (n); \ n 248 arch/riscv/include/asm/cmpxchg.h #define cmpxchg_acquire(ptr, o, n) \ n 251 arch/riscv/include/asm/cmpxchg.h __typeof__(*(ptr)) _n_ = (n); \ n 294 arch/riscv/include/asm/cmpxchg.h #define cmpxchg_release(ptr, o, n) \ n 297 arch/riscv/include/asm/cmpxchg.h __typeof__(*(ptr)) _n_ = (n); \ n 340 arch/riscv/include/asm/cmpxchg.h #define cmpxchg(ptr, o, n) \ n 343 arch/riscv/include/asm/cmpxchg.h __typeof__(*(ptr)) _n_ = (n); \ n 348 arch/riscv/include/asm/cmpxchg.h #define cmpxchg_local(ptr, o, n) \ n 349 arch/riscv/include/asm/cmpxchg.h (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr)))) n 351 arch/riscv/include/asm/cmpxchg.h #define cmpxchg32(ptr, o, n) \ n 354 arch/riscv/include/asm/cmpxchg.h cmpxchg((ptr), (o), (n)); \ n 357 arch/riscv/include/asm/cmpxchg.h #define cmpxchg32_local(ptr, o, n) \ n 360 arch/riscv/include/asm/cmpxchg.h cmpxchg_relaxed((ptr), (o), (n)) \ n 363 arch/riscv/include/asm/cmpxchg.h #define cmpxchg64(ptr, o, n) \ n 366 arch/riscv/include/asm/cmpxchg.h cmpxchg((ptr), (o), (n)); \ n 369 arch/riscv/include/asm/cmpxchg.h #define cmpxchg64_local(ptr, o, n) \ n 372 arch/riscv/include/asm/cmpxchg.h cmpxchg_relaxed((ptr), (o), (n)); \ n 371 arch/riscv/include/asm/uaccess.h const void *from, unsigned long n); n 373 arch/riscv/include/asm/uaccess.h const void __user *from, unsigned long n); n 376 arch/riscv/include/asm/uaccess.h raw_copy_from_user(void *to, const void __user *from, unsigned long n) n 378 arch/riscv/include/asm/uaccess.h return __asm_copy_from_user(to, from, n); n 382 arch/riscv/include/asm/uaccess.h raw_copy_to_user(void __user *to, const void *from, unsigned long n) n 384 arch/riscv/include/asm/uaccess.h return __asm_copy_to_user(to, from, n); n 390 
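Aside: the arch/riscv cmpxchg.h and atomic.h entries above all wrap one primitive: atomically replace *ptr with n only if it still equals o, and return the prior value, in relaxed/acquire/release/full-barrier flavours. The snippet below is only a portable illustration of those semantics using C11 atomics, not the kernel's LR/SC implementation; cas_int() and the demo values are made up.

#include <stdatomic.h>
#include <stdio.h>

/* Return the value *v held before the attempt, like cmpxchg(ptr, o, n). */
static int cas_int(_Atomic int *v, int old, int new)
{
        int expected = old;

        /* On failure, 'expected' is overwritten with the current value. */
        atomic_compare_exchange_strong(v, &expected, new);
        return expected;
}

int main(void)
{
        _Atomic int counter = 5;

        printf("%d\n", cas_int(&counter, 5, 7));  /* succeeds, prints 5 */
        printf("%d\n", cas_int(&counter, 5, 9));  /* fails, prints 7 */
        return 0;
}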
arch/riscv/include/asm/uaccess.h extern long __must_check strnlen_user(const char __user *str, long n); n 393 arch/riscv/include/asm/uaccess.h unsigned long __must_check __clear_user(void __user *addr, unsigned long n); n 396 arch/riscv/include/asm/uaccess.h unsigned long __must_check clear_user(void __user *to, unsigned long n) n 399 arch/riscv/include/asm/uaccess.h return access_ok(to, n) ? n 400 arch/riscv/include/asm/uaccess.h __clear_user(to, n) : n; n 98 arch/riscv/kernel/ptrace.c .n = ELF_NGREG, n 107 arch/riscv/kernel/ptrace.c .n = ELF_NFPREG, n 120 arch/riscv/kernel/ptrace.c .n = ARRAY_SIZE(riscv_user_regset), n 84 arch/riscv/lib/delay.c u64 n; n 87 arch/riscv/lib/delay.c n = (u64)usecs * riscv_timebase; n 88 arch/riscv/lib/delay.c do_div(n, 1000000); n 90 arch/riscv/lib/delay.c __delay(n); n 25 arch/s390/boot/compressed/decompressor.c #define memzero(s, n) memset((s), 0, (n)) n 35 arch/s390/boot/mem_detect.c static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n) n 37 arch/s390/boot/mem_detect.c if (n < MEM_INLINED_ENTRIES) n 38 arch/s390/boot/mem_detect.c return &mem_detect.entries[n]; n 41 arch/s390/boot/mem_detect.c return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES]; n 262 arch/s390/crypto/aes_s390.c unsigned int nbytes, n; n 268 arch/s390/crypto/aes_s390.c n = nbytes & ~(AES_BLOCK_SIZE - 1); n 270 arch/s390/crypto/aes_s390.c walk->dst.virt.addr, walk->src.virt.addr, n); n 271 arch/s390/crypto/aes_s390.c ret = blkcipher_walk_done(desc, walk, nbytes - n); n 377 arch/s390/crypto/aes_s390.c unsigned int nbytes, n; n 389 arch/s390/crypto/aes_s390.c n = nbytes & ~(AES_BLOCK_SIZE - 1); n 391 arch/s390/crypto/aes_s390.c walk->dst.virt.addr, walk->src.virt.addr, n); n 392 arch/s390/crypto/aes_s390.c ret = blkcipher_walk_done(desc, walk, nbytes - n); n 546 arch/s390/crypto/aes_s390.c unsigned int offset, nbytes, n; n 574 arch/s390/crypto/aes_s390.c n = nbytes & ~(AES_BLOCK_SIZE - 1); n 576 arch/s390/crypto/aes_s390.c walk->dst.virt.addr, walk->src.virt.addr, n); n 577 arch/s390/crypto/aes_s390.c ret = blkcipher_walk_done(desc, walk, nbytes - n); n 686 arch/s390/crypto/aes_s390.c unsigned int i, n; n 690 arch/s390/crypto/aes_s390.c n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1); n 691 arch/s390/crypto/aes_s390.c for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) { n 696 arch/s390/crypto/aes_s390.c return n; n 704 arch/s390/crypto/aes_s390.c unsigned int n, nbytes; n 711 arch/s390/crypto/aes_s390.c n = AES_BLOCK_SIZE; n 713 arch/s390/crypto/aes_s390.c n = __ctrblk_init(ctrblk, walk->iv, nbytes); n 714 arch/s390/crypto/aes_s390.c ctrptr = (n > AES_BLOCK_SIZE) ? 
ctrblk : walk->iv; n 717 arch/s390/crypto/aes_s390.c n, ctrptr); n 719 arch/s390/crypto/aes_s390.c memcpy(walk->iv, ctrptr + n - AES_BLOCK_SIZE, n 722 arch/s390/crypto/aes_s390.c ret = blkcipher_walk_done(desc, walk, nbytes - n); n 872 arch/s390/crypto/aes_s390.c int n; n 899 arch/s390/crypto/aes_s390.c n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes); n 900 arch/s390/crypto/aes_s390.c memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n); n 901 arch/s390/crypto/aes_s390.c gw->buf_bytes += n; n 902 arch/s390/crypto/aes_s390.c _gcm_sg_unmap_and_advance(gw, n); n 955 arch/s390/crypto/aes_s390.c int n = gw->buf_bytes - bytesdone; n 956 arch/s390/crypto/aes_s390.c if (n > 0) { n 957 arch/s390/crypto/aes_s390.c memmove(gw->buf, gw->buf + bytesdone, n); n 958 arch/s390/crypto/aes_s390.c gw->buf_bytes = n; n 969 arch/s390/crypto/aes_s390.c int i, n; n 975 arch/s390/crypto/aes_s390.c for (i = 0; i < bytesdone; i += n) { n 978 arch/s390/crypto/aes_s390.c n = min(gw->walk_bytes, bytesdone - i); n 979 arch/s390/crypto/aes_s390.c memcpy(gw->walk_ptr, gw->buf + i, n); n 980 arch/s390/crypto/aes_s390.c _gcm_sg_unmap_and_advance(gw, n); n 998 arch/s390/crypto/aes_s390.c unsigned int n, len, in_bytes, out_bytes, n 1069 arch/s390/crypto/aes_s390.c n = aad_bytes + pc_bytes; n 1070 arch/s390/crypto/aes_s390.c if (gcm_in_walk_done(&gw_in, n) != n) n 1072 arch/s390/crypto/aes_s390.c if (gcm_out_walk_done(&gw_out, n) != n) n 86 arch/s390/crypto/des_s390.c unsigned int nbytes, n; n 92 arch/s390/crypto/des_s390.c n = nbytes & ~(DES_BLOCK_SIZE - 1); n 94 arch/s390/crypto/des_s390.c walk->src.virt.addr, n); n 95 arch/s390/crypto/des_s390.c ret = blkcipher_walk_done(desc, walk, nbytes - n); n 104 arch/s390/crypto/des_s390.c unsigned int nbytes, n; n 116 arch/s390/crypto/des_s390.c n = nbytes & ~(DES_BLOCK_SIZE - 1); n 118 arch/s390/crypto/des_s390.c walk->src.virt.addr, n); n 119 arch/s390/crypto/des_s390.c ret = blkcipher_walk_done(desc, walk, nbytes - n); n 354 arch/s390/crypto/des_s390.c unsigned int i, n; n 357 arch/s390/crypto/des_s390.c n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1); n 359 arch/s390/crypto/des_s390.c for (i = (n / DES_BLOCK_SIZE) - 1; i > 0; i--) { n 364 arch/s390/crypto/des_s390.c return n; n 372 arch/s390/crypto/des_s390.c unsigned int n, nbytes; n 379 arch/s390/crypto/des_s390.c n = DES_BLOCK_SIZE; n 381 arch/s390/crypto/des_s390.c n = __ctrblk_init(ctrblk, walk->iv, nbytes); n 382 arch/s390/crypto/des_s390.c ctrptr = (n > DES_BLOCK_SIZE) ? 
ctrblk : walk->iv; n 384 arch/s390/crypto/des_s390.c walk->src.virt.addr, n, ctrptr); n 386 arch/s390/crypto/des_s390.c memcpy(walk->iv, ctrptr + n - DES_BLOCK_SIZE, n 389 arch/s390/crypto/des_s390.c ret = blkcipher_walk_done(desc, walk, nbytes - n); n 60 arch/s390/crypto/ghash_s390.c unsigned int n; n 66 arch/s390/crypto/ghash_s390.c n = min(srclen, dctx->bytes); n 67 arch/s390/crypto/ghash_s390.c dctx->bytes -= n; n 68 arch/s390/crypto/ghash_s390.c srclen -= n; n 70 arch/s390/crypto/ghash_s390.c memcpy(pos, src, n); n 71 arch/s390/crypto/ghash_s390.c src += n; n 79 arch/s390/crypto/ghash_s390.c n = srclen & ~(GHASH_BLOCK_SIZE - 1); n 80 arch/s390/crypto/ghash_s390.c if (n) { n 81 arch/s390/crypto/ghash_s390.c cpacf_kimd(CPACF_KIMD_GHASH, dctx, src, n); n 82 arch/s390/crypto/ghash_s390.c src += n; n 83 arch/s390/crypto/ghash_s390.c srclen -= n; n 165 arch/s390/crypto/paes_s390.c unsigned int nbytes, n, k; n 171 arch/s390/crypto/paes_s390.c n = nbytes & ~(AES_BLOCK_SIZE - 1); n 173 arch/s390/crypto/paes_s390.c walk->dst.virt.addr, walk->src.virt.addr, n); n 176 arch/s390/crypto/paes_s390.c if (k < n) { n 283 arch/s390/crypto/paes_s390.c unsigned int nbytes, n, k; n 295 arch/s390/crypto/paes_s390.c n = nbytes & ~(AES_BLOCK_SIZE - 1); n 297 arch/s390/crypto/paes_s390.c walk->dst.virt.addr, walk->src.virt.addr, n); n 300 arch/s390/crypto/paes_s390.c if (k < n) { n 437 arch/s390/crypto/paes_s390.c unsigned int keylen, offset, nbytes, n, k; n 465 arch/s390/crypto/paes_s390.c n = nbytes & ~(AES_BLOCK_SIZE - 1); n 467 arch/s390/crypto/paes_s390.c walk->dst.virt.addr, walk->src.virt.addr, n); n 470 arch/s390/crypto/paes_s390.c if (k < n) { n 578 arch/s390/crypto/paes_s390.c unsigned int i, n; n 582 arch/s390/crypto/paes_s390.c n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1); n 583 arch/s390/crypto/paes_s390.c for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) { n 588 arch/s390/crypto/paes_s390.c return n; n 596 arch/s390/crypto/paes_s390.c unsigned int nbytes, n, k; n 603 arch/s390/crypto/paes_s390.c n = AES_BLOCK_SIZE; n 605 arch/s390/crypto/paes_s390.c n = __ctrblk_init(ctrblk, walk->iv, nbytes); n 606 arch/s390/crypto/paes_s390.c ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv; n 609 arch/s390/crypto/paes_s390.c n, ctrptr); n 615 arch/s390/crypto/paes_s390.c ret = blkcipher_walk_done(desc, walk, nbytes - n); n 617 arch/s390/crypto/paes_s390.c if (k < n) { n 137 arch/s390/crypto/prng.c int n, ret = 0; n 164 arch/s390/crypto/prng.c for (n = 0; n < 512; n++) { n 165 arch/s390/crypto/prng.c int offset = (PAGE_SIZE / 2) + (n * 4) - 4; n 171 arch/s390/crypto/prng.c n = (nbytes < 64) ? nbytes : 64; n 172 arch/s390/crypto/prng.c memcpy(ebuf, pblock, n); n 173 arch/s390/crypto/prng.c ret += n; n 174 arch/s390/crypto/prng.c ebuf += n; n 175 arch/s390/crypto/prng.c nbytes -= n; n 515 arch/s390/crypto/prng.c int chunk, n, ret = 0; n 546 arch/s390/crypto/prng.c n = (chunk + 7) & -8; n 568 arch/s390/crypto/prng.c prng_data->buf, prng_data->buf, n); n 570 arch/s390/crypto/prng.c prng_data->prngws.byte_counter += n; n 571 arch/s390/crypto/prng.c prng_data->prngws.reseed_counter += n; n 593 arch/s390/crypto/prng.c int n, ret = 0; n 624 arch/s390/crypto/prng.c n = (nbytes < prng_data->rest) ? 
n 626 arch/s390/crypto/prng.c prng_data->rest -= n; n 630 arch/s390/crypto/prng.c n = prng_sha512_generate(p, prng_chunk_size); n 631 arch/s390/crypto/prng.c if (n < 0) { n 632 arch/s390/crypto/prng.c ret = n; n 636 arch/s390/crypto/prng.c n = nbytes; n 637 arch/s390/crypto/prng.c prng_data->rest = prng_chunk_size - n; n 639 arch/s390/crypto/prng.c n = prng_chunk_size; n 643 arch/s390/crypto/prng.c if (copy_to_user(ubuf, p, n)) { n 647 arch/s390/crypto/prng.c memzero_explicit(p, n); n 648 arch/s390/crypto/prng.c ubuf += n; n 649 arch/s390/crypto/prng.c nbytes -= n; n 650 arch/s390/crypto/prng.c ret += n; n 20 arch/s390/crypto/sha_common.c unsigned int index, n; n 40 arch/s390/crypto/sha_common.c n = (len / bsize) * bsize; n 41 arch/s390/crypto/sha_common.c cpacf_kimd(ctx->func, ctx->state, data, n); n 42 arch/s390/crypto/sha_common.c data += n; n 43 arch/s390/crypto/sha_common.c len -= n; n 77 arch/s390/crypto/sha_common.c unsigned int n; n 80 arch/s390/crypto/sha_common.c n = ctx->count % bsize; n 112 arch/s390/crypto/sha_common.c cpacf_klmd(ctx->func, ctx->state, ctx->buf, n); n 220 arch/s390/include/asm/ccwdev.h #define to_ccwdev(n) container_of(n, struct ccw_device, dev) n 221 arch/s390/include/asm/ccwdev.h #define to_ccwdrv(n) container_of(n, struct ccw_driver, driver) n 15 arch/s390/include/asm/cmpxchg.h #define cmpxchg(ptr, o, n) \ n 18 arch/s390/include/asm/cmpxchg.h __typeof__(*(ptr)) __n = (n); \ n 21 arch/s390/include/asm/delay.h #define ndelay(n) __ndelay((unsigned long long) (n)) n 22 arch/s390/include/asm/delay.h #define udelay(n) __udelay((unsigned long long) (n)) n 23 arch/s390/include/asm/delay.h #define mdelay(n) __udelay((unsigned long long) (n) * 1000) n 20 arch/s390/include/asm/ftrace.h #define ftrace_return_address(n) 0UL n 22 arch/s390/include/asm/ftrace.h #define ftrace_return_address(n) __builtin_return_address(n) n 82 arch/s390/include/asm/gmap.h #define gmap_for_each_rmap_safe(pos, n, head) \ n 83 arch/s390/include/asm/gmap.h for (pos = (head); n = pos ? 
pos->next : NULL, pos; pos = n) n 40 arch/s390/include/asm/mem_detect.h static inline int __get_mem_detect_block(u32 n, unsigned long *start, n 43 arch/s390/include/asm/mem_detect.h if (n >= mem_detect.count) { n 49 arch/s390/include/asm/mem_detect.h if (n < MEM_INLINED_ENTRIES) { n 50 arch/s390/include/asm/mem_detect.h *start = (unsigned long)mem_detect.entries[n].start; n 51 arch/s390/include/asm/mem_detect.h *end = (unsigned long)mem_detect.entries[n].end; n 53 arch/s390/include/asm/mem_detect.h *start = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].start; n 54 arch/s390/include/asm/mem_detect.h *end = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].end; n 141 arch/s390/include/asm/pci_io.h unsigned long n) n 145 arch/s390/include/asm/pci_io.h while (n > 0) { n 147 arch/s390/include/asm/pci_io.h (u64) dst, n, n 154 arch/s390/include/asm/pci_io.h n -= size; n 160 arch/s390/include/asm/pci_io.h const void *src, unsigned long n) n 167 arch/s390/include/asm/pci_io.h while (n > 0) { n 169 arch/s390/include/asm/pci_io.h (u64) src, n, n 179 arch/s390/include/asm/pci_io.h n -= size; n 180 arch/s390/include/asm/ptrace.h unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n); n 22 arch/s390/include/asm/string.h void *memcpy(void *dest, const void *src, size_t n); n 23 arch/s390/include/asm/string.h void *memset(void *s, int c, size_t n); n 24 arch/s390/include/asm/string.h void *memmove(void *dest, const void *src, size_t n); n 43 arch/s390/include/asm/string.h int memcmp(const void *s1, const void *s2, size_t n); n 45 arch/s390/include/asm/string.h size_t strlcat(char *dest, const char *src, size_t n); n 47 arch/s390/include/asm/string.h char *strncat(char *dest, const char *src, size_t n); n 48 arch/s390/include/asm/string.h char *strncpy(char *dest, const char *src, size_t n); n 62 arch/s390/include/asm/string.h extern void *__memcpy(void *dest, const void *src, size_t n); n 63 arch/s390/include/asm/string.h extern void *__memset(void *s, int c, size_t n); n 64 arch/s390/include/asm/string.h extern void *__memmove(void *dest, const void *src, size_t n); n 73 arch/s390/include/asm/string.h #define memset(s, c, n) __memset(s, c, n) n 108 arch/s390/include/asm/string.h static inline void *memchr(const void * s, int c, size_t n) n 111 arch/s390/include/asm/string.h const void *ret = s + n; n 125 arch/s390/include/asm/string.h static inline void *memscan(void *s, int c, size_t n) n 128 arch/s390/include/asm/string.h const void *ret = s + n; n 186 arch/s390/include/asm/string.h static inline size_t strnlen(const char * s, size_t n) n 190 arch/s390/include/asm/string.h const char *end = s + n; n 200 arch/s390/include/asm/string.h void *memchr(const void * s, int c, size_t n); n 201 arch/s390/include/asm/string.h void *memscan(void *s, int c, size_t n); n 205 arch/s390/include/asm/string.h size_t strnlen(const char * s, size_t n); n 57 arch/s390/include/asm/syscall.h unsigned int n = 6; n 63 arch/s390/include/asm/syscall.h while (n-- > 0) n 64 arch/s390/include/asm/syscall.h if (n > 0) n 65 arch/s390/include/asm/syscall.h args[n] = regs->gprs[2 + n] & mask; n 74 arch/s390/include/asm/syscall.h unsigned int n = 6; n 76 arch/s390/include/asm/syscall.h while (n-- > 0) n 77 arch/s390/include/asm/syscall.h if (n > 0) n 78 arch/s390/include/asm/syscall.h regs->gprs[2 + n] = args[n]; n 53 arch/s390/include/asm/uaccess.h raw_copy_from_user(void *to, const void __user *from, unsigned long n); n 56 arch/s390/include/asm/uaccess.h 
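Aside: several arch/s390/include/asm/string.h hits above (memchr, memscan, strnlen and the lib/string.c __strnend helper) share one shape: compute the end pointer s + n once and never scan past it. A stand-alone sketch of the bounded-length scan, with nothing s390-specific, follows; my_strnlen() is an invented name, not the kernel helper.

#include <stdio.h>
#include <stddef.h>

/* Length of s, but never examine more than n bytes. */
static size_t my_strnlen(const char *s, size_t n)
{
        const char *end = s + n;        /* one past the last byte we may read */
        const char *p = s;

        while (p < end && *p)
                p++;
        return (size_t)(p - s);
}

int main(void)
{
        printf("%zu\n", my_strnlen("hello", 3));  /* 3: capped by n */
        printf("%zu\n", my_strnlen("hi", 16));    /* 2: stops at the NUL */
        return 0;
}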
raw_copy_to_user(void __user *to, const void *from, unsigned long n); n 244 arch/s390/include/asm/uaccess.h raw_copy_in_user(void __user *to, const void __user *from, unsigned long n); n 261 arch/s390/include/asm/uaccess.h static inline unsigned long strnlen_user(const char __user *src, unsigned long n) n 264 arch/s390/include/asm/uaccess.h return __strnlen_user(src, n); n 272 arch/s390/include/asm/uaccess.h static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) n 275 arch/s390/include/asm/uaccess.h return __clear_user(to, n); n 35 arch/s390/include/uapi/asm/runtime_instr.h __u32 n : 1; n 59 arch/s390/kernel/diag.c unsigned long n = (unsigned long) v - 1; n 63 arch/s390/kernel/diag.c if (n == 0) { n 73 arch/s390/kernel/diag.c } else if (n <= NR_DIAG_STAT) { n 74 arch/s390/kernel/diag.c seq_printf(m, "diag %03x:", diag_map[n-1].code); n 77 arch/s390/kernel/diag.c seq_printf(m, " %10u", stat->counter[n-1]); n 79 arch/s390/kernel/diag.c seq_printf(m, " %s\n", diag_map[n-1].name); n 1607 arch/s390/kernel/ipl.c static void __init strncpy_skip_quote(char *dst, char *src, int n) n 1616 arch/s390/kernel/ipl.c if (dx >= n) n 422 arch/s390/kernel/module.c unsigned long i, n; n 430 arch/s390/kernel/module.c n = sechdrs[relsec].sh_size / sizeof(Elf_Rela); n 432 arch/s390/kernel/module.c for (i = 0; i < n; i++, rela++) { n 154 arch/s390/kernel/processor.c static void show_cpu_mhz(struct seq_file *m, unsigned long n) n 156 arch/s390/kernel/processor.c struct cpu_info *c = per_cpu_ptr(&cpu_info, n); n 167 arch/s390/kernel/processor.c unsigned long n = (unsigned long) v - 1; n 170 arch/s390/kernel/processor.c if (n == first) n 174 arch/s390/kernel/processor.c seq_printf(m, "\ncpu number : %ld\n", n); n 175 arch/s390/kernel/processor.c show_cpu_mhz(m, n); n 1338 arch/s390/kernel/ptrace.c .n = sizeof(s390_regs) / sizeof(long), n 1346 arch/s390/kernel/ptrace.c .n = sizeof(s390_fp_regs) / sizeof(long), n 1354 arch/s390/kernel/ptrace.c .n = 1, n 1362 arch/s390/kernel/ptrace.c .n = 1, n 1370 arch/s390/kernel/ptrace.c .n = 1, n 1378 arch/s390/kernel/ptrace.c .n = __NUM_VXRS_LOW, n 1386 arch/s390/kernel/ptrace.c .n = __NUM_VXRS_HIGH, n 1394 arch/s390/kernel/ptrace.c .n = sizeof(struct gs_cb) / sizeof(__u64), n 1402 arch/s390/kernel/ptrace.c .n = sizeof(struct gs_cb) / sizeof(__u64), n 1410 arch/s390/kernel/ptrace.c .n = sizeof(struct runtime_instr_cb) / sizeof(__u64), n 1422 arch/s390/kernel/ptrace.c .n = ARRAY_SIZE(s390_regsets) n 1582 arch/s390/kernel/ptrace.c .n = sizeof(s390_compat_regs) / sizeof(compat_long_t), n 1590 arch/s390/kernel/ptrace.c .n = sizeof(s390_fp_regs) / sizeof(compat_long_t), n 1598 arch/s390/kernel/ptrace.c .n = 1, n 1606 arch/s390/kernel/ptrace.c .n = 1, n 1614 arch/s390/kernel/ptrace.c .n = 1, n 1622 arch/s390/kernel/ptrace.c .n = __NUM_VXRS_LOW, n 1630 arch/s390/kernel/ptrace.c .n = __NUM_VXRS_HIGH, n 1638 arch/s390/kernel/ptrace.c .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t), n 1646 arch/s390/kernel/ptrace.c .n = sizeof(struct gs_cb) / sizeof(__u64), n 1654 arch/s390/kernel/ptrace.c .n = sizeof(struct gs_cb) / sizeof(__u64), n 1662 arch/s390/kernel/ptrace.c .n = sizeof(struct runtime_instr_cb) / sizeof(__u64), n 1674 arch/s390/kernel/ptrace.c .n = ARRAY_SIZE(s390_compat_regsets) n 1735 arch/s390/kernel/ptrace.c unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) n 1739 arch/s390/kernel/ptrace.c addr = kernel_stack_pointer(regs) + n * sizeof(long); n 2055 arch/s390/kvm/interrupt.c struct kvm_s390_interrupt_info 
*inti, *n; n 2057 arch/s390/kvm/interrupt.c list_for_each_entry_safe(inti, n, _list, list) { n 2105 arch/s390/kvm/interrupt.c int n = 0; n 2124 arch/s390/kvm/interrupt.c if (n == max_irqs) { n 2130 arch/s390/kvm/interrupt.c irq = (struct kvm_s390_irq *) &buf[n]; n 2133 arch/s390/kvm/interrupt.c n++; n 2141 arch/s390/kvm/interrupt.c if (n == max_irqs) { n 2146 arch/s390/kvm/interrupt.c inti_to_irq(inti, &buf[n]); n 2147 arch/s390/kvm/interrupt.c n++; n 2151 arch/s390/kvm/interrupt.c if (n == max_irqs) { n 2156 arch/s390/kvm/interrupt.c irq = (struct kvm_s390_irq *) &buf[n]; n 2159 arch/s390/kvm/interrupt.c n++; n 2162 arch/s390/kvm/interrupt.c if (n == max_irqs) { n 2167 arch/s390/kvm/interrupt.c irq = (struct kvm_s390_irq *) &buf[n]; n 2170 arch/s390/kvm/interrupt.c n++; n 2176 arch/s390/kvm/interrupt.c if (!ret && n > 0) { n 2177 arch/s390/kvm/interrupt.c if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n)) n 2182 arch/s390/kvm/interrupt.c return ret < 0 ? ret : n; n 2852 arch/s390/kvm/interrupt.c int n; n 2873 arch/s390/kvm/interrupt.c for (n = 0; n < len / sizeof(*buf); n++) { n 2874 arch/s390/kvm/interrupt.c r = do_inject_vcpu(vcpu, &buf[n]); n 2938 arch/s390/kvm/interrupt.c int n = 0; n 2950 arch/s390/kvm/interrupt.c if (n + sizeof(irq) > len) n 2953 arch/s390/kvm/interrupt.c if (copy_to_user(&buf[n], &irq, sizeof(irq))) n 2955 arch/s390/kvm/interrupt.c n += sizeof(irq); n 2961 arch/s390/kvm/interrupt.c if (n + sizeof(irq) > len) n 2965 arch/s390/kvm/interrupt.c if (copy_to_user(&buf[n], &irq, sizeof(irq))) n 2967 arch/s390/kvm/interrupt.c n += sizeof(irq); n 2972 arch/s390/kvm/interrupt.c if (n + sizeof(irq) > len) n 2977 arch/s390/kvm/interrupt.c if (copy_to_user(&buf[n], &irq, sizeof(irq))) n 2979 arch/s390/kvm/interrupt.c n += sizeof(irq); n 2982 arch/s390/kvm/interrupt.c return n; n 616 arch/s390/kvm/kvm-s390.c unsigned long n; n 643 arch/s390/kvm/kvm-s390.c n = kvm_dirty_bitmap_bytes(memslot); n 644 arch/s390/kvm/kvm-s390.c memset(memslot->dirty_bitmap, 0, n); n 809 arch/s390/kvm/priv.c int n; n 818 arch/s390/kvm/priv.c for (n = mem->count - 1; n > 0 ; n--) n 819 arch/s390/kvm/priv.c memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0])); n 29 arch/s390/lib/string.c static inline char *__strnend(const char *s, size_t n) n 32 arch/s390/lib/string.c const char *p = s + n; n 62 arch/s390/lib/string.c size_t strnlen(const char *s, size_t n) n 64 arch/s390/lib/string.c return __strnend(s, n) - s; n 127 arch/s390/lib/string.c char *strncpy(char *dest, const char *src, size_t n) n 129 arch/s390/lib/string.c size_t len = __strnend(src, n) - src; n 130 arch/s390/lib/string.c memset(dest + len, 0, n - len); n 169 arch/s390/lib/string.c size_t strlcat(char *dest, const char *src, size_t n) n 175 arch/s390/lib/string.c if (dsize < n) { n 177 arch/s390/lib/string.c n -= dsize; n 178 arch/s390/lib/string.c if (len >= n) n 179 arch/s390/lib/string.c len = n - 1; n 200 arch/s390/lib/string.c char *strncat(char *dest, const char *src, size_t n) n 202 arch/s390/lib/string.c size_t len = __strnend(src, n) - src; n 316 arch/s390/lib/string.c void *memchr(const void *s, int c, size_t n) n 319 arch/s390/lib/string.c const void *ret = s + n; n 339 arch/s390/lib/string.c int memcmp(const void *s1, const void *s2, size_t n) n 343 arch/s390/lib/string.c ret = clcle(s1, n, s2, n); n 361 arch/s390/lib/string.c void *memscan(void *s, int c, size_t n) n 364 arch/s390/lib/string.c const void *ret = s + n; n 171 arch/s390/lib/uaccess.c unsigned long raw_copy_from_user(void *to, const void __user 
*from, unsigned long n) n 174 arch/s390/lib/uaccess.c return copy_from_user_mvcos(to, from, n); n 175 arch/s390/lib/uaccess.c return copy_from_user_mvcp(to, from, n); n 246 arch/s390/lib/uaccess.c unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n) n 249 arch/s390/lib/uaccess.c return copy_to_user_mvcos(to, from, n); n 250 arch/s390/lib/uaccess.c return copy_to_user_mvcs(to, from, n); n 311 arch/s390/lib/uaccess.c unsigned long raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) n 314 arch/s390/lib/uaccess.c return copy_in_user_mvcos(to, from, n); n 315 arch/s390/lib/uaccess.c return copy_in_user_mvc(to, from, n); n 92 arch/s390/pci/pci_mmio.c const void __user *src, size_t n) n 102 arch/s390/pci/pci_mmio.c while (n > 0) { n 104 arch/s390/pci/pci_mmio.c (u64 __force) src, n, n 114 arch/s390/pci/pci_mmio.c n -= size; n 245 arch/s390/pci/pci_mmio.c unsigned long n) n 252 arch/s390/pci/pci_mmio.c while (n > 0) { n 254 arch/s390/pci/pci_mmio.c (u64 __force) dst, n, n 261 arch/s390/pci/pci_mmio.c n -= size; n 27 arch/sh/boot/compressed/misc.c #define memzero(s, n) memset ((s), 0, (n)) n 79 arch/sh/boot/compressed/misc.c void* memset(void* s, int c, size_t n) n 84 arch/sh/boot/compressed/misc.c for (i=0;i<n;i++) ss[i] = c; n 257 arch/sh/drivers/dma/dma-sh.c #define dmaor_read_reg(n) __raw_readw(dma_find_base((n)*6)) n 258 arch/sh/drivers/dma/dma-sh.c #define dmaor_write_reg(n, data) __raw_writew(data, dma_find_base(n)*6) n 297 arch/sh/drivers/dma/dma-sh.c static inline unsigned int get_dma_error_irq(int n) n 299 arch/sh/drivers/dma/dma-sh.c return get_dmte_irq(n * 6); n 311 arch/sh/drivers/dma/dma-sh.c static inline unsigned int get_dma_error_irq(int n) n 313 arch/sh/drivers/dma/dma-sh.c return dmae_irq_map[n]; n 331 arch/sh/drivers/dma/dma-sh.c int n; n 333 arch/sh/drivers/dma/dma-sh.c for (n = 0; n < NR_DMAE; n++) { n 334 arch/sh/drivers/dma/dma-sh.c int i = request_irq(get_dma_error_irq(n), dma_err, n 335 arch/sh/drivers/dma/dma-sh.c IRQF_SHARED, dmae_name[n], (void *)dmae_name[n]); n 337 arch/sh/drivers/dma/dma-sh.c printk(KERN_ERR "%s request_irq fail\n", dmae_name[n]); n 347 arch/sh/drivers/dma/dma-sh.c int n; n 349 arch/sh/drivers/dma/dma-sh.c for (n = 0; n < NR_DMAE; n++) n 350 arch/sh/drivers/dma/dma-sh.c free_irq(get_dma_error_irq(n), NULL); n 36 arch/sh/include/asm/atomic.h #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) n 66 arch/sh/include/asm/cmpxchg.h #define cmpxchg(ptr,o,n) \ n 69 arch/sh/include/asm/cmpxchg.h __typeof__(*(ptr)) _n_ = (n); \ n 44 arch/sh/include/asm/ftrace.h #define ftrace_return_address(n) return_address(n) n 110 arch/sh/include/asm/ptrace.h unsigned int n) n 113 arch/sh/include/asm/ptrace.h addr += n; n 107 arch/sh/include/asm/uaccess.h extern __must_check long strnlen_user(const char __user *str, long n); n 111 arch/sh/include/asm/uaccess.h __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n); n 114 arch/sh/include/asm/uaccess.h raw_copy_from_user(void *to, const void __user *from, unsigned long n) n 116 arch/sh/include/asm/uaccess.h return __copy_user(to, (__force void *)from, n); n 120 arch/sh/include/asm/uaccess.h raw_copy_to_user(void __user *to, const void *from, unsigned long n) n 122 arch/sh/include/asm/uaccess.h return __copy_user((__force void *)to, from, n); n 133 arch/sh/include/asm/uaccess.h #define clear_user(addr,n) \ n 136 arch/sh/include/asm/uaccess.h unsigned long __cl_size = (n); \ n 26 arch/sh/include/mach-common/mach/microdev.h #define 
MICRODEV_FPGA_INTC_MASK(n) (1ul<<(n)) /* Interrupt mask to enable/disable INTC in CPU-board FPGA */ n 27 arch/sh/include/mach-common/mach/microdev.h #define MICRODEV_FPGA_INTPRI_REG(n) (MICRODEV_FPGA_INTC_BASE+0x10+((n)/8)*8)/* Interrupt Priority Register on INTC on CPU-board FPGA */ n 28 arch/sh/include/mach-common/mach/microdev.h #define MICRODEV_FPGA_INTPRI_LEVEL(n,x) ((x)<<(((n)%8)*4)) /* MICRODEV_FPGA_INTPRI_LEVEL(int_number, int_level) */ n 29 arch/sh/include/mach-common/mach/microdev.h #define MICRODEV_FPGA_INTPRI_MASK(n) (MICRODEV_FPGA_INTPRI_LEVEL((n),0xful)) /* Interrupt Priority Mask on INTC on CPU-board FPGA */ n 24 arch/sh/include/mach-dreamcast/mach/maple.h #define MAPLE_TIMEOUT(n) ((n)<<15) n 97 arch/sh/kernel/cpu/sh2a/fpu.c unsigned long long m, n; n 109 arch/sh/kernel/cpu/sh2a/fpu.c n = m; n 111 arch/sh/kernel/cpu/sh2a/fpu.c while (n) { n >>= 1; w++; } n 154 arch/sh/kernel/cpu/sh2a/fpu.c unsigned long long ml, int n) n 156 arch/sh/kernel/cpu/sh2a/fpu.c if (n >= 64) n 157 arch/sh/kernel/cpu/sh2a/fpu.c return mh >> (n - 64); n 158 arch/sh/kernel/cpu/sh2a/fpu.c return (mh << (64 - n)) | (ml >> n); n 377 arch/sh/kernel/cpu/sh2a/fpu.c denormal_to_double (struct sh_fpu_hard_struct *fpu, int n) n 393 arch/sh/kernel/cpu/sh2a/fpu.c fpu->fp_regs[n] = du; n 394 arch/sh/kernel/cpu/sh2a/fpu.c fpu->fp_regs[n+1] = dl; n 468 arch/sh/kernel/cpu/sh2a/fpu.c int n, m, prec; n 471 arch/sh/kernel/cpu/sh2a/fpu.c n = (finsn >> 8) & 0xf; n 473 arch/sh/kernel/cpu/sh2a/fpu.c hx = tsk->thread.xstate->hardfpu.fp_regs[n]; n 485 arch/sh/kernel/cpu/sh2a/fpu.c | tsk->thread.xstate->hardfpu.fp_regs[n+1]; n 492 arch/sh/kernel/cpu/sh2a/fpu.c tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32; n 493 arch/sh/kernel/cpu/sh2a/fpu.c tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff; n 502 arch/sh/kernel/cpu/sh2a/fpu.c tsk->thread.xstate->hardfpu.fp_regs[n] = hx; n 511 arch/sh/kernel/cpu/sh2a/fpu.c int n, m, prec; n 514 arch/sh/kernel/cpu/sh2a/fpu.c n = (finsn >> 8) & 0xf; n 516 arch/sh/kernel/cpu/sh2a/fpu.c hx = tsk->thread.xstate->hardfpu.fp_regs[n]; n 528 arch/sh/kernel/cpu/sh2a/fpu.c | tsk->thread.xstate->hardfpu.fp_regs[n+1]; n 535 arch/sh/kernel/cpu/sh2a/fpu.c tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32; n 536 arch/sh/kernel/cpu/sh2a/fpu.c tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff; n 545 arch/sh/kernel/cpu/sh2a/fpu.c tsk->thread.xstate->hardfpu.fp_regs[n] = hx; n 146 arch/sh/kernel/cpu/sh4/fpu.c static void denormal_to_double(struct sh_fpu_hard_struct *fpu, int n) n 162 arch/sh/kernel/cpu/sh4/fpu.c fpu->fp_regs[n] = du; n 163 arch/sh/kernel/cpu/sh4/fpu.c fpu->fp_regs[n + 1] = dl; n 243 arch/sh/kernel/cpu/sh4/fpu.c int n, m, prec; n 246 arch/sh/kernel/cpu/sh4/fpu.c n = (finsn >> 8) & 0xf; n 248 arch/sh/kernel/cpu/sh4/fpu.c hx = tsk->thread.xstate->hardfpu.fp_regs[n]; n 260 arch/sh/kernel/cpu/sh4/fpu.c | tsk->thread.xstate->hardfpu.fp_regs[n + 1]; n 264 arch/sh/kernel/cpu/sh4/fpu.c tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32; n 265 arch/sh/kernel/cpu/sh4/fpu.c tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff; n 271 arch/sh/kernel/cpu/sh4/fpu.c tsk->thread.xstate->hardfpu.fp_regs[n] = hx; n 281 arch/sh/kernel/cpu/sh4/fpu.c int n, m, prec; n 284 arch/sh/kernel/cpu/sh4/fpu.c n = (finsn >> 8) & 0xf; n 286 arch/sh/kernel/cpu/sh4/fpu.c hx = tsk->thread.xstate->hardfpu.fp_regs[n]; n 298 arch/sh/kernel/cpu/sh4/fpu.c | tsk->thread.xstate->hardfpu.fp_regs[n + 1]; n 305 arch/sh/kernel/cpu/sh4/fpu.c tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32; n 306 
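Aside: the arch/sh/kernel/cpu/sh2a/fpu.c hits around line 154 shift a 128-bit mantissa held in two 64-bit halves (mh:ml) right by n bits. A small stand-alone sketch of the same trick follows; rshift128() is an invented name, and the explicit n == 0 guard is an addition of mine so the illustration never performs an undefined 64-bit shift (n is assumed to be below 128).

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* Low 64 bits of (mh:ml) >> n, for 0 <= n < 128. */
static uint64_t rshift128(uint64_t mh, uint64_t ml, unsigned int n)
{
        if (n == 0)
                return ml;
        if (n >= 64)
                return mh >> (n - 64);
        return (mh << (64 - n)) | (ml >> n);
}

int main(void)
{
        /* 0x1_0000000000000000 >> 4 == 0x1000000000000000 */
        printf("%" PRIx64 "\n", rshift128(0x1, 0x0, 4));
        return 0;
}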
arch/sh/kernel/cpu/sh4/fpu.c tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff; n 315 arch/sh/kernel/cpu/sh4/fpu.c tsk->thread.xstate->hardfpu.fp_regs[n] = hx; n 325 arch/sh/kernel/cpu/sh4/fpu.c int n, m, prec; n 328 arch/sh/kernel/cpu/sh4/fpu.c n = (finsn >> 8) & 0xf; n 330 arch/sh/kernel/cpu/sh4/fpu.c hx = tsk->thread.xstate->hardfpu.fp_regs[n]; n 342 arch/sh/kernel/cpu/sh4/fpu.c | tsk->thread.xstate->hardfpu.fp_regs[n + 1]; n 348 arch/sh/kernel/cpu/sh4/fpu.c tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32; n 349 arch/sh/kernel/cpu/sh4/fpu.c tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff; n 355 arch/sh/kernel/cpu/sh4/fpu.c tsk->thread.xstate->hardfpu.fp_regs[n] = hx; n 17 arch/sh/kernel/cpu/sh4/perf_event.c #define PMCR(n) (PM_CR_BASE + ((n) * 0x04)) n 18 arch/sh/kernel/cpu/sh4/perf_event.c #define PMCTRH(n) (PM_CTR_BASE + 0x00 + ((n) * 0x08)) n 19 arch/sh/kernel/cpu/sh4/perf_event.c #define PMCTRL(n) (PM_CTR_BASE + 0x04 + ((n) * 0x08)) n 84 arch/sh/kernel/cpu/shmobile/pm.c int n; n 103 arch/sh/kernel/cpu/shmobile/pm.c n = &sh_mobile_sleep_enter_end - &sh_mobile_sleep_enter_start; n 104 arch/sh/kernel/cpu/shmobile/pm.c memcpy(vp, &sh_mobile_sleep_enter_start, n); n 105 arch/sh/kernel/cpu/shmobile/pm.c vp += roundup(n, 4); n 108 arch/sh/kernel/cpu/shmobile/pm.c n = pre_end - pre_start; n 109 arch/sh/kernel/cpu/shmobile/pm.c memcpy(vp, pre_start, n); n 111 arch/sh/kernel/cpu/shmobile/pm.c vp += roundup(n, 4); n 114 arch/sh/kernel/cpu/shmobile/pm.c n = post_end - post_start; n 115 arch/sh/kernel/cpu/shmobile/pm.c memcpy(vp, post_start, n); n 117 arch/sh/kernel/cpu/shmobile/pm.c vp += roundup(n, 4); n 122 arch/sh/kernel/cpu/shmobile/pm.c n = &sh_mobile_sleep_resume_end - &sh_mobile_sleep_resume_start; n 123 arch/sh/kernel/cpu/shmobile/pm.c memcpy(vp, &sh_mobile_sleep_resume_start, n); n 299 arch/sh/kernel/disassemble.c int n; n 307 arch/sh/kernel/disassemble.c for (n = 0; n < 4; n++) { n 308 arch/sh/kernel/disassemble.c int i = op->nibbles[n]; n 311 arch/sh/kernel/disassemble.c if (nibs[n] == i) n 361 arch/sh/kernel/disassemble.c rn = nibs[n]; n 364 arch/sh/kernel/disassemble.c rm = nibs[n]; n 367 arch/sh/kernel/disassemble.c rn = (nibs[n] & 0xc) >> 2; n 368 arch/sh/kernel/disassemble.c rm = (nibs[n] & 0x3); n 371 arch/sh/kernel/disassemble.c rb = nibs[n] & 0x07; n 382 arch/sh/kernel/disassemble.c for (n = 0; n < 6 && op->arg[n] != A_END; n++) { n 383 arch/sh/kernel/disassemble.c if (n && op->arg[1] != A_END) n 385 arch/sh/kernel/disassemble.c switch (op->arg[n]) { n 88 arch/sh/kernel/dwarf.c struct dwarf_reg *reg, *n; n 90 arch/sh/kernel/dwarf.c list_for_each_entry_safe(reg, n, &frame->reg_list, link) { n 46 arch/sh/kernel/io_trapped.c int k, n; n 65 arch/sh/kernel/io_trapped.c n = len >> PAGE_SHIFT; n 67 arch/sh/kernel/io_trapped.c if (n >= TRAPPED_PAGES_MAX) n 70 arch/sh/kernel/io_trapped.c for (k = 0; k < n; k++) n 73 arch/sh/kernel/io_trapped.c tiop->virt_base = vmap(pages, n, VM_MAP, PAGE_NONE); n 227 arch/sh/kernel/ptrace_32.c return tsk_used_math(target) ? regset->n : 0; n 273 arch/sh/kernel/ptrace_32.c return regs->sr & SR_DSP ? regset->n : 0; n 325 arch/sh/kernel/ptrace_32.c .n = ELF_NGREG, n 335 arch/sh/kernel/ptrace_32.c .n = sizeof(struct user_fpu_struct) / sizeof(long), n 346 arch/sh/kernel/ptrace_32.c .n = sizeof(struct pt_dspregs) / sizeof(long), n 360 arch/sh/kernel/ptrace_32.c .n = ARRAY_SIZE(sh_regsets), n 249 arch/sh/kernel/ptrace_64.c return tsk_used_math(target) ? 
regset->n : 0; n 351 arch/sh/kernel/ptrace_64.c .n = ELF_NGREG, n 361 arch/sh/kernel/ptrace_64.c .n = sizeof(struct user_fpu_struct) / n 376 arch/sh/kernel/ptrace_64.c .n = ARRAY_SIZE(sh_regsets), n 55 arch/sh/kernel/signal_32.c #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */ n 9 arch/sh/lib/div64-generic.c extern uint64_t __xdiv64_32(u64 n, u32 d); n 35 arch/sh/math-emu/math.c #define BANK(n) (n^(FPSCR_FR?16:0)) n 38 arch/sh/math-emu/math.c #define FRn (FR[BANK(n)]) n 41 arch/sh/math-emu/math.c #define DRn (DR[BANK(n)/2]) n 44 arch/sh/math-emu/math.c #define XREG(n) (n^16) n 45 arch/sh/math-emu/math.c #define XFn (FR[BANK(XREG(n))]) n 47 arch/sh/math-emu/math.c #define XDn (DR[BANK(XREG(n))/2]) n 51 arch/sh/math-emu/math.c #define Rn (regs->regs[n]) n 79 arch/sh/math-emu/math.c fcmp_gt(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) n 90 arch/sh/math-emu/math.c fcmp_eq(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) n 106 arch/sh/math-emu/math.c fadd(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) n 113 arch/sh/math-emu/math.c fsub(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) n 120 arch/sh/math-emu/math.c fmul(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) n 127 arch/sh/math-emu/math.c fdiv(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) n 134 arch/sh/math-emu/math.c fmac(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) n 156 arch/sh/math-emu/math.c int n) n 159 arch/sh/math-emu/math.c FMOV_EXT(n); n 161 arch/sh/math-emu/math.c n++; n 172 arch/sh/math-emu/math.c int n) n 175 arch/sh/math-emu/math.c FMOV_EXT(n); n 177 arch/sh/math-emu/math.c n++; n 188 arch/sh/math-emu/math.c int n) n 191 arch/sh/math-emu/math.c FMOV_EXT(n); n 193 arch/sh/math-emu/math.c n++; n 206 arch/sh/math-emu/math.c int n) n 222 arch/sh/math-emu/math.c int n) n 238 arch/sh/math-emu/math.c int n) n 256 arch/sh/math-emu/math.c int n) n 260 arch/sh/math-emu/math.c FMOV_EXT(n); n 270 arch/sh/math-emu/math.c fnop_mn(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n) n 276 arch/sh/math-emu/math.c #define NOTYETn(i) static int i(struct sh_fpu_soft_struct *fregs, int n) \ n 289 arch/sh/math-emu/math.c static int ffloat(struct sh_fpu_soft_struct *fregs, int n) n 305 arch/sh/math-emu/math.c static int ftrc(struct sh_fpu_soft_struct *fregs, int n) n 317 arch/sh/math-emu/math.c static int fcnvsd(struct sh_fpu_soft_struct *fregs, int n) n 328 arch/sh/math-emu/math.c static int fcnvds(struct sh_fpu_soft_struct *fregs, int n) n 345 arch/sh/math-emu/math.c static int fsts(struct sh_fpu_soft_struct *fregs, int n) n 351 arch/sh/math-emu/math.c static int flds(struct sh_fpu_soft_struct *fregs, int n) n 357 arch/sh/math-emu/math.c static int fneg(struct sh_fpu_soft_struct *fregs, int n) n 363 arch/sh/math-emu/math.c static int fabs(struct sh_fpu_soft_struct *fregs, int n) n 369 arch/sh/math-emu/math.c static int fld0(struct sh_fpu_soft_struct *fregs, int n) n 375 arch/sh/math-emu/math.c static int fld1(struct sh_fpu_soft_struct *fregs, int n) n 381 arch/sh/math-emu/math.c static int fnop_n(struct sh_fpu_soft_struct *fregs, int n) n 418 arch/sh/math-emu/math.c id_fnxd(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int x, int n) n 420 arch/sh/math-emu/math.c return (fnxd[x])(fregs, n); n 426 arch/sh/math-emu/math.c int n = (code >> 8) & 0xf, m = (code >> 4) & 0xf, x = code & 0xf; n 427 arch/sh/math-emu/math.c return (fnmx[x])(fregs, regs, 
m, n); n 433 arch/sh/math-emu/math.c int n = ((code >> 8) & 0xf); n 477 arch/sh/math-emu/math.c static void denormal_to_double(struct sh_fpu_soft_struct *fpu, int n) n 493 arch/sh/math-emu/math.c fpu->fp_regs[n] = du; n 494 arch/sh/math-emu/math.c fpu->fp_regs[n+1] = dl; n 69 arch/sh/mm/cache-sh4.c int j, n; n 77 arch/sh/mm/cache-sh4.c n = boot_cpu_data.icache.n_aliases; n 79 arch/sh/mm/cache-sh4.c for (j = 0; j < n; j++) n 26 arch/sh/mm/nommu.c __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n) n 28 arch/sh/mm/nommu.c memcpy(to, from, n); n 32 arch/sh/mm/nommu.c __kernel_size_t __clear_user(void *to, __kernel_size_t n) n 34 arch/sh/mm/nommu.c memset(to, 0, n); n 34 arch/sparc/boot/piggyback.c static int align(int n) n 37 arch/sparc/boot/piggyback.c return (n + 0x1fff) & ~0x1fff; n 39 arch/sparc/boot/piggyback.c return (n + 0xfff) & ~0xfff; n 22 arch/sparc/include/asm/asm-prototypes.h void *memcpy(void *dest, const void *src, size_t n); n 23 arch/sparc/include/asm/asm-prototypes.h void *memset(void *s, int c, size_t n); n 53 arch/sparc/include/asm/atomic_64.h #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) n 60 arch/sparc/include/asm/atomic_64.h #define atomic64_cmpxchg(v, o, n) \ n 61 arch/sparc/include/asm/atomic_64.h ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) n 58 arch/sparc/include/asm/cmpxchg_32.h #define cmpxchg(ptr, o, n) \ n 61 arch/sparc/include/asm/cmpxchg_32.h __typeof__(*(ptr)) _n_ = (n); \ n 75 arch/sparc/include/asm/cmpxchg_32.h #define cmpxchg_local(ptr, o, n) \ n 77 arch/sparc/include/asm/cmpxchg_32.h (unsigned long)(n), sizeof(*(ptr)))) n 78 arch/sparc/include/asm/cmpxchg_32.h #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) n 171 arch/sparc/include/asm/cmpxchg_64.h #define cmpxchg(ptr,o,n) \ n 174 arch/sparc/include/asm/cmpxchg_64.h __typeof__(*(ptr)) _n_ = (n); \ n 198 arch/sparc/include/asm/cmpxchg_64.h #define cmpxchg_local(ptr, o, n) \ n 200 arch/sparc/include/asm/cmpxchg_64.h (unsigned long)(n), sizeof(*(ptr)))) n 201 arch/sparc/include/asm/cmpxchg_64.h #define cmpxchg64_local(ptr, o, n) \ n 204 arch/sparc/include/asm/cmpxchg_64.h cmpxchg_local((ptr), (o), (n)); \ n 206 arch/sparc/include/asm/cmpxchg_64.h #define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) n 14 arch/sparc/include/asm/delay_64.h #define mdelay(n) udelay((n) * 1000) n 17 arch/sparc/include/asm/io_32.h int c, __kernel_size_t n) n 21 arch/sparc/include/asm/io_32.h while (n--) { n 28 arch/sparc/include/asm/io_32.h __kernel_size_t n) n 32 arch/sparc/include/asm/io_32.h while (n--) { n 40 arch/sparc/include/asm/io_32.h __kernel_size_t n) n 45 arch/sparc/include/asm/io_32.h while (n--) { n 89 arch/sparc/include/asm/io_32.h __kernel_size_t n) n 91 arch/sparc/include/asm/io_32.h while(n--) { n 99 arch/sparc/include/asm/io_32.h __kernel_size_t n) n 103 arch/sparc/include/asm/io_32.h while (n--) { n 112 arch/sparc/include/asm/io_32.h __kernel_size_t n) n 117 arch/sparc/include/asm/io_32.h while (n--) { n 330 arch/sparc/include/asm/io_64.h static inline void sbus_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n) n 332 arch/sparc/include/asm/io_64.h while(n--) { n 338 arch/sparc/include/asm/io_64.h static inline void memset_io(volatile void __iomem *dst, int c, __kernel_size_t n) n 342 arch/sparc/include/asm/io_64.h while (n--) { n 349 arch/sparc/include/asm/io_64.h __kernel_size_t n) n 353 arch/sparc/include/asm/io_64.h while (n--) { n 362 arch/sparc/include/asm/io_64.h __kernel_size_t n) n 366 
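Aside: the arch/sparc/boot/piggyback.c align() hits above round n up to an 8 KB or 4 KB boundary with the usual (n + mask) & ~mask trick, which works for any power-of-two alignment. A tiny generic sketch, where align_up() is an invented name:

#include <stdio.h>

/* Round n up to the next multiple of a, where a must be a power of two. */
static unsigned long align_up(unsigned long n, unsigned long a)
{
        return (n + a - 1) & ~(a - 1);
}

int main(void)
{
        printf("%lu\n", align_up(5000, 0x1000));  /* 8192 */
        printf("%lu\n", align_up(8192, 0x2000));  /* 8192: already aligned */
        return 0;
}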
arch/sparc/include/asm/io_64.h while (n--) { n 374 arch/sparc/include/asm/io_64.h __kernel_size_t n) n 379 arch/sparc/include/asm/io_64.h while (n--) { n 387 arch/sparc/include/asm/io_64.h __kernel_size_t n) n 392 arch/sparc/include/asm/io_64.h while (n--) { n 89 arch/sparc/include/asm/ptrace.h unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n); n 15 arch/sparc/include/asm/string.h #define memcpy(t, f, n) __builtin_memcpy(t, f, n) n 103 arch/sparc/include/asm/syscall.h unsigned int n = 6; n 110 arch/sparc/include/asm/syscall.h for (j = 0; j < n; j++) { n 237 arch/sparc/include/asm/uaccess_32.h static inline unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n) n 239 arch/sparc/include/asm/uaccess_32.h return __copy_user(to, (__force void __user *) from, n); n 242 arch/sparc/include/asm/uaccess_32.h static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) n 244 arch/sparc/include/asm/uaccess_32.h return __copy_user((__force void __user *) to, from, n); n 271 arch/sparc/include/asm/uaccess_32.h static inline unsigned long clear_user(void __user *addr, unsigned long n) n 273 arch/sparc/include/asm/uaccess_32.h if (n && __access_ok((unsigned long) addr, n)) n 274 arch/sparc/include/asm/uaccess_32.h return __clear_user(addr, n); n 276 arch/sparc/include/asm/uaccess_32.h return n; n 279 arch/sparc/include/asm/uaccess_32.h __must_check long strnlen_user(const char __user *str, long n); n 196 arch/sparc/include/asm/uaccess_64.h __must_check long strnlen_user(const char __user *str, long n); n 37 arch/sparc/include/asm/vga.h static inline void scr_memsetw(u16 *p, u16 v, unsigned int n) n 41 arch/sparc/include/asm/vga.h memset16(p, cpu_to_le16(v), n / 2); n 44 arch/sparc/include/asm/vga.h static inline void scr_memcpyw(u16 *d, u16 *s, unsigned int n) n 48 arch/sparc/include/asm/vga.h memcpy(d, s, n); n 51 arch/sparc/include/asm/vga.h static inline void scr_memmovew(u16 *d, u16 *s, unsigned int n) n 55 arch/sparc/include/asm/vga.h memmove(d, s, n); n 298 arch/sparc/kernel/btext.c unsigned int n) n 300 arch/sparc/kernel/btext.c btext_drawtext(s, n); n 127 arch/sparc/kernel/cpumap.c int i, n, num_nodes; n 142 arch/sparc/kernel/cpumap.c n = cpuinfo_id(i, CPUINFO_LVL_NODE); n 143 arch/sparc/kernel/cpumap.c if (n > prev_id[CPUINFO_LVL_NODE]) { n 145 arch/sparc/kernel/cpumap.c prev_id[CPUINFO_LVL_NODE] = n; n 148 arch/sparc/kernel/cpumap.c n = cpuinfo_id(i, CPUINFO_LVL_CORE); n 149 arch/sparc/kernel/cpumap.c if (n > prev_id[CPUINFO_LVL_CORE]) { n 151 arch/sparc/kernel/cpumap.c prev_id[CPUINFO_LVL_CORE] = n; n 154 arch/sparc/kernel/cpumap.c n = cpuinfo_id(i, CPUINFO_LVL_PROC); n 155 arch/sparc/kernel/cpumap.c if (n > prev_id[CPUINFO_LVL_PROC]) { n 157 arch/sparc/kernel/cpumap.c prev_id[CPUINFO_LVL_PROC] = n; n 164 arch/sparc/kernel/cpumap.c n = tree_level[CPUINFO_LVL_NODE].num_nodes; n 166 arch/sparc/kernel/cpumap.c tree_level[CPUINFO_LVL_NODE].end_index = n; n 168 arch/sparc/kernel/cpumap.c n++; n 169 arch/sparc/kernel/cpumap.c tree_level[CPUINFO_LVL_CORE].start_index = n; n 170 arch/sparc/kernel/cpumap.c n += tree_level[CPUINFO_LVL_CORE].num_nodes; n 171 arch/sparc/kernel/cpumap.c tree_level[CPUINFO_LVL_CORE].end_index = n - 1; n 173 arch/sparc/kernel/cpumap.c tree_level[CPUINFO_LVL_PROC].start_index = n; n 174 arch/sparc/kernel/cpumap.c n += tree_level[CPUINFO_LVL_PROC].num_nodes; n 175 arch/sparc/kernel/cpumap.c tree_level[CPUINFO_LVL_PROC].end_index = n - 1; n 193 arch/sparc/kernel/cpumap.c int n, id, cpu, 
prev_cpu, last_cpu, level; n 195 arch/sparc/kernel/cpumap.c n = enumerate_cpuinfo_nodes(tmp_level); n 197 arch/sparc/kernel/cpumap.c new_tree = kzalloc(struct_size(new_tree, nodes, n), GFP_ATOMIC); n 201 arch/sparc/kernel/cpumap.c new_tree->total_nodes = n; n 208 arch/sparc/kernel/cpumap.c n = new_tree->level[level].start_index; n 210 arch/sparc/kernel/cpumap.c level_rover[level] = n; n 211 arch/sparc/kernel/cpumap.c node = &new_tree->nodes[n]; n 275 arch/sparc/kernel/cpumap.c n = ++level_rover[level]; n 276 arch/sparc/kernel/cpumap.c if (n <= new_tree->level[level].end_index) { n 277 arch/sparc/kernel/cpumap.c node = &new_tree->nodes[n]; n 105 arch/sparc/kernel/iommu-common.c unsigned long n, end, start, limit, boundary_size; n 185 arch/sparc/kernel/iommu-common.c n = iommu_area_alloc(iommu->map, limit, start, npages, shift, n 187 arch/sparc/kernel/iommu-common.c if (n == -1) { n 205 arch/sparc/kernel/iommu-common.c n = IOMMU_ERROR_CODE; n 210 arch/sparc/kernel/iommu-common.c (n < pool->hint || need_flush(iommu))) { n 215 arch/sparc/kernel/iommu-common.c end = n + npages; n 224 arch/sparc/kernel/iommu-common.c return n; n 173 arch/sparc/kernel/iommu.c int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest); n 175 arch/sparc/kernel/iommu.c if (unlikely(n == IOMMU_NUM_CTXS)) { n 176 arch/sparc/kernel/iommu.c n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1); n 177 arch/sparc/kernel/iommu.c if (unlikely(n == lowest)) { n 179 arch/sparc/kernel/iommu.c n = 0; n 182 arch/sparc/kernel/iommu.c if (n) n 183 arch/sparc/kernel/iommu.c __set_bit(n, iommu->ctx_bitmap); n 185 arch/sparc/kernel/iommu.c return n; n 101 arch/sparc/kernel/ioport.c int n; n 104 arch/sparc/kernel/ioport.c for (n = 0; n < XNRES; n++) { n 332 arch/sparc/kernel/irq_64.c unsigned int n = (cpuid >> 5) & 0x1f; n 335 arch/sparc/kernel/irq_64.c (n << IMAP_NID_SHIFT)); n 872 arch/sparc/kernel/mdesc.c u64 n = mdesc_arc_target(hp, arc); n 873 arch/sparc/kernel/mdesc.c const char *name = mdesc_node_name(hp, n); n 876 arch/sparc/kernel/mdesc.c (*func)(hp, n, val); n 878 arch/sparc/kernel/mdesc.c find_back_node_value(hp, n, srch_val, func, val, depth-1); n 1214 arch/sparc/kernel/mdesc.c u64 n = mdesc_arc_target(hp, j); n 1217 arch/sparc/kernel/mdesc.c n_name = mdesc_node_name(hp, n); n 1219 arch/sparc/kernel/mdesc.c fill_in_one_cache(c, hp, n); n 184 arch/sparc/kernel/pci_sun4v.c unsigned long flags, order, first_page, npages, n; n 235 arch/sparc/kernel/pci_sun4v.c for (n = 0; n < npages; n++) { n 236 arch/sparc/kernel/pci_sun4v.c long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask); n 1318 arch/sparc/kernel/perf_event.c int i, n, first; n 1323 arch/sparc/kernel/perf_event.c n = n_prev + n_new; n 1324 arch/sparc/kernel/perf_event.c if (n <= 1) n 1328 arch/sparc/kernel/perf_event.c for (i = 0; i < n; i++) { n 1350 arch/sparc/kernel/perf_event.c int n = 0; n 1353 arch/sparc/kernel/perf_event.c if (n >= max_count) n 1355 arch/sparc/kernel/perf_event.c evts[n] = group; n 1356 arch/sparc/kernel/perf_event.c events[n] = group->hw.event_base; n 1357 arch/sparc/kernel/perf_event.c current_idx[n++] = PIC_NO_INDEX; n 1362 arch/sparc/kernel/perf_event.c if (n >= max_count) n 1364 arch/sparc/kernel/perf_event.c evts[n] = event; n 1365 arch/sparc/kernel/perf_event.c events[n] = event->hw.event_base; n 1366 arch/sparc/kernel/perf_event.c current_idx[n++] = PIC_NO_INDEX; n 1369 arch/sparc/kernel/perf_event.c return n; n 1423 arch/sparc/kernel/perf_event.c int n; n 1473 arch/sparc/kernel/perf_event.c n = 0; n 1475 
arch/sparc/kernel/perf_event.c n = collect_events(event->group_leader, n 1478 arch/sparc/kernel/perf_event.c if (n < 0) n 1481 arch/sparc/kernel/perf_event.c events[n] = hwc->event_base; n 1482 arch/sparc/kernel/perf_event.c evts[n] = event; n 1484 arch/sparc/kernel/perf_event.c if (check_excludes(evts, n, 1)) n 1487 arch/sparc/kernel/perf_event.c if (sparc_check_constraints(evts, events, n + 1)) n 1553 arch/sparc/kernel/perf_event.c int n; n 1565 arch/sparc/kernel/perf_event.c n = cpuc->n_events; n 1566 arch/sparc/kernel/perf_event.c if (check_excludes(cpuc->event, 0, n)) n 1568 arch/sparc/kernel/perf_event.c if (sparc_check_constraints(cpuc->event, cpuc->events, n)) n 201 arch/sparc/kernel/prom_32.c char tmp_buf[64], *n; n 208 arch/sparc/kernel/prom_32.c n = prom_early_alloc(strlen(tmp_buf) + 1); n 209 arch/sparc/kernel/prom_32.c strcpy(n, tmp_buf); n 211 arch/sparc/kernel/prom_32.c return n; n 363 arch/sparc/kernel/prom_64.c char tmp_buf[64], *n; n 370 arch/sparc/kernel/prom_64.c n = prom_early_alloc(strlen(tmp_buf) + 1); n 371 arch/sparc/kernel/prom_64.c strcpy(n, tmp_buf); n 373 arch/sparc/kernel/prom_64.c return n; n 297 arch/sparc/kernel/ptrace_32.c .n = 38, n 313 arch/sparc/kernel/ptrace_32.c .n = 99, n 321 arch/sparc/kernel/ptrace_32.c .regsets = sparc32_regsets, .n = ARRAY_SIZE(sparc32_regsets) n 56 arch/sparc/kernel/ptrace_64.c #define REG_OFFSET_NAME(n, r) \ n 57 arch/sparc/kernel/ptrace_64.c {.name = n, .offset = (PT_V9_##r)} n 492 arch/sparc/kernel/ptrace_64.c .n = 36, n 504 arch/sparc/kernel/ptrace_64.c .n = 35, n 512 arch/sparc/kernel/ptrace_64.c .regsets = sparc64_regsets, .n = ARRAY_SIZE(sparc64_regsets) n 860 arch/sparc/kernel/ptrace_64.c .n = 38, n 876 arch/sparc/kernel/ptrace_64.c .n = 99, n 884 arch/sparc/kernel/ptrace_64.c .regsets = sparc32_regsets, .n = ARRAY_SIZE(sparc32_regsets) n 1192 arch/sparc/kernel/ptrace_64.c unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) n 1196 arch/sparc/kernel/ptrace_64.c addr += n; n 112 arch/sparc/kernel/setup_32.c prom_console_write(struct console *con, const char *s, unsigned int n) n 114 arch/sparc/kernel/setup_32.c prom_write(s, n); n 85 arch/sparc/kernel/setup_64.c prom_console_write(struct console *con, const char *s, unsigned int n) n 87 arch/sparc/kernel/setup_64.c prom_write(s, n); n 78 arch/sparc/kernel/sstate.c static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr) n 614 arch/sparc/mm/init_64.c int n, node, ents, first, last, i; n 617 arch/sparc/mm/init_64.c n = prom_getproplen(node, "translations"); n 618 arch/sparc/mm/init_64.c if (unlikely(n == 0 || n == -1)) { n 622 arch/sparc/mm/init_64.c if (unlikely(n > sizeof(prom_trans))) { n 623 arch/sparc/mm/init_64.c prom_printf("prom_mappings: Size %d is too big.\n", n); n 627 arch/sparc/mm/init_64.c if ((n = prom_getproperty(node, "translations", n 634 arch/sparc/mm/init_64.c n = n / sizeof(struct linux_prom_translation); n 636 arch/sparc/mm/init_64.c ents = n; n 751 arch/sparc/mm/init_64.c int n = 0; n 755 arch/sparc/mm/init_64.c if (++n >= 512) n 1390 arch/sparc/mm/init_64.c struct node_mem_mask *n = &node_masks[i]; n 1392 arch/sparc/mm/init_64.c if ((grp->mask == n->mask) && (grp->match == n->match)) n 1422 arch/sparc/mm/init_64.c struct node_mem_mask *n; n 1444 arch/sparc/mm/init_64.c n = &node_masks[num_node_masks++]; n 1446 arch/sparc/mm/init_64.c n->mask = candidate->mask; n 1447 arch/sparc/mm/init_64.c n->match = candidate->match; n 1450 arch/sparc/mm/init_64.c index, n->mask, n->match, candidate->latency); 
n 568 arch/sparc/net/bpf_jit_comp_64.c u64 n = ((~low_bits) & 0xfffffc00) | n 570 arch/sparc/net/bpf_jit_comp_64.c emit_loadimm64(n, dest, ctx); n 50 arch/sparc/prom/console_32.c int n = prom_nbputchar(buf); n 51 arch/sparc/prom/console_32.c if (n < 0) n 40 arch/sparc/prom/console_64.c int n = __prom_console_write_buf(buf, len); n 41 arch/sparc/prom/console_64.c if (n < 0) n 43 arch/sparc/prom/console_64.c len -= n; n 30 arch/sparc/prom/printf.c void notrace prom_write(const char *buf, unsigned int n) n 40 arch/sparc/prom/printf.c while (n-- != 0) { n 253 arch/um/drivers/chan_kern.c int n, ret = 0; n 258 arch/um/drivers/chan_kern.c n = chan->ops->write(chan->fd, buf, len, chan->data); n 260 arch/um/drivers/chan_kern.c ret = n; n 267 arch/um/drivers/chan_kern.c int n, ret = 0; n 272 arch/um/drivers/chan_kern.c n = chan->ops->console_write(chan->fd, buf, len); n 274 arch/um/drivers/chan_kern.c ret = n; n 341 arch/um/drivers/chan_kern.c int n = 0; n 344 arch/um/drivers/chan_kern.c CONFIG_CHUNK(str, size, n, "none", 1); n 345 arch/um/drivers/chan_kern.c return n; n 348 arch/um/drivers/chan_kern.c CONFIG_CHUNK(str, size, n, chan->ops->type, 0); n 351 arch/um/drivers/chan_kern.c CONFIG_CHUNK(str, size, n, "", 1); n 352 arch/um/drivers/chan_kern.c return n; n 355 arch/um/drivers/chan_kern.c CONFIG_CHUNK(str, size, n, ":", 0); n 356 arch/um/drivers/chan_kern.c CONFIG_CHUNK(str, size, n, chan->dev, 0); n 358 arch/um/drivers/chan_kern.c return n; n 364 arch/um/drivers/chan_kern.c int n; n 366 arch/um/drivers/chan_kern.c n = one_chan_config_string(in, str, size, error_out); n 367 arch/um/drivers/chan_kern.c str += n; n 368 arch/um/drivers/chan_kern.c size -= n; n 371 arch/um/drivers/chan_kern.c CONFIG_CHUNK(str, size, n, "", 1); n 372 arch/um/drivers/chan_kern.c return n; n 375 arch/um/drivers/chan_kern.c CONFIG_CHUNK(str, size, n, ",", 1); n 376 arch/um/drivers/chan_kern.c n = one_chan_config_string(out, str, size, error_out); n 377 arch/um/drivers/chan_kern.c str += n; n 378 arch/um/drivers/chan_kern.c size -= n; n 379 arch/um/drivers/chan_kern.c CONFIG_CHUNK(str, size, n, "", 1); n 381 arch/um/drivers/chan_kern.c return n; n 24 arch/um/drivers/chan_user.c int n; n 26 arch/um/drivers/chan_user.c n = read(fd, c_out, sizeof(*c_out)); n 27 arch/um/drivers/chan_user.c if (n > 0) n 28 arch/um/drivers/chan_user.c return n; n 31 arch/um/drivers/chan_user.c else if (n == 0) n 38 arch/um/drivers/chan_user.c int generic_write(int fd, const char *buf, int n, void *unused) n 42 arch/um/drivers/chan_user.c err = write(fd, buf, n); n 74 arch/um/drivers/chan_user.c int generic_console_write(int fd, const char *buf, int n) n 100 arch/um/drivers/chan_user.c err = generic_write(fd, buf, n, NULL); n 223 arch/um/drivers/chan_user.c int fds[2], n, err; n 249 arch/um/drivers/chan_user.c n = read(fds[0], &c, sizeof(c)); n 250 arch/um/drivers/chan_user.c if (n != sizeof(c)) { n 35 arch/um/drivers/chan_user.h extern int generic_write(int fd, const char *buf, int n, void *unused); n 36 arch/um/drivers/chan_user.h extern int generic_console_write(int fd, const char *buf, int n); n 285 arch/um/drivers/cow_user.c int err, n; n 294 arch/um/drivers/cow_user.c n = (*reader)(0, (char *) header, sizeof(*header), arg); n 295 arch/um/drivers/cow_user.c if (n < offsetof(typeof(header->v1), backing_file)) { n 311 arch/um/drivers/cow_user.c if (n < sizeof(header->v1)) { n 324 arch/um/drivers/cow_user.c if (n < sizeof(header->v2)) { n 338 arch/um/drivers/cow_user.c if (n < sizeof(header->v3)) { n 358 arch/um/drivers/cow_user.c if (n < 
sizeof(header->v3_b)) { n 53 arch/um/drivers/daemon_user.c int fd, n, err; n 97 arch/um/drivers/daemon_user.c n = write(pri->control, &req, sizeof(req)); n 98 arch/um/drivers/daemon_user.c if (n != sizeof(req)) { n 105 arch/um/drivers/daemon_user.c n = read(pri->control, sun, sizeof(*sun)); n 106 arch/um/drivers/daemon_user.c if (n != sizeof(*sun)) { n 26 arch/um/drivers/fd.c int n; n 34 arch/um/drivers/fd.c n = strtoul(str, &end, 0); n 45 arch/um/drivers/fd.c *data = ((struct fd_chan) { .fd = n, n 33 arch/um/drivers/harddog_user.c int in_fds[2], out_fds[2], pid, n, err; n 78 arch/um/drivers/harddog_user.c n = read(in_fds[0], &c, sizeof(c)); n 79 arch/um/drivers/harddog_user.c if (n == 0) { n 85 arch/um/drivers/harddog_user.c else if (n < 0) { n 89 arch/um/drivers/harddog_user.c err = n; n 114 arch/um/drivers/harddog_user.c int n; n 117 arch/um/drivers/harddog_user.c n = write(fd, &c, sizeof(c)); n 118 arch/um/drivers/harddog_user.c if (n != sizeof(c)) { n 120 arch/um/drivers/harddog_user.c n, errno); n 121 arch/um/drivers/harddog_user.c if (n < 0) n 122 arch/um/drivers/harddog_user.c return n; n 37 arch/um/drivers/line.c int n; n 43 arch/um/drivers/line.c n = line->head - line->tail; n 45 arch/um/drivers/line.c if (n <= 0) n 46 arch/um/drivers/line.c n += LINE_BUFSIZE; /* The other case */ n 47 arch/um/drivers/line.c return n - 1; n 132 arch/um/drivers/line.c int n, count; n 141 arch/um/drivers/line.c n = write_chan(line->chan_out, line->head, count, n 143 arch/um/drivers/line.c if (n < 0) n 144 arch/um/drivers/line.c return n; n 145 arch/um/drivers/line.c if (n == count) { n 152 arch/um/drivers/line.c line->head += n; n 158 arch/um/drivers/line.c n = write_chan(line->chan_out, line->head, count, n 161 arch/um/drivers/line.c if (n < 0) n 162 arch/um/drivers/line.c return n; n 164 arch/um/drivers/line.c line->head += n; n 196 arch/um/drivers/line.c int n, ret = 0; n 202 arch/um/drivers/line.c n = write_chan(line->chan_out, buf, len, n 204 arch/um/drivers/line.c if (n < 0) { n 205 arch/um/drivers/line.c ret = n; n 209 arch/um/drivers/line.c len -= n; n 210 arch/um/drivers/line.c ret += n; n 212 arch/um/drivers/line.c ret += buffer_data(line, buf + n, len); n 366 arch/um/drivers/line.c int setup_one_line(struct line *lines, int n, char *init, n 369 arch/um/drivers/line.c struct line *line = &lines[n]; n 382 arch/um/drivers/line.c tty_unregister_device(driver, n); n 383 arch/um/drivers/line.c parse_chan_pair(NULL, line, n, opts, error_out); n 393 arch/um/drivers/line.c tty_unregister_device(driver, n); n 398 arch/um/drivers/line.c err = parse_chan_pair(new, line, n, opts, error_out); n 401 arch/um/drivers/line.c driver, n, NULL); n 405 arch/um/drivers/line.c parse_chan_pair(NULL, line, n, opts, error_out); n 438 arch/um/drivers/line.c unsigned n = simple_strtoul(init, &end, 0); n 444 arch/um/drivers/line.c if (n >= num) { n 448 arch/um/drivers/line.c conf[n] = end + 1; n 462 arch/um/drivers/line.c int n; n 469 arch/um/drivers/line.c n = simple_strtoul(str, &end, 0); n 474 arch/um/drivers/line.c if (n >= num) { n 479 arch/um/drivers/line.c return setup_one_line(lines, n, end, opts, error_out); n 487 arch/um/drivers/line.c int dev, n = 0; n 503 arch/um/drivers/line.c CONFIG_CHUNK(str, size, n, "none", 1); n 507 arch/um/drivers/line.c CONFIG_CHUNK(str, size, n, line->init_str, 1); n 509 arch/um/drivers/line.c n = chan_config_string(line, str, size, error_out); n 514 arch/um/drivers/line.c return n; n 520 arch/um/drivers/line.c int n; n 522 arch/um/drivers/line.c n = simple_strtoul(*str, &end, 
0); n 527 arch/um/drivers/line.c *start_out = n; n 528 arch/um/drivers/line.c *end_out = n; n 529 arch/um/drivers/line.c return n; n 532 arch/um/drivers/line.c int line_remove(struct line *lines, unsigned int num, int n, char **error_out) n 534 arch/um/drivers/line.c if (n >= num) { n 538 arch/um/drivers/line.c return setup_one_line(lines, n, "none", NULL, error_out); n 85 arch/um/drivers/line.h extern int setup_one_line(struct line *lines, int n, char *init, n 93 arch/um/drivers/line.h extern int line_remove(struct line *lines, unsigned int sizeof_lines, int n, n 394 arch/um/drivers/mconsole_kern.c static int mem_remove(int n, char **error_out) n 427 arch/um/drivers/mconsole_kern.c int n, size; n 439 arch/um/drivers/mconsole_kern.c n = (*get_config)(name, buf, size, &error); n 445 arch/um/drivers/mconsole_kern.c if (n <= size) { n 453 arch/um/drivers/mconsole_kern.c size = n; n 496 arch/um/drivers/mconsole_kern.c int err, start, end, n; n 509 arch/um/drivers/mconsole_kern.c n = (*dev->id)(&ptr, &start, &end); n 510 arch/um/drivers/mconsole_kern.c if (n < 0) { n 514 arch/um/drivers/mconsole_kern.c else if ((n < start) || (n > end)) { n 522 arch/um/drivers/mconsole_kern.c err = (*dev->remove)(n, &err_msg); n 555 arch/um/drivers/mconsole_kern.c int n; n 561 arch/um/drivers/mconsole_kern.c n = min((size_t) len, ARRAY_SIZE(console_buf)); n 562 arch/um/drivers/mconsole_kern.c strncpy(console_buf, string, n); n 563 arch/um/drivers/mconsole_kern.c string += n; n 564 arch/um/drivers/mconsole_kern.c len -= n; n 570 arch/um/drivers/mconsole_kern.c mconsole_reply_len(entry->req, console_buf, n, 0, 1); n 135 arch/um/drivers/mconsole_user.c int len, n; n 156 arch/um/drivers/mconsole_user.c n = sendto(req->originating_fd, &reply, len, 0, n 159 arch/um/drivers/mconsole_user.c if (n < 0) n 183 arch/um/drivers/mconsole_user.c int n, err = 0; n 211 arch/um/drivers/mconsole_user.c n = sendto(notify_sock, &packet, len, 0, (struct sockaddr *) &target, n 213 arch/um/drivers/mconsole_user.c if (n < 0) { n 380 arch/um/drivers/net_kern.c static void eth_configure(int n, void *init, char *mac, n 400 arch/um/drivers/net_kern.c "net_device for eth%d\n", n); n 405 arch/um/drivers/net_kern.c device->index = n; n 411 arch/um/drivers/net_kern.c snprintf(dev->name, sizeof(dev->name), "eth%d", n); n 415 arch/um/drivers/net_kern.c printk(KERN_INFO "Netdevice %d (%pM) : ", n, dev->dev_addr); n 428 arch/um/drivers/net_kern.c device->pdev.id = n; n 501 arch/um/drivers/net_kern.c static struct uml_net *find_device(int n) n 509 arch/um/drivers/net_kern.c if (device->index == n) n 522 arch/um/drivers/net_kern.c int n, err = -EINVAL; n 524 arch/um/drivers/net_kern.c n = simple_strtoul(str, &end, 0); n 537 arch/um/drivers/net_kern.c if (find_device(n)) { n 542 arch/um/drivers/net_kern.c *index_out = n; n 559 arch/um/drivers/net_kern.c static int check_transport(struct transport *transport, char *eth, int n, n 642 arch/um/drivers/net_kern.c int n, err; n 644 arch/um/drivers/net_kern.c err = eth_parse(str, &n, &str, &error); n 657 arch/um/drivers/net_kern.c new->index = n; n 672 arch/um/drivers/net_kern.c int n, err; n 674 arch/um/drivers/net_kern.c err = eth_parse(str, &n, &str, error_out); n 686 arch/um/drivers/net_kern.c err = !eth_setup_common(str, n); n 695 arch/um/drivers/net_kern.c int n; n 697 arch/um/drivers/net_kern.c n = simple_strtoul(*str, &end, 0); n 701 arch/um/drivers/net_kern.c *start_out = n; n 702 arch/um/drivers/net_kern.c *end_out = n; n 704 arch/um/drivers/net_kern.c return n; n 707 arch/um/drivers/net_kern.c 
static int net_remove(int n, char **error_out) n 713 arch/um/drivers/net_kern.c device = find_device(n); n 97 arch/um/drivers/net_user.c int n; n 99 arch/um/drivers/net_user.c n = read(fd, buf, len); n 101 arch/um/drivers/net_user.c if ((n < 0) && (errno == EAGAIN)) n 103 arch/um/drivers/net_user.c else if (n == 0) n 105 arch/um/drivers/net_user.c return n; n 110 arch/um/drivers/net_user.c int n; n 112 arch/um/drivers/net_user.c CATCH_EINTR(n = recvfrom(fd, buf, len, 0, NULL, NULL)); n 113 arch/um/drivers/net_user.c if (n < 0) { n 118 arch/um/drivers/net_user.c else if (n == 0) n 120 arch/um/drivers/net_user.c return n; n 125 arch/um/drivers/net_user.c int n; n 127 arch/um/drivers/net_user.c n = write(fd, buf, len); n 129 arch/um/drivers/net_user.c if ((n < 0) && (errno == EAGAIN)) n 131 arch/um/drivers/net_user.c else if (n == 0) n 133 arch/um/drivers/net_user.c return n; n 138 arch/um/drivers/net_user.c int n; n 140 arch/um/drivers/net_user.c CATCH_EINTR(n = send(fd, buf, len, 0)); n 141 arch/um/drivers/net_user.c if (n < 0) { n 146 arch/um/drivers/net_user.c else if (n == 0) n 148 arch/um/drivers/net_user.c return n; n 153 arch/um/drivers/net_user.c int n; n 155 arch/um/drivers/net_user.c CATCH_EINTR(n = sendto(fd, buf, len, 0, (struct sockaddr *) to, n 157 arch/um/drivers/net_user.c if (n < 0) { n 162 arch/um/drivers/net_user.c else if (n == 0) n 164 arch/um/drivers/net_user.c return n; n 115 arch/um/drivers/pcap_user.c int n; n 117 arch/um/drivers/pcap_user.c n = pcap_dispatch(pri->pcap, 1, handler, (u_char *) &hdata); n 118 arch/um/drivers/pcap_user.c if (n < 0) { n 123 arch/um/drivers/pcap_user.c else if (n == 0) n 52 arch/um/drivers/random.c int n, ret = 0, have_data; n 55 arch/um/drivers/random.c n = os_read_file(random_fd, &data, sizeof(data)); n 56 arch/um/drivers/random.c if (n > 0) { n 57 arch/um/drivers/random.c have_data = n; n 69 arch/um/drivers/random.c else if (n == -EAGAIN) { n 90 arch/um/drivers/random.c return n; n 8 arch/um/drivers/slip_common.c int i, n, size, start; n 26 arch/um/drivers/slip_common.c n = net_read(fd, &slip->ibuf[slip->pos], n 28 arch/um/drivers/slip_common.c if(n <= 0) n 29 arch/um/drivers/slip_common.c return n; n 32 arch/um/drivers/slip_common.c for(i = 0; i < n; i++){ n 38 arch/um/drivers/slip_common.c n - (i + 1)); n 39 arch/um/drivers/slip_common.c slip->more = n - (i + 1); n 48 arch/um/drivers/slip_common.c int actual, n; n 51 arch/um/drivers/slip_common.c n = net_write(fd, slip->obuf, actual); n 52 arch/um/drivers/slip_common.c if(n < 0) n 53 arch/um/drivers/slip_common.c return n; n 39 arch/um/drivers/ssl.c static int ssl_remove(int n, char **error_out); n 83 arch/um/drivers/ssl.c static int ssl_remove(int n, char **error_out) n 85 arch/um/drivers/ssl.c return line_remove(serial_lines, ARRAY_SIZE(serial_lines), n, n 45 arch/um/drivers/stdio_console.c static int con_remove(int n, char **con_remove); n 87 arch/um/drivers/stdio_console.c static int con_remove(int n, char **error_out) n 89 arch/um/drivers/stdio_console.c return line_remove(vts, ARRAY_SIZE(vts), n, error_out); n 77 arch/um/drivers/ubd_kern.c __u64 n; n 81 arch/um/drivers/ubd_kern.c n = bit / bits; n 83 arch/um/drivers/ubd_kern.c return (data[n] & (1 << off)) != 0; n 88 arch/um/drivers/ubd_kern.c __u64 n; n 92 arch/um/drivers/ubd_kern.c n = bit / bits; n 94 arch/um/drivers/ubd_kern.c data[n] |= (1 << off); n 243 arch/um/drivers/ubd_kern.c int n = -1; n 246 arch/um/drivers/ubd_kern.c n = simple_strtoul(str, &end, 0); n 252 arch/um/drivers/ubd_kern.c n = *str - 'a'; n 256 
arch/um/drivers/ubd_kern.c return n; n 268 arch/um/drivers/ubd_kern.c int n, err = 0, i; n 271 arch/um/drivers/ubd_kern.c n = *str; n 272 arch/um/drivers/ubd_kern.c if(n == '='){ n 305 arch/um/drivers/ubd_kern.c n = parse_unit(&str); n 306 arch/um/drivers/ubd_kern.c if(n < 0){ n 310 arch/um/drivers/ubd_kern.c if(n >= MAX_DEV){ n 318 arch/um/drivers/ubd_kern.c ubd_dev = &ubd_devs[n]; n 325 arch/um/drivers/ubd_kern.c *index_out = n; n 457 arch/um/drivers/ubd_kern.c int n = 0; n 465 arch/um/drivers/ubd_kern.c n = *remainder_size; n 475 arch/um/drivers/ubd_kern.c n += res; n 476 arch/um/drivers/ubd_kern.c if ((n % sizeof(struct io_thread_req *)) > 0) { n 482 arch/um/drivers/ubd_kern.c *remainder_size = n % sizeof(struct io_thread_req *); n 487 arch/um/drivers/ubd_kern.c (n/sizeof(struct io_thread_req *))*sizeof(struct io_thread_req *), n 490 arch/um/drivers/ubd_kern.c n = n - *remainder_size; n 493 arch/um/drivers/ubd_kern.c n = res; n 495 arch/um/drivers/ubd_kern.c return n; n 501 arch/um/drivers/ubd_kern.c int n; n 505 arch/um/drivers/ubd_kern.c n = bulk_req_safe_read( n 512 arch/um/drivers/ubd_kern.c if (n < 0) { n 513 arch/um/drivers/ubd_kern.c if(n == -EAGAIN) n 516 arch/um/drivers/ubd_kern.c "err = %d\n", -n); n 519 arch/um/drivers/ubd_kern.c for (count = 0; count < n/sizeof(struct io_thread_req *); count++) { n 905 arch/um/drivers/ubd_kern.c #define ROUND_BLOCK(n) ((n + (SECTOR_SIZE - 1)) & (-SECTOR_SIZE)) n 911 arch/um/drivers/ubd_kern.c static int ubd_add(int n, char **error_out) n 913 arch/um/drivers/ubd_kern.c struct ubd *ubd_dev = &ubd_devs[n]; n 948 arch/um/drivers/ubd_kern.c err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]); n 955 arch/um/drivers/ubd_kern.c ubd_disk_register(fake_major, ubd_dev->size, n, n 956 arch/um/drivers/ubd_kern.c &fake_gendisk[n]); n 963 arch/um/drivers/ubd_kern.c make_ide_entries(ubd_gendisk[n]->disk_name); n 978 arch/um/drivers/ubd_kern.c int n, ret; n 990 arch/um/drivers/ubd_kern.c ret = ubd_setup_common(str, &n, error_out); n 994 arch/um/drivers/ubd_kern.c if (n == -1) { n 1000 arch/um/drivers/ubd_kern.c ret = ubd_add(n, error_out); n 1002 arch/um/drivers/ubd_kern.c ubd_devs[n].file = NULL; n 1016 arch/um/drivers/ubd_kern.c int n, len = 0; n 1018 arch/um/drivers/ubd_kern.c n = parse_unit(&name); n 1019 arch/um/drivers/ubd_kern.c if((n >= MAX_DEV) || (n < 0)){ n 1024 arch/um/drivers/ubd_kern.c ubd_dev = &ubd_devs[n]; n 1047 arch/um/drivers/ubd_kern.c int n; n 1049 arch/um/drivers/ubd_kern.c n = parse_unit(str); n 1052 arch/um/drivers/ubd_kern.c return n; n 1055 arch/um/drivers/ubd_kern.c static int ubd_remove(int n, char **error_out) n 1057 arch/um/drivers/ubd_kern.c struct gendisk *disk = ubd_gendisk[n]; n 1063 arch/um/drivers/ubd_kern.c ubd_dev = &ubd_devs[n]; n 1073 arch/um/drivers/ubd_kern.c ubd_gendisk[n] = NULL; n 1079 arch/um/drivers/ubd_kern.c if(fake_gendisk[n] != NULL){ n 1080 arch/um/drivers/ubd_kern.c del_gendisk(fake_gendisk[n]); n 1081 arch/um/drivers/ubd_kern.c put_disk(fake_gendisk[n]); n 1082 arch/um/drivers/ubd_kern.c fake_gendisk[n] = NULL; n 1487 arch/um/drivers/ubd_kern.c int n; n 1492 arch/um/drivers/ubd_kern.c n = os_pwrite_file(req->fds[1], &req->bitmap_words, n 1494 arch/um/drivers/ubd_kern.c if (n != sizeof(req->bitmap_words)) n 1495 arch/um/drivers/ubd_kern.c return map_error(-n); n 1504 arch/um/drivers/ubd_kern.c int n, nsectors, start, end, bit; n 1533 arch/um/drivers/ubd_kern.c n = 0; n 1535 arch/um/drivers/ubd_kern.c buf = &buf[n]; n 1536 arch/um/drivers/ubd_kern.c len -= n; n 1537 
arch/um/drivers/ubd_kern.c n = os_pread_file(req->fds[bit], buf, len, off); n 1538 arch/um/drivers/ubd_kern.c if (n < 0) { n 1539 arch/um/drivers/ubd_kern.c req->error = map_error(-n); n 1542 arch/um/drivers/ubd_kern.c } while((n < len) && (n != 0)); n 1543 arch/um/drivers/ubd_kern.c if (n < len) memset(&buf[n], 0, len - n); n 1546 arch/um/drivers/ubd_kern.c n = os_pwrite_file(req->fds[bit], buf, len, off); n 1547 arch/um/drivers/ubd_kern.c if(n != len){ n 1548 arch/um/drivers/ubd_kern.c req->error = map_error(-n); n 1554 arch/um/drivers/ubd_kern.c n = os_falloc_punch(req->fds[bit], off, len); n 1555 arch/um/drivers/ubd_kern.c if (n) { n 1556 arch/um/drivers/ubd_kern.c req->error = map_error(-n); n 1582 arch/um/drivers/ubd_kern.c int n, count, written, res; n 1587 arch/um/drivers/ubd_kern.c n = bulk_req_safe_read( n 1594 arch/um/drivers/ubd_kern.c if (n < 0) { n 1595 arch/um/drivers/ubd_kern.c if (n == -EAGAIN) { n 1601 arch/um/drivers/ubd_kern.c for (count = 0; count < n/sizeof(struct io_thread_req *); count++) { n 1611 arch/um/drivers/ubd_kern.c n - written); n 1615 arch/um/drivers/ubd_kern.c if (written < n) { n 1618 arch/um/drivers/ubd_kern.c } while (written < n); n 68 arch/um/drivers/vector_kern.c static void vector_eth_configure(int n, struct arglist *def); n 674 arch/um/drivers/vector_kern.c static struct vector_device *find_device(int n) n 682 arch/um/drivers/vector_kern.c if (device->unit == n) n 694 arch/um/drivers/vector_kern.c int n, len, err; n 707 arch/um/drivers/vector_kern.c err = kstrtouint(start, 0, &n); n 714 arch/um/drivers/vector_kern.c if (find_device(n)) { n 719 arch/um/drivers/vector_kern.c *index_out = n; n 726 arch/um/drivers/vector_kern.c int err, n; n 730 arch/um/drivers/vector_kern.c err = vector_parse(str, &n, ¶ms, error_out); n 752 arch/um/drivers/vector_kern.c vector_eth_configure(n, parsed); n 759 arch/um/drivers/vector_kern.c int n; n 761 arch/um/drivers/vector_kern.c n = simple_strtoul(*str, &end, 0); n 765 arch/um/drivers/vector_kern.c *start_out = n; n 766 arch/um/drivers/vector_kern.c *end_out = n; n 768 arch/um/drivers/vector_kern.c return n; n 771 arch/um/drivers/vector_kern.c static int vector_remove(int n, char **error_out) n 777 arch/um/drivers/vector_kern.c vec_d = find_device(n); n 1455 arch/um/drivers/vector_kern.c int n, n 1473 arch/um/drivers/vector_kern.c "net_device for vec%d\n", n); n 1480 arch/um/drivers/vector_kern.c device->unit = n; n 1486 arch/um/drivers/vector_kern.c snprintf(dev->name, sizeof(dev->name), "vec%d", n); n 1495 arch/um/drivers/vector_kern.c device->pdev.id = n; n 1509 arch/um/drivers/vector_kern.c .unit = n, n 1599 arch/um/drivers/vector_kern.c int n, err; n 1602 arch/um/drivers/vector_kern.c err = vector_parse(str, &n, &str, &error); n 1613 arch/um/drivers/vector_kern.c new->unit = n; n 591 arch/um/drivers/vector_user.c int n; n 593 arch/um/drivers/vector_user.c CATCH_EINTR(n = sendmsg(fd, (struct msghdr *) hdr, flags)); n 594 arch/um/drivers/vector_user.c if ((n < 0) && (errno == EAGAIN)) n 596 arch/um/drivers/vector_user.c if (n >= 0) n 597 arch/um/drivers/vector_user.c return n; n 604 arch/um/drivers/vector_user.c int n; n 607 arch/um/drivers/vector_user.c CATCH_EINTR(n = readv(fd, msg->msg_iov, msg->msg_iovlen)); n 608 arch/um/drivers/vector_user.c if ((n < 0) && (errno == EAGAIN)) n 610 arch/um/drivers/vector_user.c if (n >= 0) n 611 arch/um/drivers/vector_user.c return n; n 618 arch/um/drivers/vector_user.c int n; n 620 arch/um/drivers/vector_user.c CATCH_EINTR(n = writev(fd, (struct iovec *) hdr, iovcount)); n 
621 arch/um/drivers/vector_user.c if ((n < 0) && ((errno == EAGAIN) || (errno == ENOBUFS))) n 623 arch/um/drivers/vector_user.c if (n >= 0) n 624 arch/um/drivers/vector_user.c return n; n 635 arch/um/drivers/vector_user.c int n; n 637 arch/um/drivers/vector_user.c CATCH_EINTR(n = sendmmsg(fd, (struct mmsghdr *) msgvec, vlen, flags)); n 638 arch/um/drivers/vector_user.c if ((n < 0) && ((errno == EAGAIN) || (errno == ENOBUFS))) n 640 arch/um/drivers/vector_user.c if (n >= 0) n 641 arch/um/drivers/vector_user.c return n; n 652 arch/um/drivers/vector_user.c int n; n 655 arch/um/drivers/vector_user.c n = recvmmsg(fd, (struct mmsghdr *) msgvec, vlen, flags, 0)); n 656 arch/um/drivers/vector_user.c if ((n < 0) && (errno == EAGAIN)) n 658 arch/um/drivers/vector_user.c if (n >= 0) n 659 arch/um/drivers/vector_user.c return n; n 667 arch/um/drivers/virtio_uml.c const uint64_t n = 1; n 671 arch/um/drivers/virtio_uml.c rc = os_write_file(info->kick_fd, &n, sizeof(n)); n 673 arch/um/drivers/virtio_uml.c return !WARN(rc != sizeof(n), "write returned %d\n", rc); n 680 arch/um/drivers/virtio_uml.c uint64_t n; n 685 arch/um/drivers/virtio_uml.c rc = os_read_file(info->call_fd, &n, sizeof(n)); n 686 arch/um/drivers/virtio_uml.c if (rc == sizeof(n)) n 688 arch/um/drivers/virtio_uml.c } while (rc == sizeof(n) || rc == -EINTR); n 747 arch/um/drivers/virtio_uml.c struct virtqueue *vq, *n; n 757 arch/um/drivers/virtio_uml.c list_for_each_entry_safe(vq, n, &vdev->vqs, list) n 24 arch/um/include/asm/uaccess.h extern unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n); n 25 arch/um/include/asm/uaccess.h extern unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n); n 16 arch/um/include/shared/longjmp.h int n; \ n 19 arch/um/include/shared/longjmp.h n = setjmp(*buf); \ n 20 arch/um/include/shared/longjmp.h if(n != 0) \ n 22 arch/um/include/shared/longjmp.h n; }) n 47 arch/um/include/shared/mem_user.h #define ROUND_4M(n) ((((unsigned long) (n)) + (1 << 22)) & ~((1 << 22) - 1)) n 250 arch/um/include/shared/os.h extern void um_early_printk(const char *s, unsigned int n); n 11 arch/um/kernel/early_printk.c static void early_console_write(struct console *con, const char *s, unsigned int n) n 13 arch/um/kernel/early_printk.c um_early_printk(s, n); n 65 arch/um/kernel/initrd.c int fd, n; n 73 arch/um/kernel/initrd.c n = os_read_file(fd, buf, size); n 74 arch/um/kernel/initrd.c if (n != size) { n 77 arch/um/kernel/initrd.c filename, -n); n 68 arch/um/kernel/irq.c int n, i, j; n 77 arch/um/kernel/irq.c n = os_waiting_for_events_epoll(); n 79 arch/um/kernel/irq.c if (n <= 0) { n 80 arch/um/kernel/irq.c if (n == -EINTR) n 86 arch/um/kernel/irq.c for (i = 0; i < n ; i++) { n 120 arch/um/kernel/process.c int (*fn)(void *), n; n 133 arch/um/kernel/process.c n = fn(arg); n 64 arch/um/kernel/skas/uaccess.c int n; n 79 arch/um/kernel/skas/uaccess.c n = (*op)(addr, len, arg); n 87 arch/um/kernel/skas/uaccess.c return n; n 93 arch/um/kernel/skas/uaccess.c long size, remain, n; n 98 arch/um/kernel/skas/uaccess.c n = do_op_one_page(addr, size, is_write, op, arg); n 99 arch/um/kernel/skas/uaccess.c if (n != 0) { n 100 arch/um/kernel/skas/uaccess.c remain = (n < 0 ? remain : 0); n 110 arch/um/kernel/skas/uaccess.c n = do_op_one_page(addr, PAGE_SIZE, is_write, op, arg); n 111 arch/um/kernel/skas/uaccess.c if (n != 0) { n 112 arch/um/kernel/skas/uaccess.c remain = (n < 0 ? 
remain : 0); n 122 arch/um/kernel/skas/uaccess.c n = do_op_one_page(addr, remain, is_write, op, arg); n 123 arch/um/kernel/skas/uaccess.c if (n != 0) { n 124 arch/um/kernel/skas/uaccess.c remain = (n < 0 ? remain : 0); n 142 arch/um/kernel/skas/uaccess.c unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) n 145 arch/um/kernel/skas/uaccess.c memcpy(to, (__force void*)from, n); n 149 arch/um/kernel/skas/uaccess.c return buffer_op((unsigned long) from, n, 0, copy_chunk_from_user, &to); n 162 arch/um/kernel/skas/uaccess.c unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n) n 165 arch/um/kernel/skas/uaccess.c memcpy((__force void *) to, from, n); n 169 arch/um/kernel/skas/uaccess.c return buffer_op((unsigned long) to, n, 1, copy_chunk_to_user, &from); n 176 arch/um/kernel/skas/uaccess.c int n; n 179 arch/um/kernel/skas/uaccess.c n = strnlen(to, len); n 180 arch/um/kernel/skas/uaccess.c *to_ptr += n; n 182 arch/um/kernel/skas/uaccess.c if (n < len) n 189 arch/um/kernel/skas/uaccess.c long n; n 197 arch/um/kernel/skas/uaccess.c n = buffer_op((unsigned long) src, count, 0, strncpy_chunk_from_user, n 199 arch/um/kernel/skas/uaccess.c if (n != 0) n 224 arch/um/kernel/skas/uaccess.c int *len_ptr = arg, n; n 226 arch/um/kernel/skas/uaccess.c n = strnlen((void *) str, len); n 227 arch/um/kernel/skas/uaccess.c *len_ptr += n; n 229 arch/um/kernel/skas/uaccess.c if (n < len) n 236 arch/um/kernel/skas/uaccess.c int count = 0, n; n 241 arch/um/kernel/skas/uaccess.c n = buffer_op((unsigned long) str, len, 0, strnlen_chunk, &count); n 242 arch/um/kernel/skas/uaccess.c if (n == 0) n 219 arch/um/kernel/tlb.c #define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1)) n 190 arch/um/kernel/um_arch.c size_t n; n 192 arch/um/kernel/um_arch.c n = strlen(p->str); n 193 arch/um/kernel/um_arch.c if (!strncmp(line, p->str, n) && p->setup_func(line + n, add)) n 41 arch/um/os-Linux/drivers/ethertap_user.c int n; n 46 arch/um/os-Linux/drivers/ethertap_user.c CATCH_EINTR(n = write(fd, &change, sizeof(change))); n 47 arch/um/os-Linux/drivers/ethertap_user.c if (n != sizeof(change)) { n 95 arch/um/os-Linux/drivers/ethertap_user.c int pid, err, n; n 123 arch/um/os-Linux/drivers/ethertap_user.c CATCH_EINTR(n = read(control_me, &c, sizeof(c))); n 124 arch/um/os-Linux/drivers/ethertap_user.c if (n != sizeof(c)) { n 73 arch/um/os-Linux/drivers/tuntap_user.c int pid, n, err; n 101 arch/um/os-Linux/drivers/tuntap_user.c n = recvmsg(me, &msg, 0); n 102 arch/um/os-Linux/drivers/tuntap_user.c *used_out = n; n 103 arch/um/os-Linux/drivers/tuntap_user.c if (n < 0) { n 263 arch/um/os-Linux/file.c int n = read(fd, buf, len); n 265 arch/um/os-Linux/file.c if (n < 0) n 267 arch/um/os-Linux/file.c return n; n 272 arch/um/os-Linux/file.c int n = pread(fd, buf, len, offset); n 274 arch/um/os-Linux/file.c if (n < 0) n 276 arch/um/os-Linux/file.c return n; n 281 arch/um/os-Linux/file.c int n = write(fd, (void *) buf, len); n 283 arch/um/os-Linux/file.c if (n < 0) n 285 arch/um/os-Linux/file.c return n; n 290 arch/um/os-Linux/file.c int n = fsync(fd); n 292 arch/um/os-Linux/file.c if (n < 0) n 294 arch/um/os-Linux/file.c return n; n 299 arch/um/os-Linux/file.c int n = pwrite(fd, (void *) buf, len, offset); n 301 arch/um/os-Linux/file.c if (n < 0) n 303 arch/um/os-Linux/file.c return n; n 504 arch/um/os-Linux/file.c int new, n; n 520 arch/um/os-Linux/file.c n = recvmsg(fd, &msg, 0); n 521 arch/um/os-Linux/file.c if (n < 0) n 523 arch/um/os-Linux/file.c else if (n != iov.iov_len) n 618 
arch/um/os-Linux/file.c int n = fallocate(fd, FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE, offset, len); n 620 arch/um/os-Linux/file.c if (n < 0) n 622 arch/um/os-Linux/file.c return n; n 46 arch/um/os-Linux/helper.c int pid, fds[2], ret, n; n 89 arch/um/os-Linux/helper.c n = read(fds[0], &ret, sizeof(ret)); n 90 arch/um/os-Linux/helper.c if (n == 0) { n 93 arch/um/os-Linux/helper.c if (n < 0) { n 94 arch/um/os-Linux/helper.c n = -errno; n 96 arch/um/os-Linux/helper.c "ret = %d\n", -n); n 97 arch/um/os-Linux/helper.c ret = n; n 71 arch/um/os-Linux/irq.c int n, err; n 73 arch/um/os-Linux/irq.c n = epoll_wait(epollfd, n 75 arch/um/os-Linux/irq.c if (n < 0) { n 80 arch/um/os-Linux/irq.c " epoll returned %d, error = %s\n", n, n 85 arch/um/os-Linux/irq.c return n; n 208 arch/um/os-Linux/main.c void *__wrap_calloc(int n, int size) n 210 arch/um/os-Linux/main.c void *ptr = __wrap_malloc(n * size); n 214 arch/um/os-Linux/main.c memset(ptr, 0, n * size); n 63 arch/um/os-Linux/process.c int parent = FAILURE_PID, n, fd; n 76 arch/um/os-Linux/process.c CATCH_EINTR(n = read(fd, data, sizeof(data))); n 79 arch/um/os-Linux/process.c if (n < 0) { n 86 arch/um/os-Linux/process.c n = sscanf(data, "%*d " COMM_SCANF " %*c %d", &parent); n 87 arch/um/os-Linux/process.c if (n != 1) n 55 arch/um/os-Linux/sigio.c int i, n, respond_fd; n 61 arch/um/os-Linux/sigio.c n = poll(fds->poll, fds->used, -1); n 62 arch/um/os-Linux/sigio.c if (n < 0) { n 66 arch/um/os-Linux/sigio.c "%d, errno = %d\n", n, errno); n 73 arch/um/os-Linux/sigio.c CATCH_EINTR(n = read(sigio_private[1], &c, n 75 arch/um/os-Linux/sigio.c if (n != sizeof(c)) n 92 arch/um/os-Linux/sigio.c CATCH_EINTR(n = write(respond_fd, &c, sizeof(c))); n 93 arch/um/os-Linux/sigio.c if (n != sizeof(c)) n 103 arch/um/os-Linux/sigio.c static int need_poll(struct pollfds *polls, int n) n 107 arch/um/os-Linux/sigio.c if (n <= polls->size) n 110 arch/um/os-Linux/sigio.c new = uml_kmalloc(n * sizeof(struct pollfd), UM_GFP_ATOMIC); n 121 arch/um/os-Linux/sigio.c polls->size = n; n 132 arch/um/os-Linux/sigio.c int n; n 136 arch/um/os-Linux/sigio.c CATCH_EINTR(n = write(sigio_private[0], &c, sizeof(c))); n 137 arch/um/os-Linux/sigio.c if (n != sizeof(c)) { n 143 arch/um/os-Linux/sigio.c CATCH_EINTR(n = read(sigio_private[0], &c, sizeof(c))); n 144 arch/um/os-Linux/sigio.c if (n != sizeof(c)) { n 170 arch/um/os-Linux/sigio.c int err = 0, i, n; n 187 arch/um/os-Linux/sigio.c n = current_poll.used; n 188 arch/um/os-Linux/sigio.c err = need_poll(&next_poll, n + 1); n 194 arch/um/os-Linux/sigio.c next_poll.poll[n] = *p; n 195 arch/um/os-Linux/sigio.c next_poll.used = n + 1; n 205 arch/um/os-Linux/sigio.c int err = 0, i, n = 0; n 230 arch/um/os-Linux/sigio.c next_poll.poll[n++] = *p; n 493 arch/um/os-Linux/sigio.c int n; n 504 arch/um/os-Linux/sigio.c while (((n = read(slave, buf, sizeof(buf))) > 0) && n 511 arch/um/os-Linux/sigio.c } else if (n == -EAGAIN) n 514 arch/um/os-Linux/sigio.c printk(UM_KERN_CONT "tty_output : read failed, err = %d\n", n); n 50 arch/um/os-Linux/skas/mem.c int n, i; n 56 arch/um/os-Linux/skas/mem.c n = ptrace_setregs(pid, syscall_regs); n 57 arch/um/os-Linux/skas/mem.c if (n < 0) { n 62 arch/um/os-Linux/skas/mem.c -n); n 92 arch/um/os-Linux/skas/mem.c for (n = 1; n < data[0]/sizeof(long); n++) { n 93 arch/um/os-Linux/skas/mem.c if (n == 1) n 96 arch/um/os-Linux/skas/mem.c if (n % 4 == 1) n 98 arch/um/os-Linux/skas/mem.c printk(" 0x%lx", data[n]); n 100 arch/um/os-Linux/skas/mem.c if (n > 1) n 57 arch/um/os-Linux/skas/process.c int n, status, err; n 60 
arch/um/os-Linux/skas/process.c CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL)); n 61 arch/um/os-Linux/skas/process.c if ((n < 0) || !WIFSTOPPED(status)) n 84 arch/um/os-Linux/skas/process.c "pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno, n 268 arch/um/os-Linux/skas/process.c int pid, status, n, flags, err; n 296 arch/um/os-Linux/skas/process.c CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL)); n 297 arch/um/os-Linux/skas/process.c if (n < 0) { n 578 arch/um/os-Linux/skas/process.c int n; n 590 arch/um/os-Linux/skas/process.c n = setjmp(initial_jmpbuf); n 591 arch/um/os-Linux/skas/process.c switch (n) { n 609 arch/um/os-Linux/skas/process.c "start_idle_thread - %d\n", n); n 95 arch/um/os-Linux/start_up.c int pid, n, status; n 105 arch/um/os-Linux/start_up.c CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED)); n 106 arch/um/os-Linux/start_up.c if (n < 0) n 123 arch/um/os-Linux/start_up.c int status, n, ret = 0; n 129 arch/um/os-Linux/start_up.c CATCH_EINTR(n = waitpid(pid, &status, 0)); n 167 arch/um/os-Linux/start_up.c int pid, n, status, count=0; n 176 arch/um/os-Linux/start_up.c CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED)); n 177 arch/um/os-Linux/start_up.c if (n < 0) n 191 arch/um/os-Linux/start_up.c n = ptrace(PTRACE_POKEUSER, pid, PT_SYSCALL_RET_OFFSET, os_getpid()); n 192 arch/um/os-Linux/start_up.c if (n < 0) { n 216 arch/um/os-Linux/start_up.c CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED)); n 217 arch/um/os-Linux/start_up.c if (n < 0) n 227 arch/um/os-Linux/start_up.c n = ptrace(PTRACE_POKEUSER, pid, PT_SYSCALL_RET_OFFSET, n 229 arch/um/os-Linux/start_up.c if (n < 0) n 261 arch/um/os-Linux/start_up.c int pid, syscall, n, status; n 274 arch/um/os-Linux/start_up.c CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED)); n 275 arch/um/os-Linux/start_up.c if (n < 0) n 286 arch/um/os-Linux/start_up.c n = ptrace(PTRACE_POKEUSER, pid, PT_SYSCALL_NR_OFFSET, n 288 arch/um/os-Linux/start_up.c if (n < 0) n 139 arch/um/os-Linux/umid.c int dead, fd, p, n, err; n 148 arch/um/os-Linux/umid.c n = snprintf(file, filelen, "%s/pid", dir); n 149 arch/um/os-Linux/umid.c if (n >= filelen) { n 167 arch/um/os-Linux/umid.c n = read(fd, pid, sizeof(pid)); n 168 arch/um/os-Linux/umid.c if (n < 0) { n 172 arch/um/os-Linux/umid.c } else if (n == 0) { n 221 arch/um/os-Linux/umid.c int fd, n; n 238 arch/um/os-Linux/umid.c n = write(fd, pid, strlen(pid)); n 239 arch/um/os-Linux/umid.c if (n != strlen(pid)) n 346 arch/um/os-Linux/umid.c int n, err; n 352 arch/um/os-Linux/umid.c n = snprintf(buf, len, "%s%s/%s", uml_dir, umid, name); n 353 arch/um/os-Linux/umid.c if (n >= len) { n 152 arch/um/os-Linux/util.c void um_early_printk(const char *s, unsigned int n) n 154 arch/um/os-Linux/util.c printf("%.*s", n, s); n 40 arch/unicore32/boot/compressed/misc.c void *memcpy(void *dest, const void *src, size_t n) n 45 arch/unicore32/boot/compressed/misc.c for (i = n >> 3; i > 0; i--) { n 56 arch/unicore32/boot/compressed/misc.c if (n & 1 << 2) { n 63 arch/unicore32/boot/compressed/misc.c if (n & 1 << 1) { n 68 arch/unicore32/boot/compressed/misc.c if (n & 1) n 50 arch/unicore32/include/asm/cmpxchg.h #define cmpxchg_local(ptr, o, n) \ n 52 arch/unicore32/include/asm/cmpxchg.h (unsigned long)(o), (unsigned long)(n), sizeof(*(ptr)))) n 53 arch/unicore32/include/asm/cmpxchg.h #define cmpxchg64_local(ptr, o, n) \ n 54 arch/unicore32/include/asm/cmpxchg.h __cmpxchg64_local_generic((ptr), (o), (n)) n 42 arch/unicore32/include/asm/delay.h #define udelay(n) \ n 43 arch/unicore32/include/asm/delay.h 
(__builtin_constant_p(n) ? \ n 44 arch/unicore32/include/asm/delay.h ((n) > (MAX_UDELAY_MS * 1000) ? __bad_udelay() : \ n 45 arch/unicore32/include/asm/delay.h __const_udelay((n) * ((2199023U*HZ)>>11))) : \ n 46 arch/unicore32/include/asm/delay.h __udelay(n)) n 24 arch/unicore32/include/asm/uaccess.h raw_copy_from_user(void *to, const void __user *from, unsigned long n); n 26 arch/unicore32/include/asm/uaccess.h raw_copy_to_user(void __user *to, const void *from, unsigned long n); n 28 arch/unicore32/include/asm/uaccess.h __clear_user(void __user *addr, unsigned long n); n 32 arch/unicore32/include/asm/uaccess.h __strnlen_user(const char __user *s, long n); n 16 arch/unicore32/kernel/early_printk.c static void early_ocd_write(struct console *con, const char *s, unsigned n) n 18 arch/unicore32/kernel/early_printk.c while (*s && n-- > 0) { n 469 arch/unicore32/mm/alignment.c #define ASM_MTF(n) case n: \ n 470 arch/unicore32/mm/alignment.c __asm__ __volatile__("MTF %0, F" __stringify(n) \ n 485 arch/unicore32/mm/alignment.c #define ASM_MFF(n) case n: \ n 486 arch/unicore32/mm/alignment.c __asm__ __volatile__("MFF %0, F" __stringify(n) \ n 196 arch/x86/boot/boot.h static inline char *__get_heap(size_t s, size_t a, size_t n) n 202 arch/x86/boot/boot.h HEAP += s*n; n 205 arch/x86/boot/boot.h #define GET_HEAP(type, n) \ n 206 arch/x86/boot/boot.h ((type *)__get_heap(sizeof(type),__alignof__(type),(n))) n 208 arch/x86/boot/boot.h static inline bool heap_free(size_t n) n 210 arch/x86/boot/boot.h return (int)(heap_end-HEAP) >= (int)n; n 39 arch/x86/boot/compressed/misc.c #define memzero(s, n) memset((s), 0, (n)) n 43 arch/x86/boot/compressed/misc.c void *memmove(void *dest, const void *src, size_t n); n 14 arch/x86/boot/compressed/string.c static void *____memcpy(void *dest, const void *src, size_t n) n 22 arch/x86/boot/compressed/string.c : "0" (n >> 2), "g" (n & 3), "1" (dest), "2" (src) n 28 arch/x86/boot/compressed/string.c static void *____memcpy(void *dest, const void *src, size_t n) n 36 arch/x86/boot/compressed/string.c : "0" (n >> 3), "g" (n & 7), "1" (dest), "2" (src) n 43 arch/x86/boot/compressed/string.c void *memset(void *s, int c, size_t n) n 48 arch/x86/boot/compressed/string.c for (i = 0; i < n; i++) n 53 arch/x86/boot/compressed/string.c void *memmove(void *dest, const void *src, size_t n) n 58 arch/x86/boot/compressed/string.c if (d <= s || d - s >= n) n 59 arch/x86/boot/compressed/string.c return ____memcpy(dest, src, n); n 61 arch/x86/boot/compressed/string.c while (n-- > 0) n 62 arch/x86/boot/compressed/string.c d[n] = s[n]; n 68 arch/x86/boot/compressed/string.c void *memcpy(void *dest, const void *src, size_t n) n 70 arch/x86/boot/compressed/string.c if (dest > src && dest - src < n) { n 72 arch/x86/boot/compressed/string.c return memmove(dest, src, n); n 74 arch/x86/boot/compressed/string.c return ____memcpy(dest, src, n); n 78 arch/x86/boot/compressed/string.c extern void *__memset(void *s, int c, size_t n) __alias(memset); n 79 arch/x86/boot/compressed/string.c extern void *__memmove(void *dest, const void *src, size_t n) __alias(memmove); n 80 arch/x86/boot/compressed/string.c extern void *__memcpy(void *dest, const void *src, size_t n) __alias(memcpy); n 35 arch/x86/boot/printf.c #define __do_div(n, base) ({ \ n 37 arch/x86/boot/printf.c __res = ((unsigned long) n) % (unsigned) base; \ n 38 arch/x86/boot/printf.c n = ((unsigned long) n) / (unsigned) base; \ n 87 arch/x86/crypto/ghash-clmulni-intel_glue.c int n = min(srclen, dctx->bytes); n 90 
arch/x86/crypto/ghash-clmulni-intel_glue.c dctx->bytes -= n; n 91 arch/x86/crypto/ghash-clmulni-intel_glue.c srclen -= n; n 93 arch/x86/crypto/ghash-clmulni-intel_glue.c while (n--) n 32 arch/x86/crypto/nhpoly1305-avx2-glue.c unsigned int n = min_t(unsigned int, srclen, SZ_4K); n 35 arch/x86/crypto/nhpoly1305-avx2-glue.c crypto_nhpoly1305_update_helper(desc, src, n, _nh_avx2); n 37 arch/x86/crypto/nhpoly1305-avx2-glue.c src += n; n 38 arch/x86/crypto/nhpoly1305-avx2-glue.c srclen -= n; n 32 arch/x86/crypto/nhpoly1305-sse2-glue.c unsigned int n = min_t(unsigned int, srclen, SZ_4K); n 35 arch/x86/crypto/nhpoly1305-sse2-glue.c crypto_nhpoly1305_update_helper(desc, src, n, _nh_sse2); n 37 arch/x86/crypto/nhpoly1305-sse2-glue.c src += n; n 38 arch/x86/crypto/nhpoly1305-sse2-glue.c srclen -= n; n 422 arch/x86/events/amd/uncore.c struct hlist_node *n; n 424 arch/x86/events/amd/uncore.c hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) { n 853 arch/x86/events/core.c int perf_assign_events(struct event_constraint **constraints, int n, n 858 arch/x86/events/core.c perf_sched_init(&sched, constraints, n, wmin, wmax, gpmax); n 871 arch/x86/events/core.c int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) n 895 arch/x86/events/core.c for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) { n 921 arch/x86/events/core.c for (i = 0; i < n; i++) { n 943 arch/x86/events/core.c if (i != n) { n 960 arch/x86/events/core.c unsched = perf_assign_events(cpuc->event_constraint, n, wmin, n 975 arch/x86/events/core.c for (i = 0; i < n; i++) { n 981 arch/x86/events/core.c for (i = n0; i < n; i++) { n 1007 arch/x86/events/core.c int n, max_count; n 1012 arch/x86/events/core.c n = cpuc->n_events; n 1036 arch/x86/events/core.c if (n >= max_count) n 1038 arch/x86/events/core.c cpuc->event_list[n] = leader; n 1039 arch/x86/events/core.c n++; n 1042 arch/x86/events/core.c return n; n 1049 arch/x86/events/core.c if (n >= max_count) n 1052 arch/x86/events/core.c cpuc->event_list[n] = event; n 1053 arch/x86/events/core.c n++; n 1055 arch/x86/events/core.c return n; n 1272 arch/x86/events/core.c int n, n0, ret; n 1277 arch/x86/events/core.c ret = n = collect_events(cpuc, event, false); n 1296 arch/x86/events/core.c ret = x86_pmu.schedule_events(cpuc, n, assign); n 1303 arch/x86/events/core.c memcpy(cpuc->assign, assign, n*sizeof(int)); n 1310 arch/x86/events/core.c cpuc->n_events = n; n 1311 arch/x86/events/core.c cpuc->n_added += n - n0; n 1312 arch/x86/events/core.c cpuc->n_txn += n - n0; n 1946 arch/x86/events/core.c int n, ret; n 1955 arch/x86/events/core.c n = cpuc->n_events; n 1960 arch/x86/events/core.c ret = x86_pmu.schedule_events(cpuc, n, assign); n 1968 arch/x86/events/core.c memcpy(cpuc->assign, assign, n*sizeof(int)); n 2048 arch/x86/events/core.c int ret = -EINVAL, n; n 2059 arch/x86/events/core.c n = collect_events(fake_cpuc, leader, true); n 2060 arch/x86/events/core.c if (n < 0) n 2063 arch/x86/events/core.c fake_cpuc->n_events = n; n 2064 arch/x86/events/core.c n = collect_events(fake_cpuc, event, false); n 2065 arch/x86/events/core.c if (n < 0) n 2069 arch/x86/events/core.c ret = x86_pmu.schedule_events(fake_cpuc, n, NULL); n 1312 arch/x86/events/intel/ds.c static inline u64 get_pebs_status(void *n) n 1315 arch/x86/events/intel/ds.c return ((struct pebs_record_nhm *)n)->status; n 1316 arch/x86/events/intel/ds.c return ((struct pebs_basic *)n)->applicable_counters; n 1778 arch/x86/events/intel/ds.c int n; n 1799 arch/x86/events/intel/ds.c n = top - at; n 1800 
arch/x86/events/intel/ds.c if (n <= 0) { n 1806 arch/x86/events/intel/ds.c __intel_pmu_pebs_event(event, iregs, at, top, 0, n, n 1206 arch/x86/events/intel/p4.c static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) n 1221 arch/x86/events/intel/p4.c for (i = 0, num = n; i < n; i++, num--) { n 357 arch/x86/events/intel/uncore.c int n, max_count; n 366 arch/x86/events/intel/uncore.c n = box->n_events; n 369 arch/x86/events/intel/uncore.c box->event_list[n] = leader; n 370 arch/x86/events/intel/uncore.c n++; n 374 arch/x86/events/intel/uncore.c return n; n 381 arch/x86/events/intel/uncore.c if (n >= max_count) n 384 arch/x86/events/intel/uncore.c box->event_list[n] = event; n 385 arch/x86/events/intel/uncore.c n++; n 387 arch/x86/events/intel/uncore.c return n; n 422 arch/x86/events/intel/uncore.c static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n) n 431 arch/x86/events/intel/uncore.c for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) { n 439 arch/x86/events/intel/uncore.c for (i = 0; i < n; i++) { n 460 arch/x86/events/intel/uncore.c if (i != n) n 461 arch/x86/events/intel/uncore.c ret = perf_assign_events(box->event_constraint, n, n 462 arch/x86/events/intel/uncore.c wmin, wmax, n, assign); n 465 arch/x86/events/intel/uncore.c for (i = 0; i < n; i++) n 549 arch/x86/events/intel/uncore.c int i, n, ret; n 565 arch/x86/events/intel/uncore.c ret = n = uncore_collect_events(box, event, false); n 573 arch/x86/events/intel/uncore.c ret = uncore_assign_events(box, assign, n); n 596 arch/x86/events/intel/uncore.c for (i = 0; i < n; i++) { n 611 arch/x86/events/intel/uncore.c box->n_events = n; n 662 arch/x86/events/intel/uncore.c int ret = -EINVAL, n; n 679 arch/x86/events/intel/uncore.c n = uncore_collect_events(fake_box, leader, true); n 680 arch/x86/events/intel/uncore.c if (n < 0) n 683 arch/x86/events/intel/uncore.c fake_box->n_events = n; n 684 arch/x86/events/intel/uncore.c n = uncore_collect_events(fake_box, event, false); n 685 arch/x86/events/intel/uncore.c if (n < 0) n 688 arch/x86/events/intel/uncore.c fake_box->n_events = n; n 690 arch/x86/events/intel/uncore.c ret = uncore_assign_events(fake_box, NULL, n); n 34 arch/x86/events/intel/uncore.h #define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff) n 122 arch/x86/events/intel/uncore_nhmex.c #define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7ULL << (11 + 3 * (n))) n 125 arch/x86/events/intel/uncore_nhmex.c #define WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7ULL << (12 + 3 * (n))) n 152 arch/x86/events/intel/uncore_nhmex.c #define NHMEX_R_MSR_PORTN_QLX_CFG(n) \ n 153 arch/x86/events/intel/uncore_nhmex.c ((n) < 4 ? (0xe0c + (n)) : (0xe2c + (n) - 4)) n 154 arch/x86/events/intel/uncore_nhmex.c #define NHMEX_R_MSR_PORTN_IPERF_CFG0(n) (0xe04 + (n)) n 155 arch/x86/events/intel/uncore_nhmex.c #define NHMEX_R_MSR_PORTN_IPERF_CFG1(n) (0xe24 + (n)) n 156 arch/x86/events/intel/uncore_nhmex.c #define NHMEX_R_MSR_PORTN_XBR_OFFSET(n) \ n 157 arch/x86/events/intel/uncore_nhmex.c (((n) < 4 ? 
0 : 0x10) + (n) * 4) n 158 arch/x86/events/intel/uncore_nhmex.c #define NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) \ n 159 arch/x86/events/intel/uncore_nhmex.c (0xe60 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n)) n 160 arch/x86/events/intel/uncore_nhmex.c #define NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(n) \ n 161 arch/x86/events/intel/uncore_nhmex.c (NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 1) n 162 arch/x86/events/intel/uncore_nhmex.c #define NHMEX_R_MSR_PORTN_XBR_SET1_MASK(n) \ n 163 arch/x86/events/intel/uncore_nhmex.c (NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 2) n 164 arch/x86/events/intel/uncore_nhmex.c #define NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) \ n 165 arch/x86/events/intel/uncore_nhmex.c (0xe70 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n)) n 166 arch/x86/events/intel/uncore_nhmex.c #define NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(n) \ n 167 arch/x86/events/intel/uncore_nhmex.c (NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 1) n 168 arch/x86/events/intel/uncore_nhmex.c #define NHMEX_R_MSR_PORTN_XBR_SET2_MASK(n) \ n 169 arch/x86/events/intel/uncore_nhmex.c (NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 2) n 187 arch/x86/events/intel/uncore_nhmex.c #define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \ n 188 arch/x86/events/intel/uncore_nhmex.c ((1ULL << (n)) - 1))) n 165 arch/x86/events/intel/uncore_snbep.c #define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \ n 166 arch/x86/events/intel/uncore_snbep.c ((1ULL << (n)) - 1))) n 280 arch/x86/events/perf_event.h #define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) { \ n 281 arch/x86/events/perf_event.h { .idxmsk64 = (n) }, \ n 290 arch/x86/events/perf_event.h #define __EVENT_CONSTRAINT(c, n, m, w, o, f) \ n 291 arch/x86/events/perf_event.h __EVENT_CONSTRAINT_RANGE(c, c, n, m, w, o, f) n 293 arch/x86/events/perf_event.h #define EVENT_CONSTRAINT(c, n, m) \ n 294 arch/x86/events/perf_event.h __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0) n 300 arch/x86/events/perf_event.h #define EVENT_CONSTRAINT_RANGE(c, e, n, m) \ n 301 arch/x86/events/perf_event.h __EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0) n 303 arch/x86/events/perf_event.h #define INTEL_EXCLEVT_CONSTRAINT(c, n) \ n 304 arch/x86/events/perf_event.h __EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\ n 328 arch/x86/events/perf_event.h #define EVENT_CONSTRAINT_OVERLAP(c, n, m) \ n 329 arch/x86/events/perf_event.h __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0) n 334 arch/x86/events/perf_event.h #define INTEL_EVENT_CONSTRAINT(c, n) \ n 335 arch/x86/events/perf_event.h EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT) n 340 arch/x86/events/perf_event.h #define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n) \ n 341 arch/x86/events/perf_event.h EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT) n 357 arch/x86/events/perf_event.h #define FIXED_EVENT_CONSTRAINT(c, n) \ n 358 arch/x86/events/perf_event.h EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS) n 363 arch/x86/events/perf_event.h #define INTEL_UEVENT_CONSTRAINT(c, n) \ n 364 arch/x86/events/perf_event.h EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK) n 367 arch/x86/events/perf_event.h #define INTEL_UBIT_EVENT_CONSTRAINT(c, n) \ n 368 arch/x86/events/perf_event.h EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c)) n 371 arch/x86/events/perf_event.h #define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n) \ n 372 arch/x86/events/perf_event.h EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS) n 374 arch/x86/events/perf_event.h #define INTEL_EXCLUEVT_CONSTRAINT(c, n) \ n 375 arch/x86/events/perf_event.h __EVENT_CONSTRAINT(c, n, 
INTEL_ARCH_EVENT_MASK, \ n 376 arch/x86/events/perf_event.h HWEIGHT(n), 0, PERF_X86_EVENT_EXCL) n 378 arch/x86/events/perf_event.h #define INTEL_PLD_CONSTRAINT(c, n) \ n 379 arch/x86/events/perf_event.h __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \ n 380 arch/x86/events/perf_event.h HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT) n 382 arch/x86/events/perf_event.h #define INTEL_PST_CONSTRAINT(c, n) \ n 383 arch/x86/events/perf_event.h __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \ n 384 arch/x86/events/perf_event.h HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST) n 387 arch/x86/events/perf_event.h #define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \ n 388 arch/x86/events/perf_event.h EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS) n 390 arch/x86/events/perf_event.h #define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n) \ n 391 arch/x86/events/perf_event.h EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS) n 394 arch/x86/events/perf_event.h #define INTEL_ALL_EVENT_CONSTRAINT(code, n) \ n 395 arch/x86/events/perf_event.h EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS) n 398 arch/x86/events/perf_event.h #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \ n 399 arch/x86/events/perf_event.h __EVENT_CONSTRAINT(code, n, \ n 401 arch/x86/events/perf_event.h HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW) n 404 arch/x86/events/perf_event.h #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \ n 405 arch/x86/events/perf_event.h __EVENT_CONSTRAINT(code, n, \ n 407 arch/x86/events/perf_event.h HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW) n 409 arch/x86/events/perf_event.h #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(code, end, n) \ n 410 arch/x86/events/perf_event.h __EVENT_CONSTRAINT_RANGE(code, end, n, \ n 412 arch/x86/events/perf_event.h HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW) n 414 arch/x86/events/perf_event.h #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \ n 415 arch/x86/events/perf_event.h __EVENT_CONSTRAINT(code, n, \ n 417 arch/x86/events/perf_event.h HWEIGHT(n), 0, \ n 421 arch/x86/events/perf_event.h #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \ n 422 arch/x86/events/perf_event.h __EVENT_CONSTRAINT(code, n, \ n 424 arch/x86/events/perf_event.h HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW) n 426 arch/x86/events/perf_event.h #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \ n 427 arch/x86/events/perf_event.h __EVENT_CONSTRAINT(code, n, \ n 429 arch/x86/events/perf_event.h HWEIGHT(n), 0, \ n 433 arch/x86/events/perf_event.h #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \ n 434 arch/x86/events/perf_event.h __EVENT_CONSTRAINT(code, n, \ n 436 arch/x86/events/perf_event.h HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW) n 438 arch/x86/events/perf_event.h #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \ n 439 arch/x86/events/perf_event.h __EVENT_CONSTRAINT(code, n, \ n 441 arch/x86/events/perf_event.h HWEIGHT(n), 0, \ n 445 arch/x86/events/perf_event.h #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \ n 446 arch/x86/events/perf_event.h __EVENT_CONSTRAINT(code, n, \ n 448 arch/x86/events/perf_event.h HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW) n 581 arch/x86/events/perf_event.h int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign); n 847 arch/x86/events/perf_event.h int perf_assign_events(struct event_constraint **constraints, int n, n 849 arch/x86/events/perf_event.h int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign); n 
135 arch/x86/include/asm/apicdef.h #define APIC_EILVTn(n) (0x500 + 0x10 * n) n 74 arch/x86/include/asm/atomic64_32.h static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n) n 76 arch/x86/include/asm/atomic64_32.h return arch_cmpxchg64(&v->counter, o, n); n 87 arch/x86/include/asm/atomic64_32.h static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n) n 90 arch/x86/include/asm/atomic64_32.h unsigned high = (unsigned)(n >> 32); n 91 arch/x86/include/asm/atomic64_32.h unsigned low = (unsigned)n; n 29 arch/x86/include/asm/bitops.h #define BIT_64(n) (U64_C(1) << (n)) n 39 arch/x86/include/asm/cmpxchg_32.h #define arch_cmpxchg64(ptr, o, n) \ n 41 arch/x86/include/asm/cmpxchg_32.h (unsigned long long)(n))) n 42 arch/x86/include/asm/cmpxchg_32.h #define arch_cmpxchg64_local(ptr, o, n) \ n 44 arch/x86/include/asm/cmpxchg_32.h (unsigned long long)(n))) n 79 arch/x86/include/asm/cmpxchg_32.h #define arch_cmpxchg64(ptr, o, n) \ n 83 arch/x86/include/asm/cmpxchg_32.h __typeof__(*(ptr)) __new = (n); \ n 96 arch/x86/include/asm/cmpxchg_32.h #define arch_cmpxchg64_local(ptr, o, n) \ n 100 arch/x86/include/asm/cmpxchg_32.h __typeof__(*(ptr)) __new = (n); \ n 10 arch/x86/include/asm/cmpxchg_64.h #define arch_cmpxchg64(ptr, o, n) \ n 13 arch/x86/include/asm/cmpxchg_64.h arch_cmpxchg((ptr), (o), (n)); \ n 16 arch/x86/include/asm/cmpxchg_64.h #define arch_cmpxchg64_local(ptr, o, n) \ n 19 arch/x86/include/asm/cmpxchg_64.h arch_cmpxchg_local((ptr), (o), (n)); \ n 389 arch/x86/include/asm/desc.h void update_intr_gate(unsigned int n, const void *addr); n 390 arch/x86/include/asm/desc.h void alloc_intr_gate(unsigned int n, const void *addr); n 22 arch/x86/include/asm/div64.h #define do_div(n, base) \ n 27 arch/x86/include/asm/div64.h __mod = n & (__base - 1); \ n 28 arch/x86/include/asm/div64.h n >>= ilog2(__base); \ n 30 arch/x86/include/asm/div64.h asm("" : "=a" (__low), "=d" (__high) : "A" (n));\ n 38 arch/x86/include/asm/div64.h asm("" : "=A" (n) : "a" (__low), "d" (__high)); \ n 17 arch/x86/include/asm/hpet.h #define HPET_Tn_CFG(n) (0x100 + 0x20 * n) n 18 arch/x86/include/asm/hpet.h #define HPET_Tn_CMP(n) (0x108 + 0x20 * n) n 19 arch/x86/include/asm/hpet.h #define HPET_Tn_ROUTE(n) (0x110 + 0x20 * n) n 54 arch/x86/include/asm/irq_remapping.h arch_create_remap_msi_irq_domain(struct irq_domain *par, const char *n, int id); n 68 arch/x86/include/asm/kvm_page_track.h struct kvm_page_track_notifier_node *n); n 71 arch/x86/include/asm/kvm_page_track.h struct kvm_page_track_notifier_node *n); n 123 arch/x86/include/asm/local.h #define local_cmpxchg(l, o, n) \ n 124 arch/x86/include/asm/local.h (cmpxchg_local(&((l)->a.counter), (o), (n))) n 126 arch/x86/include/asm/local.h #define local_xchg(l, n) (xchg(&((l)->a.counter), (n))) n 126 arch/x86/include/asm/mpspec.h #define physids_shift_right(d, s, n) \ n 127 arch/x86/include/asm/mpspec.h bitmap_shift_right((d).mask, (s).mask, n, MAX_LOCAL_APIC) n 129 arch/x86/include/asm/mpspec.h #define physids_shift_left(d, s, n) \ n 130 arch/x86/include/asm/mpspec.h bitmap_shift_left((d).mask, (s).mask, n, MAX_LOCAL_APIC) n 48 arch/x86/include/asm/nmi.h #define register_nmi_handler(t, fn, fg, n, init...) 
\ n 52 arch/x86/include/asm/nmi.h .name = (n), \ n 256 arch/x86/include/asm/ptrace.h static inline unsigned long *regs_get_kernel_stack_nth_addr(struct pt_regs *regs, unsigned int n) n 260 arch/x86/include/asm/ptrace.h addr += n; n 280 arch/x86/include/asm/ptrace.h unsigned int n) n 286 arch/x86/include/asm/ptrace.h addr = regs_get_kernel_stack_nth_addr(regs, n); n 307 arch/x86/include/asm/ptrace.h unsigned int n) n 326 arch/x86/include/asm/ptrace.h if (n >= NR_REG_ARGUMENTS) { n 327 arch/x86/include/asm/ptrace.h n -= NR_REG_ARGUMENTS - 1; n 328 arch/x86/include/asm/ptrace.h return regs_get_kernel_stack_nth(regs, n); n 330 arch/x86/include/asm/ptrace.h return regs_get_register(regs, argument_offs[n]); n 33 arch/x86/include/asm/string_32.h static __always_inline void *__memcpy(void *to, const void *from, size_t n) n 43 arch/x86/include/asm/string_32.h : "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from) n 53 arch/x86/include/asm/string_32.h size_t n) n 56 arch/x86/include/asm/string_32.h if (!n) n 59 arch/x86/include/asm/string_32.h switch (n) { n 89 arch/x86/include/asm/string_32.h if (n >= 5 * 4) { n 94 arch/x86/include/asm/string_32.h : "0" (n / 4), "1" (edi), "2" (esi) n 99 arch/x86/include/asm/string_32.h if (n >= 4 * 4) n 104 arch/x86/include/asm/string_32.h if (n >= 3 * 4) n 109 arch/x86/include/asm/string_32.h if (n >= 2 * 4) n 114 arch/x86/include/asm/string_32.h if (n >= 1 * 4) n 120 arch/x86/include/asm/string_32.h switch (n % 4) { n 171 arch/x86/include/asm/string_32.h #define memcpy(t, f, n) \ n 172 arch/x86/include/asm/string_32.h (__builtin_constant_p((n)) \ n 173 arch/x86/include/asm/string_32.h ? __constant_memcpy3d((t), (f), (n)) \ n 174 arch/x86/include/asm/string_32.h : __memcpy3d((t), (f), (n))) n 182 arch/x86/include/asm/string_32.h #define memcpy(t, f, n) __builtin_memcpy(t, f, n) n 188 arch/x86/include/asm/string_32.h void *memmove(void *dest, const void *src, size_t n); n 232 arch/x86/include/asm/string_32.h static inline void *memset16(uint16_t *s, uint16_t v, size_t n) n 238 arch/x86/include/asm/string_32.h : "a" (v), "1" (s), "0" (n) n 244 arch/x86/include/asm/string_32.h static inline void *memset32(uint32_t *s, uint32_t v, size_t n) n 250 arch/x86/include/asm/string_32.h : "a" (v), "1" (s), "0" (n) n 18 arch/x86/include/asm/string_64.h void *memset(void *s, int c, size_t n); n 19 arch/x86/include/asm/string_64.h void *__memset(void *s, int c, size_t n); n 22 arch/x86/include/asm/string_64.h static inline void *memset16(uint16_t *s, uint16_t v, size_t n) n 28 arch/x86/include/asm/string_64.h : "a" (v), "1" (s), "0" (n) n 34 arch/x86/include/asm/string_64.h static inline void *memset32(uint32_t *s, uint32_t v, size_t n) n 40 arch/x86/include/asm/string_64.h : "a" (v), "1" (s), "0" (n) n 46 arch/x86/include/asm/string_64.h static inline void *memset64(uint64_t *s, uint64_t v, size_t n) n 52 arch/x86/include/asm/string_64.h : "a" (v), "1" (s), "0" (n) n 77 arch/x86/include/asm/string_64.h #define memset(s, c, n) __memset(s, c, n) n 102 arch/x86/include/asm/syscall.h unsigned int i, unsigned int n, n 105 arch/x86/include/asm/syscall.h BUG_ON(i + n > 6); n 106 arch/x86/include/asm/syscall.h memcpy(&regs->bx + i, args, n * sizeof(args[0])); n 578 arch/x86/include/asm/uaccess.h copy_from_user_nmi(void *to, const void __user *from, unsigned long n); n 582 arch/x86/include/asm/uaccess.h extern __must_check long strnlen_user(const char __user *str, long n); n 13 arch/x86/include/asm/uaccess_32.h (void *to, const void *from, unsigned long n); n 15
arch/x86/include/asm/uaccess_32.h (void *to, const void __user *from, unsigned long n); n 18 arch/x86/include/asm/uaccess_32.h raw_copy_to_user(void __user *to, const void *from, unsigned long n) n 20 arch/x86/include/asm/uaccess_32.h return __copy_user_ll((__force void *)to, from, n); n 24 arch/x86/include/asm/uaccess_32.h raw_copy_from_user(void *to, const void __user *from, unsigned long n) n 26 arch/x86/include/asm/uaccess_32.h if (__builtin_constant_p(n)) { n 29 arch/x86/include/asm/uaccess_32.h switch (n) { n 53 arch/x86/include/asm/uaccess_32.h return __copy_user_ll(to, (__force const void *)from, n); n 58 arch/x86/include/asm/uaccess_32.h unsigned long n) n 60 arch/x86/include/asm/uaccess_32.h return __copy_from_user_ll_nocache_nozero(to, from, n); n 345 arch/x86/include/asm/uv/uv_hub.h #define UV_NASID_TO_PNODE(n) (((n) >> 1) & uv_hub_info->pnode_mask) n 2631 arch/x86/kernel/apic/io_apic.c unsigned long n; n 2639 arch/x86/kernel/apic/io_apic.c n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource); n 2640 arch/x86/kernel/apic/io_apic.c n *= nr_ioapics; n 2642 arch/x86/kernel/apic/io_apic.c mem = memblock_alloc(n, SMP_CACHE_BYTES); n 2644 arch/x86/kernel/apic/io_apic.c panic("%s: Failed to allocate %lu bytes\n", __func__, n); n 832 arch/x86/kernel/apic/x2apic_uv_x.c int i, n, shift, m_io, max_io; n 845 arch/x86/kernel/apic/x2apic_uv_x.c n = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH; n 856 arch/x86/kernel/apic/x2apic_uv_x.c n = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH; n 870 arch/x86/kernel/apic/x2apic_uv_x.c for (i = 0; i < n; i++) { n 886 arch/x86/kernel/apic/x2apic_uv_x.c if (i != n-1) n 891 arch/x86/kernel/apic/x2apic_uv_x.c if (lnasid != -1 || (i == n-1 && nasid != -1)) { n 133 arch/x86/kernel/cpu/amd.c int n; n 144 arch/x86/kernel/cpu/amd.c n = K6_BUG_LOOP; n 148 arch/x86/kernel/cpu/amd.c while (n--) n 535 arch/x86/kernel/cpu/cacheinfo.c int n = 1; n 542 arch/x86/kernel/cpu/cacheinfo.c n += 2; n 544 arch/x86/kernel/cpu/cacheinfo.c n += 1; n 546 arch/x86/kernel/cpu/cacheinfo.c amd_l3_attrs = kcalloc(n, sizeof(*amd_l3_attrs), GFP_KERNEL); n 550 arch/x86/kernel/cpu/cacheinfo.c n = 0; n 552 arch/x86/kernel/cpu/cacheinfo.c amd_l3_attrs[n++] = &dev_attr_cache_disable_0.attr; n 553 arch/x86/kernel/cpu/cacheinfo.c amd_l3_attrs[n++] = &dev_attr_cache_disable_1.attr; n 556 arch/x86/kernel/cpu/cacheinfo.c amd_l3_attrs[n++] = &dev_attr_subcaches.attr; n 783 arch/x86/kernel/cpu/cacheinfo.c int j, n; n 792 arch/x86/kernel/cpu/cacheinfo.c n = cpuid_eax(2) & 0xFF; n 794 arch/x86/kernel/cpu/cacheinfo.c for (i = 0 ; i < n ; i++) { n 669 arch/x86/kernel/cpu/common.c unsigned int n, dummy, ebx, ecx, edx, l2size; n 671 arch/x86/kernel/cpu/common.c n = c->extended_cpuid_level; n 673 arch/x86/kernel/cpu/common.c if (n >= 0x80000005) { n 682 arch/x86/kernel/cpu/common.c if (n < 0x80000006) /* Some chips just has a large L1. 
*/ n 86 arch/x86/kernel/cpu/hypervisor.c unsigned int i, n = size / sizeof(void *); n 90 arch/x86/kernel/cpu/hypervisor.c for (i = 0; i < n; i++) n 944 arch/x86/kernel/cpu/intel.c int i, j, n; n 952 arch/x86/kernel/cpu/intel.c n = cpuid_eax(2) & 0xFF; n 954 arch/x86/kernel/cpu/intel.c for (i = 0 ; i < n ; i++) { n 337 arch/x86/kernel/cpu/mce/inject.c int n; n 339 arch/x86/kernel/cpu/mce/inject.c n = sprintf(buf, "%s\n", flags_options[inj_type]); n 341 arch/x86/kernel/cpu/mce/inject.c return simple_read_from_buffer(ubuf, cnt, ppos, buf, n); n 681 arch/x86/kernel/cpu/mtrr/mtrr.c #define SIZE_OR_MASK_BITS(n) (~((1ULL << ((n) - PAGE_SHIFT)) - 1)) n 754 arch/x86/kernel/cpu/resctrl/core.c #define RDT_OPT(idx, n, f) \ n 756 arch/x86/kernel/cpu/resctrl/core.c .name = n, \ n 684 arch/x86/kernel/e820.c struct e820_table *n; n 688 arch/x86/kernel/e820.c n = kmemdup(e820_table, size, GFP_KERNEL); n 689 arch/x86/kernel/e820.c BUG_ON(!n); n 690 arch/x86/kernel/e820.c e820_table = n; n 693 arch/x86/kernel/e820.c n = kmemdup(e820_table_kexec, size, GFP_KERNEL); n 694 arch/x86/kernel/e820.c BUG_ON(!n); n 695 arch/x86/kernel/e820.c e820_table_kexec = n; n 698 arch/x86/kernel/e820.c n = kmemdup(e820_table_firmware, size, GFP_KERNEL); n 699 arch/x86/kernel/e820.c BUG_ON(!n); n 700 arch/x86/kernel/e820.c e820_table_firmware = n; n 32 arch/x86/kernel/early_printk.c static void early_vga_write(struct console *con, const char *str, unsigned n) n 37 arch/x86/kernel/early_printk.c while ((c = *str++) != '\0' && n-- > 0) { n 123 arch/x86/kernel/early_printk.c static void early_serial_write(struct console *con, const char *s, unsigned n) n 125 arch/x86/kernel/early_printk.c while (*s && n-- > 0) { n 138 arch/x86/kernel/espfix_64.c int n, node; n 172 arch/x86/kernel/espfix_64.c for (n = 0; n < ESPFIX_PUD_CLONES; n++) n 173 arch/x86/kernel/espfix_64.c set_pud(&pud_p[n], pud); n 184 arch/x86/kernel/espfix_64.c for (n = 0; n < ESPFIX_PMD_CLONES; n++) n 185 arch/x86/kernel/espfix_64.c set_pmd(&pmd_p[n], pmd); n 195 arch/x86/kernel/espfix_64.c for (n = 0; n < ESPFIX_PTE_CLONES; n++) n 196 arch/x86/kernel/espfix_64.c set_pte(&pte_p[n*PTE_STRIDE], pte); n 18 arch/x86/kernel/fpu/regset.c return regset->n; n 24 arch/x86/kernel/fpu/regset.c return regset->n; n 180 arch/x86/kernel/fpu/regset.c #define FPREG_ADDR(f, n) ((void *)&(f)->st_space + (n) * 16) n 230 arch/x86/kernel/idt.c static void set_intr_gate(unsigned int n, const void *addr) n 234 arch/x86/kernel/idt.c BUG_ON(n > 0xFF); n 237 arch/x86/kernel/idt.c data.vector = n; n 355 arch/x86/kernel/idt.c void __init update_intr_gate(unsigned int n, const void *addr) n 357 arch/x86/kernel/idt.c if (WARN_ON_ONCE(!test_bit(n, system_vectors))) n 359 arch/x86/kernel/idt.c set_intr_gate(n, addr); n 362 arch/x86/kernel/idt.c void alloc_intr_gate(unsigned int n, const void *addr) n 364 arch/x86/kernel/idt.c BUG_ON(n < FIRST_SYSTEM_VECTOR); n 365 arch/x86/kernel/idt.c if (!test_and_set_bit(n, system_vectors)) n 366 arch/x86/kernel/idt.c set_intr_gate(n, addr); n 89 arch/x86/kernel/kvm.c struct kvm_task_sleep_node *n = n 90 arch/x86/kernel/kvm.c hlist_entry(p, typeof(*n), link); n 91 arch/x86/kernel/kvm.c if (n->token == token) n 92 arch/x86/kernel/kvm.c return n; n 106 arch/x86/kernel/kvm.c struct kvm_task_sleep_node n, *e; n 123 arch/x86/kernel/kvm.c n.token = token; n 124 arch/x86/kernel/kvm.c n.cpu = smp_processor_id(); n 125 arch/x86/kernel/kvm.c n.halted = is_idle_task(current) || n 129 arch/x86/kernel/kvm.c init_swait_queue_head(&n.wq); n 130 arch/x86/kernel/kvm.c 
hlist_add_head(&n.link, &b->list); n 134 arch/x86/kernel/kvm.c if (!n.halted) n 135 arch/x86/kernel/kvm.c prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE); n 136 arch/x86/kernel/kvm.c if (hlist_unhashed(&n.link)) n 141 arch/x86/kernel/kvm.c if (!n.halted) { n 155 arch/x86/kernel/kvm.c if (!n.halted) n 156 arch/x86/kernel/kvm.c finish_swait(&n.wq, &wait); n 163 arch/x86/kernel/kvm.c static void apf_task_wake_one(struct kvm_task_sleep_node *n) n 165 arch/x86/kernel/kvm.c hlist_del_init(&n->link); n 166 arch/x86/kernel/kvm.c if (n->halted) n 167 arch/x86/kernel/kvm.c smp_send_reschedule(n->cpu); n 168 arch/x86/kernel/kvm.c else if (swq_has_sleeper(&n->wq)) n 169 arch/x86/kernel/kvm.c swake_up_one(&n->wq); n 181 arch/x86/kernel/kvm.c struct kvm_task_sleep_node *n = n 182 arch/x86/kernel/kvm.c hlist_entry(p, typeof(*n), link); n 183 arch/x86/kernel/kvm.c if (n->cpu == smp_processor_id()) n 184 arch/x86/kernel/kvm.c apf_task_wake_one(n); n 194 arch/x86/kernel/kvm.c struct kvm_task_sleep_node *n; n 203 arch/x86/kernel/kvm.c n = _find_apf_task(b, token); n 204 arch/x86/kernel/kvm.c if (!n) { n 209 arch/x86/kernel/kvm.c n = kzalloc(sizeof(*n), GFP_ATOMIC); n 210 arch/x86/kernel/kvm.c if (!n) { n 219 arch/x86/kernel/kvm.c n->token = token; n 220 arch/x86/kernel/kvm.c n->cpu = smp_processor_id(); n 221 arch/x86/kernel/kvm.c init_swait_queue_head(&n->wq); n 222 arch/x86/kernel/kvm.c hlist_add_head(&n->link, &b->list); n 224 arch/x86/kernel/kvm.c apf_task_wake_one(n); n 193 arch/x86/kernel/nmi.c struct nmiaction *n; n 198 arch/x86/kernel/nmi.c list_for_each_entry_rcu(n, &desc->head, list) { n 203 arch/x86/kernel/nmi.c if (!strcmp(n->name, name)) { n 205 arch/x86/kernel/nmi.c "Trying to free NMI (%s) from NMI context!\n", n->name); n 206 arch/x86/kernel/nmi.c list_del_rcu(&n->list); n 615 arch/x86/kernel/ptrace.c static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n) n 620 arch/x86/kernel/ptrace.c if (n < HBP_NUM) { n 621 arch/x86/kernel/ptrace.c int index = array_index_nospec(n, HBP_NUM); n 626 arch/x86/kernel/ptrace.c } else if (n == 6) { n 628 arch/x86/kernel/ptrace.c } else if (n == 7) { n 673 arch/x86/kernel/ptrace.c static int ptrace_set_debugreg(struct task_struct *tsk, int n, n 680 arch/x86/kernel/ptrace.c if (n < HBP_NUM) { n 681 arch/x86/kernel/ptrace.c rc = ptrace_set_breakpoint_addr(tsk, n, val); n 682 arch/x86/kernel/ptrace.c } else if (n == 6) { n 685 arch/x86/kernel/ptrace.c } else if (n == 7) { n 1220 arch/x86/kernel/ptrace.c .n = sizeof(struct user_regs_struct) / sizeof(long), n 1226 arch/x86/kernel/ptrace.c .n = sizeof(struct user_i387_struct) / sizeof(long), n 1238 arch/x86/kernel/ptrace.c .n = IO_BITMAP_LONGS, n 1246 arch/x86/kernel/ptrace.c .regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets) n 1261 arch/x86/kernel/ptrace.c .n = sizeof(struct user_regs_struct32) / sizeof(u32), n 1267 arch/x86/kernel/ptrace.c .n = sizeof(struct user_i387_ia32_struct) / sizeof(u32), n 1273 arch/x86/kernel/ptrace.c .n = sizeof(struct user32_fxsr_struct) / sizeof(u32), n 1285 arch/x86/kernel/ptrace.c .n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN, n 1293 arch/x86/kernel/ptrace.c .n = IO_BITMAP_BYTES / sizeof(u32), n 1301 arch/x86/kernel/ptrace.c .regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets) n 1314 arch/x86/kernel/ptrace.c x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64); n 1317 arch/x86/kernel/ptrace.c x86_32_regsets[REGSET_XSTATE].n = size / sizeof(u64); n 85 arch/x86/kernel/tls.c const struct user_desc *info, int n) n 96 
arch/x86/kernel/tls.c while (n-- > 0) { n 252 arch/x86/kernel/tls.c int n = GDT_ENTRY_TLS_ENTRIES; n 253 arch/x86/kernel/tls.c while (n > 0 && desc_empty(&t->tls_array[n - 1])) n 254 arch/x86/kernel/tls.c --n; n 255 arch/x86/kernel/tls.c return n; n 455 arch/x86/kernel/vm86_32.c #define val_byte(val, n) (((__u8 *)&val)[n]) n 1517 arch/x86/kvm/emulate.c unsigned int in_page, n; n 1523 arch/x86/kvm/emulate.c n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count); n 1524 arch/x86/kvm/emulate.c if (n == 0) n 1525 arch/x86/kvm/emulate.c n = 1; n 1527 arch/x86/kvm/emulate.c if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n)) n 1529 arch/x86/kvm/emulate.c rc->end = n * size; n 2404 arch/x86/kvm/emulate.c int n) n 2410 arch/x86/kvm/emulate.c selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4); n 2412 arch/x86/kvm/emulate.c if (n < 3) n 2413 arch/x86/kvm/emulate.c offset = 0x7f84 + n * 12; n 2415 arch/x86/kvm/emulate.c offset = 0x7f2c + (n - 3) * 12; n 2420 arch/x86/kvm/emulate.c ctxt->ops->set_segment(ctxt, selector, &desc, 0, n); n 2426 arch/x86/kvm/emulate.c int n) n 2433 arch/x86/kvm/emulate.c offset = 0x7e00 + n * 16; n 2441 arch/x86/kvm/emulate.c ctxt->ops->set_segment(ctxt, selector, &desc, base3, n); n 2445 arch/x86/kvm/mmu.c int n; n 2447 arch/x86/kvm/mmu.c for (n = i+1; n < pvec->nr; n++) { n 2448 arch/x86/kvm/mmu.c struct kvm_mmu_page *sp = pvec->page[n].sp; n 2449 arch/x86/kvm/mmu.c unsigned idx = pvec->page[n].idx; n 2459 arch/x86/kvm/mmu.c return n; n 236 arch/x86/kvm/mtrr.c int n; n 238 arch/x86/kvm/mtrr.c n = (mtrr_seg->end - mtrr_seg->start) >> mtrr_seg->range_shift; n 239 arch/x86/kvm/mtrr.c return mtrr_seg->range_start + n - 1; n 185 arch/x86/kvm/page_track.c struct kvm_page_track_notifier_node *n) n 192 arch/x86/kvm/page_track.c hlist_add_head_rcu(&n->node, &head->track_notifier_list); n 203 arch/x86/kvm/page_track.c struct kvm_page_track_notifier_node *n) n 210 arch/x86/kvm/page_track.c hlist_del_rcu(&n->node); n 227 arch/x86/kvm/page_track.c struct kvm_page_track_notifier_node *n; n 236 arch/x86/kvm/page_track.c hlist_for_each_entry_rcu(n, &head->track_notifier_list, node) n 237 arch/x86/kvm/page_track.c if (n->track_write) n 238 arch/x86/kvm/page_track.c n->track_write(vcpu, gpa, new, bytes, n); n 252 arch/x86/kvm/page_track.c struct kvm_page_track_notifier_node *n; n 261 arch/x86/kvm/page_track.c hlist_for_each_entry_rcu(n, &head->track_notifier_list, node) n 262 arch/x86/kvm/page_track.c if (n->track_flush_slot) n 263 arch/x86/kvm/page_track.c n->track_flush_slot(kvm, slot, n); n 1827 arch/x86/kvm/svm.c unsigned long ulen, unsigned long *n, n 1869 arch/x86/kvm/svm.c *n = npages; n 6871 arch/x86/kvm/svm.c unsigned long n; n 6895 arch/x86/kvm/svm.c src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0); n 6899 arch/x86/kvm/svm.c dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1); n 6901 arch/x86/kvm/svm.c sev_unpin_memory(kvm, src_p, n); n 6936 arch/x86/kvm/svm.c sev_unpin_memory(kvm, src_p, n); n 6937 arch/x86/kvm/svm.c sev_unpin_memory(kvm, dst_p, n); n 6957 arch/x86/kvm/svm.c unsigned long n; n 6966 arch/x86/kvm/svm.c pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1); n 6974 arch/x86/kvm/svm.c if (get_num_contig_pages(0, pages, n) != n) { n 7015 arch/x86/kvm/svm.c sev_unpin_memory(kvm, pages, n); n 15 arch/x86/kvm/vmx/evmcs.c #define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n))))) n 75 arch/x86/kvm/vmx/evmcs.h #define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> 
(16 - (n))))) n 5 arch/x86/kvm/vmx/vmcs12.c #define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n))))) n 375 arch/x86/kvm/vmx/vmcs12.h #define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n))))) n 2261 arch/x86/kvm/vmx/vmx.c struct loaded_vmcs *v, *n; n 2263 arch/x86/kvm/vmx/vmx.c list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu), n 5687 arch/x86/kvm/vmx/vmx.c int i, n; n 5824 arch/x86/kvm/vmx/vmx.c n = vmcs_read32(CR3_TARGET_COUNT); n 5825 arch/x86/kvm/vmx/vmx.c for (i = 0; i + 1 < n; i += 4) n 5829 arch/x86/kvm/vmx/vmx.c if (i < n) n 3225 arch/x86/kvm/x86.c int r, n; n 3243 arch/x86/kvm/x86.c r = n = __msr_io(vcpu, &msrs, entries, do_msr); n 3251 arch/x86/kvm/x86.c r = n; n 3400 arch/x86/kvm/x86.c unsigned n; n 3405 arch/x86/kvm/x86.c n = msr_list.nmsrs; n 3410 arch/x86/kvm/x86.c if (n < msr_list.nmsrs) n 3453 arch/x86/kvm/x86.c unsigned int n; n 3458 arch/x86/kvm/x86.c n = msr_list.nmsrs; n 3463 arch/x86/kvm/x86.c if (n < msr_list.nmsrs) n 5291 arch/x86/kvm/x86.c int n; n 5294 arch/x86/kvm/x86.c n = min(len, 8); n 5296 arch/x86/kvm/x86.c !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) n 5297 arch/x86/kvm/x86.c && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v)) n 5299 arch/x86/kvm/x86.c handled += n; n 5300 arch/x86/kvm/x86.c addr += n; n 5301 arch/x86/kvm/x86.c len -= n; n 5302 arch/x86/kvm/x86.c v += n; n 5311 arch/x86/kvm/x86.c int n; n 5314 arch/x86/kvm/x86.c n = min(len, 8); n 5317 arch/x86/kvm/x86.c addr, n, v)) n 5318 arch/x86/kvm/x86.c && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v)) n 5320 arch/x86/kvm/x86.c trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v); n 5321 arch/x86/kvm/x86.c handled += n; n 5322 arch/x86/kvm/x86.c addr += n; n 5323 arch/x86/kvm/x86.c len -= n; n 5324 arch/x86/kvm/x86.c v += n; n 7715 arch/x86/kvm/x86.c static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n) n 7720 arch/x86/kvm/x86.c kvm_get_segment(vcpu, &seg, n); n 7721 arch/x86/kvm/x86.c put_smstate(u32, buf, 0x7fa8 + n * 4, seg.selector); n 7723 arch/x86/kvm/x86.c if (n < 3) n 7724 arch/x86/kvm/x86.c offset = 0x7f84 + n * 12; n 7726 arch/x86/kvm/x86.c offset = 0x7f2c + (n - 3) * 12; n 7734 arch/x86/kvm/x86.c static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n) n 7740 arch/x86/kvm/x86.c kvm_get_segment(vcpu, &seg, n); n 7741 arch/x86/kvm/x86.c offset = 0x7e00 + n * 16; n 319 arch/x86/kvm/x86.h #define do_shl32_div32(n, base) \ n 323 arch/x86/kvm/x86.h : "rm" (base), "0" (0), "1" ((u32) n)); \ n 324 arch/x86/kvm/x86.h n = __quot; \ n 30 arch/x86/lib/inat.c int n; n 32 arch/x86/lib/inat.c n = inat_escape_id(esc_attr); n 34 arch/x86/lib/inat.c table = inat_escape_tables[n][0]; n 38 arch/x86/lib/inat.c table = inat_escape_tables[n][lpfx_id]; n 49 arch/x86/lib/inat.c int n; n 51 arch/x86/lib/inat.c n = inat_group_id(grp_attr); n 53 arch/x86/lib/inat.c table = inat_group_tables[n][0]; n 57 arch/x86/lib/inat.c table = inat_group_tables[n][lpfx_id]; n 17 arch/x86/lib/insn.c #define validate_next(t, insn, n) \ n 18 arch/x86/lib/insn.c ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr) n 23 arch/x86/lib/insn.c #define __peek_nbyte_next(t, insn, n) \ n 24 arch/x86/lib/insn.c ({ t r = *(t*)((insn)->next_byte + n); r; }) n 29 arch/x86/lib/insn.c #define peek_nbyte_next(t, insn, n) \ n 30 arch/x86/lib/insn.c ({ if (unlikely(!validate_next(t, insn, n))) goto err_out; __peek_nbyte_next(t, insn, n); }) n 9 arch/x86/lib/iomem.c static __always_inline void rep_movs(void *to, const void *from, size_t 
n) n 21 arch/x86/lib/iomem.c : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from) n 25 arch/x86/lib/iomem.c void memcpy_fromio(void *to, const volatile void __iomem *from, size_t n) n 27 arch/x86/lib/iomem.c if (unlikely(!n)) n 33 arch/x86/lib/iomem.c n--; n 35 arch/x86/lib/iomem.c if (n > 1 && unlikely(2 & (unsigned long)from)) { n 37 arch/x86/lib/iomem.c n-=2; n 39 arch/x86/lib/iomem.c rep_movs(to, (const void *)from, n); n 43 arch/x86/lib/iomem.c void memcpy_toio(volatile void __iomem *to, const void *from, size_t n) n 45 arch/x86/lib/iomem.c if (unlikely(!n)) n 51 arch/x86/lib/iomem.c n--; n 53 arch/x86/lib/iomem.c if (n > 1 && unlikely(2 & (unsigned long)to)) { n 55 arch/x86/lib/iomem.c n-=2; n 57 arch/x86/lib/iomem.c rep_movs((void *)to, (const void *) from, n); n 8 arch/x86/lib/memcpy_32.c __visible void *memcpy(void *to, const void *from, size_t n) n 11 arch/x86/lib/memcpy_32.c return __memcpy3d(to, from, n); n 13 arch/x86/lib/memcpy_32.c return __memcpy(to, from, n); n 24 arch/x86/lib/memcpy_32.c __visible void *memmove(void *dest, const void *src, size_t n) n 201 arch/x86/lib/memcpy_32.c :"0" (n), n 17 arch/x86/lib/usercopy.c copy_from_user_nmi(void *to, const void __user *from, unsigned long n) n 21 arch/x86/lib/usercopy.c if (__range_not_ok(from, n, TASK_SIZE)) n 22 arch/x86/lib/usercopy.c return n; n 25 arch/x86/lib/usercopy.c return n; n 33 arch/x86/lib/usercopy.c ret = __copy_from_user_inatomic(to, from, n); n 21 arch/x86/lib/usercopy_32.c static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n) n 24 arch/x86/lib/usercopy_32.c if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask)) n 29 arch/x86/lib/usercopy_32.c #define movsl_is_ok(a1, a2, n) \ n 30 arch/x86/lib/usercopy_32.c __movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n)) n 67 arch/x86/lib/usercopy_32.c clear_user(void __user *to, unsigned long n) n 70 arch/x86/lib/usercopy_32.c if (access_ok(to, n)) n 71 arch/x86/lib/usercopy_32.c __do_clear_user(to, n); n 72 arch/x86/lib/usercopy_32.c return n; n 88 arch/x86/lib/usercopy_32.c __clear_user(void __user *to, unsigned long n) n 90 arch/x86/lib/usercopy_32.c __do_clear_user(to, n); n 91 arch/x86/lib/usercopy_32.c return n; n 332 arch/x86/lib/usercopy_32.c unsigned long __copy_user_ll(void *to, const void *from, unsigned long n) n 335 arch/x86/lib/usercopy_32.c if (movsl_is_ok(to, from, n)) n 336 arch/x86/lib/usercopy_32.c __copy_user(to, from, n); n 338 arch/x86/lib/usercopy_32.c n = __copy_user_intel(to, from, n); n 340 arch/x86/lib/usercopy_32.c return n; n 345 arch/x86/lib/usercopy_32.c unsigned long n) n 349 arch/x86/lib/usercopy_32.c if (n > 64 && static_cpu_has(X86_FEATURE_XMM2)) n 350 arch/x86/lib/usercopy_32.c n = __copy_user_intel_nocache(to, from, n); n 352 arch/x86/lib/usercopy_32.c __copy_user(to, from, n); n 354 arch/x86/lib/usercopy_32.c __copy_user(to, from, n); n 357 arch/x86/lib/usercopy_32.c return n; n 50 arch/x86/lib/usercopy_64.c unsigned long clear_user(void __user *to, unsigned long n) n 52 arch/x86/lib/usercopy_64.c if (access_ok(to, n)) n 53 arch/x86/lib/usercopy_64.c return __clear_user(to, n); n 54 arch/x86/lib/usercopy_64.c return n; n 307 arch/x86/math-emu/errors.c asmlinkage __visible void FPU_exception(int n) n 312 arch/x86/math-emu/errors.c if (n & EX_INTERNAL) { n 313 arch/x86/math-emu/errors.c int_type = n - EX_INTERNAL; n 314 arch/x86/math-emu/errors.c n = EX_INTERNAL; n 319 arch/x86/math-emu/errors.c n &= (SW_Exc_Mask); n 321 arch/x86/math-emu/errors.c partial_status |= n; n 325 arch/x86/math-emu/errors.c 
if (n & (SW_Stack_Fault | EX_Precision)) { n 326 arch/x86/math-emu/errors.c if (!(n & SW_C1)) n 334 arch/x86/math-emu/errors.c if ((~control_word & n & CW_Exceptions) || (n == EX_INTERNAL)) { n 337 arch/x86/math-emu/errors.c if ((exception_names[i].type & n) == n 346 arch/x86/math-emu/errors.c printk("FPU emulator: Unknown Exception: 0x%04x!\n", n); n 348 arch/x86/math-emu/errors.c if (n == EX_INTERNAL) { n 204 arch/x86/math-emu/fpu_emu.h asmlinkage int wm_sqrt(FPU_REG *n, int dummy1, int dummy2, n 8 arch/x86/math-emu/fpu_proto.h asmlinkage void FPU_exception(int n); n 110 arch/x86/math-emu/fpu_system.h #define FPU_copy_from_user(to, from, n) \ n 111 arch/x86/math-emu/fpu_system.h do { if (copy_from_user(to, from, n)) FPU_abort; } while (0) n 22 arch/x86/math-emu/fpu_trig.c unsigned long long st1, unsigned long long q, int n); n 747 arch/x86/math-emu/fpu_trig.c unsigned long long st1, unsigned long long q, int n) n 752 arch/x86/math-emu/fpu_trig.c x = st0 << n; n 33 arch/x86/math-emu/poly.h const unsigned long long terms[], const int n); n 39 arch/x86/math-emu/poly.h asmlinkage void shr_Xsig(Xsig *, const int n); n 396 arch/x86/mm/numa_emulation.c unsigned long n; n 399 arch/x86/mm/numa_emulation.c n = simple_strtoul(emu_cmdline, &emu_cmdline, 0); n 412 arch/x86/mm/numa_emulation.c n, &pi.blk[0], nid); n 415 arch/x86/mm/numa_emulation.c if (ret < n) { n 417 arch/x86/mm/numa_emulation.c __func__, i, ret, n); n 429 arch/x86/mm/numa_emulation.c unsigned long n; n 431 arch/x86/mm/numa_emulation.c n = simple_strtoul(emu_cmdline, &emu_cmdline, 0); n 432 arch/x86/mm/numa_emulation.c ret = split_nodes_interleave(&ei, &pi, 0, max_addr, n); n 347 arch/x86/pci/amd_bus.c unsigned int i, n; n 349 arch/x86/pci/amd_bus.c for (n = i = 0; !n && amd_nb_bus_dev_ranges[i].dev_limit; ++i) { n 365 arch/x86/pci/amd_bus.c ++n; n 17 arch/x86/pci/legacy.c int n; n 23 arch/x86/pci/legacy.c for (n=0; n <= pcibios_last_bus; n++) n 24 arch/x86/pci/legacy.c pcibios_scan_specific_bus(n); n 59 arch/x86/platform/olpc/olpc-xo15-sci.c const char *buf, size_t n) n 68 arch/x86/platform/olpc/olpc-xo15-sci.c return n; n 1717 arch/x86/platform/uv/tlb_uv.c unsigned long n; n 1734 arch/x86/platform/uv/tlb_uv.c n = uv_gpa_to_gnode(gpa); n 1741 arch/x86/platform/uv/tlb_uv.c (n << UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT | m)); n 187 arch/x86/platform/uv/uv_nmi.c int n = ARRAY_SIZE(valid_acts); n 197 arch/x86/platform/uv/uv_nmi.c for (i = 0; i < n; i++) n 201 arch/x86/platform/uv/uv_nmi.c if (i < n) { n 208 arch/x86/platform/uv/uv_nmi.c for (i = 0; i < n; i++) n 576 arch/x86/platform/uv/uv_nmi.c int i, j, k, n = num_online_cpus(); n 584 arch/x86/platform/uv/uv_nmi.c k = n - cpumask_weight(uv_nmi_cpu_mask); n 590 arch/x86/platform/uv/uv_nmi.c return n - k - 1; n 600 arch/x86/platform/uv/uv_nmi.c if (++k >= n) n 604 arch/x86/platform/uv/uv_nmi.c if (k >= n) { /* all in? 
*/ n 605 arch/x86/platform/uv/uv_nmi.c k = n; n 615 arch/x86/platform/uv/uv_nmi.c if (waiting && (n - k) == 1 && n 622 arch/x86/platform/uv/uv_nmi.c return n - k; n 20 arch/x86/um/os-Linux/task_size.c unsigned long n = ~0UL; n 33 arch/x86/um/os-Linux/task_size.c n = *address; n 50 arch/x86/um/os-Linux/task_size.c *address = n; n 58 arch/x86/um/os-Linux/task_size.c *address = n; n 30 arch/x86/um/ptrace_32.c int n; n 32 arch/x86/um/ptrace_32.c n = copy_from_user(&instr, (void __user *) addr, sizeof(instr)); n 33 arch/x86/um/ptrace_32.c if (n) { n 39 arch/x86/um/ptrace_32.c n = access_process_vm(current, addr, &instr, sizeof(instr), n 41 arch/x86/um/ptrace_32.c if (n != sizeof(instr)) { n 198 arch/x86/um/ptrace_32.c int err, n, cpu = task_cpu(child); n 206 arch/x86/um/ptrace_32.c n = copy_to_user(buf, &fpregs, sizeof(fpregs)); n 207 arch/x86/um/ptrace_32.c if(n > 0) n 210 arch/x86/um/ptrace_32.c return n; n 215 arch/x86/um/ptrace_32.c int n, cpu = task_cpu(child); n 218 arch/x86/um/ptrace_32.c n = copy_from_user(&fpregs, buf, sizeof(fpregs)); n 219 arch/x86/um/ptrace_32.c if (n > 0) n 228 arch/x86/um/ptrace_32.c int err, n, cpu = task_cpu(child); n 235 arch/x86/um/ptrace_32.c n = copy_to_user(buf, &fpregs, sizeof(fpregs)); n 236 arch/x86/um/ptrace_32.c if(n > 0) n 239 arch/x86/um/ptrace_32.c return n; n 244 arch/x86/um/ptrace_32.c int n, cpu = task_cpu(child); n 247 arch/x86/um/ptrace_32.c n = copy_from_user(&fpregs, buf, sizeof(fpregs)); n 248 arch/x86/um/ptrace_32.c if (n > 0) n 205 arch/x86/um/ptrace_64.c int n; n 207 arch/x86/um/ptrace_64.c n = copy_from_user(&instr, (void __user *) addr, sizeof(instr)); n 208 arch/x86/um/ptrace_64.c if (n) { n 215 arch/x86/um/ptrace_64.c n = access_process_vm(current, addr, &instr, sizeof(instr), n 217 arch/x86/um/ptrace_64.c if (n != sizeof(instr)) { n 229 arch/x86/um/ptrace_64.c int err, n, cpu = ((struct thread_info *) child->stack)->cpu; n 237 arch/x86/um/ptrace_64.c n = copy_to_user(buf, &fpregs, sizeof(fpregs)); n 238 arch/x86/um/ptrace_64.c if (n > 0) n 241 arch/x86/um/ptrace_64.c return n; n 246 arch/x86/um/ptrace_64.c int n, cpu = ((struct thread_info *) child->stack)->cpu; n 249 arch/x86/um/ptrace_64.c n = copy_from_user(&fpregs, buf, sizeof(fpregs)); n 250 arch/x86/um/ptrace_64.c if (n > 0) n 45 arch/x86/um/signal.c #define FPREG_ADDR(f, n) ((char *)&(f)->st_space + (n) * 16) n 393 arch/x86/xen/setup.c unsigned long n = end_pfn - start_pfn; n 398 arch/x86/xen/setup.c while (i < n) { n 400 arch/x86/xen/setup.c unsigned long left = n - i; n 678 arch/x86/xen/setup.c phys_addr_t n) n 683 arch/x86/xen/setup.c while (n) { n 686 arch/x86/xen/setup.c dest_len = n; n 689 arch/x86/xen/setup.c src_len = n; n 698 arch/x86/xen/setup.c n -= len; n 258 arch/xtensa/include/asm/atomic.h #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) n 83 arch/xtensa/include/asm/cmpxchg.h #define cmpxchg(ptr,o,n) \ n 85 arch/xtensa/include/asm/cmpxchg.h __typeof__(*(ptr)) _n_ = (n); \ n 110 arch/xtensa/include/asm/cmpxchg.h #define cmpxchg_local(ptr, o, n) \ n 112 arch/xtensa/include/asm/cmpxchg.h (unsigned long)(n), sizeof(*(ptr)))) n 113 arch/xtensa/include/asm/cmpxchg.h #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) n 114 arch/xtensa/include/asm/cmpxchg.h #define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) n 117 arch/xtensa/include/asm/coprocessor.h #define __REG2_0(n,s,a) __u32 name; n 118 arch/xtensa/include/asm/coprocessor.h #define __REG2_1(n,s,a) unsigned char n[s] __attribute__ ((aligned(a))); n 119 
arch/xtensa/include/asm/coprocessor.h #define __REG2_2(n,s,a) unsigned char n[s] __attribute__ ((aligned(a))); n 65 arch/xtensa/include/asm/delay.h #define ndelay(n) ndelay(n) n 25 arch/xtensa/include/asm/ftrace.h #define ftrace_return_address(n) return_address(n) n 133 arch/xtensa/include/asm/string.h #define memset(s, c, n) __memset(s, c, n) n 248 arch/xtensa/include/asm/uaccess.h extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n); n 251 arch/xtensa/include/asm/uaccess.h raw_copy_from_user(void *to, const void __user *from, unsigned long n) n 254 arch/xtensa/include/asm/uaccess.h return __xtensa_copy_user(to, (__force const void *)from, n); n 257 arch/xtensa/include/asm/uaccess.h raw_copy_to_user(void __user *to, const void *from, unsigned long n) n 260 arch/xtensa/include/asm/uaccess.h return __xtensa_copy_user((__force void *)to, from, n); n 17 arch/xtensa/include/uapi/asm/setup.h extern void set_except_vector(int n, void *addr); n 133 arch/xtensa/kernel/hw_breakpoint.c static int alloc_slot(struct perf_event **slot, size_t n, n 138 arch/xtensa/kernel/hw_breakpoint.c for (i = 0; i < n; ++i) { n 192 arch/xtensa/kernel/hw_breakpoint.c static int free_slot(struct perf_event **slot, size_t n, n 197 arch/xtensa/kernel/hw_breakpoint.c for (i = 0; i < n; ++i) { n 198 arch/xtensa/kernel/ptrace.c .n = sizeof(struct user_pt_regs) / sizeof(u32), n 206 arch/xtensa/kernel/ptrace.c .n = sizeof(elf_xtregs_t) / sizeof(u32), n 218 arch/xtensa/kernel/ptrace.c .n = ARRAY_SIZE(xtensa_regsets) n 58 arch/xtensa/kernel/s32c1i_selftest.c int n, cause1, cause2; n 72 arch/xtensa/kernel/s32c1i_selftest.c n = probed_compare_swap(&rcw_word, 0, 2); n 78 arch/xtensa/kernel/s32c1i_selftest.c if (n != 2 || rcw_word != 1) n 80 arch/xtensa/kernel/s32c1i_selftest.c } else if (rcw_word != 1 || n != 1) { n 87 arch/xtensa/kernel/s32c1i_selftest.c n = probed_compare_swap(&rcw_word, 0x1234567, 0xabcde); n 92 arch/xtensa/kernel/s32c1i_selftest.c if (n != 0xabcde || rcw_word != 0x1234567) n 94 arch/xtensa/kernel/s32c1i_selftest.c } else if (rcw_word != 0xabcde || n != 0x1234567) { n 613 arch/xtensa/platforms/iss/network.c unsigned n; n 621 arch/xtensa/platforms/iss/network.c rc = kstrtouint(str, 0, &n); n 633 arch/xtensa/platforms/iss/network.c if (device->index == n) n 639 arch/xtensa/platforms/iss/network.c if (device && device->index == n) { n 640 arch/xtensa/platforms/iss/network.c pr_err("Device %u already configured\n", n); n 651 arch/xtensa/platforms/iss/network.c new->index = n; n 217 arch/xtensa/platforms/iss/simdisk.c ssize_t n = simple_read_from_buffer(buf, size, ppos, n 219 arch/xtensa/platforms/iss/simdisk.c if (n < 0) n 220 arch/xtensa/platforms/iss/simdisk.c return n; n 221 arch/xtensa/platforms/iss/simdisk.c buf += n; n 222 arch/xtensa/platforms/iss/simdisk.c size -= n; n 561 block/bfq-iosched.c struct rb_node **n; n 571 block/bfq-iosched.c n = &(*p)->rb_right; n 573 block/bfq-iosched.c n = &(*p)->rb_left; n 576 block/bfq-iosched.c p = n; n 1071 block/bfq-iosched.c struct hlist_node *n; n 1073 block/bfq-iosched.c hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node) n 1098 block/bfq-iosched.c struct hlist_node *n; n 1122 block/bfq-iosched.c hlist_for_each_entry_safe(pos, n, &bfqd->burst_list, n 4826 block/bfq-iosched.c struct hlist_node *n; n 4889 block/bfq-iosched.c hlist_for_each_entry_safe(item, n, &bfqq->woken_list, n 6381 block/bfq-iosched.c struct bfq_queue *bfqq, *n; n 6386 block/bfq-iosched.c list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) n 36 
block/bio.c #define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n } n 1425 block/bio.c unsigned int n = PAGE_SIZE - offs; n 1428 block/bio.c if (n > bytes) n 1429 block/bio.c n = bytes; n 1431 block/bio.c if (!__bio_add_pc_page(q, bio, page, n, offs, n 1438 block/bio.c added += n; n 1439 block/bio.c bytes -= n; n 447 block/blk-cgroup.c struct blkcg_gq *blkg, *n; n 450 block/blk-cgroup.c list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) { n 211 block/blk-flush.c struct request *rq, *n; n 244 block/blk-flush.c list_for_each_entry_safe(rq, n, running, flush.list) { n 773 block/blk-sysfs.c int n) n 620 block/blk-throttle.c struct rb_node *n; n 625 block/blk-throttle.c n = rb_first_cached(&parent_sq->pending_tree); n 626 block/blk-throttle.c WARN_ON_ONCE(!n); n 627 block/blk-throttle.c if (!n) n 629 block/blk-throttle.c return rb_entry_tg(n); n 632 block/blk-throttle.c static void throtl_rb_erase(struct rb_node *n, n 635 block/blk-throttle.c rb_erase_cached(n, &parent_sq->pending_tree); n 636 block/blk-throttle.c RB_CLEAR_NODE(n); n 126 block/blk-zoned.c unsigned int z = 0, n, nrz = *nr_zones; n 131 block/blk-zoned.c n = nrz - z; n 132 block/blk-zoned.c ret = disk->fops->report_zones(disk, sector, &zones[z], &n); n 135 block/blk-zoned.c if (!n) n 137 block/blk-zoned.c sector += blk_queue_zone_sectors(q) * n; n 138 block/blk-zoned.c z += n; n 189 block/compat_ioctl.c compat_int_t n; n 192 block/compat_ioctl.c err = get_user(n, &ua32->op); n 193 block/compat_ioctl.c err |= put_user(n, &a->op); n 194 block/compat_ioctl.c err |= get_user(n, &ua32->flags); n 195 block/compat_ioctl.c err |= put_user(n, &a->flags); n 196 block/compat_ioctl.c err |= get_user(n, &ua32->datalen); n 197 block/compat_ioctl.c err |= put_user(n, &a->datalen); n 286 block/elevator.c struct rb_node *n = root->rb_node; n 289 block/elevator.c while (n) { n 290 block/elevator.c rq = rb_entry(n, struct request, rb_node); n 293 block/elevator.c n = n->rb_left; n 295 block/elevator.c n = n->rb_right; n 356 block/genhd.c struct blk_major_name **n, *p; n 397 block/genhd.c for (n = &major_names[index]; *n; n = &(*n)->next) { n 398 block/genhd.c if ((*n)->major == major) n 401 block/genhd.c if (!*n) n 402 block/genhd.c *n = p; n 420 block/genhd.c struct blk_major_name **n; n 425 block/genhd.c for (n = &major_names[index]; *n; n = &(*n)->next) n 426 block/genhd.c if ((*n)->major == major) n 428 block/genhd.c if (!*n || strcmp((*n)->name, name)) { n 431 block/genhd.c p = *n; n 432 block/genhd.c *n = p->next; n 1226 block/genhd.c static umode_t disk_visible(struct kobject *kobj, struct attribute *a, int n) n 445 block/ioctl.c int ret, n; n 453 block/ioctl.c if (get_user(n, (int __user *)arg)) n 455 block/ioctl.c set_device_ro(bdev, n); n 489 block/ioctl.c int ret, n; n 495 block/ioctl.c if (get_user(n, argp)) n 504 block/ioctl.c ret = set_blocksize(bdev, n); n 662 block/partition-generic.c unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p) n 667 block/partition-generic.c page = read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_SHIFT-9)), NULL); n 672 block/partition-generic.c return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_SHIFT - 9)) - 1)) << 9); n 187 block/partitions/aix.c struct lvname *n = NULL; n 226 block/partitions/aix.c n = alloc_lvn(state, vgda_sector + vgda_len - 33); n 227 block/partitions/aix.c if (n) { n 277 block/partitions/aix.c n[lv_ix].name); n 287 block/partitions/aix.c char tmp[sizeof(n[i].name) + 1]; // null char n 289 block/partitions/aix.c snprintf(tmp, sizeof(tmp), 
"%s", n[i].name); n 296 block/partitions/aix.c kfree(n); n 32 block/partitions/check.h sector_t n, Sector *p) n 34 block/partitions/check.h if (n >= get_capacity(state->bdev->bd_disk)) { n 38 block/partitions/check.h return read_dev_sector(state->bdev, n, p); n 42 block/partitions/check.h put_partition(struct parsed_partitions *p, int n, sector_t from, sector_t size) n 44 block/partitions/check.h if (n < p->limit) { n 47 block/partitions/check.h p->parts[n].from = from; n 48 block/partitions/check.h p->parts[n].size = size; n 49 block/partitions/check.h snprintf(tmp, sizeof(tmp), " %s%d", p->name, n); n 242 block/partitions/efi.c sector_t n = lba * (bdev_logical_block_size(bdev) / 512); n 250 block/partitions/efi.c unsigned char *data = read_part_sector(state, n++, §); n 546 block/partitions/msdos.c sector_t n = 2; n 548 block/partitions/msdos.c n = min(size, max(sector_size, n)); n 549 block/partitions/msdos.c put_partition(state, slot, start, n); n 572 block/partitions/msdos.c int n; n 577 block/partitions/msdos.c for (n = 0; subtypes[n].parse && id != subtypes[n].id; n++) n 580 block/partitions/msdos.c if (!subtypes[n].parse) n 582 block/partitions/msdos.c subtypes[n].parse(state, start_sect(p) * sector_size, n 730 block/sed-opal.c int n) n 739 block/sed-opal.c if (n >= resp->num) { n 741 block/sed-opal.c n, resp->num); n 745 block/sed-opal.c tok = &resp->toks[n]; n 913 block/sed-opal.c static size_t response_get_string(const struct parsed_resp *resp, int n, n 920 block/sed-opal.c tok = response_get_token(resp, n); n 950 block/sed-opal.c static u64 response_get_u64(const struct parsed_resp *resp, int n) n 954 block/sed-opal.c tok = response_get_token(resp, n); n 28 certs/blacklist.c int n = 0; n 42 certs/blacklist.c n++; n 45 certs/blacklist.c if (n == 0 || n & 1) n 70 crypto/ablkcipher.c unsigned int n) n 75 crypto/ablkcipher.c if (len_this_page > n) n 76 crypto/ablkcipher.c len_this_page = n; n 77 crypto/ablkcipher.c scatterwalk_advance(&walk->out, n); n 78 crypto/ablkcipher.c if (n == len_this_page) n 80 crypto/ablkcipher.c n -= len_this_page; n 86 crypto/ablkcipher.c unsigned int n) n 88 crypto/ablkcipher.c scatterwalk_advance(&walk->in, n); n 89 crypto/ablkcipher.c scatterwalk_advance(&walk->out, n); n 99 crypto/ablkcipher.c unsigned int n; /* bytes processed */ n 105 crypto/ablkcipher.c n = walk->nbytes - err; n 106 crypto/ablkcipher.c walk->total -= n; n 110 crypto/ablkcipher.c ablkcipher_done_fast(walk, n); n 117 crypto/ablkcipher.c ablkcipher_done_slow(walk, n); n 146 crypto/ablkcipher.c unsigned int n; n 148 crypto/ablkcipher.c n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1); n 149 crypto/ablkcipher.c n += (aligned_bsize * 3 - (alignmask + 1) + n 152 crypto/ablkcipher.c p = kmalloc(n, GFP_ATOMIC); n 217 crypto/ablkcipher.c unsigned int alignmask, bsize, n; n 222 crypto/ablkcipher.c n = walk->total; n 223 crypto/ablkcipher.c if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) { n 231 crypto/ablkcipher.c bsize = min(walk->blocksize, n); n 232 crypto/ablkcipher.c n = scatterwalk_clamp(&walk->in, n); n 233 crypto/ablkcipher.c n = scatterwalk_clamp(&walk->out, n); n 235 crypto/ablkcipher.c if (n < bsize || n 243 crypto/ablkcipher.c walk->nbytes = n; n 264 crypto/adiantum.c unsigned int i, n; n 275 crypto/adiantum.c for (i = 0; i < bulk_len; i += n) { n 277 crypto/adiantum.c n = min_t(unsigned int, miter.length, bulk_len - i); n 278 crypto/adiantum.c err = crypto_shash_update(hash_desc, miter.addr, n); n 22 crypto/aegis128-neon-inner.c void *memcpy(void *dest, const void 
*src, size_t n); n 23 crypto/aegis128-neon-inner.c void *memset(void *s, int c, size_t n); n 59 crypto/aes_generic.c static inline u8 byte(const u32 x, const unsigned n) n 61 crypto/aes_generic.c return x >> (n << 3); n 1153 crypto/aes_generic.c #define f_rn(bo, bi, n, k) do { \ n 1154 crypto/aes_generic.c bo[n] = crypto_ft_tab[0][byte(bi[n], 0)] ^ \ n 1155 crypto/aes_generic.c crypto_ft_tab[1][byte(bi[(n + 1) & 3], 1)] ^ \ n 1156 crypto/aes_generic.c crypto_ft_tab[2][byte(bi[(n + 2) & 3], 2)] ^ \ n 1157 crypto/aes_generic.c crypto_ft_tab[3][byte(bi[(n + 3) & 3], 3)] ^ *(k + n); \ n 1168 crypto/aes_generic.c #define f_rl(bo, bi, n, k) do { \ n 1169 crypto/aes_generic.c bo[n] = crypto_fl_tab[0][byte(bi[n], 0)] ^ \ n 1170 crypto/aes_generic.c crypto_fl_tab[1][byte(bi[(n + 1) & 3], 1)] ^ \ n 1171 crypto/aes_generic.c crypto_fl_tab[2][byte(bi[(n + 2) & 3], 2)] ^ \ n 1172 crypto/aes_generic.c crypto_fl_tab[3][byte(bi[(n + 3) & 3], 3)] ^ *(k + n); \ n 1223 crypto/aes_generic.c #define i_rn(bo, bi, n, k) do { \ n 1224 crypto/aes_generic.c bo[n] = crypto_it_tab[0][byte(bi[n], 0)] ^ \ n 1225 crypto/aes_generic.c crypto_it_tab[1][byte(bi[(n + 3) & 3], 1)] ^ \ n 1226 crypto/aes_generic.c crypto_it_tab[2][byte(bi[(n + 2) & 3], 2)] ^ \ n 1227 crypto/aes_generic.c crypto_it_tab[3][byte(bi[(n + 1) & 3], 3)] ^ *(k + n); \ n 1238 crypto/aes_generic.c #define i_rl(bo, bi, n, k) do { \ n 1239 crypto/aes_generic.c bo[n] = crypto_il_tab[0][byte(bi[n], 0)] ^ \ n 1240 crypto/aes_generic.c crypto_il_tab[1][byte(bi[(n + 3) & 3], 1)] ^ \ n 1241 crypto/aes_generic.c crypto_il_tab[2][byte(bi[(n + 2) & 3], 2)] ^ \ n 1242 crypto/aes_generic.c crypto_il_tab[3][byte(bi[(n + 1) & 3], 3)] ^ *(k + n); \ n 396 crypto/af_alg.c ssize_t n; n 399 crypto/af_alg.c n = iov_iter_get_pages(iter, sgl->pages, len, ALG_MAX_PAGES, &off); n 400 crypto/af_alg.c if (n < 0) n 401 crypto/af_alg.c return n; n 403 crypto/af_alg.c npages = (off + n + PAGE_SIZE - 1) >> PAGE_SHIFT; n 409 crypto/af_alg.c for (i = 0, len = n; i < npages; i++) { n 420 crypto/af_alg.c return n; n 90 crypto/algapi.c struct crypto_spawn *spawn, *n; n 96 crypto/algapi.c n = list_next_entry(spawn, list); n 98 crypto/algapi.c if (spawn->alg && &n->list != stack && !n->alg) n 99 crypto/algapi.c n->alg = (n->list.next == stack) ? alg : n 100 crypto/algapi.c &list_next_entry(n, list)->inst->alg; n 104 crypto/algapi.c return &n->list == stack ? top : &n->inst->alg.cra_users; n 133 crypto/algapi.c struct crypto_spawn *spawn, *n; n 140 crypto/algapi.c list_for_each_entry_safe(spawn, n, spawns, list) { n 181 crypto/algapi.c list_for_each_entry_safe(spawn, n, &secondary_spawns, list) { n 354 crypto/algapi.c struct crypto_alg *n; n 356 crypto/algapi.c list_for_each_entry_safe(alg, n, list, cra_list) { n 517 crypto/algapi.c struct hlist_node *n; n 535 crypto/algapi.c hlist_for_each_entry_safe(inst, n, list, list) { n 336 crypto/asymmetric_keys/asymmetric_type.c int n; n 347 crypto/asymmetric_keys/asymmetric_type.c n = kid->len; n 349 crypto/asymmetric_keys/asymmetric_type.c if (n > 4) { n 350 crypto/asymmetric_keys/asymmetric_type.c p += n - 4; n 351 crypto/asymmetric_keys/asymmetric_type.c n = 4; n 353 crypto/asymmetric_keys/asymmetric_type.c seq_printf(m, "%*phN", n, p); n 68 crypto/asymmetric_keys/public_key.c int n; n 76 crypto/asymmetric_keys/public_key.c n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, n 80 crypto/asymmetric_keys/public_key.c n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, n 83 crypto/asymmetric_keys/public_key.c return n >= CRYPTO_MAX_ALG_NAME ? 
-EINVAL : 0; n 79 crypto/blkcipher.c unsigned int n) n 83 crypto/blkcipher.c memcpy(walk->dst.virt.addr, walk->page, n); n 91 crypto/blkcipher.c scatterwalk_advance(&walk->in, n); n 92 crypto/blkcipher.c scatterwalk_advance(&walk->out, n); n 98 crypto/blkcipher.c unsigned int n; /* bytes processed */ n 104 crypto/blkcipher.c n = walk->nbytes - err; n 105 crypto/blkcipher.c walk->total -= n; n 109 crypto/blkcipher.c blkcipher_done_fast(walk, n); n 116 crypto/blkcipher.c blkcipher_done_slow(walk, n); n 144 crypto/blkcipher.c unsigned int n; n 154 crypto/blkcipher.c n = aligned_bsize * 3 - (alignmask + 1) + n 156 crypto/blkcipher.c walk->buffer = kmalloc(n, GFP_ATOMIC); n 220 crypto/blkcipher.c unsigned int n; n 223 crypto/blkcipher.c n = walk->total; n 224 crypto/blkcipher.c if (unlikely(n < walk->cipher_blocksize)) { n 229 crypto/blkcipher.c bsize = min(walk->walk_blocksize, n); n 239 crypto/blkcipher.c n = 0; n 243 crypto/blkcipher.c n = scatterwalk_clamp(&walk->in, n); n 244 crypto/blkcipher.c n = scatterwalk_clamp(&walk->out, n); n 246 crypto/blkcipher.c if (unlikely(n < bsize)) { n 251 crypto/blkcipher.c walk->nbytes = n; n 304 crypto/blowfish_common.c #define ROUND(a, b, n) ({ b ^= P[n]; a ^= bf_F(b); }) n 34 crypto/blowfish_generic.c #define ROUND(a, b, n) ({ b ^= P[n]; a ^= bf_F(b); }) n 1157 crypto/ecc.c carry = vli_add(sk[0], scalar, curve->n, ndigits); n 1158 crypto/ecc.c vli_add(sk[1], sk[0], curve->n, ndigits); n 1309 crypto/ecc.c vli_sub(res, curve->n, one, ndigits); n 1349 crypto/ecc.c unsigned int nbits = vli_num_bits(curve->n, ndigits); n 68 crypto/ecc.h u64 *n; n 26 crypto/ecc_curve_defs.h .n = nist_p192_n, n 52 crypto/ecc_curve_defs.h .n = nist_p256_n, n 115 crypto/ecrdsa.c vli_cmp(r, ctx->curve->n, ndigits) == 1 || n 117 crypto/ecrdsa.c vli_cmp(s, ctx->curve->n, ndigits) == 1) n 123 crypto/ecrdsa.c if (vli_cmp(e, ctx->curve->n, ndigits) == 1) n 124 crypto/ecrdsa.c vli_sub(e, e, ctx->curve->n, ndigits); n 129 crypto/ecrdsa.c vli_mod_inv(v, e, ctx->curve->n, ndigits); n 132 crypto/ecrdsa.c vli_mod_mult_slow(z1, s, v, ctx->curve->n, ndigits); n 133 crypto/ecrdsa.c vli_sub(_r, ctx->curve->n, r, ndigits); n 134 crypto/ecrdsa.c vli_mod_mult_slow(z2, _r, v, ctx->curve->n, ndigits); n 139 crypto/ecrdsa.c if (vli_cmp(cc.x, ctx->curve->n, ndigits) == 1) n 140 crypto/ecrdsa.c vli_sub(cc.x, cc.x, ctx->curve->n, ndigits); n 56 crypto/ecrdsa_defs.h .n = cp256a_n, n 89 crypto/ecrdsa_defs.h .n = cp256b_n, n 126 crypto/ecrdsa_defs.h .n = cp256c_n, n 175 crypto/ecrdsa_defs.h .n = tc512a_n, n 220 crypto/ecrdsa_defs.h .n = tc512b_n, n 58 crypto/fcrypt.c #define ror56(hi, lo, n) \ n 60 crypto/fcrypt.c u32 t = lo & ((1 << n) - 1); \ n 61 crypto/fcrypt.c lo = (lo >> n) | ((hi & ((1 << n) - 1)) << (32 - n)); \ n 62 crypto/fcrypt.c hi = (hi >> n) | (t << (24-n)); \ n 66 crypto/fcrypt.c #define ror56_64(k, n) \ n 68 crypto/fcrypt.c k = (k >> n) | ((k & ((1 << n) - 1)) << (56 - n)); \ n 88 crypto/ghash-generic.c int n = min(srclen, dctx->bytes); n 91 crypto/ghash-generic.c dctx->bytes -= n; n 92 crypto/ghash-generic.c srclen -= n; n 94 crypto/ghash-generic.c while (n--) n 79 crypto/jitterentropy-kcapi.c void jent_memcpy(void *dest, const void *src, unsigned int n) n 81 crypto/jitterentropy-kcapi.c memcpy(dest, src, n); n 111 crypto/jitterentropy.c void jent_memcpy(void *dest, const void *src, unsigned int n); n 27 crypto/poly1305_generic.c static inline u32 sr(u64 v, u_char n) n 29 crypto/poly1305_generic.c return v >> n; n 16 crypto/rsa.c MPI n; n 28 crypto/rsa.c if (mpi_cmp_ui(m, 0) < 0 || 
mpi_cmp(m, key->n) >= 0) n 32 crypto/rsa.c return mpi_powm(c, m, key->e, key->n); n 42 crypto/rsa.c if (mpi_cmp_ui(c, 0) < 0 || mpi_cmp(c, key->n) >= 0) n 46 crypto/rsa.c return mpi_powm(m, c, key->d, key->n); n 65 crypto/rsa.c if (unlikely(!pkey->n || !pkey->e)) { n 104 crypto/rsa.c if (unlikely(!pkey->n || !pkey->d)) { n 135 crypto/rsa.c mpi_free(key->n); n 138 crypto/rsa.c key->n = NULL; n 174 crypto/rsa.c mpi_key->n = mpi_read_raw_data(raw_key.n, raw_key.n_sz); n 175 crypto/rsa.c if (!mpi_key->n) n 178 crypto/rsa.c if (rsa_check_key_length(mpi_get_size(mpi_key->n) << 3)) { n 212 crypto/rsa.c mpi_key->n = mpi_read_raw_data(raw_key.n, raw_key.n_sz); n 213 crypto/rsa.c if (!mpi_key->n) n 216 crypto/rsa.c if (rsa_check_key_length(mpi_get_size(mpi_key->n) << 3)) { n 232 crypto/rsa.c return mpi_get_size(pkey->n); n 40 crypto/rsa_helper.c key->n = value; n 27 crypto/seed.c byte(const u32 x, const unsigned n) n 29 crypto/seed.c return x >> (n << 3); n 106 crypto/skcipher.c unsigned int n = walk->nbytes; n 109 crypto/skcipher.c if (!n) n 113 crypto/skcipher.c n -= err; n 114 crypto/skcipher.c nbytes = walk->total - n; n 128 crypto/skcipher.c memcpy(walk->dst.virt.addr, walk->page, n); n 141 crypto/skcipher.c n = skcipher_done_slow(walk, n); n 150 crypto/skcipher.c scatterwalk_advance(&walk->in, n); n 151 crypto/skcipher.c scatterwalk_advance(&walk->out, n); n 230 crypto/skcipher.c unsigned n; n 244 crypto/skcipher.c n = bsize; n 249 crypto/skcipher.c n += sizeof(*p); n 253 crypto/skcipher.c n += alignmask & ~a; n 256 crypto/skcipher.c n += (bsize - 1) & ~(alignmask | a); n 258 crypto/skcipher.c v = kzalloc(n, skcipher_walk_gfp(walk)); n 346 crypto/skcipher.c unsigned int n; n 352 crypto/skcipher.c n = walk->total; n 353 crypto/skcipher.c bsize = min(walk->stride, max(n, walk->blocksize)); n 354 crypto/skcipher.c n = scatterwalk_clamp(&walk->in, n); n 355 crypto/skcipher.c n = scatterwalk_clamp(&walk->out, n); n 357 crypto/skcipher.c if (unlikely(n < bsize)) { n 375 crypto/skcipher.c walk->nbytes = min_t(unsigned, n, n 382 crypto/skcipher.c walk->nbytes = n; n 39 crypto/sm3_generic.c static inline u32 ff(unsigned int n, u32 a, u32 b, u32 c) n 41 crypto/sm3_generic.c return (n < 16) ? (a ^ b ^ c) : ((a & b) | (a & c) | (b & c)); n 44 crypto/sm3_generic.c static inline u32 gg(unsigned int n, u32 e, u32 f, u32 g) n 46 crypto/sm3_generic.c return (n < 16) ? (e ^ f ^ g) : ((e & f) | ((~e) & g)); n 49 crypto/sm3_generic.c static inline u32 t(unsigned int n) n 51 crypto/sm3_generic.c return (n < 16) ? 
SM3_T1 : SM3_T2; n 59 crypto/tea.c u32 y, z, n, sum = 0; n 73 crypto/tea.c n = TEA_ROUNDS; n 75 crypto/tea.c while (n-- > 0) { n 87 crypto/tea.c u32 y, z, n, sum; n 103 crypto/tea.c n = TEA_ROUNDS; n 105 crypto/tea.c while (n-- > 0) { n 535 crypto/twofish_common.c #define CALC_K(a, j, k, l, m, n) \ n 537 crypto/twofish_common.c y = CALC_K_2 (m, n, m, n, 4); \ n 548 crypto/twofish_common.c #define CALC_K192(a, j, k, l, m, n) \ n 550 crypto/twofish_common.c y = CALC_K192_2 (n, n, m, m, 4); \ n 561 crypto/twofish_common.c #define CALC_K256(a, j, k, l, m, n) \ n 563 crypto/twofish_common.c y = CALC_K256_2 (m, n, 4); \ n 53 crypto/twofish_generic.c #define ENCROUND(n, a, b, c, d) \ n 55 crypto/twofish_generic.c x += y; y += x + ctx->k[2 * (n) + 1]; \ n 56 crypto/twofish_generic.c (c) ^= x + ctx->k[2 * (n)]; \ n 60 crypto/twofish_generic.c #define DECROUND(n, a, b, c, d) \ n 63 crypto/twofish_generic.c (d) ^= y + ctx->k[2 * (n) + 1]; \ n 66 crypto/twofish_generic.c (c) ^= (x + ctx->k[2 * (n)]) n 71 crypto/twofish_generic.c #define ENCCYCLE(n) \ n 72 crypto/twofish_generic.c ENCROUND (2 * (n), a, b, c, d); \ n 73 crypto/twofish_generic.c ENCROUND (2 * (n) + 1, c, d, a, b) n 75 crypto/twofish_generic.c #define DECCYCLE(n) \ n 76 crypto/twofish_generic.c DECROUND (2 * (n) + 1, c, d, a, b); \ n 77 crypto/twofish_generic.c DECROUND (2 * (n), a, b, c, d) n 85 crypto/twofish_generic.c #define INPACK(n, x, m) \ n 86 crypto/twofish_generic.c x = le32_to_cpu(src[n]) ^ ctx->w[m] n 88 crypto/twofish_generic.c #define OUTUNPACK(n, x, m) \ n 90 crypto/twofish_generic.c dst[n] = cpu_to_le32(x) n 497 crypto/vmac.c unsigned int n; n 501 crypto/vmac.c n = min(len, VMAC_NONCEBYTES - dctx->nonce_size); n 502 crypto/vmac.c memcpy(&dctx->nonce.bytes[dctx->nonce_size], p, n); n 503 crypto/vmac.c dctx->nonce_size += n; n 504 crypto/vmac.c p += n; n 505 crypto/vmac.c len -= n; n 509 crypto/vmac.c n = min(len, VMAC_NHBYTES - dctx->partial_size); n 510 crypto/vmac.c memcpy(&dctx->partial[dctx->partial_size], p, n); n 511 crypto/vmac.c dctx->partial_size += n; n 512 crypto/vmac.c p += n; n 513 crypto/vmac.c len -= n; n 521 crypto/vmac.c n = round_down(len, VMAC_NHBYTES); n 523 crypto/vmac.c vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES); n 524 crypto/vmac.c p += n; n 525 crypto/vmac.c len -= n; n 546 crypto/vmac.c unsigned int n = round_up(partial, 16); n 549 crypto/vmac.c memset(&dctx->partial[partial], 0, n - partial); n 550 crypto/vmac.c nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl); n 261 drivers/acpi/acpi_dbg.c int n; n 270 drivers/acpi/acpi_dbg.c n = min(len, circ_space_to_end(crc)); n 271 drivers/acpi/acpi_dbg.c memcpy(p, buf, n); n 274 drivers/acpi/acpi_dbg.c crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1); n 276 drivers/acpi/acpi_dbg.c return n; n 583 drivers/acpi/acpi_dbg.c int n; n 592 drivers/acpi/acpi_dbg.c n = min(len, circ_count_to_end(crc)); n 593 drivers/acpi/acpi_dbg.c if (copy_to_user(buf, p, n)) { n 599 drivers/acpi/acpi_dbg.c crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1); n 600 drivers/acpi/acpi_dbg.c ret = n; n 653 drivers/acpi/acpi_dbg.c int n; n 662 drivers/acpi/acpi_dbg.c n = min(len, circ_space_to_end(crc)); n 663 drivers/acpi/acpi_dbg.c if (copy_from_user(p, buf, n)) { n 669 drivers/acpi/acpi_dbg.c crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1); n 670 drivers/acpi/acpi_dbg.c ret = n; n 673 drivers/acpi/acpi_dbg.c return n; n 112 drivers/acpi/acpi_memhotplug.c struct acpi_memory_info *info, *n; n 114 drivers/acpi/acpi_memhotplug.c 
list_for_each_entry_safe(info, n, &mem_device->res_list, list) n 253 drivers/acpi/acpi_memhotplug.c struct acpi_memory_info *info, *n; n 256 drivers/acpi/acpi_memhotplug.c list_for_each_entry_safe(info, n, &mem_device->res_list, list) { n 425 drivers/acpi/acpica/acmacros.h #define ACPI_ERROR_METHOD(s, n, p, e) acpi_ut_method_error (AE_INFO, s, n, p, e); n 436 drivers/acpi/acpica/acmacros.h #define ACPI_ERROR_METHOD(s, n, p, e) n 1383 drivers/acpi/nfit/core.c static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n) n 1669 drivers/acpi/nfit/core.c struct attribute *a, int n) n 2276 drivers/acpi/nfit/core.c struct acpi_nfit_desc *acpi_desc, u16 range_index, int n) n 2282 drivers/acpi/nfit/core.c if (n-- == 0) n 101 drivers/acpi/numa.c int min_dist = INT_MAX, dist, n; n 103 drivers/acpi/numa.c for_each_online_node(n) { n 104 drivers/acpi/numa.c dist = node_distance(node, n); n 107 drivers/acpi/numa.c min_node = n; n 1493 drivers/acpi/osl.c int acpi_check_region(resource_size_t start, resource_size_t n, n 1498 drivers/acpi/osl.c .end = start + n - 1, n 245 drivers/acpi/pci_mcfg.c int i, n; n 250 drivers/acpi/pci_mcfg.c n = (header->length - sizeof(struct acpi_table_mcfg)) / n 255 drivers/acpi/pci_mcfg.c arr = kcalloc(n, sizeof(*arr), GFP_KERNEL); n 259 drivers/acpi/pci_mcfg.c for (i = 0, e = arr; i < n; i++, mptr++, e++) { n 274 drivers/acpi/pci_mcfg.c pr_info("MCFG table detected, %d entries\n", n); n 950 drivers/android/binder.c struct rb_node *n; n 953 drivers/android/binder.c for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { n 954 drivers/android/binder.c thread = rb_entry(n, struct binder_thread, rb_node); n 1067 drivers/android/binder.c struct rb_node *n = proc->nodes.rb_node; n 1072 drivers/android/binder.c while (n) { n 1073 drivers/android/binder.c node = rb_entry(n, struct binder_node, rb_node); n 1076 drivers/android/binder.c n = n->rb_left; n 1078 drivers/android/binder.c n = n->rb_right; n 1397 drivers/android/binder.c struct rb_node *n = proc->refs_by_desc.rb_node; n 1400 drivers/android/binder.c while (n) { n 1401 drivers/android/binder.c ref = rb_entry(n, struct binder_ref, rb_node_desc); n 1404 drivers/android/binder.c n = n->rb_left; n 1406 drivers/android/binder.c n = n->rb_right; n 1444 drivers/android/binder.c struct rb_node *n; n 1468 drivers/android/binder.c for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { n 1469 drivers/android/binder.c ref = rb_entry(n, struct binder_ref, rb_node_desc); n 4977 drivers/android/binder.c struct rb_node *n; n 4983 drivers/android/binder.c for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { n 4984 drivers/android/binder.c struct binder_node *node = rb_entry(n, struct binder_node, n 5307 drivers/android/binder.c struct rb_node *n; n 5311 drivers/android/binder.c for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { n 5312 drivers/android/binder.c struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); n 5412 drivers/android/binder.c struct rb_node *n; n 5444 drivers/android/binder.c while ((n = rb_first(&proc->threads))) { n 5447 drivers/android/binder.c thread = rb_entry(n, struct binder_thread, rb_node); n 5456 drivers/android/binder.c while ((n = rb_first(&proc->nodes))) { n 5459 drivers/android/binder.c node = rb_entry(n, struct binder_node, rb_node); n 5476 drivers/android/binder.c while ((n = rb_first(&proc->refs_by_desc))) { n 5479 drivers/android/binder.c ref = rb_entry(n, struct binder_ref, rb_node_desc); n 5710 drivers/android/binder.c 
struct rb_node *n; n 5720 drivers/android/binder.c for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) n 5721 drivers/android/binder.c print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread, n 5724 drivers/android/binder.c for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { n 5725 drivers/android/binder.c struct binder_node *node = rb_entry(n, struct binder_node, n 5752 drivers/android/binder.c for (n = rb_first(&proc->refs_by_desc); n 5753 drivers/android/binder.c n != NULL; n 5754 drivers/android/binder.c n = rb_next(n)) n 5755 drivers/android/binder.c print_binder_ref_olocked(m, rb_entry(n, n 5874 drivers/android/binder.c struct rb_node *n; n 5884 drivers/android/binder.c for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) n 5898 drivers/android/binder.c for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) n 5906 drivers/android/binder.c for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { n 5907 drivers/android/binder.c struct binder_ref *ref = rb_entry(n, struct binder_ref, n 130 drivers/android/binder_alloc.c struct rb_node *n = alloc->allocated_buffers.rb_node; n 136 drivers/android/binder_alloc.c while (n) { n 137 drivers/android/binder_alloc.c buffer = rb_entry(n, struct binder_buffer, rb_node); n 141 drivers/android/binder_alloc.c n = n->rb_left; n 143 drivers/android/binder_alloc.c n = n->rb_right; n 349 drivers/android/binder_alloc.c struct rb_node *n = alloc->free_buffers.rb_node; n 392 drivers/android/binder_alloc.c while (n) { n 393 drivers/android/binder_alloc.c buffer = rb_entry(n, struct binder_buffer, rb_node); n 398 drivers/android/binder_alloc.c best_fit = n; n 399 drivers/android/binder_alloc.c n = n->rb_left; n 401 drivers/android/binder_alloc.c n = n->rb_right; n 403 drivers/android/binder_alloc.c best_fit = n; n 415 drivers/android/binder_alloc.c for (n = rb_first(&alloc->allocated_buffers); n != NULL; n 416 drivers/android/binder_alloc.c n = rb_next(n)) { n 417 drivers/android/binder_alloc.c buffer = rb_entry(n, struct binder_buffer, rb_node); n 424 drivers/android/binder_alloc.c for (n = rb_first(&alloc->free_buffers); n != NULL; n 425 drivers/android/binder_alloc.c n = rb_next(n)) { n 426 drivers/android/binder_alloc.c buffer = rb_entry(n, struct binder_buffer, rb_node); n 443 drivers/android/binder_alloc.c if (n == NULL) { n 454 drivers/android/binder_alloc.c WARN_ON(n && buffer_size != size); n 742 drivers/android/binder_alloc.c struct rb_node *n; n 750 drivers/android/binder_alloc.c while ((n = rb_first(&alloc->allocated_buffers))) { n 751 drivers/android/binder_alloc.c buffer = rb_entry(n, struct binder_buffer, rb_node); n 823 drivers/android/binder_alloc.c struct rb_node *n; n 826 drivers/android/binder_alloc.c for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) n 828 drivers/android/binder_alloc.c rb_entry(n, struct binder_buffer, rb_node)); n 875 drivers/android/binder_alloc.c struct rb_node *n; n 879 drivers/android/binder_alloc.c for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) n 2469 drivers/ata/libata-scsi.c static void modecpy(u8 *dest, const u8 *src, int n, bool changeable) n 2473 drivers/ata/libata-scsi.c memset(dest + 2, 0, n - 2); n 2475 drivers/ata/libata-scsi.c memcpy(dest, src, n); n 603 drivers/ata/pata_it821x.c static void it821x_display_disk(int n, u8 *buf) n 637 drivers/ata/pata_it821x.c n, mbuf, types[buf[52]], id, cbl); n 640 drivers/ata/pata_it821x.c n, mbuf, types[buf[52]], buf[53], id, cbl); n 642 drivers/ata/pata_it821x.c 
printk(KERN_INFO "%d: Rebuilding: %d%%\n", n, buf[125]); n 659 drivers/ata/pata_it821x.c int n = 0; n 675 drivers/ata/pata_it821x.c while(n++ < 10) { n 614 drivers/ata/sata_sil.c unsigned int n, quirks = 0; n 622 drivers/ata/sata_sil.c for (n = 0; sil_blacklist[n].product; n++) n 623 drivers/ata/sata_sil.c if (!strcmp(sil_blacklist[n].product, model_num)) { n 624 drivers/ata/sata_sil.c quirks = sil_blacklist[n].quirk; n 125 drivers/atm/eni.c int n,i; n 127 drivers/atm/eni.c for (n = 0; n < EV; n++) { n 128 drivers/atm/eni.c i = (ec+n) % EV; n 801 drivers/atm/firestream.c pe->fp->n--; n 822 drivers/atm/firestream.c pe->fp->n--; n 1457 drivers/atm/firestream.c return fp->n; n 1471 drivers/atm/firestream.c int n = 0; n 1475 drivers/atm/firestream.c fp->offset, read_fs (dev, FP_CNT (fp->offset)), fp->n, n 1492 drivers/atm/firestream.c n++; n 1516 drivers/atm/firestream.c fp->n++; /* XXX Atomic_inc? */ n 1520 drivers/atm/firestream.c fs_dprintk (FS_DEBUG_QUEUE, "Added %d entries. \n", n); n 450 drivers/atm/firestream.h int n; n 1594 drivers/atm/idt77252.c int n, unsigned int opc) n 1614 drivers/atm/idt77252.c r = n; n 1646 drivers/atm/idt77252.c cl += n; n 1653 drivers/atm/idt77252.c fill_tst(struct idt77252_dev *card, struct vc_map *vc, int n, unsigned int opc) n 1660 drivers/atm/idt77252.c res = __fill_tst(card, vc, n, opc); n 1715 drivers/atm/idt77252.c int n, unsigned int opc) n 1723 drivers/atm/idt77252.c res = __fill_tst(card, vc, n, opc); n 3113 drivers/atm/iphase.c int left = *pos, n; n 3118 drivers/atm/iphase.c n = sprintf(page, " Board Type : Iphase5525-1KVC-128K\n"); n 3119 drivers/atm/iphase.c return n; n 3122 drivers/atm/iphase.c n = sprintf(page, " Board Type : Iphase-ATM-DS3"); n 3124 drivers/atm/iphase.c n = sprintf(page, " Board Type : Iphase-ATM-E3"); n 3126 drivers/atm/iphase.c n = sprintf(page, " Board Type : Iphase-ATM-UTP155"); n 3128 drivers/atm/iphase.c n = sprintf(page, " Board Type : Iphase-ATM-OC3"); n 3129 drivers/atm/iphase.c tmpPtr = page + n; n 3131 drivers/atm/iphase.c n += sprintf(tmpPtr, "-1KVC-"); n 3133 drivers/atm/iphase.c n += sprintf(tmpPtr, "-4KVC-"); n 3134 drivers/atm/iphase.c tmpPtr = page + n; n 3136 drivers/atm/iphase.c n += sprintf(tmpPtr, "1M \n"); n 3138 drivers/atm/iphase.c n += sprintf(tmpPtr, "512K\n"); n 3140 drivers/atm/iphase.c n += sprintf(tmpPtr, "128K\n"); n 3141 drivers/atm/iphase.c return n; n 1198 drivers/atm/lanai.c const unsigned char *src, int n) n 1202 drivers/atm/lanai.c e = ((unsigned char *) lvcc->tx.buf.ptr) + n; n 1206 drivers/atm/lanai.c memcpy(lvcc->tx.buf.ptr, src, n - m); n 1208 drivers/atm/lanai.c memcpy(lvcc->tx.buf.start, src + n - m, m); n 1214 drivers/atm/lanai.c static inline void vcc_tx_memzero(struct lanai_vcc *lvcc, int n) n 1218 drivers/atm/lanai.c if (n == 0) n 1220 drivers/atm/lanai.c e = ((unsigned char *) lvcc->tx.buf.ptr) + n; n 1224 drivers/atm/lanai.c memset(lvcc->tx.buf.ptr, 0, n - m); n 1301 drivers/atm/lanai.c int n; n 1311 drivers/atm/lanai.c n = aal5_size(skb->len); n 1312 drivers/atm/lanai.c if (n + 16 > space) { n 1317 drivers/atm/lanai.c lanai_send_one_aal5(lanai, lvcc, skb, n); n 1318 drivers/atm/lanai.c space -= n + 16; n 1330 drivers/atm/lanai.c int space, n; n 1335 drivers/atm/lanai.c n = aal5_size(skb->len); n 1336 drivers/atm/lanai.c APRINTK(n + 16 >= 64, "vcc_tx_aal5: n too small (%d)\n", n); n 1337 drivers/atm/lanai.c if (space < n + 16) { /* No space for this PDU */ n 1343 drivers/atm/lanai.c lanai_send_one_aal5(lanai, lvcc, skb, n); n 1365 drivers/atm/lanai.c const struct lanai_vcc *lvcc, 
int n) n 1367 drivers/atm/lanai.c int m = ((const unsigned char *) lvcc->rx.buf.ptr) + n - n 1371 drivers/atm/lanai.c memcpy(dest, lvcc->rx.buf.ptr, n - m); n 1372 drivers/atm/lanai.c memcpy(dest + n - m, lvcc->rx.buf.start, m); n 1384 drivers/atm/lanai.c int n = ((unsigned long) end) - ((unsigned long) lvcc->rx.buf.ptr); n 1385 drivers/atm/lanai.c if (n < 0) n 1386 drivers/atm/lanai.c n += lanai_buf_size(&lvcc->rx.buf); n 1387 drivers/atm/lanai.c APRINTK(n >= 0 && n < lanai_buf_size(&lvcc->rx.buf) && !(n & 15), n 1389 drivers/atm/lanai.c n, lanai_buf_size(&lvcc->rx.buf)); n 1399 drivers/atm/lanai.c if (unlikely(n != aal5_size(size))) { n 1403 drivers/atm/lanai.c lvcc->rx.atmvcc->dev->number, lvcc->vci, size, n); n 131 drivers/atm/nicstar.c static void fill_tst(ns_dev * card, int n, vc_map * vc); n 1234 drivers/atm/nicstar.c int n = 0; /* Number of entries in the TST. Initialized to remove n 1299 drivers/atm/nicstar.c n = (int)(tmpl / card->max_pcr); n 1302 drivers/atm/nicstar.c n++; n 1304 drivers/atm/nicstar.c if ((n = n 1316 drivers/atm/nicstar.c if (n == 0) { n 1325 drivers/atm/nicstar.c if (n > (card->tst_free_entries - NS_TST_RESERVED)) { n 1333 drivers/atm/nicstar.c card->tst_free_entries -= n; n 1336 drivers/atm/nicstar.c card->index, n); n 1347 drivers/atm/nicstar.c card->tst_free_entries += n; n 1360 drivers/atm/nicstar.c card->tst_free_entries += n; n 1372 drivers/atm/nicstar.c fill_tst(card, n, vc); n 1573 drivers/atm/nicstar.c static void fill_tst(ns_dev * card, int n, vc_map * vc) n 1597 drivers/atm/nicstar.c r = n; n 1612 drivers/atm/nicstar.c cl += n; n 231 drivers/atm/nicstar.h #define ns_tbd_mkword_1(flags, m, n, buflen) \ n 232 drivers/atm/nicstar.h (cpu_to_le32((flags) | (m) << 23 | (n) << 16 | (buflen))) n 110 drivers/atm/uPD98401.h #define uPD98401_MSH(n) (0x10+(n)) /* Mailbox n Start Address High */ n 111 drivers/atm/uPD98401.h #define uPD98401_MSL(n) (0x14+(n)) /* Mailbox n Start Address High */ n 112 drivers/atm/uPD98401.h #define uPD98401_MBA(n) (0x18+(n)) /* Mailbox n Bottom Address */ n 113 drivers/atm/uPD98401.h #define uPD98401_MTA(n) (0x1c+(n)) /* Mailbox n Tail Address */ n 114 drivers/atm/uPD98401.h #define uPD98401_MWA(n) (0x20+(n)) /* Mailbox n Write Address */ n 161 drivers/atm/uPD98401.h #define uPD98401_IM(n) (0x40000+(n)) /* Scheduler n I and M */ n 162 drivers/atm/uPD98401.h #define uPD98401_X(n) (0x40010+(n)) /* Scheduler n X */ n 163 drivers/atm/uPD98401.h #define uPD98401_Y(n) (0x40020+(n)) /* Scheduler n Y */ n 164 drivers/atm/uPD98401.h #define uPD98401_PC(n) (0x40030+(n)) /* Scheduler n P, C, p and c */ n 165 drivers/atm/uPD98401.h #define uPD98401_PS(n) (0x40040+(n)) /* Scheduler n priority and status */ n 105 drivers/atm/zatm.c int n,i; n 108 drivers/atm/zatm.c for (n = 0; n < EV; n++) { n 109 drivers/atm/zatm.c i = (ec+n) % EV; n 64 drivers/auxdisplay/cfag12864b.c #define bit(n) (((unsigned char)1)<<(n)) n 78 drivers/auxdisplay/cfag12864b.c static void cfag12864b_setbit(unsigned char state, unsigned char n) n 81 drivers/auxdisplay/cfag12864b.c cfag12864b_state |= bit(n); n 83 drivers/auxdisplay/cfag12864b.c cfag12864b_state &= ~bit(n); n 65 drivers/auxdisplay/hd44780.c unsigned int n; n 69 drivers/auxdisplay/hd44780.c n = hd->pins[PIN_CTRL_RW] ? 10 : 9; n 72 drivers/auxdisplay/hd44780.c gpiod_set_array_value_cansleep(n, &hd->pins[PIN_DATA0], NULL, values); n 81 drivers/auxdisplay/hd44780.c unsigned int n; n 86 drivers/auxdisplay/hd44780.c n = hd->pins[PIN_CTRL_RW] ? 
6 : 5; n 89 drivers/auxdisplay/hd44780.c gpiod_set_array_value_cansleep(n, &hd->pins[PIN_DATA4], NULL, values); n 98 drivers/auxdisplay/hd44780.c gpiod_set_array_value_cansleep(n, &hd->pins[PIN_DATA4], NULL, values); n 147 drivers/auxdisplay/hd44780.c unsigned int n; n 151 drivers/auxdisplay/hd44780.c n = hd->pins[PIN_CTRL_RW] ? 6 : 5; n 154 drivers/auxdisplay/hd44780.c gpiod_set_array_value_cansleep(n, &hd->pins[PIN_DATA4], NULL, values); n 61 drivers/auxdisplay/ks0108.c #define bit(n) (((unsigned char)1)<<(n)) n 31 drivers/base/attribute_container.c static void internal_container_klist_get(struct klist_node *n) n 34 drivers/base/attribute_container.c container_of(n, struct internal_container, node); n 38 drivers/base/attribute_container.c static void internal_container_klist_put(struct klist_node *n) n 41 drivers/base/attribute_container.c container_of(n, struct internal_container, node); n 184 drivers/base/attribute_container.c struct klist_node *n = klist_next(iter); \ n 185 drivers/base/attribute_container.c n ? container_of(n, typeof(*pos), member) : \ n 261 drivers/base/bus.c struct klist_node *n = klist_next(i); n 265 drivers/base/bus.c if (n) { n 266 drivers/base/bus.c dev_prv = to_device_private_bus(n); n 388 drivers/base/bus.c struct klist_node *n = klist_next(i); n 391 drivers/base/bus.c if (n) { n 392 drivers/base/bus.c drv_priv = container_of(n, struct driver_private, knode_bus); n 760 drivers/base/bus.c static void klist_devices_get(struct klist_node *n) n 762 drivers/base/bus.c struct device_private *dev_prv = to_device_private_bus(n); n 768 drivers/base/bus.c static void klist_devices_put(struct klist_node *n) n 770 drivers/base/bus.c struct device_private *dev_prv = to_device_private_bus(n); n 935 drivers/base/bus.c struct klist_node *n; n 939 drivers/base/bus.c list_for_each_entry(n, list, n_node) { n 940 drivers/base/bus.c dev_prv = to_device_private_bus(n); n 956 drivers/base/bus.c struct klist_node *n, *tmp; n 964 drivers/base/bus.c list_for_each_entry_safe(n, tmp, &device_klist->k_list, n_node) { n 965 drivers/base/bus.c dev_prv = to_device_private_bus(n); n 425 drivers/base/cacheinfo.c int n = 0; n 428 drivers/base/cacheinfo.c n = sprintf(buf, "ReadWriteAllocate\n"); n 430 drivers/base/cacheinfo.c n = sprintf(buf, "ReadAllocate\n"); n 432 drivers/base/cacheinfo.c n = sprintf(buf, "WriteAllocate\n"); n 433 drivers/base/cacheinfo.c return n; n 441 drivers/base/cacheinfo.c int n = 0; n 444 drivers/base/cacheinfo.c n = sprintf(buf, "WriteThrough\n"); n 446 drivers/base/cacheinfo.c n = sprintf(buf, "WriteBack\n"); n 447 drivers/base/cacheinfo.c return n; n 120 drivers/base/class.c static struct device *klist_class_to_dev(struct klist_node *n) n 122 drivers/base/class.c struct device_private *p = to_device_private_class(n); n 126 drivers/base/class.c static void klist_class_dev_get(struct klist_node *n) n 128 drivers/base/class.c struct device *dev = klist_class_to_dev(n); n 133 drivers/base/class.c static void klist_class_dev_put(struct klist_node *n) n 135 drivers/base/class.c struct device *dev = klist_class_to_dev(n); n 1661 drivers/base/core.c static void klist_children_get(struct klist_node *n) n 1663 drivers/base/core.c struct device_private *p = to_device_private_parent(n); n 1669 drivers/base/core.c static void klist_children_put(struct klist_node *n) n 1671 drivers/base/core.c struct device_private *p = to_device_private_parent(n); n 2415 drivers/base/core.c struct klist_node *n = klist_prev(i); n 2419 drivers/base/core.c if (n) { n 2420 drivers/base/core.c p = 
to_device_private_parent(n); n 2428 drivers/base/core.c struct klist_node *n = klist_next(i); n 2432 drivers/base/core.c if (n) { n 2433 drivers/base/core.c p = to_device_private_parent(n); n 234 drivers/base/cpu.c int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1); n 235 drivers/base/cpu.c return n; n 245 drivers/base/cpu.c int n = 0, len = PAGE_SIZE-2; n 252 drivers/base/cpu.c n = scnprintf(buf, len, "%*pbl", cpumask_pr_args(offline)); n 257 drivers/base/cpu.c if (n && n < len) n 258 drivers/base/cpu.c buf[n++] = ','; n 261 drivers/base/cpu.c n += snprintf(&buf[n], len - n, "%u", nr_cpu_ids); n 263 drivers/base/cpu.c n += snprintf(&buf[n], len - n, "%u-%d", n 267 drivers/base/cpu.c n += snprintf(&buf[n], len - n, "\n"); n 268 drivers/base/cpu.c return n; n 275 drivers/base/cpu.c int n = 0, len = PAGE_SIZE-2; n 283 drivers/base/cpu.c n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(isolated)); n 287 drivers/base/cpu.c return n; n 295 drivers/base/cpu.c int n = 0, len = PAGE_SIZE-2; n 297 drivers/base/cpu.c n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask)); n 299 drivers/base/cpu.c return n; n 328 drivers/base/cpu.c ssize_t n; n 331 drivers/base/cpu.c n = sprintf(buf, "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:", n 336 drivers/base/cpu.c if (PAGE_SIZE < n + sizeof(",XXXX\n")) { n 340 drivers/base/cpu.c n += sprintf(&buf[n], ",%04X", i); n 342 drivers/base/cpu.c buf[n++] = '\n'; n 343 drivers/base/cpu.c return n; n 65 drivers/base/devres.c #define set_node_dbginfo(node, n, s) do {} while (0) n 21 drivers/base/driver.c struct klist_node *n = klist_next(i); n 25 drivers/base/driver.c if (n) { n 26 drivers/base/driver.c dev_prv = to_device_private_driver(n); n 36 drivers/base/map.c unsigned n = MAJOR(dev + range - 1) - MAJOR(dev) + 1; n 41 drivers/base/map.c if (n > 255) n 42 drivers/base/map.c n = 255; n 44 drivers/base/map.c p = kmalloc_array(n, sizeof(struct probe), GFP_KERNEL); n 48 drivers/base/map.c for (i = 0; i < n; i++, p++) { n 57 drivers/base/map.c for (i = 0, p -= n; i < n; i++, p++, index++) { n 70 drivers/base/map.c unsigned n = MAJOR(dev + range - 1) - MAJOR(dev) + 1; n 75 drivers/base/map.c if (n > 255) n 76 drivers/base/map.c n = 255; n 79 drivers/base/map.c for (i = 0; i < n; i++, index++) { n 32 drivers/base/node.c ssize_t n; n 43 drivers/base/node.c n = cpumap_print_to_pagebuf(list, buf, mask); n 46 drivers/base/node.c return n; n 364 drivers/base/node.c int n; n 373 drivers/base/node.c n = sprintf(buf, n 400 drivers/base/node.c n += sprintf(buf + n, n 410 drivers/base/node.c n += sprintf(buf + n, n 464 drivers/base/node.c n += hugetlb_report_node_meminfo(nid, buf + n); n 465 drivers/base/node.c return n; n 496 drivers/base/node.c int n = 0; n 499 drivers/base/node.c n += sprintf(buf+n, "%s %lu\n", vmstat_text[i], n 504 drivers/base/node.c n += sprintf(buf+n, "%s %lu\n", n 510 drivers/base/node.c n += sprintf(buf+n, "%s %lu\n", n 515 drivers/base/node.c return n; n 947 drivers/base/node.c int n; n 949 drivers/base/node.c n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl", n 951 drivers/base/node.c buf[n++] = '\n'; n 952 drivers/base/node.c buf[n] = '\0'; n 953 drivers/base/node.c return n; n 1321 drivers/base/platform.c int n; n 1335 drivers/base/platform.c n = strlen(epdrv->pdrv->driver.name); n 1336 drivers/base/platform.c if (buf && !strncmp(buf, epdrv->pdrv->driver.name, n)) { n 1340 drivers/base/platform.c if (buf[n] == '\0' || buf[n] == ',') n 1343 drivers/base/platform.c epdrv->requested_id = simple_strtoul(&buf[n + 1], n 1346 
drivers/base/platform.c if (buf[n] != '.' || (tmp == &buf[n + 1])) { n 1348 drivers/base/platform.c n = 0; n 1350 drivers/base/platform.c n += strcspn(&buf[n + 1], ",") + 1; n 1353 drivers/base/platform.c if (buf[n] == ',') n 1354 drivers/base/platform.c n++; n 1357 drivers/base/platform.c memcpy(epdrv->buffer, &buf[n], n 1358 drivers/base/platform.c min_t(int, epdrv->bufsize, strlen(&buf[n]) + 1)); n 1468 drivers/base/platform.c int n = 0; n 1532 drivers/base/platform.c n++; n 1535 drivers/base/platform.c if (n >= nr_probe) n 1540 drivers/base/platform.c return n; n 1559 drivers/base/platform.c int k, n, i; n 1561 drivers/base/platform.c n = 0; n 1562 drivers/base/platform.c for (i = -2; n < nr_probe; i++) { n 1563 drivers/base/platform.c k = early_platform_driver_probe_id(class_str, i, nr_probe - n); n 1568 drivers/base/platform.c n += k; n 1574 drivers/base/platform.c return n; n 2653 drivers/base/power/domain.c struct genpd_power_state **states, int *n) n 2664 drivers/base/power/domain.c *n = 0; n 2679 drivers/base/power/domain.c *n = ret; n 184 drivers/base/power/qos.c struct blocking_notifier_head *n; n 190 drivers/base/power/qos.c n = kzalloc(3 * sizeof(*n), GFP_KERNEL); n 191 drivers/base/power/qos.c if (!n) { n 202 drivers/base/power/qos.c c->notifiers = n; n 203 drivers/base/power/qos.c BLOCKING_INIT_NOTIFIER_HEAD(n); n 108 drivers/base/power/sysfs.c const char * buf, size_t n) n 116 drivers/base/power/sysfs.c n = -EINVAL; n 118 drivers/base/power/sysfs.c return n; n 188 drivers/base/power/sysfs.c struct device_attribute *attr, const char *buf, size_t n) n 201 drivers/base/power/sysfs.c return n; n 222 drivers/base/power/sysfs.c const char *buf, size_t n) n 245 drivers/base/power/sysfs.c return ret < 0 ? ret : n; n 266 drivers/base/power/sysfs.c const char *buf, size_t n) n 284 drivers/base/power/sysfs.c return ret < 0 ? ret : n; n 299 drivers/base/power/sysfs.c const char *buf, size_t n) n 310 drivers/base/power/sysfs.c return ret < 0 ? ret : n; n 328 drivers/base/power/sysfs.c const char *buf, size_t n) n 339 drivers/base/power/sysfs.c return n; n 546 drivers/base/power/sysfs.c const char *buf, size_t n) n 554 drivers/base/power/sysfs.c return n; n 87 drivers/base/power/trace.c unsigned int n = user + USERHASH*(file + FILEHASH*device); n 102 drivers/base/power/trace.c time.tm_year = (n % 100); n 103 drivers/base/power/trace.c n /= 100; n 104 drivers/base/power/trace.c time.tm_mon = (n % 12); n 105 drivers/base/power/trace.c n /= 12; n 106 drivers/base/power/trace.c time.tm_mday = (n % 28) + 1; n 107 drivers/base/power/trace.c n /= 28; n 108 drivers/base/power/trace.c time.tm_hour = (n % 24); n 109 drivers/base/power/trace.c n /= 24; n 110 drivers/base/power/trace.c time.tm_min = (n % 20) * 3; n 111 drivers/base/power/trace.c n /= 20; n 114 drivers/base/power/trace.c return n ? 
-1 : 0; n 1045 drivers/base/power/wakeup.c loff_t n = *pos; n 1048 drivers/base/power/wakeup.c if (n == 0) { n 1056 drivers/base/power/wakeup.c if (n-- <= 0) n 136 drivers/base/regmap/regcache-rbtree.c struct regcache_rbtree_node *n; n 150 drivers/base/regmap/regcache-rbtree.c n = rb_entry(node, struct regcache_rbtree_node, node); n 151 drivers/base/regmap/regcache-rbtree.c mem_size += sizeof(*n); n 152 drivers/base/regmap/regcache-rbtree.c mem_size += (n->blklen * map->cache_word_size); n 153 drivers/base/regmap/regcache-rbtree.c mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long); n 155 drivers/base/regmap/regcache-rbtree.c regcache_rbtree_get_base_top_reg(map, n, &base, &top); n 2196 drivers/base/regmap/regmap.c int i, n; n 2207 drivers/base/regmap/regmap.c for (i = 0, n = 0; i < num_regs; i++, n++) { n 2238 drivers/base/regmap/regmap.c n = 1; n 2240 drivers/base/regmap/regmap.c ret = _regmap_raw_multi_reg_write(map, base, n); n 2247 drivers/base/regmap/regmap.c base += n; n 2248 drivers/base/regmap/regmap.c n = 0; n 2252 drivers/base/regmap/regmap.c &base[n].reg, n 2263 drivers/base/regmap/regmap.c if (n > 0) n 2264 drivers/base/regmap/regmap.c return _regmap_raw_multi_reg_write(map, base, n); n 422 drivers/base/swnode.c int i, n = 0; n 428 drivers/base/swnode.c while (properties[n].name) n 429 drivers/base/swnode.c n++; n 431 drivers/base/swnode.c p = kcalloc(n + 1, sizeof(*p), GFP_KERNEL); n 435 drivers/base/swnode.c for (i = 0; i < n; i++) { n 517 drivers/block/amiflop.c int n; n 528 drivers/block/amiflop.c for (n = unit[drive].type->tracks/2; n != 0; --n) { n 540 drivers/block/amiflop.c n = unit[drive].type->tracks + 20; n 550 drivers/block/amiflop.c if (--n == 0) { n 1944 drivers/block/amiflop.c int n; n 1947 drivers/block/amiflop.c if (!get_option(&str, &n)) n 1949 drivers/block/amiflop.c printk (KERN_INFO "amiflop: Setting default df0 to %x\n", n); n 1950 drivers/block/amiflop.c fd_def_df0 = n; n 82 drivers/block/aoe/aoechr.c int major, minor, n; n 94 drivers/block/aoe/aoechr.c n = sscanf(buf, "e%d.%d", &major, &minor); n 95 drivers/block/aoe/aoechr.c if (n != 2) { n 130 drivers/block/aoe/aoechr.c ulong flags, n; n 132 drivers/block/aoe/aoechr.c n = strlen(msg); n 142 drivers/block/aoe/aoechr.c mp = kmemdup(msg, n, GFP_ATOMIC); n 144 drivers/block/aoe/aoechr.c printk(KERN_ERR "aoe: allocation failure, len=%ld\n", n); n 150 drivers/block/aoe/aoechr.c em->len = n; n 191 drivers/block/aoe/aoechr.c int n, i; n 194 drivers/block/aoe/aoechr.c n = iminor(inode); n 195 drivers/block/aoe/aoechr.c filp->private_data = (void *) (unsigned long) n; n 198 drivers/block/aoe/aoechr.c if (chardevs[i].minor == n) { n 215 drivers/block/aoe/aoechr.c unsigned long n; n 221 drivers/block/aoe/aoechr.c n = (unsigned long) filp->private_data; n 222 drivers/block/aoe/aoechr.c if (n != MINOR_ERR) n 239 drivers/block/aoe/aoechr.c n = wait_for_completion_interruptible(&emsgs_comp); n 245 drivers/block/aoe/aoechr.c if (n) { n 264 drivers/block/aoe/aoechr.c n = copy_to_user(buf, mp, len); n 266 drivers/block/aoe/aoechr.c return n == 0 ? 
len : -EFAULT; n 286 drivers/block/aoe/aoechr.c int n, i; n 288 drivers/block/aoe/aoechr.c n = register_chrdev(AOE_MAJOR, "aoechr", &aoe_fops); n 289 drivers/block/aoe/aoechr.c if (n < 0) { n 291 drivers/block/aoe/aoechr.c return n; n 100 drivers/block/aoe/aoecmd.c u32 n; n 102 drivers/block/aoe/aoecmd.c n = tag % NFACTIVE; n 103 drivers/block/aoe/aoecmd.c head = &d->factive[n]; n 122 drivers/block/aoe/aoecmd.c register ulong n; n 124 drivers/block/aoe/aoecmd.c n = jiffies & 0xffff; n 125 drivers/block/aoe/aoecmd.c return n |= (++d->lasttag & 0x7fff) << 16; n 311 drivers/block/aoe/aoecmd.c u32 n; n 313 drivers/block/aoe/aoecmd.c n = f->tag % NFACTIVE; n 314 drivers/block/aoe/aoecmd.c list_add_tail(&f->head, &d->factive[n]); n 459 drivers/block/aoe/aoecmd.c u32 n; n 462 drivers/block/aoe/aoecmd.c n = newtag(d); n 476 drivers/block/aoe/aoecmd.c f->tag, jiffies, n, n 481 drivers/block/aoe/aoecmd.c f->tag = n; n 483 drivers/block/aoe/aoecmd.c h->tag = cpu_to_be32(n); n 516 drivers/block/aoe/aoecmd.c int n; n 518 drivers/block/aoe/aoecmd.c n = jiffies & 0xffff; n 519 drivers/block/aoe/aoecmd.c n -= tag & 0xffff; n 520 drivers/block/aoe/aoecmd.c if (n < 0) n 521 drivers/block/aoe/aoecmd.c n += 1<<16; n 522 drivers/block/aoe/aoecmd.c return jiffies_to_usecs(n + 1); n 543 drivers/block/aoe/aoecmd.c ulong n; n 547 drivers/block/aoe/aoecmd.c n = (e - ifp) * sizeof *ifp; n 548 drivers/block/aoe/aoecmd.c memmove(ifp, ifp+1, n); n 586 drivers/block/aoe/aoecmd.c size_t n, m; n 604 drivers/block/aoe/aoecmd.c for (frag = 0, n = f->iter.bi_size; n > 0; ++frag, n -= m) { n 605 drivers/block/aoe/aoecmd.c if (n < PAGE_SIZE) n 606 drivers/block/aoe/aoecmd.c m = n; n 702 drivers/block/aoe/aoecmd.c int n; n 704 drivers/block/aoe/aoecmd.c n = t->taint++; n 706 drivers/block/aoe/aoecmd.c if (n > t->taint) n 707 drivers/block/aoe/aoecmd.c t->taint = n; n 736 drivers/block/aoe/aoecmd.c ulong flags, n; n 772 drivers/block/aoe/aoecmd.c n = f->waited_total + since; n 773 drivers/block/aoe/aoecmd.c n /= USEC_PER_SEC; n 775 drivers/block/aoe/aoecmd.c && n > aoe_deadsecs n 787 drivers/block/aoe/aoecmd.c n = f->waited + since; n 788 drivers/block/aoe/aoecmd.c n /= USEC_PER_SEC; n 790 drivers/block/aoe/aoecmd.c && (n > aoe_deadsecs / utgts || n > HARD_SCORN_SECS)) n 930 drivers/block/aoe/aoecmd.c u16 n; n 933 drivers/block/aoe/aoecmd.c n = get_unaligned_le16(&id[83 << 1]); n 936 drivers/block/aoe/aoecmd.c n |= get_unaligned_le16(&id[86 << 1]); n 938 drivers/block/aoe/aoecmd.c if (n & (1<<10)) { /* bit 10: LBA 48 */ n 987 drivers/block/aoe/aoecmd.c register long n; n 989 drivers/block/aoe/aoecmd.c n = rtt; n 992 drivers/block/aoe/aoecmd.c n -= d->rttavg >> RTTSCALE; n 993 drivers/block/aoe/aoecmd.c d->rttavg += n; n 994 drivers/block/aoe/aoecmd.c if (n < 0) n 995 drivers/block/aoe/aoecmd.c n = -n; n 996 drivers/block/aoe/aoecmd.c n -= d->rttdev >> RTTDSCALE; n 997 drivers/block/aoe/aoecmd.c d->rttdev += n; n 1086 drivers/block/aoe/aoecmd.c long n; n 1117 drivers/block/aoe/aoecmd.c n = ahout->scnt << 9; n 1121 drivers/block/aoe/aoecmd.c if (skb->len < n) { n 1125 drivers/block/aoe/aoecmd.c skb->len, n); n 1129 drivers/block/aoe/aoecmd.c if (n > f->iter.bi_size) { n 1133 drivers/block/aoe/aoecmd.c n, f->iter.bi_size); n 1137 drivers/block/aoe/aoecmd.c bvcpy(skb, f->buf->bio, f->iter, n); n 1307 drivers/block/aoe/aoecmd.c u32 n; n 1325 drivers/block/aoe/aoecmd.c n = be32_to_cpu(get_unaligned(&h->tag)); n 1326 drivers/block/aoe/aoecmd.c f = getframe(d, n); n 1333 drivers/block/aoe/aoecmd.c f = getframe_deferred(d, n); n 1337 
drivers/block/aoe/aoecmd.c calc_rttavg(d, NULL, tsince(n)); n 1534 drivers/block/aoe/aoecmd.c u16 n; n 1561 drivers/block/aoe/aoecmd.c n = be16_to_cpu(ch->bufcnt); n 1562 drivers/block/aoe/aoecmd.c if (n > aoe_maxout) /* keep it reasonable */ n 1563 drivers/block/aoe/aoecmd.c n = aoe_maxout; n 1575 drivers/block/aoe/aoecmd.c t->nframes = n; n 1576 drivers/block/aoe/aoecmd.c if (n < t->maxout) n 1579 drivers/block/aoe/aoecmd.c t = addtgt(d, h->src, n); n 1583 drivers/block/aoe/aoecmd.c n = skb->dev->mtu; n 1584 drivers/block/aoe/aoecmd.c n -= sizeof(struct aoe_hdr) + sizeof(struct aoe_atahdr); n 1585 drivers/block/aoe/aoecmd.c n /= 512; n 1586 drivers/block/aoe/aoecmd.c if (n > ch->scnt) n 1587 drivers/block/aoe/aoecmd.c n = ch->scnt; n 1588 drivers/block/aoe/aoecmd.c n = n ? n * 512 : DEFAULTBCNT; n 1589 drivers/block/aoe/aoecmd.c setifbcnt(t, skb->dev, n); n 44 drivers/block/aoe/aoedev.c ulong n; n 48 drivers/block/aoe/aoedev.c n = find_first_zero_bit(used_minors, N_DEVS); n 49 drivers/block/aoe/aoedev.c if (n < N_DEVS) n 50 drivers/block/aoe/aoedev.c set_bit(n, used_minors); n 55 drivers/block/aoe/aoedev.c *sysminor = n * AOE_PARTITIONS; n 63 drivers/block/aoe/aoedev.c ulong n; n 79 drivers/block/aoe/aoedev.c n = aoemaj * NPERSHELF + aoemin; n 80 drivers/block/aoe/aoedev.c if (n >= N_DEVS) { n 89 drivers/block/aoe/aoedev.c if (test_bit(n, used_minors)) { n 92 drivers/block/aoe/aoedev.c n); n 95 drivers/block/aoe/aoedev.c set_bit(n, used_minors); n 97 drivers/block/aoe/aoedev.c *sysminor = n * AOE_PARTITIONS; n 135 drivers/block/aoe/aoenet.c u32 n; n 154 drivers/block/aoe/aoenet.c n = get_unaligned_be32(&h->tag); n 155 drivers/block/aoe/aoenet.c if ((h->verfl & AOEFL_RSP) == 0 || (n & 1<<31)) n 159 drivers/block/aoe/aoenet.c n = h->err; n 160 drivers/block/aoe/aoenet.c if (n > NECODES) n 161 drivers/block/aoe/aoenet.c n = 0; n 168 drivers/block/aoe/aoenet.c h->err, aoe_errlist[n]); n 718 drivers/block/ataflop.c #define FILL(n,val) \ n 720 drivers/block/ataflop.c memset( p, val, n ); \ n 721 drivers/block/ataflop.c p += n; \ n 173 drivers/block/brd.c static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n) n 178 drivers/block/brd.c copy = min_t(size_t, n, PAGE_SIZE - offset); n 181 drivers/block/brd.c if (copy < n) { n 193 drivers/block/brd.c sector_t sector, size_t n) n 200 drivers/block/brd.c copy = min_t(size_t, n, PAGE_SIZE - offset); n 208 drivers/block/brd.c if (copy < n) { n 211 drivers/block/brd.c copy = n - copy; n 225 drivers/block/brd.c sector_t sector, size_t n) n 232 drivers/block/brd.c copy = min_t(size_t, n, PAGE_SIZE - offset); n 241 drivers/block/brd.c if (copy < n) { n 244 drivers/block/brd.c copy = n - copy; n 1652 drivers/block/drbd/drbd_bitmap.c int n = e-s; n 1655 drivers/block/drbd/drbd_bitmap.c count += bitmap_weight(bm, n * BITS_PER_LONG); n 166 drivers/block/drbd/drbd_debugfs.c int n = atomic_read(&device->ap_actlog_cnt); n 167 drivers/block/drbd/drbd_debugfs.c if (n) { n 179 drivers/block/drbd/drbd_debugfs.c if (n) { n 185 drivers/block/drbd/drbd_debugfs.c seq_printf(m, "%u\n", n); n 1711 drivers/block/drbd/drbd_int.h #define page_chain_for_each_safe(page, n) \ n 1712 drivers/block/drbd/drbd_int.h for (; page && ({ n = page_chain_next(page); 1; }); page = n) n 2081 drivers/block/drbd/drbd_int.h #define sub_unacked(device, n) _sub_unacked(device, n, __func__, __LINE__) n 2082 drivers/block/drbd/drbd_int.h static inline void _sub_unacked(struct drbd_device *device, int n, const char *func, int line) n 2084 drivers/block/drbd/drbd_int.h 
atomic_sub(n, &device->unacked_cnt); n 1102 drivers/block/drbd/drbd_main.c static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n) n 1104 drivers/block/drbd/drbd_main.c BUG_ON(n & ~0x7); n 1105 drivers/block/drbd/drbd_main.c p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4); n 1148 drivers/block/drbd/drbd_nl.c struct lru_cache *n, *t; n 1159 drivers/block/drbd/drbd_nl.c n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION, n 1162 drivers/block/drbd/drbd_nl.c if (n == NULL) { n 1177 drivers/block/drbd/drbd_nl.c device->act_log = n; n 1181 drivers/block/drbd/drbd_nl.c lc_destroy(n); n 3374 drivers/block/drbd/drbd_nl.c int n; n 3379 drivers/block/drbd/drbd_nl.c for (n = 0; n < UI_HISTORY_END - UI_HISTORY_START + 1; n++) n 3380 drivers/block/drbd/drbd_nl.c history_uuids[n] = md->uuid[UI_HISTORY_START + n]; n 3381 drivers/block/drbd/drbd_nl.c for (; n < HISTORY_UUIDS; n++) n 3382 drivers/block/drbd/drbd_nl.c history_uuids[n] = 0; n 4923 drivers/block/drbd/drbd_nl.c unsigned int n; n 4936 drivers/block/drbd/drbd_nl.c n = cb->args[4]++; n 4939 drivers/block/drbd/drbd_nl.c if (n < 1) { n 4944 drivers/block/drbd/drbd_nl.c n--; n 4945 drivers/block/drbd/drbd_nl.c if (n < state_change->n_connections) { n 4946 drivers/block/drbd/drbd_nl.c notify_connection_state_change(skb, seq, &state_change->connections[n], n 4950 drivers/block/drbd/drbd_nl.c n -= state_change->n_connections; n 4951 drivers/block/drbd/drbd_nl.c if (n < state_change->n_devices) { n 4952 drivers/block/drbd/drbd_nl.c notify_device_state_change(skb, seq, &state_change->devices[n], n 4956 drivers/block/drbd/drbd_nl.c n -= state_change->n_devices; n 4957 drivers/block/drbd/drbd_nl.c if (n < state_change->n_devices * state_change->n_connections) { n 4958 drivers/block/drbd/drbd_nl.c notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n], n 75 drivers/block/drbd/drbd_receiver.c static struct page *page_chain_del(struct page **head, int n) n 80 drivers/block/drbd/drbd_receiver.c BUG_ON(!n); n 90 drivers/block/drbd/drbd_receiver.c if (--n == 0) n 61 drivers/block/drbd/drbd_state.c unsigned int size, n; n 76 drivers/block/drbd/drbd_state.c for (n = 0; n < n_devices; n++) n 77 drivers/block/drbd/drbd_state.c state_change->devices[n].device = NULL; n 78 drivers/block/drbd/drbd_state.c for (n = 0; n < n_connections; n++) n 79 drivers/block/drbd/drbd_state.c state_change->connections[n].connection = NULL; n 159 drivers/block/drbd/drbd_state.c unsigned int n; n 173 drivers/block/drbd/drbd_state.c for (n = 0; n < state_change->n_devices; n++) { n 175 drivers/block/drbd/drbd_state.c &state_change->devices[n]; n 181 drivers/block/drbd/drbd_state.c for (n = 0; n < state_change->n_connections; n++) { n 183 drivers/block/drbd/drbd_state.c &state_change->connections[n]; n 192 drivers/block/drbd/drbd_state.c for (n = 0; n < state_change->n_devices * state_change->n_connections; n++) { n 194 drivers/block/drbd/drbd_state.c &state_change->peer_devices[n]; n 256 drivers/block/drbd/drbd_state.c unsigned int n; n 263 drivers/block/drbd/drbd_state.c for (n = 0; n < state_change->n_devices; n++) { n 264 drivers/block/drbd/drbd_state.c struct drbd_device *device = state_change->devices[n].device; n 269 drivers/block/drbd/drbd_state.c for (n = 0; n < state_change->n_connections; n++) { n 271 drivers/block/drbd/drbd_state.c state_change->connections[n].connection; n 284 drivers/block/drbd/drbd_vli.h unsigned int n; n 300 drivers/block/drbd/drbd_vli.h n = (bs->cur.bit + bits + 7) >> 3; n 303 drivers/block/drbd/drbd_vli.h 
if (n) { n 304 drivers/block/drbd/drbd_vli.h memcpy(&val, bs->cur.b+1, n - 1); n 2100 drivers/block/floppy.c int n; n 2136 drivers/block/floppy.c n = (track_shift * format_req.track + head_shift * format_req.head) n 2153 drivers/block/floppy.c here[n].sect = count; n 2154 drivers/block/floppy.c n = (n + il) % F_SECT_PER_TRACK; n 2155 drivers/block/floppy.c if (here[n].sect) { /* sector busy, find next free sector */ n 2156 drivers/block/floppy.c ++n; n 2157 drivers/block/floppy.c if (n >= F_SECT_PER_TRACK) { n 2158 drivers/block/floppy.c n -= F_SECT_PER_TRACK; n 2159 drivers/block/floppy.c while (here[n].sect) n 2160 drivers/block/floppy.c ++n; n 1885 drivers/block/loop.c unsigned int n = funcs->number; n 1887 drivers/block/loop.c if (n >= MAX_LO_CRYPT || xfer_funcs[n]) n 1889 drivers/block/loop.c xfer_funcs[n] = funcs; n 1907 drivers/block/loop.c unsigned int n = number; n 1910 drivers/block/loop.c if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL) n 1913 drivers/block/loop.c xfer_funcs[n] = NULL; n 877 drivers/block/mtip32xx/mtip32xx.c unsigned int n; n 884 drivers/block/mtip32xx/mtip32xx.c for (n = 1; n < port->dd->slot_groups; n++) n 885 drivers/block/mtip32xx/mtip32xx.c active |= readl(port->s_active[n]); n 1461 drivers/block/mtip32xx/mtip32xx.c int n; n 1468 drivers/block/mtip32xx/mtip32xx.c for_each_sg(command->sg, sg, nents, n) { n 2273 drivers/block/mtip32xx/mtip32xx.c int n, rv = 0; n 2287 drivers/block/mtip32xx/mtip32xx.c for (n = dd->slot_groups-1; n >= 0; n--) n 2289 drivers/block/mtip32xx/mtip32xx.c readl(dd->port->s_active[n])); n 2294 drivers/block/mtip32xx/mtip32xx.c for (n = dd->slot_groups-1; n >= 0; n--) n 2296 drivers/block/mtip32xx/mtip32xx.c readl(dd->port->cmd_issue[n])); n 2301 drivers/block/mtip32xx/mtip32xx.c for (n = dd->slot_groups-1; n >= 0; n--) n 2303 drivers/block/mtip32xx/mtip32xx.c readl(dd->port->completed[n])); n 2314 drivers/block/mtip32xx/mtip32xx.c for (n = dd->slot_groups-1; n >= 0; n--) { n 2317 drivers/block/mtip32xx/mtip32xx.c dd->port->cmds_to_issue[n/2] >> (32*(n&1)); n 2319 drivers/block/mtip32xx/mtip32xx.c group_allocated = dd->port->cmds_to_issue[n]; n 878 drivers/block/null_blk_main.c static int null_make_cache_space(struct nullb *nullb, unsigned long n) n 886 drivers/block/null_blk_main.c nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0) n 918 drivers/block/null_blk_main.c if (n > flushed) { n 932 drivers/block/null_blk_main.c unsigned int off, sector_t sector, size_t n, bool is_fua) n 939 drivers/block/null_blk_main.c while (count < n) { n 940 drivers/block/null_blk_main.c temp = min_t(size_t, nullb->dev->blocksize, n - count); n 969 drivers/block/null_blk_main.c unsigned int off, sector_t sector, size_t n) n 976 drivers/block/null_blk_main.c while (count < n) { n 977 drivers/block/null_blk_main.c temp = min_t(size_t, nullb->dev->blocksize, n - count); n 1000 drivers/block/null_blk_main.c static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n) n 1005 drivers/block/null_blk_main.c while (n > 0) { n 1006 drivers/block/null_blk_main.c temp = min_t(size_t, n, nullb->dev->blocksize); n 1011 drivers/block/null_blk_main.c n -= temp; n 434 drivers/block/paride/pcd.c int r, d, p, n, k, j; n 445 drivers/block/paride/pcd.c n = (d + 3) & 0xfffc; n 448 drivers/block/paride/pcd.c if ((p == 2) && (n > 0) && (j == 0)) { n 449 drivers/block/paride/pcd.c pi_read_block(cd->pi, buf, n); n 452 drivers/block/paride/pcd.c cd->name, fun, n); n 327 drivers/block/paride/pd.c static void pd_send_command(struct 
pd_unit *disk, int n, int s, int h, int c0, int c1, int func) n 331 drivers/block/paride/pd.c write_reg(disk, 2, n); n 483 drivers/block/paride/pf.c int r, s, n; n 489 drivers/block/paride/pf.c n = (((read_reg(pf, 4) + 256 * read_reg(pf, 5)) + n 491 drivers/block/paride/pf.c pi_read_block(pf->pi, buf, n); n 359 drivers/block/paride/pg.c int r, d, n, p; n 368 drivers/block/paride/pg.c n = ((d + 3) & 0xfffc); n 371 drivers/block/paride/pg.c pi_write_block(dev->pi, buf, n); n 373 drivers/block/paride/pg.c pi_read_block(dev->pi, buf, n); n 376 drivers/block/paride/pg.c p ? "Read" : "Write", n); n 332 drivers/block/paride/pt.c int r, s, n, p; n 338 drivers/block/paride/pt.c n = (((read_reg(pi, 4) + 256 * read_reg(pi, 5)) + n 342 drivers/block/paride/pt.c pi_write_block(pi, buf, n); n 344 drivers/block/paride/pt.c pi_read_block(pi, buf, n); n 770 drivers/block/paride/pt.c int k, n, r, p, s, t, b; n 789 drivers/block/paride/pt.c n = count; n 790 drivers/block/paride/pt.c if (n > 32768) n 791 drivers/block/paride/pt.c n = 32768; /* max per command */ n 792 drivers/block/paride/pt.c b = (n - 1 + tape->bs) / tape->bs; n 793 drivers/block/paride/pt.c n = b * tape->bs; /* rounded up to even block */ n 797 drivers/block/paride/pt.c r = pt_command(tape, rd_cmd, n, "read"); n 826 drivers/block/paride/pt.c n = (read_reg(pi, 4) + 256 * read_reg(pi, 5)); n 835 drivers/block/paride/pt.c while (n > 0) { n 836 drivers/block/paride/pt.c k = n; n 840 drivers/block/paride/pt.c n -= k; n 867 drivers/block/paride/pt.c int k, n, r, p, s, t, b; n 890 drivers/block/paride/pt.c n = count; n 891 drivers/block/paride/pt.c if (n > 32768) n 892 drivers/block/paride/pt.c n = 32768; /* max per command */ n 893 drivers/block/paride/pt.c b = (n - 1 + tape->bs) / tape->bs; n 894 drivers/block/paride/pt.c n = b * tape->bs; /* rounded up to even block */ n 898 drivers/block/paride/pt.c r = pt_command(tape, wr_cmd, n, "write"); n 927 drivers/block/paride/pt.c n = (read_reg(pi, 4) + 256 * read_reg(pi, 5)); n 936 drivers/block/paride/pt.c while (n > 0) { n 937 drivers/block/paride/pt.c k = n; n 950 drivers/block/paride/pt.c n -= k; n 211 drivers/block/pktcdvd.c int n = 0; n 214 drivers/block/pktcdvd.c n = sprintf(data, "%lu\n", pd->stats.pkt_started); n 217 drivers/block/pktcdvd.c n = sprintf(data, "%lu\n", pd->stats.pkt_ended); n 220 drivers/block/pktcdvd.c n = sprintf(data, "%lu\n", pd->stats.secs_w >> 1); n 223 drivers/block/pktcdvd.c n = sprintf(data, "%lu\n", pd->stats.secs_r >> 1); n 226 drivers/block/pktcdvd.c n = sprintf(data, "%lu\n", pd->stats.secs_rg >> 1); n 232 drivers/block/pktcdvd.c n = sprintf(data, "%d\n", v); n 238 drivers/block/pktcdvd.c n = sprintf(data, "%d\n", v); n 244 drivers/block/pktcdvd.c n = sprintf(data, "%d\n", v); n 246 drivers/block/pktcdvd.c return n; n 356 drivers/block/pktcdvd.c int n = 0; n 363 drivers/block/pktcdvd.c n += sprintf(data+n, "%s %u:%u %u:%u\n", n 370 drivers/block/pktcdvd.c return n; n 625 drivers/block/pktcdvd.c struct rb_node *n = rb_next(&node->rb_node); n 626 drivers/block/pktcdvd.c if (!n) n 628 drivers/block/pktcdvd.c return rb_entry(n, struct pkt_rb_node, rb_node); n 644 drivers/block/pktcdvd.c struct rb_node *n = pd->bio_queue.rb_node; n 648 drivers/block/pktcdvd.c if (!n) { n 654 drivers/block/pktcdvd.c tmp = rb_entry(n, struct pkt_rb_node, rb_node); n 656 drivers/block/pktcdvd.c next = n->rb_left; n 658 drivers/block/pktcdvd.c next = n->rb_right; n 661 drivers/block/pktcdvd.c n = next; n 1168 drivers/block/pktcdvd.c struct rb_node *n; n 1184 drivers/block/pktcdvd.c n = 
rb_first(&pd->bio_queue); n 1185 drivers/block/pktcdvd.c if (n) n 1186 drivers/block/pktcdvd.c first_node = rb_entry(n, struct pkt_rb_node, rb_node); n 1202 drivers/block/pktcdvd.c n = rb_first(&pd->bio_queue); n 1203 drivers/block/pktcdvd.c if (n) n 1204 drivers/block/pktcdvd.c node = rb_entry(n, struct pkt_rb_node, rb_node); n 120 drivers/block/ps3disk.c unsigned int n = 0; n 125 drivers/block/ps3disk.c n++; n 128 drivers/block/ps3disk.c __func__, __LINE__, op, n, blk_rq_sectors(req)); n 357 drivers/block/rbd.c #define for_each_obj_request_safe(ireq, oreq, n) \ n 358 drivers/block/rbd.c list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item) n 3908 drivers/block/rbd.c u32 n; n 3910 drivers/block/rbd.c ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */ n 3911 drivers/block/rbd.c while (n--) { n 236 drivers/block/swim3.c static void seek_track(struct floppy_state *fs, int n); n 386 drivers/block/swim3.c static inline void seek_track(struct floppy_state *fs, int n) n 390 drivers/block/swim3.c if (n >= 0) { n 392 drivers/block/swim3.c sw->nseek = n; n 395 drivers/block/swim3.c sw->nseek = -n; n 397 drivers/block/swim3.c fs->expect_cyl = (fs->cur_cyl >= 0)? fs->cur_cyl + n: -1; n 418 drivers/block/swim3.c int n; n 429 drivers/block/swim3.c n = 1; n 431 drivers/block/swim3.c n = fs->secpertrack - fs->req_sector + 1; n 432 drivers/block/swim3.c if (n > blk_rq_cur_sectors(req)) n 433 drivers/block/swim3.c n = blk_rq_cur_sectors(req); n 437 drivers/block/swim3.c fs->req_sector, fs->secpertrack, fs->head, n); n 439 drivers/block/swim3.c fs->scount = n; n 442 drivers/block/swim3.c out_8(&sw->nsect, n); n 453 drivers/block/swim3.c init_dma(cp, INPUT_LAST, bio_data(req->bio), n * 512); n 617 drivers/block/swim3.c int n; n 625 drivers/block/swim3.c for (n = 0; (in_le32(&dr->status) & ACTIVE) && n < 1000; n++) n 642 drivers/block/swim3.c int intr, err, n; n 727 drivers/block/swim3.c for (n = 0; n < 100; ++n) { n 739 drivers/block/swim3.c n = fs->scount - 1 - resid / 512; n 740 drivers/block/swim3.c if (n > 0) { n 741 drivers/block/swim3.c blk_update_request(req, 0, n << 9); n 742 drivers/block/swim3.c fs->req_sector += n; n 842 drivers/block/swim3.c int err, n; n 848 drivers/block/swim3.c for (n = 20; n > 0; --n) { n 912 drivers/block/swim3.c int n, err = 0; n 927 drivers/block/swim3.c for (n = 0; n < 2 * HZ; ++n) { n 928 drivers/block/swim3.c if (n >= HZ/30 && swim3_readbit(fs, SEEK_COMPLETE)) n 1017 drivers/block/swim3.c int ret, n; n 1031 drivers/block/swim3.c for (n = HZ; n > 0; --n) { n 196 drivers/block/virtio_blk.c unsigned short n = 0; n 212 drivers/block/virtio_blk.c range[n].flags = cpu_to_le32(flags); n 213 drivers/block/virtio_blk.c range[n].num_sectors = cpu_to_le32(num_sectors); n 214 drivers/block/virtio_blk.c range[n].sector = cpu_to_le64(sector); n 215 drivers/block/virtio_blk.c n++; n 728 drivers/block/virtio_blk.c struct attribute *a, int n) n 212 drivers/block/xen-blkback/blkback.c #define foreach_grant_safe(pos, n, rbtree, node) \ n 214 drivers/block/xen-blkback/blkback.c (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \ n 216 drivers/block/xen-blkback/blkback.c (pos) = container_of(n, typeof(*(pos)), node), \ n 217 drivers/block/xen-blkback/blkback.c (n) = (&(pos)->node != NULL) ? 
rb_next(&(pos)->node) : NULL) n 310 drivers/block/xen-blkback/blkback.c struct rb_node *n; n 318 drivers/block/xen-blkback/blkback.c foreach_grant_safe(persistent_gnt, n, root, node) { n 390 drivers/block/xen-blkback/blkback.c struct rb_node *n; n 427 drivers/block/xen-blkback/blkback.c foreach_grant_safe(persistent_gnt, n, root, node) { n 962 drivers/block/xen-blkback/blkback.c int indirect_grefs, rc, n, nseg, i; n 976 drivers/block/xen-blkback/blkback.c for (n = 0, i = 0; n < nseg; n++) { n 979 drivers/block/xen-blkback/blkback.c if ((n % SEGS_PER_INDIRECT_FRAME) == 0) { n 983 drivers/block/xen-blkback/blkback.c segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page); n 985 drivers/block/xen-blkback/blkback.c i = n % SEGS_PER_INDIRECT_FRAME; n 987 drivers/block/xen-blkback/blkback.c pending_req->segments[n]->gref = segments[i].gref; n 996 drivers/block/xen-blkback/blkback.c seg[n].nsec = last_sect - first_sect + 1; n 997 drivers/block/xen-blkback/blkback.c seg[n].offset = first_sect << 9; n 998 drivers/block/xen-blkback/blkback.c preq->nr_sects += seg[n].nsec; n 398 drivers/block/xen-blkback/common.h int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j; n 410 drivers/block/xen-blkback/common.h if (n > dst->u.rw.nr_segments) n 411 drivers/block/xen-blkback/common.h n = dst->u.rw.nr_segments; n 412 drivers/block/xen-blkback/common.h for (i = 0; i < n; i++) n 446 drivers/block/xen-blkback/common.h int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j; n 458 drivers/block/xen-blkback/common.h if (n > dst->u.rw.nr_segments) n 459 drivers/block/xen-blkback/common.h n = dst->u.rw.nr_segments; n 460 drivers/block/xen-blkback/common.h for (i = 0; i < n; i++) n 247 drivers/block/xen-blkback/xenbus.c struct pending_req *req, *n; n 286 drivers/block/xen-blkback/xenbus.c list_for_each_entry_safe(req, n, &ring->pending_free, free_list) { n 927 drivers/block/xen-blkback/xenbus.c struct pending_req *req, *n; n 1007 drivers/block/xen-blkback/xenbus.c list_for_each_entry_safe(req, n, &ring->pending_free, free_list) { n 293 drivers/block/xen-blkfront.c struct grant *gnt_list_entry, *n; n 318 drivers/block/xen-blkfront.c list_for_each_entry_safe(gnt_list_entry, n, n 588 drivers/block/xen-blkfront.c int n, ref; n 618 drivers/block/xen-blkfront.c n = grant_idx / GRANTS_PER_INDIRECT_FRAME; n 620 drivers/block/xen-blkfront.c shadow->indirect_grants[n] = gnt_list_entry; n 622 drivers/block/xen-blkfront.c ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref; n 1084 drivers/block/xen-blkfront.c static char *encode_disk_name(char *ptr, unsigned int n) n 1086 drivers/block/xen-blkfront.c if (n >= 26) n 1087 drivers/block/xen-blkfront.c ptr = encode_disk_name(ptr, n / 26 - 1); n 1088 drivers/block/xen-blkfront.c *ptr = 'a' + n % 26; n 1245 drivers/block/xen-blkfront.c struct grant *persistent_gnt, *n; n 1254 drivers/block/xen-blkfront.c struct page *indirect_page, *n; n 1257 drivers/block/xen-blkfront.c list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) { n 1265 drivers/block/xen-blkfront.c list_for_each_entry_safe(persistent_gnt, n, n 2019 drivers/block/xen-blkfront.c struct request *req, *n; n 2050 drivers/block/xen-blkfront.c list_for_each_entry_safe(req, n, &info->requests, queuelist) { n 2271 drivers/block/xen-blkfront.c struct page *indirect_page, *n; n 2272 drivers/block/xen-blkfront.c list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) { n 1471 drivers/block/zram/zram_drv.c size_t n = bio->bi_iter.bi_size; n 1484 drivers/block/zram/zram_drv.c if (n <= (PAGE_SIZE - 
offset)) n 1487 drivers/block/zram/zram_drv.c n -= (PAGE_SIZE - offset); n 1491 drivers/block/zram/zram_drv.c while (n >= PAGE_SIZE) { n 1497 drivers/block/zram/zram_drv.c n -= PAGE_SIZE; n 340 drivers/bluetooth/bluecard_cs.c int i, n, len; n 345 drivers/bluetooth/bluecard_cs.c n = 0; n 348 drivers/bluetooth/bluecard_cs.c while (n < len) { n 355 drivers/bluetooth/bluecard_cs.c buf[n] = inb(iobase + offset + i); n 357 drivers/bluetooth/bluecard_cs.c n++; n 847 drivers/bluetooth/bluecard_cs.c int i, n; n 855 drivers/bluetooth/bluecard_cs.c for (n = 0; n < 0x400; n += 0x40) { n 856 drivers/bluetooth/bluecard_cs.c link->resource[0]->start = n ^ 0x300; n 470 drivers/bus/moxtet.c int ret, n; n 476 drivers/bus/moxtet.c n = moxtet->count + 1; n 477 drivers/bus/moxtet.c bin2hex(hex, bin, n); n 479 drivers/bus/moxtet.c hex[2*n] = '\n'; n 481 drivers/bus/moxtet.c return simple_read_from_buffer(buf, len, ppos, hex, 2*n + 1); n 93 drivers/bus/mvebu-mbus.c #define DDR_BASE_CS_OFF(n) (0x0000 + ((n) << 3)) n 96 drivers/bus/mvebu-mbus.c #define DDR_SIZE_CS_OFF(n) (0x0004 + ((n) << 3)) n 102 drivers/bus/mvebu-mbus.c #define DOVE_DDR_BASE_CS_OFF(n) ((n) << 4) n 106 drivers/bus/omap_l3_smx.h #define L3_PM_REQ_INFO_PERMISSION(n) (0x048 + (0x020 * n)) n 107 drivers/bus/omap_l3_smx.h #define L3_PM_READ_PERMISSION(n) (0x050 + (0x020 * n)) n 108 drivers/bus/omap_l3_smx.h #define L3_PM_WRITE_PERMISSION(n) (0x058 + (0x020 * n)) n 109 drivers/bus/omap_l3_smx.h #define L3_PM_ADDR_MATCH(n) (0x060 + (0x020 * n)) n 193 drivers/bus/ti-sysc.c const char *n; n 196 drivers/bus/ti-sysc.c n = name; n 198 drivers/bus/ti-sysc.c n = optfck_name; n 201 drivers/bus/ti-sysc.c clock = of_clk_get_by_name(np, n); n 225 drivers/bus/ti-sysc.c cl->con_id = n; n 25 drivers/char/agp/isoch.c struct agp_3_5_dev *cur, *n = list_entry(new, struct agp_3_5_dev, list); n 30 drivers/char/agp/isoch.c if (cur->maxbw > n->maxbw) n 73 drivers/char/agp/isoch.c u32 n; n 125 drivers/char/agp/isoch.c target.n = (tnistat >> 8) & 0xff; n 146 drivers/char/agp/isoch.c master[cdev].n = (mnistat >> 8) & 0xff; n 180 drivers/char/agp/isoch.c target.n = (tnistat >> 8) & 0xff; n 185 drivers/char/agp/isoch.c master[cdev].n = master[cdev].maxbw / (master[cdev].y + 1); n 187 drivers/char/agp/isoch.c tot_n += master[cdev].n; n 192 drivers/char/agp/isoch.c if (tot_n > target.n) { n 203 drivers/char/agp/isoch.c rem = target.n - tot_n; n 218 drivers/char/agp/isoch.c master[cdev].rq = master[cdev].n; n 224 drivers/char/agp/isoch.c master[ndevs-1].n += rem; n 228 drivers/char/agp/isoch.c rq_isoch = (target.y > 0x1) ? 
target.n * (1 << (target.y - 1)) : target.n; n 266 drivers/char/agp/isoch.c mnicmd |= master[cdev].n << 8; n 155 drivers/char/bsr.c struct bsr_dev *cur, *n; n 157 drivers/char/bsr.c list_for_each_entry_safe(cur, n, &bsr_devs, bsr_list) { n 77 drivers/char/dsp56k.c #define tx_wait(n) \ n 80 drivers/char/dsp56k.c for(t = 0; t < n && !DSP56K_TRANSMIT; t++) \ n 87 drivers/char/dsp56k.c #define rx_wait(n) \ n 90 drivers/char/dsp56k.c for(t = 0; t < n && !DSP56K_RECEIVE; t++) \ n 192 drivers/char/dsp56k.c long n; n 197 drivers/char/dsp56k.c n = 0; n 202 drivers/char/dsp56k.c put_user(dsp56k_host_interface.data.b[3], buf+n++)); n 203 drivers/char/dsp56k.c return n; n 212 drivers/char/dsp56k.c put_user(dsp56k_host_interface.data.w[1], data+n++)); n 213 drivers/char/dsp56k.c return 2*n; n 219 drivers/char/dsp56k.c put_user(dsp56k_host_interface.data.b[1], buf+n++); n 220 drivers/char/dsp56k.c put_user(dsp56k_host_interface.data.b[2], buf+n++); n 221 drivers/char/dsp56k.c put_user(dsp56k_host_interface.data.b[3], buf+n++)); n 222 drivers/char/dsp56k.c return 3*n; n 231 drivers/char/dsp56k.c put_user(dsp56k_host_interface.data.l, data+n++)); n 232 drivers/char/dsp56k.c return 4*n; n 254 drivers/char/dsp56k.c long n; n 259 drivers/char/dsp56k.c n = 0; n 264 drivers/char/dsp56k.c get_user(dsp56k_host_interface.data.b[3], buf+n++)); n 265 drivers/char/dsp56k.c return n; n 274 drivers/char/dsp56k.c get_user(dsp56k_host_interface.data.w[1], data+n++)); n 275 drivers/char/dsp56k.c return 2*n; n 281 drivers/char/dsp56k.c get_user(dsp56k_host_interface.data.b[1], buf+n++); n 282 drivers/char/dsp56k.c get_user(dsp56k_host_interface.data.b[2], buf+n++); n 283 drivers/char/dsp56k.c get_user(dsp56k_host_interface.data.b[3], buf+n++)); n 284 drivers/char/dsp56k.c return 3*n; n 293 drivers/char/dsp56k.c get_user(dsp56k_host_interface.data.l, data+n++)); n 294 drivers/char/dsp56k.c return 4*n; n 120 drivers/char/dtlk.c static char dtlk_write_bytes(const char *buf, int n); n 621 drivers/char/dtlk.c static char dtlk_write_bytes(const char *buf, int n) n 626 drivers/char/dtlk.c while (n-- > 0) n 60 drivers/char/hw_random/s390-trng.c unsigned int n; n 84 drivers/char/hw_random/s390-trng.c n = nbytes > PAGE_SIZE ? 
PAGE_SIZE : nbytes; n 85 drivers/char/hw_random/s390-trng.c cpacf_trng(NULL, 0, p, n); n 86 drivers/char/hw_random/s390-trng.c atomic64_add(n, &trng_dev_counter); n 87 drivers/char/hw_random/s390-trng.c if (copy_to_user(ubuf, p, n)) { n 91 drivers/char/hw_random/s390-trng.c nbytes -= n; n 92 drivers/char/hw_random/s390-trng.c ubuf += n; n 93 drivers/char/hw_random/s390-trng.c ret += n; n 21 drivers/char/hw_random/tx4939-rng.c #define TX4939_RNG_ROR(n) (0x00000018 + (n) * 8) n 134 drivers/char/ipmi/bt-bmc.c static ssize_t bt_readn(struct bt_bmc *bt_bmc, u8 *buf, size_t n) n 138 drivers/char/ipmi/bt-bmc.c for (i = 0; i < n; i++) n 140 drivers/char/ipmi/bt-bmc.c return n; n 148 drivers/char/ipmi/bt-bmc.c static ssize_t bt_writen(struct bt_bmc *bt_bmc, u8 *buf, size_t n) n 152 drivers/char/ipmi/bt-bmc.c for (i = 0; i < n; i++) n 154 drivers/char/ipmi/bt-bmc.c return n; n 87 drivers/char/ipmi/ipmi_si_hotmod.c char *n; n 94 drivers/char/ipmi/ipmi_si_hotmod.c *val = simple_strtoul(option, &n, 0); n 95 drivers/char/ipmi/ipmi_si_hotmod.c if ((*n != '\0') || (*option == '\0')) { n 897 drivers/char/lp.c int n = simple_strtoul(str+7, NULL, 10); n 899 drivers/char/lp.c parport_nr[parport_ptr++] = n; n 992 drivers/char/lp.c int n; n 1002 drivers/char/lp.c for (n = 0; n < LP_NO; n++) { n 1003 drivers/char/lp.c if (port_num[n] == port->number) { n 1004 drivers/char/lp.c port_num[n] = -1; n 1006 drivers/char/lp.c device_destroy(lp_class, MKDEV(LP_MAJOR, n)); n 1007 drivers/char/lp.c parport_unregister_device(lp_table[n].dev); n 1087 drivers/char/lp.c int n; n 1088 drivers/char/lp.c for (n = 0; n < LP_NO && parport[n]; n++) { n 1089 drivers/char/lp.c if (!strncmp(parport[n], "none", 4)) n 1090 drivers/char/lp.c parport_nr[n] = LP_PARPORT_NONE; n 1093 drivers/char/lp.c unsigned long r = simple_strtoul(parport[n], &ep, 0); n 1094 drivers/char/lp.c if (ep != parport[n]) n 1095 drivers/char/lp.c parport_nr[n] = r; n 1097 drivers/char/lp.c printk(KERN_ERR "lp: bad port specifier `%s'\n", parport[n]); n 601 drivers/char/mem.c unsigned long n; n 607 drivers/char/mem.c n = copy_from_user(kbuf, buf, sz); n 608 drivers/char/mem.c if (n) { n 710 drivers/char/mem.c size_t chunk = iov_iter_count(iter), n; n 714 drivers/char/mem.c n = iov_iter_zero(chunk, iter); n 715 drivers/char/mem.c if (!n && iov_iter_count(iter)) n 717 drivers/char/mem.c written += n; n 52 drivers/char/pcmcia/cm4000_cs.c #define DEBUGP(n, rdr, x, args...) do { \ n 46 drivers/char/pcmcia/cm4040_cs.c #define DEBUGP(n, rdr, x, args...) 
do { \ n 27 drivers/char/pcmcia/scr24x_cs.c #define SCR24X_DATA(n) (1 + n) n 223 drivers/char/ppdev.c ssize_t n = min_t(unsigned long, count - bytes_written, PP_BUFFER_SIZE); n 225 drivers/char/ppdev.c if (copy_from_user(kbuffer, buf + bytes_written, n)) { n 234 drivers/char/ppdev.c kbuffer, n, PARPORT_EPP_FAST); n 237 drivers/char/ppdev.c kbuffer, n, PARPORT_EPP_FAST); n 240 drivers/char/ppdev.c wrote = parport_write(pp->pdev->port, kbuffer, n); n 100 drivers/char/ps3flash.c size_t remaining, n; n 123 drivers/char/ps3flash.c n = min_t(u64, remaining, dev->bounce_size - offset); n 134 drivers/char/ps3flash.c __func__, __LINE__, n, src, userbuf, kernelbuf); n 136 drivers/char/ps3flash.c if (copy_to_user(userbuf, src, n)) { n 140 drivers/char/ps3flash.c userbuf += n; n 143 drivers/char/ps3flash.c memcpy(kernelbuf, src, n); n 144 drivers/char/ps3flash.c kernelbuf += n; n 149 drivers/char/ps3flash.c *pos += n; n 150 drivers/char/ps3flash.c remaining -= n; n 169 drivers/char/ps3flash.c size_t remaining, n; n 192 drivers/char/ps3flash.c n = min_t(u64, remaining, dev->bounce_size - offset); n 197 drivers/char/ps3flash.c if (n != dev->bounce_size) n 206 drivers/char/ps3flash.c __func__, __LINE__, n, userbuf, kernelbuf, dst); n 208 drivers/char/ps3flash.c if (copy_from_user(dst, userbuf, n)) { n 212 drivers/char/ps3flash.c userbuf += n; n 215 drivers/char/ps3flash.c memcpy(dst, kernelbuf, n); n 216 drivers/char/ps3flash.c kernelbuf += n; n 224 drivers/char/ps3flash.c *pos += n; n 225 drivers/char/ps3flash.c remaining -= n; n 1989 drivers/char/random.c ssize_t n; n 1996 drivers/char/random.c n = extract_entropy_user(&blocking_pool, buf, nbytes); n 1997 drivers/char/random.c if (n < 0) n 1998 drivers/char/random.c return n; n 1999 drivers/char/random.c trace_random_read(n*8, (nbytes-n)*8, n 2002 drivers/char/random.c if (n > 0) n 2003 drivers/char/random.c return n; n 492 drivers/char/sonypi.c unsigned int n = iterations; \ n 493 drivers/char/sonypi.c while (--n && (command)) \ n 495 drivers/char/sonypi.c if (!n && (verbose || !quiet)) \ n 677 drivers/char/sonypi.c int n = 100; n 679 drivers/char/sonypi.c while (n--) { n 858 drivers/char/virtio_console.c unsigned int n; n 870 drivers/char/virtio_console.c if (sgl->n == sgl->size) n 880 drivers/char/virtio_console.c sg_set_page(&(sgl->sg[sgl->n]), buf->page, len, buf->offset); n 899 drivers/char/virtio_console.c sg_set_page(&(sgl->sg[sgl->n]), page, len, offset); n 901 drivers/char/virtio_console.c sgl->n++; n 952 drivers/char/virtio_console.c sgl.n = 0; n 960 drivers/char/virtio_console.c ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true); n 334 drivers/clk/analogbits/wrpll-cln28hpc.c u64 n; n 342 drivers/clk/analogbits/wrpll-cln28hpc.c n = parent_rate * fbdiv * (c->divf + 1); n 343 drivers/clk/analogbits/wrpll-cln28hpc.c n = div_u64(n, c->divr + 1); n 344 drivers/clk/analogbits/wrpll-cln28hpc.c n >>= c->divq; n 346 drivers/clk/analogbits/wrpll-cln28hpc.c return n; n 11 drivers/clk/at91/at91sam9260.c char *n; n 17 drivers/clk/at91/at91sam9260.c char *n; n 74 drivers/clk/at91/at91sam9260.c { .n = "uhpck", .p = "usbck", .id = 6 }, n 75 drivers/clk/at91/at91sam9260.c { .n = "udpck", .p = "usbck", .id = 7 }, n 76 drivers/clk/at91/at91sam9260.c { .n = "pck0", .p = "prog0", .id = 8 }, n 77 drivers/clk/at91/at91sam9260.c { .n = "pck1", .p = "prog1", .id = 9 }, n 81 drivers/clk/at91/at91sam9260.c { .n = "pioA_clk", .id = 2 }, n 82 drivers/clk/at91/at91sam9260.c { .n = "pioB_clk", .id = 3 }, n 83 drivers/clk/at91/at91sam9260.c { .n = "pioC_clk", 
.id = 4 }, n 84 drivers/clk/at91/at91sam9260.c { .n = "adc_clk", .id = 5 }, n 85 drivers/clk/at91/at91sam9260.c { .n = "usart0_clk", .id = 6 }, n 86 drivers/clk/at91/at91sam9260.c { .n = "usart1_clk", .id = 7 }, n 87 drivers/clk/at91/at91sam9260.c { .n = "usart2_clk", .id = 8 }, n 88 drivers/clk/at91/at91sam9260.c { .n = "mci0_clk", .id = 9 }, n 89 drivers/clk/at91/at91sam9260.c { .n = "udc_clk", .id = 10 }, n 90 drivers/clk/at91/at91sam9260.c { .n = "twi0_clk", .id = 11 }, n 91 drivers/clk/at91/at91sam9260.c { .n = "spi0_clk", .id = 12 }, n 92 drivers/clk/at91/at91sam9260.c { .n = "spi1_clk", .id = 13 }, n 93 drivers/clk/at91/at91sam9260.c { .n = "ssc0_clk", .id = 14 }, n 94 drivers/clk/at91/at91sam9260.c { .n = "tc0_clk", .id = 17 }, n 95 drivers/clk/at91/at91sam9260.c { .n = "tc1_clk", .id = 18 }, n 96 drivers/clk/at91/at91sam9260.c { .n = "tc2_clk", .id = 19 }, n 97 drivers/clk/at91/at91sam9260.c { .n = "ohci_clk", .id = 20 }, n 98 drivers/clk/at91/at91sam9260.c { .n = "macb0_clk", .id = 21 }, n 99 drivers/clk/at91/at91sam9260.c { .n = "isi_clk", .id = 22 }, n 100 drivers/clk/at91/at91sam9260.c { .n = "usart3_clk", .id = 23 }, n 101 drivers/clk/at91/at91sam9260.c { .n = "uart0_clk", .id = 24 }, n 102 drivers/clk/at91/at91sam9260.c { .n = "uart1_clk", .id = 25 }, n 103 drivers/clk/at91/at91sam9260.c { .n = "tc3_clk", .id = 26 }, n 104 drivers/clk/at91/at91sam9260.c { .n = "tc4_clk", .id = 27 }, n 105 drivers/clk/at91/at91sam9260.c { .n = "tc5_clk", .id = 28 }, n 215 drivers/clk/at91/at91sam9260.c { .n = "uhpck", .p = "usbck", .id = 6 }, n 216 drivers/clk/at91/at91sam9260.c { .n = "udpck", .p = "usbck", .id = 7 }, n 217 drivers/clk/at91/at91sam9260.c { .n = "pck0", .p = "prog0", .id = 8 }, n 218 drivers/clk/at91/at91sam9260.c { .n = "pck1", .p = "prog1", .id = 9 }, n 219 drivers/clk/at91/at91sam9260.c { .n = "pck2", .p = "prog2", .id = 10 }, n 220 drivers/clk/at91/at91sam9260.c { .n = "pck3", .p = "prog3", .id = 11 }, n 221 drivers/clk/at91/at91sam9260.c { .n = "hclk0", .p = "masterck", .id = 16 }, n 222 drivers/clk/at91/at91sam9260.c { .n = "hclk1", .p = "masterck", .id = 17 }, n 226 drivers/clk/at91/at91sam9260.c { .n = "pioA_clk", .id = 2, }, n 227 drivers/clk/at91/at91sam9260.c { .n = "pioB_clk", .id = 3, }, n 228 drivers/clk/at91/at91sam9260.c { .n = "pioC_clk", .id = 4, }, n 229 drivers/clk/at91/at91sam9260.c { .n = "usart0_clk", .id = 6, }, n 230 drivers/clk/at91/at91sam9260.c { .n = "usart1_clk", .id = 7, }, n 231 drivers/clk/at91/at91sam9260.c { .n = "usart2_clk", .id = 8, }, n 232 drivers/clk/at91/at91sam9260.c { .n = "mci0_clk", .id = 9, }, n 233 drivers/clk/at91/at91sam9260.c { .n = "udc_clk", .id = 10, }, n 234 drivers/clk/at91/at91sam9260.c { .n = "twi0_clk", .id = 11, }, n 235 drivers/clk/at91/at91sam9260.c { .n = "spi0_clk", .id = 12, }, n 236 drivers/clk/at91/at91sam9260.c { .n = "spi1_clk", .id = 13, }, n 237 drivers/clk/at91/at91sam9260.c { .n = "ssc0_clk", .id = 14, }, n 238 drivers/clk/at91/at91sam9260.c { .n = "ssc1_clk", .id = 15, }, n 239 drivers/clk/at91/at91sam9260.c { .n = "ssc2_clk", .id = 16, }, n 240 drivers/clk/at91/at91sam9260.c { .n = "tc0_clk", .id = 17, }, n 241 drivers/clk/at91/at91sam9260.c { .n = "tc1_clk", .id = 18, }, n 242 drivers/clk/at91/at91sam9260.c { .n = "tc2_clk", .id = 19, }, n 243 drivers/clk/at91/at91sam9260.c { .n = "ohci_clk", .id = 20, }, n 244 drivers/clk/at91/at91sam9260.c { .n = "lcd_clk", .id = 21, }, n 279 drivers/clk/at91/at91sam9260.c { .n = "uhpck", .p = "usbck", .id = 6 }, n 280 drivers/clk/at91/at91sam9260.c { .n = "udpck", 
.p = "usbck", .id = 7 }, n 281 drivers/clk/at91/at91sam9260.c { .n = "pck0", .p = "prog0", .id = 8 }, n 282 drivers/clk/at91/at91sam9260.c { .n = "pck1", .p = "prog1", .id = 9 }, n 283 drivers/clk/at91/at91sam9260.c { .n = "pck2", .p = "prog2", .id = 10 }, n 284 drivers/clk/at91/at91sam9260.c { .n = "pck3", .p = "prog3", .id = 11 }, n 288 drivers/clk/at91/at91sam9260.c { .n = "pioA_clk", .id = 2, }, n 289 drivers/clk/at91/at91sam9260.c { .n = "pioB_clk", .id = 3, }, n 290 drivers/clk/at91/at91sam9260.c { .n = "pioCDE_clk", .id = 4, }, n 291 drivers/clk/at91/at91sam9260.c { .n = "usart0_clk", .id = 7, }, n 292 drivers/clk/at91/at91sam9260.c { .n = "usart1_clk", .id = 8, }, n 293 drivers/clk/at91/at91sam9260.c { .n = "usart2_clk", .id = 9, }, n 294 drivers/clk/at91/at91sam9260.c { .n = "mci0_clk", .id = 10, }, n 295 drivers/clk/at91/at91sam9260.c { .n = "mci1_clk", .id = 11, }, n 296 drivers/clk/at91/at91sam9260.c { .n = "can_clk", .id = 12, }, n 297 drivers/clk/at91/at91sam9260.c { .n = "twi0_clk", .id = 13, }, n 298 drivers/clk/at91/at91sam9260.c { .n = "spi0_clk", .id = 14, }, n 299 drivers/clk/at91/at91sam9260.c { .n = "spi1_clk", .id = 15, }, n 300 drivers/clk/at91/at91sam9260.c { .n = "ssc0_clk", .id = 16, }, n 301 drivers/clk/at91/at91sam9260.c { .n = "ssc1_clk", .id = 17, }, n 302 drivers/clk/at91/at91sam9260.c { .n = "ac97_clk", .id = 18, }, n 303 drivers/clk/at91/at91sam9260.c { .n = "tcb_clk", .id = 19, }, n 304 drivers/clk/at91/at91sam9260.c { .n = "pwm_clk", .id = 20, }, n 305 drivers/clk/at91/at91sam9260.c { .n = "macb0_clk", .id = 21, }, n 306 drivers/clk/at91/at91sam9260.c { .n = "g2de_clk", .id = 23, }, n 307 drivers/clk/at91/at91sam9260.c { .n = "udc_clk", .id = 24, }, n 308 drivers/clk/at91/at91sam9260.c { .n = "isi_clk", .id = 25, }, n 309 drivers/clk/at91/at91sam9260.c { .n = "lcd_clk", .id = 26, }, n 310 drivers/clk/at91/at91sam9260.c { .n = "dma_clk", .id = 27, }, n 311 drivers/clk/at91/at91sam9260.c { .n = "ohci_clk", .id = 29, }, n 440 drivers/clk/at91/at91sam9260.c hw = at91_clk_register_system(regmap, data->sck[i].n, n 451 drivers/clk/at91/at91sam9260.c data->pck[i].n, n 30 drivers/clk/at91/at91sam9rl.c char *n; n 34 drivers/clk/at91/at91sam9rl.c { .n = "pck0", .p = "prog0", .id = 8 }, n 35 drivers/clk/at91/at91sam9rl.c { .n = "pck1", .p = "prog1", .id = 9 }, n 39 drivers/clk/at91/at91sam9rl.c char *n; n 42 drivers/clk/at91/at91sam9rl.c { .n = "pioA_clk", .id = 2, }, n 43 drivers/clk/at91/at91sam9rl.c { .n = "pioB_clk", .id = 3, }, n 44 drivers/clk/at91/at91sam9rl.c { .n = "pioC_clk", .id = 4, }, n 45 drivers/clk/at91/at91sam9rl.c { .n = "pioD_clk", .id = 5, }, n 46 drivers/clk/at91/at91sam9rl.c { .n = "usart0_clk", .id = 6, }, n 47 drivers/clk/at91/at91sam9rl.c { .n = "usart1_clk", .id = 7, }, n 48 drivers/clk/at91/at91sam9rl.c { .n = "usart2_clk", .id = 8, }, n 49 drivers/clk/at91/at91sam9rl.c { .n = "usart3_clk", .id = 9, }, n 50 drivers/clk/at91/at91sam9rl.c { .n = "mci0_clk", .id = 10, }, n 51 drivers/clk/at91/at91sam9rl.c { .n = "twi0_clk", .id = 11, }, n 52 drivers/clk/at91/at91sam9rl.c { .n = "twi1_clk", .id = 12, }, n 53 drivers/clk/at91/at91sam9rl.c { .n = "spi0_clk", .id = 13, }, n 54 drivers/clk/at91/at91sam9rl.c { .n = "ssc0_clk", .id = 14, }, n 55 drivers/clk/at91/at91sam9rl.c { .n = "ssc1_clk", .id = 15, }, n 56 drivers/clk/at91/at91sam9rl.c { .n = "tc0_clk", .id = 16, }, n 57 drivers/clk/at91/at91sam9rl.c { .n = "tc1_clk", .id = 17, }, n 58 drivers/clk/at91/at91sam9rl.c { .n = "tc2_clk", .id = 18, }, n 59 drivers/clk/at91/at91sam9rl.c { .n = 
"pwm_clk", .id = 19, }, n 60 drivers/clk/at91/at91sam9rl.c { .n = "adc_clk", .id = 20, }, n 61 drivers/clk/at91/at91sam9rl.c { .n = "dma0_clk", .id = 21, }, n 62 drivers/clk/at91/at91sam9rl.c { .n = "udphs_clk", .id = 22, }, n 63 drivers/clk/at91/at91sam9rl.c { .n = "lcd_clk", .id = 23, }, n 144 drivers/clk/at91/at91sam9rl.c hw = at91_clk_register_system(regmap, at91sam9rl_systemck[i].n, n 155 drivers/clk/at91/at91sam9rl.c at91sam9rl_periphck[i].n, n 40 drivers/clk/at91/at91sam9x5.c char *n; n 44 drivers/clk/at91/at91sam9x5.c { .n = "ddrck", .p = "masterck", .id = 2 }, n 45 drivers/clk/at91/at91sam9x5.c { .n = "smdck", .p = "smdclk", .id = 4 }, n 46 drivers/clk/at91/at91sam9x5.c { .n = "uhpck", .p = "usbck", .id = 6 }, n 47 drivers/clk/at91/at91sam9x5.c { .n = "udpck", .p = "usbck", .id = 7 }, n 48 drivers/clk/at91/at91sam9x5.c { .n = "pck0", .p = "prog0", .id = 8 }, n 49 drivers/clk/at91/at91sam9x5.c { .n = "pck1", .p = "prog1", .id = 9 }, n 60 drivers/clk/at91/at91sam9x5.c char *n; n 65 drivers/clk/at91/at91sam9x5.c { .n = "pioAB_clk", .id = 2, }, n 66 drivers/clk/at91/at91sam9x5.c { .n = "pioCD_clk", .id = 3, }, n 67 drivers/clk/at91/at91sam9x5.c { .n = "smd_clk", .id = 4, }, n 68 drivers/clk/at91/at91sam9x5.c { .n = "usart0_clk", .id = 5, }, n 69 drivers/clk/at91/at91sam9x5.c { .n = "usart1_clk", .id = 6, }, n 70 drivers/clk/at91/at91sam9x5.c { .n = "usart2_clk", .id = 7, }, n 71 drivers/clk/at91/at91sam9x5.c { .n = "twi0_clk", .id = 9, }, n 72 drivers/clk/at91/at91sam9x5.c { .n = "twi1_clk", .id = 10, }, n 73 drivers/clk/at91/at91sam9x5.c { .n = "twi2_clk", .id = 11, }, n 74 drivers/clk/at91/at91sam9x5.c { .n = "mci0_clk", .id = 12, }, n 75 drivers/clk/at91/at91sam9x5.c { .n = "spi0_clk", .id = 13, }, n 76 drivers/clk/at91/at91sam9x5.c { .n = "spi1_clk", .id = 14, }, n 77 drivers/clk/at91/at91sam9x5.c { .n = "uart0_clk", .id = 15, }, n 78 drivers/clk/at91/at91sam9x5.c { .n = "uart1_clk", .id = 16, }, n 79 drivers/clk/at91/at91sam9x5.c { .n = "tcb0_clk", .id = 17, }, n 80 drivers/clk/at91/at91sam9x5.c { .n = "pwm_clk", .id = 18, }, n 81 drivers/clk/at91/at91sam9x5.c { .n = "adc_clk", .id = 19, }, n 82 drivers/clk/at91/at91sam9x5.c { .n = "dma0_clk", .id = 20, }, n 83 drivers/clk/at91/at91sam9x5.c { .n = "dma1_clk", .id = 21, }, n 84 drivers/clk/at91/at91sam9x5.c { .n = "uhphs_clk", .id = 22, }, n 85 drivers/clk/at91/at91sam9x5.c { .n = "udphs_clk", .id = 23, }, n 86 drivers/clk/at91/at91sam9x5.c { .n = "mci1_clk", .id = 26, }, n 87 drivers/clk/at91/at91sam9x5.c { .n = "ssc0_clk", .id = 28, }, n 91 drivers/clk/at91/at91sam9x5.c { .n = "lcdc_clk", .id = 25, }, n 96 drivers/clk/at91/at91sam9x5.c { .n = "usart3_clk", .id = 8, }, n 97 drivers/clk/at91/at91sam9x5.c { .n = "macb0_clk", .id = 24, }, n 98 drivers/clk/at91/at91sam9x5.c { .n = "isi_clk", .id = 25, }, n 103 drivers/clk/at91/at91sam9x5.c { .n = "macb0_clk", .id = 24, }, n 104 drivers/clk/at91/at91sam9x5.c { .n = "lcdc_clk", .id = 25, }, n 109 drivers/clk/at91/at91sam9x5.c { .n = "usart3_clk", .id = 8, }, n 110 drivers/clk/at91/at91sam9x5.c { .n = "macb0_clk", .id = 24, }, n 111 drivers/clk/at91/at91sam9x5.c { .n = "macb1_clk", .id = 27, }, n 112 drivers/clk/at91/at91sam9x5.c { .n = "can0_clk", .id = 29, }, n 113 drivers/clk/at91/at91sam9x5.c { .n = "can1_clk", .id = 30, }, n 118 drivers/clk/at91/at91sam9x5.c { .n = "macb0_clk", .id = 24, }, n 119 drivers/clk/at91/at91sam9x5.c { .n = "lcdc_clk", .id = 25, }, n 120 drivers/clk/at91/at91sam9x5.c { .n = "can0_clk", .id = 29, }, n 121 drivers/clk/at91/at91sam9x5.c { .n = "can1_clk", .id 
= 30, }, n 233 drivers/clk/at91/at91sam9x5.c hw = at91_clk_register_system(regmap, at91sam9x5_systemck[i].n, n 253 drivers/clk/at91/at91sam9x5.c at91sam9x5_periphck[i].n, n 266 drivers/clk/at91/at91sam9x5.c extra_pcks[i].n, n 61 drivers/clk/at91/sam9x60.c char *n; n 65 drivers/clk/at91/sam9x60.c { .n = "ddrck", .p = "masterck", .id = 2 }, n 66 drivers/clk/at91/sam9x60.c { .n = "uhpck", .p = "usbck", .id = 6 }, n 67 drivers/clk/at91/sam9x60.c { .n = "pck0", .p = "prog0", .id = 8 }, n 68 drivers/clk/at91/sam9x60.c { .n = "pck1", .p = "prog1", .id = 9 }, n 69 drivers/clk/at91/sam9x60.c { .n = "qspick", .p = "masterck", .id = 19 }, n 73 drivers/clk/at91/sam9x60.c char *n; n 76 drivers/clk/at91/sam9x60.c { .n = "pioA_clk", .id = 2, }, n 77 drivers/clk/at91/sam9x60.c { .n = "pioB_clk", .id = 3, }, n 78 drivers/clk/at91/sam9x60.c { .n = "pioC_clk", .id = 4, }, n 79 drivers/clk/at91/sam9x60.c { .n = "flex0_clk", .id = 5, }, n 80 drivers/clk/at91/sam9x60.c { .n = "flex1_clk", .id = 6, }, n 81 drivers/clk/at91/sam9x60.c { .n = "flex2_clk", .id = 7, }, n 82 drivers/clk/at91/sam9x60.c { .n = "flex3_clk", .id = 8, }, n 83 drivers/clk/at91/sam9x60.c { .n = "flex6_clk", .id = 9, }, n 84 drivers/clk/at91/sam9x60.c { .n = "flex7_clk", .id = 10, }, n 85 drivers/clk/at91/sam9x60.c { .n = "flex8_clk", .id = 11, }, n 86 drivers/clk/at91/sam9x60.c { .n = "sdmmc0_clk", .id = 12, }, n 87 drivers/clk/at91/sam9x60.c { .n = "flex4_clk", .id = 13, }, n 88 drivers/clk/at91/sam9x60.c { .n = "flex5_clk", .id = 14, }, n 89 drivers/clk/at91/sam9x60.c { .n = "flex9_clk", .id = 15, }, n 90 drivers/clk/at91/sam9x60.c { .n = "flex10_clk", .id = 16, }, n 91 drivers/clk/at91/sam9x60.c { .n = "tcb0_clk", .id = 17, }, n 92 drivers/clk/at91/sam9x60.c { .n = "pwm_clk", .id = 18, }, n 93 drivers/clk/at91/sam9x60.c { .n = "adc_clk", .id = 19, }, n 94 drivers/clk/at91/sam9x60.c { .n = "dma0_clk", .id = 20, }, n 95 drivers/clk/at91/sam9x60.c { .n = "matrix_clk", .id = 21, }, n 96 drivers/clk/at91/sam9x60.c { .n = "uhphs_clk", .id = 22, }, n 97 drivers/clk/at91/sam9x60.c { .n = "udphs_clk", .id = 23, }, n 98 drivers/clk/at91/sam9x60.c { .n = "macb0_clk", .id = 24, }, n 99 drivers/clk/at91/sam9x60.c { .n = "lcd_clk", .id = 25, }, n 100 drivers/clk/at91/sam9x60.c { .n = "sdmmc1_clk", .id = 26, }, n 101 drivers/clk/at91/sam9x60.c { .n = "macb1_clk", .id = 27, }, n 102 drivers/clk/at91/sam9x60.c { .n = "ssc_clk", .id = 28, }, n 103 drivers/clk/at91/sam9x60.c { .n = "can0_clk", .id = 29, }, n 104 drivers/clk/at91/sam9x60.c { .n = "can1_clk", .id = 30, }, n 105 drivers/clk/at91/sam9x60.c { .n = "flex11_clk", .id = 32, }, n 106 drivers/clk/at91/sam9x60.c { .n = "flex12_clk", .id = 33, }, n 107 drivers/clk/at91/sam9x60.c { .n = "i2s_clk", .id = 34, }, n 108 drivers/clk/at91/sam9x60.c { .n = "qspi_clk", .id = 35, }, n 109 drivers/clk/at91/sam9x60.c { .n = "gfx2d_clk", .id = 36, }, n 110 drivers/clk/at91/sam9x60.c { .n = "pit64b_clk", .id = 37, }, n 111 drivers/clk/at91/sam9x60.c { .n = "trng_clk", .id = 38, }, n 112 drivers/clk/at91/sam9x60.c { .n = "aes_clk", .id = 39, }, n 113 drivers/clk/at91/sam9x60.c { .n = "tdes_clk", .id = 40, }, n 114 drivers/clk/at91/sam9x60.c { .n = "sha_clk", .id = 41, }, n 115 drivers/clk/at91/sam9x60.c { .n = "classd_clk", .id = 42, }, n 116 drivers/clk/at91/sam9x60.c { .n = "isi_clk", .id = 43, }, n 117 drivers/clk/at91/sam9x60.c { .n = "pioD_clk", .id = 44, }, n 118 drivers/clk/at91/sam9x60.c { .n = "tcb1_clk", .id = 45, }, n 119 drivers/clk/at91/sam9x60.c { .n = "dbgu_clk", .id = 47, }, n 120 
drivers/clk/at91/sam9x60.c { .n = "mpddr_clk", .id = 49, }, n 124 drivers/clk/at91/sam9x60.c char *n; n 129 drivers/clk/at91/sam9x60.c { .n = "flex0_gclk", .id = 5, }, n 130 drivers/clk/at91/sam9x60.c { .n = "flex1_gclk", .id = 6, }, n 131 drivers/clk/at91/sam9x60.c { .n = "flex2_gclk", .id = 7, }, n 132 drivers/clk/at91/sam9x60.c { .n = "flex3_gclk", .id = 8, }, n 133 drivers/clk/at91/sam9x60.c { .n = "flex6_gclk", .id = 9, }, n 134 drivers/clk/at91/sam9x60.c { .n = "flex7_gclk", .id = 10, }, n 135 drivers/clk/at91/sam9x60.c { .n = "flex8_gclk", .id = 11, }, n 136 drivers/clk/at91/sam9x60.c { .n = "sdmmc0_gclk", .id = 12, .r = { .min = 0, .max = 105000000 }, }, n 137 drivers/clk/at91/sam9x60.c { .n = "flex4_gclk", .id = 13, }, n 138 drivers/clk/at91/sam9x60.c { .n = "flex5_gclk", .id = 14, }, n 139 drivers/clk/at91/sam9x60.c { .n = "flex9_gclk", .id = 15, }, n 140 drivers/clk/at91/sam9x60.c { .n = "flex10_gclk", .id = 16, }, n 141 drivers/clk/at91/sam9x60.c { .n = "tcb0_gclk", .id = 17, }, n 142 drivers/clk/at91/sam9x60.c { .n = "adc_gclk", .id = 19, }, n 143 drivers/clk/at91/sam9x60.c { .n = "lcd_gclk", .id = 25, .r = { .min = 0, .max = 140000000 }, }, n 144 drivers/clk/at91/sam9x60.c { .n = "sdmmc1_gclk", .id = 26, .r = { .min = 0, .max = 105000000 }, }, n 145 drivers/clk/at91/sam9x60.c { .n = "flex11_gclk", .id = 32, }, n 146 drivers/clk/at91/sam9x60.c { .n = "flex12_gclk", .id = 33, }, n 147 drivers/clk/at91/sam9x60.c { .n = "i2s_gclk", .id = 34, .r = { .min = 0, .max = 105000000 }, n 149 drivers/clk/at91/sam9x60.c { .n = "pit64b_gclk", .id = 37, }, n 150 drivers/clk/at91/sam9x60.c { .n = "classd_gclk", .id = 42, .r = { .min = 0, .max = 100000000 }, n 152 drivers/clk/at91/sam9x60.c { .n = "tcb1_gclk", .id = 45, }, n 153 drivers/clk/at91/sam9x60.c { .n = "dbgu_gclk", .id = 47, }, n 264 drivers/clk/at91/sam9x60.c hw = at91_clk_register_system(regmap, sam9x60_systemck[i].n, n 276 drivers/clk/at91/sam9x60.c sam9x60_periphck[i].n, n 289 drivers/clk/at91/sam9x60.c sam9x60_gck[i].n, n 39 drivers/clk/at91/sama5d2.c char *n; n 43 drivers/clk/at91/sama5d2.c { .n = "ddrck", .p = "masterck", .id = 2 }, n 44 drivers/clk/at91/sama5d2.c { .n = "lcdck", .p = "masterck", .id = 3 }, n 45 drivers/clk/at91/sama5d2.c { .n = "uhpck", .p = "usbck", .id = 6 }, n 46 drivers/clk/at91/sama5d2.c { .n = "udpck", .p = "usbck", .id = 7 }, n 47 drivers/clk/at91/sama5d2.c { .n = "pck0", .p = "prog0", .id = 8 }, n 48 drivers/clk/at91/sama5d2.c { .n = "pck1", .p = "prog1", .id = 9 }, n 49 drivers/clk/at91/sama5d2.c { .n = "pck2", .p = "prog2", .id = 10 }, n 50 drivers/clk/at91/sama5d2.c { .n = "iscck", .p = "masterck", .id = 18 }, n 54 drivers/clk/at91/sama5d2.c char *n; n 58 drivers/clk/at91/sama5d2.c { .n = "macb0_clk", .id = 5, .r = { .min = 0, .max = 83000000 }, }, n 59 drivers/clk/at91/sama5d2.c { .n = "tdes_clk", .id = 11, .r = { .min = 0, .max = 83000000 }, }, n 60 drivers/clk/at91/sama5d2.c { .n = "matrix1_clk", .id = 14, }, n 61 drivers/clk/at91/sama5d2.c { .n = "hsmc_clk", .id = 17, }, n 62 drivers/clk/at91/sama5d2.c { .n = "pioA_clk", .id = 18, .r = { .min = 0, .max = 83000000 }, }, n 63 drivers/clk/at91/sama5d2.c { .n = "flx0_clk", .id = 19, .r = { .min = 0, .max = 83000000 }, }, n 64 drivers/clk/at91/sama5d2.c { .n = "flx1_clk", .id = 20, .r = { .min = 0, .max = 83000000 }, }, n 65 drivers/clk/at91/sama5d2.c { .n = "flx2_clk", .id = 21, .r = { .min = 0, .max = 83000000 }, }, n 66 drivers/clk/at91/sama5d2.c { .n = "flx3_clk", .id = 22, .r = { .min = 0, .max = 83000000 }, }, n 67 drivers/clk/at91/sama5d2.c { 
.n = "flx4_clk", .id = 23, .r = { .min = 0, .max = 83000000 }, }, n 68 drivers/clk/at91/sama5d2.c { .n = "uart0_clk", .id = 24, .r = { .min = 0, .max = 83000000 }, }, n 69 drivers/clk/at91/sama5d2.c { .n = "uart1_clk", .id = 25, .r = { .min = 0, .max = 83000000 }, }, n 70 drivers/clk/at91/sama5d2.c { .n = "uart2_clk", .id = 26, .r = { .min = 0, .max = 83000000 }, }, n 71 drivers/clk/at91/sama5d2.c { .n = "uart3_clk", .id = 27, .r = { .min = 0, .max = 83000000 }, }, n 72 drivers/clk/at91/sama5d2.c { .n = "uart4_clk", .id = 28, .r = { .min = 0, .max = 83000000 }, }, n 73 drivers/clk/at91/sama5d2.c { .n = "twi0_clk", .id = 29, .r = { .min = 0, .max = 83000000 }, }, n 74 drivers/clk/at91/sama5d2.c { .n = "twi1_clk", .id = 30, .r = { .min = 0, .max = 83000000 }, }, n 75 drivers/clk/at91/sama5d2.c { .n = "spi0_clk", .id = 33, .r = { .min = 0, .max = 83000000 }, }, n 76 drivers/clk/at91/sama5d2.c { .n = "spi1_clk", .id = 34, .r = { .min = 0, .max = 83000000 }, }, n 77 drivers/clk/at91/sama5d2.c { .n = "tcb0_clk", .id = 35, .r = { .min = 0, .max = 83000000 }, }, n 78 drivers/clk/at91/sama5d2.c { .n = "tcb1_clk", .id = 36, .r = { .min = 0, .max = 83000000 }, }, n 79 drivers/clk/at91/sama5d2.c { .n = "pwm_clk", .id = 38, .r = { .min = 0, .max = 83000000 }, }, n 80 drivers/clk/at91/sama5d2.c { .n = "adc_clk", .id = 40, .r = { .min = 0, .max = 83000000 }, }, n 81 drivers/clk/at91/sama5d2.c { .n = "uhphs_clk", .id = 41, .r = { .min = 0, .max = 83000000 }, }, n 82 drivers/clk/at91/sama5d2.c { .n = "udphs_clk", .id = 42, .r = { .min = 0, .max = 83000000 }, }, n 83 drivers/clk/at91/sama5d2.c { .n = "ssc0_clk", .id = 43, .r = { .min = 0, .max = 83000000 }, }, n 84 drivers/clk/at91/sama5d2.c { .n = "ssc1_clk", .id = 44, .r = { .min = 0, .max = 83000000 }, }, n 85 drivers/clk/at91/sama5d2.c { .n = "trng_clk", .id = 47, .r = { .min = 0, .max = 83000000 }, }, n 86 drivers/clk/at91/sama5d2.c { .n = "pdmic_clk", .id = 48, .r = { .min = 0, .max = 83000000 }, }, n 87 drivers/clk/at91/sama5d2.c { .n = "securam_clk", .id = 51, }, n 88 drivers/clk/at91/sama5d2.c { .n = "i2s0_clk", .id = 54, .r = { .min = 0, .max = 83000000 }, }, n 89 drivers/clk/at91/sama5d2.c { .n = "i2s1_clk", .id = 55, .r = { .min = 0, .max = 83000000 }, }, n 90 drivers/clk/at91/sama5d2.c { .n = "can0_clk", .id = 56, .r = { .min = 0, .max = 83000000 }, }, n 91 drivers/clk/at91/sama5d2.c { .n = "can1_clk", .id = 57, .r = { .min = 0, .max = 83000000 }, }, n 92 drivers/clk/at91/sama5d2.c { .n = "classd_clk", .id = 59, .r = { .min = 0, .max = 83000000 }, }, n 96 drivers/clk/at91/sama5d2.c char *n; n 99 drivers/clk/at91/sama5d2.c { .n = "dma0_clk", .id = 6, }, n 100 drivers/clk/at91/sama5d2.c { .n = "dma1_clk", .id = 7, }, n 101 drivers/clk/at91/sama5d2.c { .n = "aes_clk", .id = 9, }, n 102 drivers/clk/at91/sama5d2.c { .n = "aesb_clk", .id = 10, }, n 103 drivers/clk/at91/sama5d2.c { .n = "sha_clk", .id = 12, }, n 104 drivers/clk/at91/sama5d2.c { .n = "mpddr_clk", .id = 13, }, n 105 drivers/clk/at91/sama5d2.c { .n = "matrix0_clk", .id = 15, }, n 106 drivers/clk/at91/sama5d2.c { .n = "sdmmc0_hclk", .id = 31, }, n 107 drivers/clk/at91/sama5d2.c { .n = "sdmmc1_hclk", .id = 32, }, n 108 drivers/clk/at91/sama5d2.c { .n = "lcdc_clk", .id = 45, }, n 109 drivers/clk/at91/sama5d2.c { .n = "isc_clk", .id = 46, }, n 110 drivers/clk/at91/sama5d2.c { .n = "qspi0_clk", .id = 52, }, n 111 drivers/clk/at91/sama5d2.c { .n = "qspi1_clk", .id = 53, }, n 115 drivers/clk/at91/sama5d2.c char *n; n 120 drivers/clk/at91/sama5d2.c { .n = "sdmmc0_gclk", .id = 31, }, n 121 
drivers/clk/at91/sama5d2.c { .n = "sdmmc1_gclk", .id = 32, }, n 122 drivers/clk/at91/sama5d2.c { .n = "tcb0_gclk", .id = 35, .r = { .min = 0, .max = 83000000 }, }, n 123 drivers/clk/at91/sama5d2.c { .n = "tcb1_gclk", .id = 36, .r = { .min = 0, .max = 83000000 }, }, n 124 drivers/clk/at91/sama5d2.c { .n = "pwm_gclk", .id = 38, .r = { .min = 0, .max = 83000000 }, }, n 125 drivers/clk/at91/sama5d2.c { .n = "isc_gclk", .id = 46, }, n 126 drivers/clk/at91/sama5d2.c { .n = "pdmic_gclk", .id = 48, }, n 127 drivers/clk/at91/sama5d2.c { .n = "i2s0_gclk", .id = 54, .pll = true }, n 128 drivers/clk/at91/sama5d2.c { .n = "i2s1_gclk", .id = 55, .pll = true }, n 129 drivers/clk/at91/sama5d2.c { .n = "can0_gclk", .id = 56, .r = { .min = 0, .max = 80000000 }, }, n 130 drivers/clk/at91/sama5d2.c { .n = "can1_gclk", .id = 57, .r = { .min = 0, .max = 80000000 }, }, n 131 drivers/clk/at91/sama5d2.c { .n = "classd_gclk", .id = 59, .r = { .min = 0, .max = 100000000 }, n 273 drivers/clk/at91/sama5d2.c hw = at91_clk_register_system(regmap, sama5d2_systemck[i].n, n 285 drivers/clk/at91/sama5d2.c sama5d2_periphck[i].n, n 298 drivers/clk/at91/sama5d2.c sama5d2_periph32ck[i].n, n 317 drivers/clk/at91/sama5d2.c sama5d2_gck[i].n, n 38 drivers/clk/at91/sama5d4.c char *n; n 42 drivers/clk/at91/sama5d4.c { .n = "ddrck", .p = "masterck", .id = 2 }, n 43 drivers/clk/at91/sama5d4.c { .n = "lcdck", .p = "masterck", .id = 3 }, n 44 drivers/clk/at91/sama5d4.c { .n = "smdck", .p = "smdclk", .id = 4 }, n 45 drivers/clk/at91/sama5d4.c { .n = "uhpck", .p = "usbck", .id = 6 }, n 46 drivers/clk/at91/sama5d4.c { .n = "udpck", .p = "usbck", .id = 7 }, n 47 drivers/clk/at91/sama5d4.c { .n = "pck0", .p = "prog0", .id = 8 }, n 48 drivers/clk/at91/sama5d4.c { .n = "pck1", .p = "prog1", .id = 9 }, n 49 drivers/clk/at91/sama5d4.c { .n = "pck2", .p = "prog2", .id = 10 }, n 53 drivers/clk/at91/sama5d4.c char *n; n 56 drivers/clk/at91/sama5d4.c { .n = "pioD_clk", .id = 5 }, n 57 drivers/clk/at91/sama5d4.c { .n = "usart0_clk", .id = 6 }, n 58 drivers/clk/at91/sama5d4.c { .n = "usart1_clk", .id = 7 }, n 59 drivers/clk/at91/sama5d4.c { .n = "icm_clk", .id = 9 }, n 60 drivers/clk/at91/sama5d4.c { .n = "aes_clk", .id = 12 }, n 61 drivers/clk/at91/sama5d4.c { .n = "tdes_clk", .id = 14 }, n 62 drivers/clk/at91/sama5d4.c { .n = "sha_clk", .id = 15 }, n 63 drivers/clk/at91/sama5d4.c { .n = "matrix1_clk", .id = 17 }, n 64 drivers/clk/at91/sama5d4.c { .n = "hsmc_clk", .id = 22 }, n 65 drivers/clk/at91/sama5d4.c { .n = "pioA_clk", .id = 23 }, n 66 drivers/clk/at91/sama5d4.c { .n = "pioB_clk", .id = 24 }, n 67 drivers/clk/at91/sama5d4.c { .n = "pioC_clk", .id = 25 }, n 68 drivers/clk/at91/sama5d4.c { .n = "pioE_clk", .id = 26 }, n 69 drivers/clk/at91/sama5d4.c { .n = "uart0_clk", .id = 27 }, n 70 drivers/clk/at91/sama5d4.c { .n = "uart1_clk", .id = 28 }, n 71 drivers/clk/at91/sama5d4.c { .n = "usart2_clk", .id = 29 }, n 72 drivers/clk/at91/sama5d4.c { .n = "usart3_clk", .id = 30 }, n 73 drivers/clk/at91/sama5d4.c { .n = "usart4_clk", .id = 31 }, n 74 drivers/clk/at91/sama5d4.c { .n = "twi0_clk", .id = 32 }, n 75 drivers/clk/at91/sama5d4.c { .n = "twi1_clk", .id = 33 }, n 76 drivers/clk/at91/sama5d4.c { .n = "twi2_clk", .id = 34 }, n 77 drivers/clk/at91/sama5d4.c { .n = "mci0_clk", .id = 35 }, n 78 drivers/clk/at91/sama5d4.c { .n = "mci1_clk", .id = 36 }, n 79 drivers/clk/at91/sama5d4.c { .n = "spi0_clk", .id = 37 }, n 80 drivers/clk/at91/sama5d4.c { .n = "spi1_clk", .id = 38 }, n 81 drivers/clk/at91/sama5d4.c { .n = "spi2_clk", .id = 39 }, n 82 
drivers/clk/at91/sama5d4.c { .n = "tcb0_clk", .id = 40 }, n 83 drivers/clk/at91/sama5d4.c { .n = "tcb1_clk", .id = 41 }, n 84 drivers/clk/at91/sama5d4.c { .n = "tcb2_clk", .id = 42 }, n 85 drivers/clk/at91/sama5d4.c { .n = "pwm_clk", .id = 43 }, n 86 drivers/clk/at91/sama5d4.c { .n = "adc_clk", .id = 44 }, n 87 drivers/clk/at91/sama5d4.c { .n = "dbgu_clk", .id = 45 }, n 88 drivers/clk/at91/sama5d4.c { .n = "uhphs_clk", .id = 46 }, n 89 drivers/clk/at91/sama5d4.c { .n = "udphs_clk", .id = 47 }, n 90 drivers/clk/at91/sama5d4.c { .n = "ssc0_clk", .id = 48 }, n 91 drivers/clk/at91/sama5d4.c { .n = "ssc1_clk", .id = 49 }, n 92 drivers/clk/at91/sama5d4.c { .n = "trng_clk", .id = 53 }, n 93 drivers/clk/at91/sama5d4.c { .n = "macb0_clk", .id = 54 }, n 94 drivers/clk/at91/sama5d4.c { .n = "macb1_clk", .id = 55 }, n 95 drivers/clk/at91/sama5d4.c { .n = "fuse_clk", .id = 57 }, n 96 drivers/clk/at91/sama5d4.c { .n = "securam_clk", .id = 59 }, n 97 drivers/clk/at91/sama5d4.c { .n = "smd_clk", .id = 61 }, n 98 drivers/clk/at91/sama5d4.c { .n = "twi3_clk", .id = 62 }, n 99 drivers/clk/at91/sama5d4.c { .n = "catb_clk", .id = 63 }, n 103 drivers/clk/at91/sama5d4.c char *n; n 106 drivers/clk/at91/sama5d4.c { .n = "dma0_clk", .id = 8 }, n 107 drivers/clk/at91/sama5d4.c { .n = "cpkcc_clk", .id = 10 }, n 108 drivers/clk/at91/sama5d4.c { .n = "aesb_clk", .id = 13 }, n 109 drivers/clk/at91/sama5d4.c { .n = "mpddr_clk", .id = 16 }, n 110 drivers/clk/at91/sama5d4.c { .n = "matrix0_clk", .id = 18 }, n 111 drivers/clk/at91/sama5d4.c { .n = "vdec_clk", .id = 19 }, n 112 drivers/clk/at91/sama5d4.c { .n = "dma1_clk", .id = 50 }, n 113 drivers/clk/at91/sama5d4.c { .n = "lcdc_clk", .id = 51 }, n 114 drivers/clk/at91/sama5d4.c { .n = "isi_clk", .id = 52 }, n 230 drivers/clk/at91/sama5d4.c hw = at91_clk_register_system(regmap, sama5d4_systemck[i].n, n 242 drivers/clk/at91/sama5d4.c sama5d4_periphck[i].n, n 255 drivers/clk/at91/sama5d4.c sama5d4_periph32ck[i].n, n 500 drivers/clk/berlin/bg2.c int n, ret; n 550 drivers/clk/berlin/bg2.c for (n = 0; n < 8; n++) { n 552 drivers/clk/berlin/bg2.c clk_names[AVPLL_A1 + n], n, "avpll_vcoA", n 564 drivers/clk/berlin/bg2.c for (n = 0; n < 8; n++) { n 566 drivers/clk/berlin/bg2.c clk_names[AVPLL_B1 + n], n, "avpll_vcoB", n 641 drivers/clk/berlin/bg2.c for (n = 0; n < ARRAY_SIZE(bg2_divs); n++) { n 642 drivers/clk/berlin/bg2.c const struct berlin2_div_data *dd = &bg2_divs[n]; n 648 drivers/clk/berlin/bg2.c hws[CLKID_SYS + n] = berlin2_div_register(&dd->map, gbase, n 654 drivers/clk/berlin/bg2.c for (n = 0; n < ARRAY_SIZE(bg2_gates); n++) { n 655 drivers/clk/berlin/bg2.c const struct berlin2_gate_data *gd = &bg2_gates[n]; n 657 drivers/clk/berlin/bg2.c hws[CLKID_GETH0 + n] = clk_hw_register_gate(NULL, gd->name, n 667 drivers/clk/berlin/bg2.c for (n = 0; n < MAX_CLKS; n++) { n 668 drivers/clk/berlin/bg2.c if (!IS_ERR(hws[n])) n 671 drivers/clk/berlin/bg2.c pr_err("%pOF: Unable to register leaf clock %d\n", np, n); n 286 drivers/clk/berlin/bg2q.c int n, ret; n 334 drivers/clk/berlin/bg2q.c for (n = 0; n < ARRAY_SIZE(bg2q_divs); n++) { n 335 drivers/clk/berlin/bg2q.c const struct berlin2_div_data *dd = &bg2q_divs[n]; n 341 drivers/clk/berlin/bg2q.c hws[CLKID_SYS + n] = berlin2_div_register(&dd->map, gbase, n 347 drivers/clk/berlin/bg2q.c for (n = 0; n < ARRAY_SIZE(bg2q_gates); n++) { n 348 drivers/clk/berlin/bg2q.c const struct berlin2_gate_data *gd = &bg2q_gates[n]; n 350 drivers/clk/berlin/bg2q.c hws[CLKID_GFX2DAXI + n] = clk_hw_register_gate(NULL, gd->name, n 364 
drivers/clk/berlin/bg2q.c for (n = 0; n < MAX_CLKS; n++) { n 365 drivers/clk/berlin/bg2q.c if (!IS_ERR(hws[n])) n 368 drivers/clk/berlin/bg2q.c pr_err("%pOF: Unable to register leaf clock %d\n", np, n); n 262 drivers/clk/clk-asm9260.c int n; n 286 drivers/clk/clk-asm9260.c for (n = 0; n < ARRAY_SIZE(asm9260_mux_clks); n++) { n 287 drivers/clk/clk-asm9260.c const struct asm9260_mux_clock *mc = &asm9260_mux_clks[n]; n 297 drivers/clk/clk-asm9260.c for (n = 0; n < ARRAY_SIZE(asm9260_mux_gates); n++) { n 298 drivers/clk/clk-asm9260.c const struct asm9260_gate_data *gd = &asm9260_mux_gates[n]; n 306 drivers/clk/clk-asm9260.c for (n = 0; n < ARRAY_SIZE(asm9260_div_clks); n++) { n 307 drivers/clk/clk-asm9260.c const struct asm9260_div_clk *dc = &asm9260_div_clks[n]; n 316 drivers/clk/clk-asm9260.c for (n = 0; n < ARRAY_SIZE(asm9260_ahb_gates); n++) { n 317 drivers/clk/clk-asm9260.c const struct asm9260_gate_data *gd = &asm9260_ahb_gates[n]; n 325 drivers/clk/clk-asm9260.c for (n = 0; n < MAX_CLKS; n++) { n 326 drivers/clk/clk-asm9260.c if (!IS_ERR(hws[n])) n 330 drivers/clk/clk-asm9260.c np, n); n 137 drivers/clk/clk-aspeed.c u32 n = (val >> 5) & 0x3f; n 141 drivers/clk/clk-aspeed.c mult = (2 - od) * (n + 2); n 159 drivers/clk/clk-aspeed.c u32 n = val & 0x1f; n 161 drivers/clk/clk-aspeed.c mult = (m + 1) / (n + 1); n 168 drivers/clk/clk-ast2600.c u32 n = (val >> 13) & 0x3f; n 170 drivers/clk/clk-ast2600.c mult = (m + 1) / (n + 1); n 188 drivers/clk/clk-ast2600.c u32 n = val & 0xf; n 191 drivers/clk/clk-ast2600.c div = n + 1; n 315 drivers/clk/clk-cdce706.c unsigned long n, m; n 326 drivers/clk/clk-cdce706.c &n, &m); n 327 drivers/clk/clk-cdce706.c div_rate64 = (u64)gp_rate * n; n 338 drivers/clk/clk-cdce706.c __func__, gp_rate, n, m, div, div_rate); n 526 drivers/clk/clk-cdce706.c unsigned m, n, v; n 531 drivers/clk/clk-cdce706.c ret = cdce706_reg_read(cdce, CDCE706_PLL_N_LOW(i), &n); n 538 drivers/clk/clk-cdce706.c cdce->pll[i].mul = n | ((v & CDCE706_PLL_HI_N_MASK) << n 78 drivers/clk/clk-cdce925.c u16 n; /* 1..4095 */ n 93 drivers/clk/clk-cdce925.c u16 n, u16 m) n 95 drivers/clk/clk-cdce925.c if ((!m || !n) || (m == n)) n 97 drivers/clk/clk-cdce925.c return mult_frac(parent_rate, (unsigned long)n, (unsigned long)m); n 106 drivers/clk/clk-cdce925.c return cdce925_pll_calculate_rate(parent_rate, data->n, data->m); n 110 drivers/clk/clk-cdce925.c unsigned long parent_rate, u16 *n, u16 *m) n 119 drivers/clk/clk-cdce925.c *n = 0; n 141 drivers/clk/clk-cdce925.c *n = un; n 149 drivers/clk/clk-cdce925.c u16 n, m; n 151 drivers/clk/clk-cdce925.c cdce925_pll_find_rate(rate, *parent_rate, &n, &m); n 152 drivers/clk/clk-cdce925.c return (long)cdce925_pll_calculate_rate(*parent_rate, n, m); n 162 drivers/clk/clk-cdce925.c data->n = 0; n 178 drivers/clk/clk-cdce925.c cdce925_pll_find_rate(rate, parent_rate, &data->n, &data->m); n 184 drivers/clk/clk-cdce925.c static u8 cdce925_pll_calc_p(u16 n, u16 m) n 187 drivers/clk/clk-cdce925.c u16 r = n / m; n 200 drivers/clk/clk-cdce925.c static u8 cdce925_pll_calc_range_bits(struct clk_hw *hw, u16 n, u16 m) n 205 drivers/clk/clk-cdce925.c rate = mult_frac(rate, (unsigned long)n, (unsigned long)m); n 220 drivers/clk/clk-cdce925.c u16 n = data->n; n 230 drivers/clk/clk-cdce925.c if ((!m || !n) || (m == n)) { n 237 drivers/clk/clk-cdce925.c p = cdce925_pll_calc_p(n, m); n 239 drivers/clk/clk-cdce925.c nn = n * BIT(p); n 252 drivers/clk/clk-cdce925.c n, m, p, q, r); n 254 drivers/clk/clk-cdce925.c pll[0] = n >> 4; n 255 drivers/clk/clk-cdce925.c pll[1] = ((n & 0x0F) << 
4) | ((r >> 5) & 0x0F); n 258 drivers/clk/clk-cdce925.c cdce925_pll_calc_range_bits(hw, n, m); n 38 drivers/clk/clk-fractional-divider.c unsigned long m, n; n 55 drivers/clk/clk-fractional-divider.c n = (val & fd->nmask) >> fd->nshift; n 59 drivers/clk/clk-fractional-divider.c n++; n 62 drivers/clk/clk-fractional-divider.c if (!n || !m) n 66 drivers/clk/clk-fractional-divider.c do_div(ret, n); n 73 drivers/clk/clk-fractional-divider.c unsigned long *m, unsigned long *n) n 89 drivers/clk/clk-fractional-divider.c m, n); n 96 drivers/clk/clk-fractional-divider.c unsigned long m, n; n 103 drivers/clk/clk-fractional-divider.c fd->approximation(hw, rate, parent_rate, &m, &n); n 105 drivers/clk/clk-fractional-divider.c clk_fd_general_approximation(hw, rate, parent_rate, &m, &n); n 108 drivers/clk/clk-fractional-divider.c do_div(ret, n); n 118 drivers/clk/clk-fractional-divider.c unsigned long m, n; n 123 drivers/clk/clk-fractional-divider.c &m, &n); n 127 drivers/clk/clk-fractional-divider.c n--; n 137 drivers/clk/clk-fractional-divider.c val |= (m << fd->mshift) | (n << fd->nshift); n 17 drivers/clk/clk-milbeaut.c #define CLKSEL(n) (((n) - 1) * 4 + M10V_CLKSEL1) n 1372 drivers/clk/clk-si5351.c int ret, n; n 1420 drivers/clk/clk-si5351.c for (n = 0; n < 2; n++) { n 1421 drivers/clk/clk-si5351.c ret = _si5351_pll_reparent(drvdata, n, pdata->pll_src[n]); n 1425 drivers/clk/clk-si5351.c n, pdata->pll_src[n]); n 1430 drivers/clk/clk-si5351.c for (n = 0; n < 8; n++) { n 1431 drivers/clk/clk-si5351.c ret = _si5351_msynth_reparent(drvdata, n, n 1432 drivers/clk/clk-si5351.c pdata->clkout[n].multisynth_src); n 1436 drivers/clk/clk-si5351.c n, pdata->clkout[n].multisynth_src); n 1440 drivers/clk/clk-si5351.c ret = _si5351_clkout_reparent(drvdata, n, n 1441 drivers/clk/clk-si5351.c pdata->clkout[n].clkout_src); n 1445 drivers/clk/clk-si5351.c n, pdata->clkout[n].clkout_src); n 1449 drivers/clk/clk-si5351.c ret = _si5351_clkout_set_drive_strength(drvdata, n, n 1450 drivers/clk/clk-si5351.c pdata->clkout[n].drive); n 1454 drivers/clk/clk-si5351.c n, pdata->clkout[n].drive); n 1458 drivers/clk/clk-si5351.c ret = _si5351_clkout_set_disable_state(drvdata, n, n 1459 drivers/clk/clk-si5351.c pdata->clkout[n].disable_state); n 1463 drivers/clk/clk-si5351.c n, pdata->clkout[n].disable_state); n 1568 drivers/clk/clk-si5351.c for (n = 0; n < num_clocks; n++) { n 1569 drivers/clk/clk-si5351.c drvdata->msynth[n].num = n; n 1570 drivers/clk/clk-si5351.c drvdata->msynth[n].drvdata = drvdata; n 1571 drivers/clk/clk-si5351.c drvdata->msynth[n].hw.init = &init; n 1573 drivers/clk/clk-si5351.c init.name = si5351_msynth_names[n]; n 1576 drivers/clk/clk-si5351.c if (pdata->clkout[n].pll_master) n 1581 drivers/clk/clk-si5351.c &drvdata->msynth[n].hw); n 1592 drivers/clk/clk-si5351.c for (n = 0; n < num_clocks; n++) { n 1593 drivers/clk/clk-si5351.c parent_names[0] = si5351_msynth_names[n]; n 1594 drivers/clk/clk-si5351.c parent_names[1] = (n < 4) ? 
si5351_msynth_names[0] : n 1597 drivers/clk/clk-si5351.c drvdata->clkout[n].num = n; n 1598 drivers/clk/clk-si5351.c drvdata->clkout[n].drvdata = drvdata; n 1599 drivers/clk/clk-si5351.c drvdata->clkout[n].hw.init = &init; n 1601 drivers/clk/clk-si5351.c init.name = si5351_clkout_names[n]; n 1604 drivers/clk/clk-si5351.c if (pdata->clkout[n].clkout_src == SI5351_CLKOUT_SRC_MSYNTH_N) n 1609 drivers/clk/clk-si5351.c &drvdata->clkout[n].hw); n 1617 drivers/clk/clk-si5351.c if (pdata->clkout[n].rate != 0) { n 1619 drivers/clk/clk-si5351.c ret = clk_set_rate(drvdata->clkout[n].hw.clk, n 1620 drivers/clk/clk-si5351.c pdata->clkout[n].rate); n 355 drivers/clk/clk-si544.c s64 n = (s64)delta * DELTA_M_MAX; n 357 drivers/clk/clk-si544.c return div_s64(n, max_delta); n 639 drivers/clk/clk-stm32f4.c unsigned long n; n 641 drivers/clk/clk-stm32f4.c n = (readl(base + pll->offset) >> 6) & 0x1ff; n 643 drivers/clk/clk-stm32f4.c return parent_rate * n; n 651 drivers/clk/clk-stm32f4.c unsigned long n; n 653 drivers/clk/clk-stm32f4.c n = rate / *prate; n 655 drivers/clk/clk-stm32f4.c if (n < pll->n_start) n 656 drivers/clk/clk-stm32f4.c n = pll->n_start; n 657 drivers/clk/clk-stm32f4.c else if (n > 432) n 658 drivers/clk/clk-stm32f4.c n = 432; n 660 drivers/clk/clk-stm32f4.c return *prate * n; n 669 drivers/clk/clk-stm32f4.c unsigned long n; n 678 drivers/clk/clk-stm32f4.c n = rate / parent_rate; n 682 drivers/clk/clk-stm32f4.c writel(val | ((n & 0x1ff) << 6), base + pll->offset); n 1691 drivers/clk/clk-stm32f4.c int n; n 1763 drivers/clk/clk-stm32f4.c for (n = 0; n < MAX_POST_DIV; n++) { n 1767 drivers/clk/clk-stm32f4.c post_div = &post_div_data[n]; n 1811 drivers/clk/clk-stm32f4.c for (n = 0; n < data->gates_num; n++) { n 1816 drivers/clk/clk-stm32f4.c gd = &data->gates_data[n]; n 1868 drivers/clk/clk-stm32f4.c for (n = 0; n < data->aux_clk_num; n++) { n 1872 drivers/clk/clk-stm32f4.c aux_clk = &data->aux_clk[n]; n 748 drivers/clk/clk-stm32h7.c unsigned long m, n; n 758 drivers/clk/clk-stm32h7.c n = ((val & mask) >> fd->nshift) + 1; n 760 drivers/clk/clk-stm32h7.c if (!n || !m) n 763 drivers/clk/clk-stm32h7.c rate = (u64)parent_rate * n; n 1200 drivers/clk/clk-stm32h7.c int n; n 1213 drivers/clk/clk-stm32h7.c for (n = 0; n < STM32H7_MAX_CLKS; n++) n 1214 drivers/clk/clk-stm32h7.c hws[n] = ERR_PTR(-ENOENT); n 1261 drivers/clk/clk-stm32h7.c for (n = 0; n < ARRAY_SIZE(stm32_mclk); n++) n 1262 drivers/clk/clk-stm32h7.c hws[MCLK_BANK + n] = clk_hw_register_mux(NULL, n 1263 drivers/clk/clk-stm32h7.c stm32_mclk[n].name, n 1264 drivers/clk/clk-stm32h7.c stm32_mclk[n].parents, n 1265 drivers/clk/clk-stm32h7.c stm32_mclk[n].num_parents, n 1266 drivers/clk/clk-stm32h7.c stm32_mclk[n].flags, n 1267 drivers/clk/clk-stm32h7.c stm32_mclk[n].offset + base, n 1268 drivers/clk/clk-stm32h7.c stm32_mclk[n].shift, n 1269 drivers/clk/clk-stm32h7.c stm32_mclk[n].width, n 1276 drivers/clk/clk-stm32h7.c for (n = 0; n < ARRAY_SIZE(stm32_oclk); n++) n 1277 drivers/clk/clk-stm32h7.c hws[OSC_BANK + n] = clk_register_ready_gate(NULL, n 1278 drivers/clk/clk-stm32h7.c stm32_oclk[n].name, n 1279 drivers/clk/clk-stm32h7.c stm32_oclk[n].parent, n 1280 drivers/clk/clk-stm32h7.c stm32_oclk[n].gate_offset + base, n 1281 drivers/clk/clk-stm32h7.c stm32_oclk[n].bit_idx, n 1282 drivers/clk/clk-stm32h7.c stm32_oclk[n].bit_rdy, n 1283 drivers/clk/clk-stm32h7.c stm32_oclk[n].flags, n 1302 drivers/clk/clk-stm32h7.c hws[CSI_KER_DIV122 + n] = clk_hw_register_fixed_factor(NULL, n 1306 drivers/clk/clk-stm32h7.c for (n = 0; n < ARRAY_SIZE(stm32_pll); n++) 
{ n 1310 drivers/clk/clk-stm32h7.c clk_register_stm32_pll(NULL, stm32_pll[n].name, n 1311 drivers/clk/clk-stm32h7.c stm32_pll[n].parent_name, stm32_pll[n].flags, n 1312 drivers/clk/clk-stm32h7.c stm32_pll[n].cfg, n 1317 drivers/clk/clk-stm32h7.c int idx = n * 3 + odf; n 1319 drivers/clk/clk-stm32h7.c get_cfg_composite_div(&odf_clk_gcfg, &stm32_odf[n][odf], n 1323 drivers/clk/clk-stm32h7.c stm32_odf[n][odf].name, n 1324 drivers/clk/clk-stm32h7.c stm32_odf[n][odf].parent_name, n 1325 drivers/clk/clk-stm32h7.c stm32_odf[n][odf].num_parents, n 1329 drivers/clk/clk-stm32h7.c stm32_odf[n][odf].flags); n 1334 drivers/clk/clk-stm32h7.c for (n = 0; n < ARRAY_SIZE(pclk); n++) n 1335 drivers/clk/clk-stm32h7.c hws[PERIF_BANK + n] = clk_hw_register_gate(NULL, pclk[n].name, n 1336 drivers/clk/clk-stm32h7.c pclk[n].parent, n 1337 drivers/clk/clk-stm32h7.c pclk[n].flags, base + pclk[n].gate_offset, n 1338 drivers/clk/clk-stm32h7.c pclk[n].bit_idx, pclk[n].flags, &stm32rcc_lock); n 1341 drivers/clk/clk-stm32h7.c for (n = 0; n < ARRAY_SIZE(kclk); n++) { n 1342 drivers/clk/clk-stm32h7.c get_cfg_composite_div(&kernel_clk_cfg, &kclk[n], &c_cfg, n 1345 drivers/clk/clk-stm32h7.c hws[KERN_BANK + n] = clk_hw_register_composite(NULL, n 1346 drivers/clk/clk-stm32h7.c kclk[n].name, n 1347 drivers/clk/clk-stm32h7.c kclk[n].parent_name, n 1348 drivers/clk/clk-stm32h7.c kclk[n].num_parents, n 1352 drivers/clk/clk-stm32h7.c kclk[n].flags); n 1370 drivers/clk/clk-stm32h7.c for (n = 0; n < ARRAY_SIZE(mco_clk); n++) { n 1371 drivers/clk/clk-stm32h7.c get_cfg_composite_div(&mco_clk_cfg, &mco_clk[n], &c_cfg, n 1374 drivers/clk/clk-stm32h7.c hws[MCO_BANK + n] = clk_hw_register_composite(NULL, n 1375 drivers/clk/clk-stm32h7.c mco_clk[n].name, n 1376 drivers/clk/clk-stm32h7.c mco_clk[n].parent_name, n 1377 drivers/clk/clk-stm32h7.c mco_clk[n].num_parents, n 1381 drivers/clk/clk-stm32h7.c mco_clk[n].flags); n 707 drivers/clk/clk-stm32mp1.c int ret, n; n 715 drivers/clk/clk-stm32mp1.c for (n = 0; n < clk_mmux->mmux->nbr_clk; n++) n 716 drivers/clk/clk-stm32mp1.c if (clk_mmux->mmux->hws[n] != hw) n 717 drivers/clk/clk-stm32mp1.c clk_hw_reparent(clk_mmux->mmux->hws[n], hwp); n 2051 drivers/clk/clk-stm32mp1.c int err, n, max_binding; n 2072 drivers/clk/clk-stm32mp1.c for (n = 0; n < max_binding; n++) n 2073 drivers/clk/clk-stm32mp1.c hws[n] = ERR_PTR(-ENOENT); n 2075 drivers/clk/clk-stm32mp1.c for (n = 0; n < data->num; n++) { n 2077 drivers/clk/clk-stm32mp1.c &data->cfg[n]); n 2080 drivers/clk/clk-stm32mp1.c data->cfg[n].name); n 60 drivers/clk/clk-versaclock5.c #define VC5_REF_DIVIDER_REF_DIV(n) ((n) & 0x3f) n 67 drivers/clk/clk-versaclock5.c #define VC5_FEEDBACK_FRAC_DIV(n) (0x19 + (n)) n 80 drivers/clk/clk-versaclock5.c #define VC5_OUT_DIV_FRAC(idx, n) (0x22 + ((idx) * 0x10) + (n)) n 83 drivers/clk/clk-versaclock5.c #define VC5_OUT_DIV_STEP_SPREAD(idx, n) (0x26 + ((idx) * 0x10) + (n)) n 84 drivers/clk/clk-versaclock5.c #define VC5_OUT_DIV_SPREAD_MOD(idx, n) (0x29 + ((idx) * 0x10) + (n)) n 85 drivers/clk/clk-versaclock5.c #define VC5_OUT_DIV_SKEW_INT(idx, n) (0x2b + ((idx) * 0x10) + (n)) n 86 drivers/clk/clk-versaclock5.c #define VC5_OUT_DIV_INT(idx, n) (0x2d + ((idx) * 0x10) + (n)) n 91 drivers/clk/clk-versaclock5.c #define VC5_CLK_OUTPUT_CFG(idx, n) (0x60 + ((idx) * 0x2) + (n)) n 677 drivers/clk/clk-versaclock5.c const unsigned int n) n 681 drivers/clk/clk-versaclock5.c return (n == 0) ? 
0 : 3; n 687 drivers/clk/clk-versaclock5.c return n; n 699 drivers/clk/clk-versaclock5.c unsigned int n, idx = 0; n 813 drivers/clk/clk-versaclock5.c for (n = 0; n < vc5->chip_info->clk_fod_cnt; n++) { n 814 drivers/clk/clk-versaclock5.c idx = vc5_map_index_to_output(vc5->chip_info->model, n); n 821 drivers/clk/clk-versaclock5.c vc5->clk_fod[n].num = idx; n 822 drivers/clk/clk-versaclock5.c vc5->clk_fod[n].vc5 = vc5; n 823 drivers/clk/clk-versaclock5.c vc5->clk_fod[n].hw.init = &init; n 824 drivers/clk/clk-versaclock5.c ret = devm_clk_hw_register(&client->dev, &vc5->clk_fod[n].hw); n 850 drivers/clk/clk-versaclock5.c for (n = 1; n < vc5->chip_info->clk_out_cnt; n++) { n 851 drivers/clk/clk-versaclock5.c idx = vc5_map_index_to_output(vc5->chip_info->model, n - 1); n 853 drivers/clk/clk-versaclock5.c if (n == 1) n 856 drivers/clk/clk-versaclock5.c parent_names[1] = vc5_clk_out_names[n - 1]; n 864 drivers/clk/clk-versaclock5.c vc5->clk_out[n].num = idx; n 865 drivers/clk/clk-versaclock5.c vc5->clk_out[n].vc5 = vc5; n 866 drivers/clk/clk-versaclock5.c vc5->clk_out[n].hw.init = &init; n 868 drivers/clk/clk-versaclock5.c &vc5->clk_out[n].hw); n 25 drivers/clk/davinci/pll-da850.c #define OCSEL_OCSRC_PLL0_SYSCLK(n) (0x16 + (n)) n 27 drivers/clk/davinci/pll-da850.c #define OCSEL_OCSRC_PLL1_SYSCLK(n) (0x16 + (n)) n 961 drivers/clk/davinci/pll.c #define DEBUG_REG(n) \ n 963 drivers/clk/davinci/pll.c .name = #n, \ n 964 drivers/clk/davinci/pll.c .offset = n, \ n 70 drivers/clk/davinci/pll.h #define SYSCLK(i, n, p, w, f) \ n 71 drivers/clk/davinci/pll.h static const struct davinci_pll_sysclk_info n = { \ n 72 drivers/clk/davinci/pll.h .name = #n, \ n 38 drivers/clk/davinci/psc.c #define PDSTAT(n) (0x200 + 4 * (n)) n 39 drivers/clk/davinci/psc.c #define PDCTL(n) (0x300 + 4 * (n)) n 40 drivers/clk/davinci/psc.c #define MDSTAT(n) (0x800 + 4 * (n)) n 41 drivers/clk/davinci/psc.c #define MDCTL(n) (0xa00 + 4 * (n)) n 30 drivers/clk/davinci/psc.h #define LPSC_CLKDEV1(n, c, d) \ n 31 drivers/clk/davinci/psc.h static const struct davinci_lpsc_clkdev_info n[] __initconst = { \ n 36 drivers/clk/davinci/psc.h #define LPSC_CLKDEV2(n, c1, d1, c2, d2) \ n 37 drivers/clk/davinci/psc.h static const struct davinci_lpsc_clkdev_info n[] __initconst = { \ n 43 drivers/clk/davinci/psc.h #define LPSC_CLKDEV3(n, c1, d1, c2, d2, c3, d3) \ n 44 drivers/clk/davinci/psc.h static const struct davinci_lpsc_clkdev_info n[] __initconst = { \ n 69 drivers/clk/davinci/psc.h #define LPSC(m, d, n, p, c, f) \ n 71 drivers/clk/davinci/psc.h .name = #n, \ n 53 drivers/clk/imx/clk-vf610.c #define CCM_CCGRx_CGn(n) ((n) * 2) n 77 drivers/clk/ingenic/cgu.c unsigned m, n, od_enc, od; n 92 drivers/clk/ingenic/cgu.c n = (ctl >> pll_info->n_shift) & GENMASK(pll_info->n_bits - 1, 0); n 93 drivers/clk/ingenic/cgu.c n += pll_info->n_offset; n 109 drivers/clk/ingenic/cgu.c return div_u64((u64)parent_rate * m, n * od); n 118 drivers/clk/ingenic/cgu.c unsigned m, n, od; n 127 drivers/clk/ingenic/cgu.c n = parent_rate / (10 * MHZ); n 128 drivers/clk/ingenic/cgu.c n = min_t(unsigned, n, 1 << clk_info->pll.n_bits); n 129 drivers/clk/ingenic/cgu.c n = max_t(unsigned, n, pll_info->n_offset); n 131 drivers/clk/ingenic/cgu.c m = (rate / MHZ) * od * n / (parent_rate / MHZ); n 138 drivers/clk/ingenic/cgu.c *pn = n; n 142 drivers/clk/ingenic/cgu.c return div_u64((u64)parent_rate * m, n * od); n 176 drivers/clk/ingenic/cgu.c unsigned int m, n, od; n 180 drivers/clk/ingenic/cgu.c &m, &n, &od); n 192 drivers/clk/ingenic/cgu.c ctl |= (n - pll_info->n_offset) << 
pll_info->n_shift; n 37 drivers/clk/meson/axg.c .n = { n 101 drivers/clk/meson/axg.c .n = { n 198 drivers/clk/meson/axg.c .n = { n 270 drivers/clk/meson/axg.c .n = { n 696 drivers/clk/meson/axg.c .n = 3, n 723 drivers/clk/meson/axg.c .n = { n 56 drivers/clk/meson/clk-pll.c unsigned int m, unsigned int n, n 69 drivers/clk/meson/clk-pll.c return DIV_ROUND_UP_ULL(rate, n); n 77 drivers/clk/meson/clk-pll.c unsigned int m, n, frac; n 79 drivers/clk/meson/clk-pll.c n = meson_parm_read(clk->map, &pll->n); n 86 drivers/clk/meson/clk-pll.c if (n == 0) n 95 drivers/clk/meson/clk-pll.c return __pll_params_to_rate(parent_rate, m, n, frac, pll); n 101 drivers/clk/meson/clk-pll.c unsigned int n, n 105 drivers/clk/meson/clk-pll.c u64 val = (u64)rate * n; n 108 drivers/clk/meson/clk-pll.c if (rate < parent_rate * m / n) n 141 drivers/clk/meson/clk-pll.c unsigned int *n, n 144 drivers/clk/meson/clk-pll.c if (!pll->table[index].n) n 148 drivers/clk/meson/clk-pll.c *n = pll->table[index].n; n 155 drivers/clk/meson/clk-pll.c unsigned int n, n 158 drivers/clk/meson/clk-pll.c u64 val = (u64)rate * n; n 170 drivers/clk/meson/clk-pll.c unsigned int *n, n 173 drivers/clk/meson/clk-pll.c *n = index + 1; n 176 drivers/clk/meson/clk-pll.c if (*n >= (1 << pll->n.width)) n 179 drivers/clk/meson/clk-pll.c if (*n == 1) { n 190 drivers/clk/meson/clk-pll.c *m = meson_clk_get_pll_range_m(rate, parent_rate, *n, pll); n 203 drivers/clk/meson/clk-pll.c unsigned int *n, n 208 drivers/clk/meson/clk-pll.c index, m, n, pll); n 210 drivers/clk/meson/clk-pll.c return meson_clk_get_pll_table_index(index, m, n, pll); n 222 drivers/clk/meson/clk-pll.c unsigned int i, m, n; n 227 drivers/clk/meson/clk-pll.c i, &m, &n, pll); n 231 drivers/clk/meson/clk-pll.c now = __pll_params_to_rate(parent_rate, m, n, 0, pll); n 235 drivers/clk/meson/clk-pll.c *best_n = n; n 250 drivers/clk/meson/clk-pll.c unsigned int m, n, frac; n 254 drivers/clk/meson/clk-pll.c ret = meson_clk_get_pll_settings(rate, *parent_rate, &m, &n, pll); n 258 drivers/clk/meson/clk-pll.c round = __pll_params_to_rate(*parent_rate, m, n, 0, pll); n 267 drivers/clk/meson/clk-pll.c frac = __pll_params_with_frac(rate, *parent_rate, m, n, pll); n 269 drivers/clk/meson/clk-pll.c return __pll_params_to_rate(*parent_rate, m, n, frac, pll); n 366 drivers/clk/meson/clk-pll.c unsigned int enabled, m, n, frac = 0, ret; n 374 drivers/clk/meson/clk-pll.c ret = meson_clk_get_pll_settings(rate, parent_rate, &m, &n, pll); n 382 drivers/clk/meson/clk-pll.c meson_parm_write(clk->map, &pll->n, n); n 386 drivers/clk/meson/clk-pll.c frac = __pll_params_with_frac(rate, parent_rate, m, n, pll); n 16 drivers/clk/meson/clk-pll.h unsigned int n; n 27 drivers/clk/meson/clk-pll.h .n = (_n), \ n 35 drivers/clk/meson/clk-pll.h struct parm n; n 41 drivers/clk/meson/g12a.c .n = { n 110 drivers/clk/meson/g12a.c .n = { n 169 drivers/clk/meson/g12a.c .n = { n 1622 drivers/clk/meson/g12a.c .n = { n 1687 drivers/clk/meson/g12a.c .n = { n 1762 drivers/clk/meson/g12a.c .n = { n 1854 drivers/clk/meson/g12a.c .n = { n 1948 drivers/clk/meson/g12a.c .n = { n 97 drivers/clk/meson/gxbb.c .n = { n 174 drivers/clk/meson/gxbb.c .n = { n 222 drivers/clk/meson/gxbb.c .n = { n 384 drivers/clk/meson/gxbb.c .n = { n 446 drivers/clk/meson/gxbb.c .n = { n 495 drivers/clk/meson/gxbb.c .n = { n 76 drivers/clk/meson/meson8b.c .n = { n 140 drivers/clk/meson/meson8b.c .n = { n 220 drivers/clk/meson/meson8b.c .n = { n 1930 drivers/clk/meson/meson8b.c .n = { n 114 drivers/clk/mvebu/common.c int n; n 156 drivers/clk/mvebu/common.c for (n = 
0; n < desc->num_ratios; n++) { n 157 drivers/clk/mvebu/common.c const char *rclk_name = desc->ratios[n].name; n 161 drivers/clk/mvebu/common.c 2+n, &rclk_name); n 162 drivers/clk/mvebu/common.c desc->get_clk_ratio(base, desc->ratios[n].id, &mult, &div); n 163 drivers/clk/mvebu/common.c clk_data.clks[2+n] = clk_register_fixed_factor(NULL, rclk_name, n 165 drivers/clk/mvebu/common.c WARN_ON(IS_ERR(clk_data.clks[2+n])); n 204 drivers/clk/mvebu/common.c int n; n 209 drivers/clk/mvebu/common.c for (n = 0; n < ctrl->num_gates; n++) { n 211 drivers/clk/mvebu/common.c to_clk_gate(__clk_get_hw(ctrl->gates[n])); n 213 drivers/clk/mvebu/common.c return ctrl->gates[n]; n 240 drivers/clk/mvebu/common.c int n; n 267 drivers/clk/mvebu/common.c for (n = 0; desc[n].name;) n 268 drivers/clk/mvebu/common.c n++; n 270 drivers/clk/mvebu/common.c ctrl->num_gates = n; n 276 drivers/clk/mvebu/common.c for (n = 0; n < ctrl->num_gates; n++) { n 278 drivers/clk/mvebu/common.c (desc[n].parent) ? desc[n].parent : default_parent; n 279 drivers/clk/mvebu/common.c ctrl->gates[n] = clk_register_gate(NULL, desc[n].name, parent, n 280 drivers/clk/mvebu/common.c desc[n].flags, base, desc[n].bit_idx, n 282 drivers/clk/mvebu/common.c WARN_ON(IS_ERR(ctrl->gates[n])); n 274 drivers/clk/mvebu/kirkwood.c int n; n 279 drivers/clk/mvebu/kirkwood.c for (n = 0; n < ctrl->num_muxes; n++) { n 281 drivers/clk/mvebu/kirkwood.c to_clk_mux(__clk_get_hw(ctrl->muxes[n])); n 283 drivers/clk/mvebu/kirkwood.c return ctrl->muxes[n]; n 293 drivers/clk/mvebu/kirkwood.c int n; n 307 drivers/clk/mvebu/kirkwood.c for (n = 0; desc[n].name;) n 308 drivers/clk/mvebu/kirkwood.c n++; n 310 drivers/clk/mvebu/kirkwood.c ctrl->num_muxes = n; n 316 drivers/clk/mvebu/kirkwood.c for (n = 0; n < ctrl->num_muxes; n++) { n 317 drivers/clk/mvebu/kirkwood.c ctrl->muxes[n] = clk_register_mux(NULL, desc[n].name, n 318 drivers/clk/mvebu/kirkwood.c desc[n].parents, desc[n].num_parents, n 319 drivers/clk/mvebu/kirkwood.c desc[n].flags, base, desc[n].shift, n 320 drivers/clk/mvebu/kirkwood.c desc[n].width, desc[n].flags, ctrl->lock); n 321 drivers/clk/mvebu/kirkwood.c WARN_ON(IS_ERR(ctrl->muxes[n])); n 35 drivers/clk/nxp/clk-lpc18xx-cgu.c #define LPC18XX_CGU_IDIV_CTRL(n) (0x048 + (n) * sizeof(u32)) n 535 drivers/clk/nxp/clk-lpc18xx-cgu.c void __iomem *base, int n) n 537 drivers/clk/nxp/clk-lpc18xx-cgu.c void __iomem *reg = base + LPC18XX_CGU_IDIV_CTRL(n); n 555 drivers/clk/nxp/clk-lpc18xx-cgu.c void __iomem *reg_base, int n) n 557 drivers/clk/nxp/clk-lpc18xx-cgu.c void __iomem *reg = reg_base + LPC18XX_CGU_BASE_CLK(n); n 570 drivers/clk/nxp/clk-lpc18xx-cgu.c if (n == BASE_SAFE_CLK) n 587 drivers/clk/nxp/clk-lpc32xx.c u64 m = 0, n = 0, p = 0; n 610 drivers/clk/nxp/clk-lpc32xx.c n = n_i; n 624 drivers/clk/nxp/clk-lpc32xx.c clk->n_div = n; n 633 drivers/clk/nxp/clk-lpc32xx.c o = div64_u64(i * m, n * (1 << p)); n 637 drivers/clk/nxp/clk-lpc32xx.c clk_hw_get_name(hw), rate, m, n, p); n 640 drivers/clk/nxp/clk-lpc32xx.c clk_hw_get_name(hw), rate, m, n, p, o); n 83 drivers/clk/qcom/clk-pll.c u32 l, m, n, config; n 89 drivers/clk/qcom/clk-pll.c regmap_read(pll->clkr.regmap, pll->n_reg, &n); n 93 drivers/clk/qcom/clk-pll.c n &= 0x7ffff; n 96 drivers/clk/qcom/clk-pll.c if (n) { n 99 drivers/clk/qcom/clk-pll.c do_div(tmp, n); n 161 drivers/clk/qcom/clk-pll.c regmap_update_bits(pll->clkr.regmap, pll->n_reg, 0x7ffff, f->n); n 226 drivers/clk/qcom/clk-pll.c regmap_write(regmap, pll->n_reg, config->n); n 321 drivers/clk/qcom/clk-pll.c regmap_update_bits(pll->clkr.regmap, pll->n_reg, 
0x7ffff, f->n); n 23 drivers/clk/qcom/clk-pll.h u16 n; n 64 drivers/clk/qcom/clk-pll.h u32 n; n 132 drivers/clk/qcom/clk-rcg.c static u32 mn_to_md(struct mn *mn, u32 m, u32 n, u32 md) n 140 drivers/clk/qcom/clk-rcg.c if (n) { n 143 drivers/clk/qcom/clk-rcg.c md |= ~n & mask_w; n 163 drivers/clk/qcom/clk-rcg.c static u32 mn_to_ns(struct mn *mn, u32 m, u32 n, u32 ns) n 171 drivers/clk/qcom/clk-rcg.c if (n) { n 172 drivers/clk/qcom/clk-rcg.c n = n - m; n 173 drivers/clk/qcom/clk-rcg.c n = ~n; n 174 drivers/clk/qcom/clk-rcg.c n &= BIT(mn->width) - 1; n 175 drivers/clk/qcom/clk-rcg.c n <<= mn->n_val_shift; n 176 drivers/clk/qcom/clk-rcg.c ns |= n; n 182 drivers/clk/qcom/clk-rcg.c static u32 mn_to_reg(struct mn *mn, u32 m, u32 n, u32 val) n 190 drivers/clk/qcom/clk-rcg.c if (n) { n 236 drivers/clk/qcom/clk-rcg.c md = mn_to_md(mn, f->m, f->n, md); n 240 drivers/clk/qcom/clk-rcg.c ns = mn_to_ns(mn, f->m, f->n, ns); n 247 drivers/clk/qcom/clk-rcg.c ns = mn_to_reg(mn, f->m, f->n, ns); n 252 drivers/clk/qcom/clk-rcg.c reg = mn_to_reg(mn, f->m, f->n, reg); n 308 drivers/clk/qcom/clk-rcg.c f.n = ns_m_to_n(&rcg->mn[bank], ns, f.m); n 326 drivers/clk/qcom/clk-rcg.c calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 pre_div) n 334 drivers/clk/qcom/clk-rcg.c do_div(tmp, n); n 345 drivers/clk/qcom/clk-rcg.c u32 pre_div, m = 0, n = 0, ns, md, mode = 0; n 354 drivers/clk/qcom/clk-rcg.c n = ns_m_to_n(mn, ns, m); n 363 drivers/clk/qcom/clk-rcg.c return calc_rate(parent_rate, m, n, mode, pre_div); n 370 drivers/clk/qcom/clk-rcg.c u32 m, n, pre_div, ns, md, mode, reg; n 380 drivers/clk/qcom/clk-rcg.c m = n = pre_div = mode = 0; n 386 drivers/clk/qcom/clk-rcg.c n = ns_m_to_n(mn, ns, m); n 396 drivers/clk/qcom/clk-rcg.c return calc_rate(parent_rate, m, n, mode, pre_div); n 419 drivers/clk/qcom/clk-rcg.c if (f->n) { n 421 drivers/clk/qcom/clk-rcg.c tmp = tmp * f->n; n 491 drivers/clk/qcom/clk-rcg.c md = mn_to_md(mn, f->m, f->n, md); n 498 drivers/clk/qcom/clk-rcg.c ctl = mn_to_reg(mn, f->m, f->n, ctl); n 501 drivers/clk/qcom/clk-rcg.c ns = mn_to_reg(mn, f->m, f->n, ns); n 503 drivers/clk/qcom/clk-rcg.c ns = mn_to_ns(mn, f->m, f->n, ns); n 653 drivers/clk/qcom/clk-rcg.c f.n = frac->den; n 10 drivers/clk/qcom/clk-rcg.h #define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } n 17 drivers/clk/qcom/clk-rcg.h u16 n; n 147 drivers/clk/qcom/clk-rcg2.c calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div) n 157 drivers/clk/qcom/clk-rcg2.c do_div(tmp, n); n 168 drivers/clk/qcom/clk-rcg2.c u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask; n 176 drivers/clk/qcom/clk-rcg2.c regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &n); n 177 drivers/clk/qcom/clk-rcg2.c n = ~n; n 178 drivers/clk/qcom/clk-rcg2.c n &= mask; n 179 drivers/clk/qcom/clk-rcg2.c n += m; n 188 drivers/clk/qcom/clk-rcg2.c return calc_rate(parent_rate, m, n, mode, hid_div); n 232 drivers/clk/qcom/clk-rcg2.c if (f->n) { n 234 drivers/clk/qcom/clk-rcg2.c tmp = tmp * f->n; n 273 drivers/clk/qcom/clk-rcg2.c if (rcg->mnd_width && f->n) { n 281 drivers/clk/qcom/clk-rcg2.c RCG_N_OFFSET(rcg), mask, ~(f->n - f->m)); n 286 drivers/clk/qcom/clk-rcg2.c RCG_D_OFFSET(rcg), mask, ~f->n); n 295 drivers/clk/qcom/clk-rcg2.c if (rcg->mnd_width && f->n && (f->m != f->n)) n 439 drivers/clk/qcom/clk-rcg2.c f.n = frac->den; n 702 drivers/clk/qcom/clk-rcg2.c f.n = frac->den; n 994 drivers/clk/qcom/clk-rcg2.c f->n = val; n 997 drivers/clk/qcom/clk-rcg2.c f->freq = calc_rate(prate, f->m, f->n, mode, f->pre_div); n 1039 drivers/clk/qcom/clk-rcg2.c u32 level, mask, 
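The Qualcomm clk-rcg / clk-rcg2 fragments keep repeating one M/N-counter trick: the hardware N and D fields store the bitwise complement of (n - m), so the programming path (mn_to_ns() and the regmap_update_bits(..., ~(f->n - f->m)) write) and the read-back path (invert, mask, add m back) are mirror images. A minimal round-trip sketch with made-up helper names and an arbitrary field width:

#include <stdint.h>
#include <assert.h>

/* Encode the value written into the N field: ~(n - m) truncated to the
 * field width, as in mn_to_ns() above. */
static uint32_t rcg_encode_n(uint32_t m, uint32_t n, unsigned int width)
{
        uint32_t mask = (1u << width) - 1;

        return (~(n - m)) & mask;
}

/* Recover n from the raw field, as the recalc-rate paths do:
 * invert, mask, then add m back. */
static uint32_t rcg_decode_n(uint32_t raw, uint32_t m, unsigned int width)
{
        uint32_t mask = (1u << width) - 1;

        return ((~raw) & mask) + m;
}

int main(void)
{
        uint32_t m = 1, n = 15;
        unsigned int width = 8;
        uint32_t raw = rcg_encode_n(m, n, width);

        assert(rcg_decode_n(raw, m, width) == n);       /* round-trips */
        return 0;
}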
cfg, m = 0, n = 0, mode, pre_div; n 1073 drivers/clk/qcom/clk-rcg2.c rcg->cmd_rcgr + SE_PERF_N_DFSR(level), &n); n 1074 drivers/clk/qcom/clk-rcg2.c n = ~n; n 1075 drivers/clk/qcom/clk-rcg2.c n &= mask; n 1076 drivers/clk/qcom/clk-rcg2.c n += m; n 1079 drivers/clk/qcom/clk-rcg2.c return calc_rate(parent_rate, m, n, mode, pre_div); n 219 drivers/clk/qcom/gcc-ipq806x.c .n = _n, \ n 28 drivers/clk/qcom/gcc-sdm660.c #define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } n 45 drivers/clk/qcom/lcc-ipq806x.c .n = 0xc7, n 3031 drivers/clk/qcom/mmcc-apq8084.c .n = 32, n 3045 drivers/clk/qcom/mmcc-apq8084.c .n = 16, n 42 drivers/clk/qcom/mmcc-msm8960.c #define F_MN(f, s, _m, _n) { .freq = f, .src = s, .m = _m, .n = _n } n 143 drivers/clk/qcom/mmcc-msm8960.c .n = 3, n 2310 drivers/clk/qcom/mmcc-msm8974.c .n = 32, n 2324 drivers/clk/qcom/mmcc-msm8974.c .n = 16, n 1031 drivers/clk/renesas/renesas-cpg-mssr.c const unsigned int *clks, unsigned int n) n 1035 drivers/clk/renesas/renesas-cpg-mssr.c for (i = 0, j = 0; i < num_mod_clks && j < n; i++) n 1045 drivers/clk/renesas/renesas-cpg-mssr.c unsigned int n) n 1049 drivers/clk/renesas/renesas-cpg-mssr.c for (i = 0, j = 0; i < num_mod_clks && j < n; i++) n 193 drivers/clk/renesas/renesas-cpg-mssr.h const unsigned int *clks, unsigned int n); n 197 drivers/clk/renesas/renesas-cpg-mssr.h unsigned int n); n 181 drivers/clk/rockchip/clk.c unsigned long *m, unsigned long *n) n 206 drivers/clk/rockchip/clk.c m, n); n 76 drivers/clk/spear/clk-vco-pll.c rate = (((2 * rate / 10000) * rtbl[index].m) / (mode * rtbl[index].n)); n 251 drivers/clk/spear/clk-vco-pll.c val |= (rtbl[i].n & PLL_DIV_N_MASK) << PLL_DIV_N_SHIFT; n 89 drivers/clk/spear/clk.h u8 n; n 235 drivers/clk/spear/spear1310_clock.c {.mode = 0, .m = 0x83, .n = 0x04, .p = 0x5}, /* vco 1572, pll 49.125 MHz */ n 236 drivers/clk/spear/spear1310_clock.c {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x3}, /* vco 1000, pll 125 MHz */ n 237 drivers/clk/spear/spear1310_clock.c {.mode = 0, .m = 0x64, .n = 0x06, .p = 0x1}, /* vco 800, pll 400 MHz */ n 238 drivers/clk/spear/spear1310_clock.c {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x1}, /* vco 1000, pll 500 MHz */ n 239 drivers/clk/spear/spear1310_clock.c {.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x1}, /* vco 1328, pll 664 MHz */ n 240 drivers/clk/spear/spear1310_clock.c {.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x1}, /* vco 1600, pll 800 MHz */ n 241 drivers/clk/spear/spear1310_clock.c {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1, pll 1 GHz */ n 246 drivers/clk/spear/spear1310_clock.c {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x2}, /* vco 1000, pll 250 MHz */ n 247 drivers/clk/spear/spear1310_clock.c {.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x2}, /* vco 1328, pll 332 MHz */ n 248 drivers/clk/spear/spear1310_clock.c {.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x2}, /* vco 1600, pll 400 MHz */ n 249 drivers/clk/spear/spear1310_clock.c {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1, pll 1 GHz */ n 168 drivers/clk/spear/spear1340_clock.c {.mode = 0, .m = 0x83, .n = 0x04, .p = 0x5}, /* vco 1572, pll 49.125 MHz */ n 169 drivers/clk/spear/spear1340_clock.c {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x3}, /* vco 1000, pll 125 MHz */ n 170 drivers/clk/spear/spear1340_clock.c {.mode = 0, .m = 0x64, .n = 0x06, .p = 0x1}, /* vco 800, pll 400 MHz */ n 171 drivers/clk/spear/spear1340_clock.c {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x1}, /* vco 1000, pll 500 MHz */ n 172 drivers/clk/spear/spear1340_clock.c {.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x1}, /* vco 1328, pll 664 MHz */ n 173 
drivers/clk/spear/spear1340_clock.c {.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x1}, /* vco 1600, pll 800 MHz */ n 174 drivers/clk/spear/spear1340_clock.c {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1, pll 1 GHz */ n 175 drivers/clk/spear/spear1340_clock.c {.mode = 0, .m = 0x96, .n = 0x06, .p = 0x0}, /* vco 1200, pll 1200 MHz */ n 180 drivers/clk/spear/spear1340_clock.c {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x2}, /* vco 1000, pll 250 MHz */ n 181 drivers/clk/spear/spear1340_clock.c {.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x2}, /* vco 1328, pll 332 MHz */ n 182 drivers/clk/spear/spear1340_clock.c {.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x2}, /* vco 1600, pll 400 MHz */ n 183 drivers/clk/spear/spear1340_clock.c {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1, pll 1 GHz */ n 101 drivers/clk/spear/spear3xx_clock.c {.mode = 0, .m = 0x53, .n = 0x0C, .p = 0x1}, /* vco 332 & pll 166 MHz */ n 102 drivers/clk/spear/spear3xx_clock.c {.mode = 0, .m = 0x85, .n = 0x0C, .p = 0x1}, /* vco 532 & pll 266 MHz */ n 103 drivers/clk/spear/spear3xx_clock.c {.mode = 0, .m = 0xA6, .n = 0x0C, .p = 0x1}, /* vco 664 & pll 332 MHz */ n 85 drivers/clk/spear/spear6xx_clock.c {.mode = 0, .m = 0x53, .n = 0x0F, .p = 0x1}, /* vco 332 & pll 166 MHz */ n 86 drivers/clk/spear/spear6xx_clock.c {.mode = 0, .m = 0x85, .n = 0x0F, .p = 0x1}, /* vco 532 & pll 266 MHz */ n 87 drivers/clk/spear/spear6xx_clock.c {.mode = 0, .m = 0xA6, .n = 0x0F, .p = 0x1}, /* vco 664 & pll 332 MHz */ n 294 drivers/clk/st/clkgen-fsyn.c unsigned long pdiv = 1, n; n 308 drivers/clk/st/clkgen-fsyn.c n = output * pdiv / input; n 309 drivers/clk/st/clkgen-fsyn.c if (n < 16) n 310 drivers/clk/st/clkgen-fsyn.c n = 16; n 311 drivers/clk/st/clkgen-fsyn.c fs->ndiv = n - 16; /* Converting formula value to reg value */ n 256 drivers/clk/st/clkgen-pll.c unsigned long i, n; n 274 drivers/clk/st/clkgen-pll.c n = i * output / (2 * input); n 277 drivers/clk/st/clkgen-pll.c if (n < 8) n 279 drivers/clk/st/clkgen-pll.c if (n > 200) n 282 drivers/clk/st/clkgen-pll.c new_freq = (input * 2 * n) / i; n 288 drivers/clk/st/clkgen-pll.c pll->ndiv = n; n 418 drivers/clk/st/clkgen-pll.c unsigned long i, infin, n; n 433 drivers/clk/st/clkgen-pll.c n = output / (infin * 2); n 434 drivers/clk/st/clkgen-pll.c if (n < 8 || n > 246) n 436 drivers/clk/st/clkgen-pll.c if (n < 246) n 437 drivers/clk/st/clkgen-pll.c n++; /* To work around 'y' when n=x.y */ n 439 drivers/clk/st/clkgen-pll.c for (; n >= 8 && deviation; n--) { n 440 drivers/clk/st/clkgen-pll.c new_freq = infin * 2 * n; n 447 drivers/clk/st/clkgen-pll.c pll->ndiv = n; n 30 drivers/clk/sunxi-ng/ccu-sun4i-a10.c .n = _SUNXI_CCU_MULT_OFFSET(8, 5, 0), n 58 drivers/clk/sunxi-ng/ccu-sun4i-a10.c { .rate = 22579200, .pattern = 0xc0010d84, .m = 8, .n = 7 }, n 59 drivers/clk/sunxi-ng/ccu-sun4i-a10.c { .rate = 24576000, .pattern = 0xc000ac02, .m = 14, .n = 14 }, n 64 drivers/clk/sunxi-ng/ccu-sun4i-a10.c .n = _SUNXI_CCU_MULT_OFFSET(8, 7, 0), n 98 drivers/clk/sunxi-ng/ccu-sun4i-a10.c .n = _SUNXI_CCU_MULT_OFFSET(8, 5, 0), n 113 drivers/clk/sunxi-ng/ccu-sun4i-a10.c .n = _SUNXI_CCU_MULT_OFFSET(8, 5, 0), n 126 drivers/clk/sunxi-ng/ccu-sun4i-a10.c .n = _SUNXI_CCU_MULT_OFFSET(8, 5, 0), n 152 drivers/clk/sunxi-ng/ccu-sun4i-a10.c .n = _SUNXI_CCU_MULT_OFFSET(8, 5, 0), n 201 drivers/clk/sunxi-ng/ccu-sun4i-a10.c .n = _SUNXI_CCU_MULT_OFFSET(8, 5, 0), n 29 drivers/clk/sunxi-ng/ccu-sun50i-a64.c .n = _SUNXI_CCU_MULT(8, 5), n 57 drivers/clk/sunxi-ng/ccu-sun50i-a64.c { .rate = 22579200, .pattern = 0xc0010d84, .m = 8, .n = 7 }, n 58 
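The SPEAr rate tables above pair each {m, n, p} row with a "vco ..., pll ..." comment. Assuming the usual 24 MHz oscillator for the spear1310/1340 rows (the input clock is not part of the listing, and the spear6xx table clearly implies a different one), the comments are consistent with vco = 2 * fin * m / n and pll = vco >> p. A quick check of the 125 MHz row:

#include <stdint.h>
#include <stdio.h>

struct pll_rate_tbl { unsigned int mode, m, n, p; };

/* Assumed relation, inferred from the table comments, not quoted code. */
static uint64_t vco_rate(uint64_t fin, const struct pll_rate_tbl *t)
{
        return 2 * fin * t->m / t->n;
}

int main(void)
{
        const struct pll_rate_tbl row = { .mode = 0, .m = 0x7D, .n = 0x06, .p = 0x3 };
        uint64_t fin = 24000000;                /* assumed oscillator */
        uint64_t vco = vco_rate(fin, &row);     /* 2 * 24e6 * 125 / 6 = 1 GHz */
        uint64_t pll = vco >> row.p;            /* 1 GHz >> 3 = 125 MHz       */

        printf("vco=%llu pll=%llu\n",
               (unsigned long long)vco, (unsigned long long)pll);
        return 0;
}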
drivers/clk/sunxi-ng/ccu-sun50i-a64.c { .rate = 24576000, .pattern = 0xc000ac02, .m = 14, .n = 14 }, n 109 drivers/clk/sunxi-ng/ccu-sun50i-a64.c .n = _SUNXI_CCU_MULT(8, 5), n 123 drivers/clk/sunxi-ng/ccu-sun50i-a64.c .n = _SUNXI_CCU_MULT(8, 5), n 176 drivers/clk/sunxi-ng/ccu-sun50i-a64.c .n = _SUNXI_CCU_MULT(8, 4), n 53 drivers/clk/sunxi-ng/ccu-sun50i-h6.c .n = _SUNXI_CCU_MULT_MIN(8, 8, 12), n 68 drivers/clk/sunxi-ng/ccu-sun50i-h6.c .n = _SUNXI_CCU_MULT_MIN(8, 8, 12), n 85 drivers/clk/sunxi-ng/ccu-sun50i-h6.c .n = _SUNXI_CCU_MULT_MIN(8, 8, 12), n 102 drivers/clk/sunxi-ng/ccu-sun50i-h6.c .n = _SUNXI_CCU_MULT_MIN(8, 8, 12), n 121 drivers/clk/sunxi-ng/ccu-sun50i-h6.c .n = _SUNXI_CCU_MULT_MIN(8, 8, 12), n 139 drivers/clk/sunxi-ng/ccu-sun50i-h6.c .n = _SUNXI_CCU_MULT_MIN(8, 8, 12), n 157 drivers/clk/sunxi-ng/ccu-sun50i-h6.c .n = _SUNXI_CCU_MULT_MIN(8, 8, 12), n 172 drivers/clk/sunxi-ng/ccu-sun50i-h6.c .n = _SUNXI_CCU_MULT_MIN(8, 8, 12), n 187 drivers/clk/sunxi-ng/ccu-sun50i-h6.c .n = _SUNXI_CCU_MULT_MIN(8, 8, 12), n 209 drivers/clk/sunxi-ng/ccu-sun50i-h6.c .n = _SUNXI_CCU_MULT_MIN(8, 8, 12), n 28 drivers/clk/sunxi-ng/ccu-sun5i.c .n = _SUNXI_CCU_MULT_OFFSET(8, 5, 0), n 56 drivers/clk/sunxi-ng/ccu-sun5i.c { .rate = 22579200, .pattern = 0xc0010d84, .m = 8, .n = 7 }, n 57 drivers/clk/sunxi-ng/ccu-sun5i.c { .rate = 24576000, .pattern = 0xc000ac02, .m = 14, .n = 14 }, n 62 drivers/clk/sunxi-ng/ccu-sun5i.c .n = _SUNXI_CCU_MULT_OFFSET(8, 7, 0), n 100 drivers/clk/sunxi-ng/ccu-sun5i.c .n = _SUNXI_CCU_MULT_OFFSET(8, 5, 0), n 115 drivers/clk/sunxi-ng/ccu-sun5i.c .n = _SUNXI_CCU_MULT_OFFSET(8, 5, 0), n 142 drivers/clk/sunxi-ng/ccu-sun5i.c .n = _SUNXI_CCU_MULT_OFFSET(8, 5, 0), n 55 drivers/clk/sunxi-ng/ccu-sun6i-a31.c { .rate = 22579200, .pattern = 0xc0010d84, .m = 8, .n = 7 }, n 56 drivers/clk/sunxi-ng/ccu-sun6i-a31.c { .rate = 24576000, .pattern = 0xc000ac02, .m = 14, .n = 14 }, n 31 drivers/clk/sunxi-ng/ccu-sun8i-a23.c .n = _SUNXI_CCU_MULT(8, 5), n 59 drivers/clk/sunxi-ng/ccu-sun8i-a23.c { .rate = 22579200, .pattern = 0xc0010d84, .m = 8, .n = 7 }, n 60 drivers/clk/sunxi-ng/ccu-sun8i-a23.c { .rate = 24576000, .pattern = 0xc000ac02, .m = 14, .n = 14 }, n 29 drivers/clk/sunxi-ng/ccu-sun8i-a33.c .n = _SUNXI_CCU_MULT(8, 5), n 57 drivers/clk/sunxi-ng/ccu-sun8i-a33.c { .rate = 22579200, .pattern = 0xc0010d84, .m = 8, .n = 7 }, n 58 drivers/clk/sunxi-ng/ccu-sun8i-a33.c { .rate = 24576000, .pattern = 0xc000ac02, .m = 14, .n = 14 }, n 74 drivers/clk/sunxi-ng/ccu-sun8i-a83t.c { .rate = 45158400, .pattern = 0xc00121ff, .m = 29, .n = 54 }, n 75 drivers/clk/sunxi-ng/ccu-sun8i-a83t.c { .rate = 49152000, .pattern = 0xc000e147, .m = 30, .n = 61 }, n 81 drivers/clk/sunxi-ng/ccu-sun8i-a83t.c .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), n 101 drivers/clk/sunxi-ng/ccu-sun8i-a83t.c .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), n 118 drivers/clk/sunxi-ng/ccu-sun8i-a83t.c .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), n 134 drivers/clk/sunxi-ng/ccu-sun8i-a83t.c .n = _SUNXI_CCU_MULT_MIN(8, 8, 12), n 150 drivers/clk/sunxi-ng/ccu-sun8i-a83t.c .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), n 166 drivers/clk/sunxi-ng/ccu-sun8i-a83t.c .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), n 182 drivers/clk/sunxi-ng/ccu-sun8i-a83t.c .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), n 198 drivers/clk/sunxi-ng/ccu-sun8i-a83t.c .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), n 214 drivers/clk/sunxi-ng/ccu-sun8i-a83t.c .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), n 51 
drivers/clk/sunxi-ng/ccu-sun8i-h3.c { .rate = 22579200, .pattern = 0xc0010d84, .m = 8, .n = 7 }, n 52 drivers/clk/sunxi-ng/ccu-sun8i-h3.c { .rate = 24576000, .pattern = 0xc000ac02, .m = 14, .n = 14 }, n 30 drivers/clk/sunxi-ng/ccu-sun8i-r40.c .n = _SUNXI_CCU_MULT(8, 5), n 102 drivers/clk/sunxi-ng/ccu-sun8i-r40.c .n = _SUNXI_CCU_MULT(8, 5), n 136 drivers/clk/sunxi-ng/ccu-sun8i-r40.c .n = _SUNXI_CCU_MULT(8, 5), n 165 drivers/clk/sunxi-ng/ccu-sun8i-r40.c .n = _SUNXI_CCU_MULT(8, 5), n 215 drivers/clk/sunxi-ng/ccu-sun8i-r40.c .n = _SUNXI_CCU_MULT(8, 4), n 73 drivers/clk/sunxi-ng/ccu-sun9i-a80.c .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), n 88 drivers/clk/sunxi-ng/ccu-sun9i-a80.c .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), n 104 drivers/clk/sunxi-ng/ccu-sun9i-a80.c .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), n 120 drivers/clk/sunxi-ng/ccu-sun9i-a80.c .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), n 136 drivers/clk/sunxi-ng/ccu-sun9i-a80.c .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), n 151 drivers/clk/sunxi-ng/ccu-sun9i-a80.c .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), n 167 drivers/clk/sunxi-ng/ccu-sun9i-a80.c .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), n 183 drivers/clk/sunxi-ng/ccu-sun9i-a80.c .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), n 199 drivers/clk/sunxi-ng/ccu-sun9i-a80.c .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), n 215 drivers/clk/sunxi-ng/ccu-sun9i-a80.c .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0), n 30 drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c .n = _SUNXI_CCU_MULT(8, 5), n 99 drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c .n = _SUNXI_CCU_MULT(8, 5), n 14 drivers/clk/sunxi-ng/ccu_nk.c unsigned long n, min_n, max_n; n 41 drivers/clk/sunxi-ng/ccu_nk.c nk->n = best_n; n 69 drivers/clk/sunxi-ng/ccu_nk.c unsigned long rate, n, k; n 74 drivers/clk/sunxi-ng/ccu_nk.c n = reg >> nk->n.shift; n 75 drivers/clk/sunxi-ng/ccu_nk.c n &= (1 << nk->n.width) - 1; n 76 drivers/clk/sunxi-ng/ccu_nk.c n += nk->n.offset; n 77 drivers/clk/sunxi-ng/ccu_nk.c if (!n) n 78 drivers/clk/sunxi-ng/ccu_nk.c n++; n 86 drivers/clk/sunxi-ng/ccu_nk.c rate = parent_rate * n * k; n 102 drivers/clk/sunxi-ng/ccu_nk.c _nk.min_n = nk->n.min ?: 1; n 103 drivers/clk/sunxi-ng/ccu_nk.c _nk.max_n = nk->n.max ?: 1 << nk->n.width; n 108 drivers/clk/sunxi-ng/ccu_nk.c rate = *parent_rate * _nk.n * _nk.k; n 127 drivers/clk/sunxi-ng/ccu_nk.c _nk.min_n = nk->n.min ?: 1; n 128 drivers/clk/sunxi-ng/ccu_nk.c _nk.max_n = nk->n.max ?: 1 << nk->n.width; n 137 drivers/clk/sunxi-ng/ccu_nk.c reg &= ~GENMASK(nk->n.width + nk->n.shift - 1, nk->n.shift); n 141 drivers/clk/sunxi-ng/ccu_nk.c reg |= (_nk.n - nk->n.offset) << nk->n.shift; n 25 drivers/clk/sunxi-ng/ccu_nk.h struct ccu_mult_internal n; n 42 drivers/clk/sunxi-ng/ccu_nk.h .n = _SUNXI_CCU_MULT(_nshift, _nwidth), \ n 14 drivers/clk/sunxi-ng/ccu_nkm.c unsigned long n, min_n, max_n; n 45 drivers/clk/sunxi-ng/ccu_nkm.c nkm->n = best_n; n 75 drivers/clk/sunxi-ng/ccu_nkm.c unsigned long n, m, k, rate; n 80 drivers/clk/sunxi-ng/ccu_nkm.c n = reg >> nkm->n.shift; n 81 drivers/clk/sunxi-ng/ccu_nkm.c n &= (1 << nkm->n.width) - 1; n 82 drivers/clk/sunxi-ng/ccu_nkm.c n += nkm->n.offset; n 83 drivers/clk/sunxi-ng/ccu_nkm.c if (!n) n 84 drivers/clk/sunxi-ng/ccu_nkm.c n++; n 98 drivers/clk/sunxi-ng/ccu_nkm.c rate = parent_rate * n * k / m; n 115 drivers/clk/sunxi-ng/ccu_nkm.c _nkm.min_n = nkm->n.min ?: 1; n 116 drivers/clk/sunxi-ng/ccu_nkm.c _nkm.max_n = nkm->n.max ?: 1 << nkm->n.width; n 127 drivers/clk/sunxi-ng/ccu_nkm.c rate = 
*parent_rate * _nkm.n * _nkm.k / _nkm.m; n 155 drivers/clk/sunxi-ng/ccu_nkm.c _nkm.min_n = nkm->n.min ?: 1; n 156 drivers/clk/sunxi-ng/ccu_nkm.c _nkm.max_n = nkm->n.max ?: 1 << nkm->n.width; n 167 drivers/clk/sunxi-ng/ccu_nkm.c reg &= ~GENMASK(nkm->n.width + nkm->n.shift - 1, nkm->n.shift); n 171 drivers/clk/sunxi-ng/ccu_nkm.c reg |= (_nkm.n - nkm->n.offset) << nkm->n.shift; n 24 drivers/clk/sunxi-ng/ccu_nkm.h struct ccu_mult_internal n; n 44 drivers/clk/sunxi-ng/ccu_nkm.h .n = _SUNXI_CCU_MULT(_nshift, _nwidth), \ n 65 drivers/clk/sunxi-ng/ccu_nkm.h .n = _SUNXI_CCU_MULT(_nshift, _nwidth), \ n 14 drivers/clk/sunxi-ng/ccu_nkmp.c unsigned long n, min_n, max_n; n 21 drivers/clk/sunxi-ng/ccu_nkmp.c unsigned long n, unsigned long k, n 26 drivers/clk/sunxi-ng/ccu_nkmp.c rate *= n * k; n 64 drivers/clk/sunxi-ng/ccu_nkmp.c nkmp->n = best_n; n 95 drivers/clk/sunxi-ng/ccu_nkmp.c unsigned long n, m, k, p, rate; n 100 drivers/clk/sunxi-ng/ccu_nkmp.c n = reg >> nkmp->n.shift; n 101 drivers/clk/sunxi-ng/ccu_nkmp.c n &= (1 << nkmp->n.width) - 1; n 102 drivers/clk/sunxi-ng/ccu_nkmp.c n += nkmp->n.offset; n 103 drivers/clk/sunxi-ng/ccu_nkmp.c if (!n) n 104 drivers/clk/sunxi-ng/ccu_nkmp.c n++; n 121 drivers/clk/sunxi-ng/ccu_nkmp.c rate = ccu_nkmp_calc_rate(parent_rate, n, k, m, 1 << p); n 144 drivers/clk/sunxi-ng/ccu_nkmp.c _nkmp.min_n = nkmp->n.min ?: 1; n 145 drivers/clk/sunxi-ng/ccu_nkmp.c _nkmp.max_n = nkmp->n.max ?: 1 << nkmp->n.width; n 155 drivers/clk/sunxi-ng/ccu_nkmp.c rate = ccu_nkmp_calc_rate(*parent_rate, _nkmp.n, _nkmp.k, n 175 drivers/clk/sunxi-ng/ccu_nkmp.c _nkmp.min_n = nkmp->n.min ?: 1; n 176 drivers/clk/sunxi-ng/ccu_nkmp.c _nkmp.max_n = nkmp->n.max ?: 1 << nkmp->n.width; n 192 drivers/clk/sunxi-ng/ccu_nkmp.c if (nkmp->n.width) n 193 drivers/clk/sunxi-ng/ccu_nkmp.c n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1, n 194 drivers/clk/sunxi-ng/ccu_nkmp.c nkmp->n.shift); n 210 drivers/clk/sunxi-ng/ccu_nkmp.c reg |= ((_nkmp.n - nkmp->n.offset) << nkmp->n.shift) & n_mask; n 24 drivers/clk/sunxi-ng/ccu_nkmp.h struct ccu_mult_internal n; n 44 drivers/clk/sunxi-ng/ccu_nkmp.h .n = _SUNXI_CCU_MULT(_nshift, _nwidth), \ n 15 drivers/clk/sunxi-ng/ccu_nm.c unsigned long n, min_n, max_n; n 20 drivers/clk/sunxi-ng/ccu_nm.c unsigned long n, unsigned long m) n 24 drivers/clk/sunxi-ng/ccu_nm.c rate *= n; n 53 drivers/clk/sunxi-ng/ccu_nm.c nm->n = best_n; n 83 drivers/clk/sunxi-ng/ccu_nm.c unsigned long n, m; n 97 drivers/clk/sunxi-ng/ccu_nm.c n = reg >> nm->n.shift; n 98 drivers/clk/sunxi-ng/ccu_nm.c n &= (1 << nm->n.width) - 1; n 99 drivers/clk/sunxi-ng/ccu_nm.c n += nm->n.offset; n 100 drivers/clk/sunxi-ng/ccu_nm.c if (!n) n 101 drivers/clk/sunxi-ng/ccu_nm.c n++; n 110 drivers/clk/sunxi-ng/ccu_nm.c rate = ccu_sdm_helper_read_rate(&nm->common, &nm->sdm, m, n); n 112 drivers/clk/sunxi-ng/ccu_nm.c rate = ccu_nm_calc_rate(parent_rate, n, m); n 155 drivers/clk/sunxi-ng/ccu_nm.c _nm.min_n = nm->n.min ?: 1; n 156 drivers/clk/sunxi-ng/ccu_nm.c _nm.max_n = nm->n.max ?: 1 << nm->n.width; n 161 drivers/clk/sunxi-ng/ccu_nm.c rate = ccu_nm_calc_rate(*parent_rate, _nm.n, _nm.m); n 199 drivers/clk/sunxi-ng/ccu_nm.c _nm.min_n = nm->n.min ?: 1; n 200 drivers/clk/sunxi-ng/ccu_nm.c _nm.max_n = nm->n.max ?: 1 << nm->n.width; n 209 drivers/clk/sunxi-ng/ccu_nm.c &_nm.m, &_nm.n); n 218 drivers/clk/sunxi-ng/ccu_nm.c reg &= ~GENMASK(nm->n.width + nm->n.shift - 1, nm->n.shift); n 221 drivers/clk/sunxi-ng/ccu_nm.c reg |= (_nm.n - nm->n.offset) << nm->n.shift; n 26 drivers/clk/sunxi-ng/ccu_nm.h struct ccu_mult_internal n; n 47 
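The sunxi-ng ccu_nk / ccu_nkm / ccu_nkmp fragments compute rates from the same factor set: multiply by n and k, divide by m and by 2^p. A condensed sketch of that arithmetic (register fields, offsets and min/max clamping are omitted; the 0-to-1 fixup is shown in the listing for n and merely assumed here for k and m):

#include <stdint.h>

static uint64_t nkmp_calc_rate(uint64_t parent_rate,
                               unsigned long n, unsigned long k,
                               unsigned long m, unsigned int p)
{
        uint64_t rate = parent_rate;

        if (!n)                 /* hardware value 0 behaves as 1, as in the driver */
                n = 1;
        if (!k)                 /* assumed to follow the same rule */
                k = 1;
        if (!m)                 /* assumed to follow the same rule */
                m = 1;

        rate *= n * k;
        rate /= (uint64_t)m << p;       /* p is a power-of-two post-divider exponent */

        return rate;
}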
drivers/clk/sunxi-ng/ccu_nm.h .n = _SUNXI_CCU_MULT(_nshift, _nwidth), \ n 70 drivers/clk/sunxi-ng/ccu_nm.h .n = _SUNXI_CCU_MULT(_nshift, _nwidth), \ n 95 drivers/clk/sunxi-ng/ccu_nm.h .n = _SUNXI_CCU_MULT(_nshift, _nwidth), \ n 123 drivers/clk/sunxi-ng/ccu_nm.h .n = _SUNXI_CCU_MULT(_nshift, _nwidth), \ n 147 drivers/clk/sunxi-ng/ccu_nm.h .n = _SUNXI_CCU_MULT(_nshift, _nwidth), \ n 108 drivers/clk/sunxi-ng/ccu_sdm.c u32 m, u32 n) n 129 drivers/clk/sunxi-ng/ccu_sdm.c sdm->table[i].m == m && sdm->table[i].n == n) n 139 drivers/clk/sunxi-ng/ccu_sdm.c unsigned long *m, unsigned long *n) n 149 drivers/clk/sunxi-ng/ccu_sdm.c *n = sdm->table[i].n; n 28 drivers/clk/sunxi-ng/ccu_sdm.h u32 n; n 65 drivers/clk/sunxi-ng/ccu_sdm.h u32 m, u32 n); n 70 drivers/clk/sunxi-ng/ccu_sdm.h unsigned long *m, unsigned long *n); n 43 drivers/clk/sunxi/clk-factors.c u8 n = 1, k = 0, p = 0, m = 0; n 54 drivers/clk/sunxi/clk-factors.c n = FACTOR_GET(config->nshift, config->nwidth, reg); n 65 drivers/clk/sunxi/clk-factors.c .n = n, n 83 drivers/clk/sunxi/clk-factors.c rate = (parent_rate * (n + config->n_start) * (k + 1) >> p) / (m + 1); n 153 drivers/clk/sunxi/clk-factors.c reg = FACTOR_SET(config->nshift, config->nwidth, reg, req.n); n 26 drivers/clk/sunxi/clk-factors.h u8 n; n 28 drivers/clk/sunxi/clk-sun9i-core.c int n; n 33 drivers/clk/sunxi/clk-sun9i-core.c n = DIV_ROUND_UP(req->rate, 6000000); n 36 drivers/clk/sunxi/clk-sun9i-core.c if (n > 255) { n 38 drivers/clk/sunxi/clk-sun9i-core.c n = (n + 1) / 2; n 42 drivers/clk/sunxi/clk-sun9i-core.c if (n > 255) { n 44 drivers/clk/sunxi/clk-sun9i-core.c n = (n + 1) / 2; n 48 drivers/clk/sunxi/clk-sun9i-core.c if (n > 255) n 49 drivers/clk/sunxi/clk-sun9i-core.c n = 255; n 50 drivers/clk/sunxi/clk-sun9i-core.c else if (n < 12) n 51 drivers/clk/sunxi/clk-sun9i-core.c n = 12; n 53 drivers/clk/sunxi/clk-sun9i-core.c req->rate = ((24000000 * n) >> p) / (m + 1); n 54 drivers/clk/sunxi/clk-sun9i-core.c req->n = n; n 71 drivers/clk/sunxi/clk-sunxi.c req->n = div / 4; n 137 drivers/clk/sunxi/clk-sunxi.c req->n = freq_mhz * (req->m + 1) / ((req->k + 1) * parent_freq_mhz) n 144 drivers/clk/sunxi/clk-sunxi.c if ((req->n + 1) > 31 && (req->m + 1) > 1) { n 145 drivers/clk/sunxi/clk-sunxi.c req->n = (req->n + 1) / 2 - 1; n 191 drivers/clk/sunxi/clk-sunxi.c req->n = div / 4 - 1; n 218 drivers/clk/sunxi/clk-sunxi.c req->n = DIV_ROUND_UP(div, (req->k + 1)); n 240 drivers/clk/sunxi/clk-sunxi.c req->n = DIV_ROUND_UP(div, (req->k + 1)) - 1; n 535 drivers/clk/tegra/clk-pll.c cfg->n = sel->n; n 582 drivers/clk/tegra/clk-pll.c cfg->n = cfg->output_rate / cfreq; n 586 drivers/clk/tegra/clk-pll.c cfg->n > divn_max(pll) || (1 << p_div) > divp_max(pll) || n 591 drivers/clk/tegra/clk-pll.c cfg->output_rate = cfg->n * DIV_ROUND_UP(parent_rate, cfg->m); n 661 drivers/clk/tegra/clk-pll.c (cfg->n << div_nmp->override_divn_shift); n 670 drivers/clk/tegra/clk-pll.c (cfg->n << divn_shift(pll)) | n 696 drivers/clk/tegra/clk-pll.c cfg->n = (val >> div_nmp->override_divn_shift) & divn_mask(pll); n 701 drivers/clk/tegra/clk-pll.c cfg->n = (val >> div_nmp->divn_shift) & divn_mask(pll); n 727 drivers/clk/tegra/clk-pll.c if (cfg->n >= PLLDU_LFCON_SET_DIVN) n 810 drivers/clk/tegra/clk-pll.c if (old_cfg.m != cfg.m || old_cfg.n != cfg.n || old_cfg.p != cfg.p || n 885 drivers/clk/tegra/clk-pll.c rate *= cfg.n; n 968 drivers/clk/tegra/clk-pll.c val |= sel.n << divn_shift(pll); n 1188 drivers/clk/tegra/clk-pll.c cfg->n = cfg->output_rate * cfg->m / parent_rate; n 1197 drivers/clk/tegra/clk-pll.c if (cfg->n > 
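The clk-sun9i-core.c lines above derive n from the target rate in 6 MHz steps (DIV_ROUND_UP(req->rate, 6000000)), clamp it to [12, 255] and evaluate ((24 MHz * n) >> p) / (m + 1). Only the n-related lines appear in this listing, so how the driver picks p and m when n would overflow is not reproduced; the sketch below just evaluates the quoted formula:

#include <stdint.h>

static uint64_t sun9i_factor_rate(uint64_t n, unsigned int p, unsigned int m)
{
        if (n > 255)
                n = 255;
        else if (n < 12)
                n = 12;

        return (24000000ULL * n >> p) / (m + 1);
}

/* Plugging numbers in: a 1.2 GHz request gives n = 200 in 6 MHz steps, and
 * sun9i_factor_rate(200, 2, 0) evaluates to 1200000000 -- the p and m values
 * here are arbitrary, chosen only to make the example land on the request. */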
divn_max(pll) || cfg->output_rate > pll->params->vco_max) n 1301 drivers/clk/tegra/clk-pll.c if (old_cfg.m != cfg.m || old_cfg.n != cfg.n || old_cfg.p != cfg.p) n 1329 drivers/clk/tegra/clk-pll.c output_rate *= cfg.n; n 1407 drivers/clk/tegra/clk-pll.c unsigned long input_rate, u32 n) n 1433 drivers/clk/tegra/clk-pll.c val |= n <= n_threshold ? n 1462 drivers/clk/tegra/clk-pll.c if (old_cfg.n == cfg.n && old_cfg.p == cfg.p) n 1469 drivers/clk/tegra/clk-pll.c ret = _pllcx_update_dynamic_coef(pll, parent_rate, cfg.n); n 1489 drivers/clk/tegra/clk-pll.c u16 m, n; n 1493 drivers/clk/tegra/clk-pll.c n = rate * m / parent_rate; n 1495 drivers/clk/tegra/clk-pll.c output_rate *= n; n 1500 drivers/clk/tegra/clk-pll.c cfg->n = n; n 1521 drivers/clk/tegra/clk-pll.c if (cfg.m != old_cfg.m || cfg.n != old_cfg.n) { n 1549 drivers/clk/tegra/clk-pll.c rate *= cfg.n; n 1611 drivers/clk/tegra/clk-pll.c val |= sel.n << divn_shift(pll); n 2176 drivers/clk/tegra/clk-pll.c cfg.n = cfg.m * pll_params->vco_min / parent_rate; n 2199 drivers/clk/tegra/clk-pll.c _pllcx_update_dynamic_coef(pll, parent_rate, cfg.n); n 2318 drivers/clk/tegra/clk-pll.c cfg.n = cfg.m * pll_params->vco_min / parent_rate; n 2446 drivers/clk/tegra/clk-pll.c val |= sel.n << divn_shift(pll); n 265 drivers/clk/tegra/clk-tegra210.c #define sdin_get_n_eff(cfg) ((cfg)->n * PLL_SDM_COEFF + ((cfg)->sdm_data ? \ n 1363 drivers/clk/tegra/clk-tegra210.c val |= cfg->n << PLLX_MISC2_NDIV_NEW_SHIFT; n 1377 drivers/clk/tegra/clk-tegra210.c base |= cfg->n << pllx->params->div_nmp->divn_shift; n 1386 drivers/clk/tegra/clk-tegra210.c __clk_get_name(pllx->hw.clk), cfg->m, cfg->n, cfg->p, n 1387 drivers/clk/tegra/clk-tegra210.c cfg->input_rate / cfg->m * cfg->n / n 1434 drivers/clk/tegra/clk-tegra210.c cfg->n = p_rate / cf; n 1439 drivers/clk/tegra/clk-tegra210.c unsigned long rem = p_rate - cf * cfg->n; n 1451 drivers/clk/tegra/clk-tegra210.c cfg->output_rate *= cfg->n; n 1473 drivers/clk/tegra/clk-tegra210.c cfg->n = sdin_get_n_eff(cfg); n 2849 drivers/clk/tegra/clk-tegra210.c reg |= fentry->n << 8; n 108 drivers/clk/tegra/clk.h u32 n; n 67 drivers/clk/ti/clkt_dpll.c static int _dpll_test_fint(struct clk_hw_omap *clk, unsigned int n) n 76 drivers/clk/ti/clkt_dpll.c fint = clk_hw_get_rate(clk_hw_get_parent(&clk->hw)) / n; n 93 drivers/clk/ti/clkt_dpll.c n); n 94 drivers/clk/ti/clkt_dpll.c dd->max_divider = n; n 98 drivers/clk/ti/clkt_dpll.c n); n 99 drivers/clk/ti/clkt_dpll.c dd->min_divider = n; n 103 drivers/clk/ti/clkt_dpll.c pr_debug("rejecting n=%d due to Fint failure\n", n); n 111 drivers/clk/ti/clkt_dpll.c unsigned int m, unsigned int n) n 116 drivers/clk/ti/clkt_dpll.c do_div(num, n); n 140 drivers/clk/ti/clkt_dpll.c static int _dpll_test_mult(int *m, int n, unsigned long *new_rate, n 155 drivers/clk/ti/clkt_dpll.c *new_rate = _dpll_compute_new_rate(parent_rate, *m, n); n 169 drivers/clk/ti/clkt_dpll.c *new_rate = _dpll_compute_new_rate(parent_rate, *m, n); n 286 drivers/clk/ti/clkt_dpll.c int m, n, r, scaled_max_m; n 314 drivers/clk/ti/clkt_dpll.c for (n = dd->min_divider; n <= dd->max_divider; n++) { n 316 drivers/clk/ti/clkt_dpll.c r = _dpll_test_fint(clk, n); n 323 drivers/clk/ti/clkt_dpll.c m = scaled_rt_rp * n; n 334 drivers/clk/ti/clkt_dpll.c r = _dpll_test_mult(&m, n, &new_rate, target_rate, n 349 drivers/clk/ti/clkt_dpll.c min_delta_n = n; n 353 drivers/clk/ti/clkt_dpll.c clk_name, m, n, new_rate); n 93 drivers/clk/ti/dpll3xxx.c static u16 _omap3_dpll_compute_freqsel(struct clk_hw_omap *clk, u8 n) n 98 drivers/clk/ti/dpll3xxx.c fint = 
clk_hw_get_rate(clk->dpll_data->clk_ref) / n; n 123 drivers/clk/ti/dpll3xxx.c pr_debug("clock: unknown freqsel setting for %d\n", n); n 247 drivers/clk/ti/dpll3xxx.c static void _lookup_dco(struct clk_hw_omap *clk, u8 *dco, u16 m, u8 n) n 252 drivers/clk/ti/dpll3xxx.c fint = (clkinp / n) * m; n 272 drivers/clk/ti/dpll3xxx.c static void _lookup_sddiv(struct clk_hw_omap *clk, u8 *sd_div, u16 m, u8 n) n 284 drivers/clk/ti/dpll3xxx.c mod1 = (clkinp * m) % (250 * n); n 285 drivers/clk/ti/dpll3xxx.c sd = (clkinp * m) / (250 * n); n 968 drivers/clk/ti/dpll3xxx.c unsigned int rate, m, n; n 1003 drivers/clk/ti/dpll3xxx.c dd->last_rounded_n = d->n; n 1004 drivers/clk/ti/dpll3xxx.c dd->last_rounded_rate = div_u64((u64)parent_rate * d->m, d->n); n 345 drivers/clk/zte/clk.c unsigned long rate, m, n; n 348 drivers/clk/zte/clk.c n = (reg_frac >> 16) & 0xffff; n 350 drivers/clk/zte/clk.c m = (reg_int & 0xffff) * n + m; n 351 drivers/clk/zte/clk.c rate = (parent_rate * n) / m; n 361 drivers/clk/zte/clk.c unsigned long m, n, div; n 370 drivers/clk/zte/clk.c n = rate; n 372 drivers/clk/zte/clk.c div = gcd(m, n); n 374 drivers/clk/zte/clk.c n = n / div; n 376 drivers/clk/zte/clk.c if ((m >> 16) || (n >> 16)) { n 377 drivers/clk/zte/clk.c if (m > n) { n 378 drivers/clk/zte/clk.c n = n * 0xffff / m; n 381 drivers/clk/zte/clk.c m = m * 0xffff / n; n 382 drivers/clk/zte/clk.c n = 0xffff; n 385 drivers/clk/zte/clk.c reg_frac = m | (n << 16); n 387 drivers/clk/zte/clk.c div_table->rate = parent_rate * n / (reg_int * n + m); n 34 drivers/clocksource/arm_arch_timer.c #define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4)) n 36 drivers/clocksource/arm_arch_timer.c #define CNTACR(n) (0x40 + ((n) * 4)) n 1429 drivers/clocksource/arm_arch_timer.c u32 n; n 1432 drivers/clocksource/arm_arch_timer.c if (of_property_read_u32(frame_node, "frame-number", &n)) { n 1437 drivers/clocksource/arm_arch_timer.c if (n >= ARCH_TIMER_MEM_MAX_FRAMES) { n 1443 drivers/clocksource/arm_arch_timer.c frame = &timer_mem->frame[n]; n 82 drivers/clocksource/asm9260_timer.c #define BM_MCR_INT_EN(n) (1 << (n * 3 + 0)) n 84 drivers/clocksource/asm9260_timer.c #define BM_MCR_RES_EN(n) (1 << (n * 3 + 1)) n 86 drivers/clocksource/asm9260_timer.c #define BM_MCR_STOP_EN(n) (1 << (n * 3 + 2)) n 25 drivers/clocksource/bcm2835_timer.c #define REG_COMPARE(n) (0x0c + (n) * 4) n 47 drivers/clocksource/mxs_timer.c #define HW_TIMROT_TIMCTRLn(n) (0x20 + (n) * 0x40) n 49 drivers/clocksource/mxs_timer.c #define HW_TIMROT_TIMCOUNTn(n) (0x30 + (n) * 0x40) n 51 drivers/clocksource/mxs_timer.c #define HW_TIMROT_RUNNING_COUNTn(n) (0x30 + (n) * 0x40) n 52 drivers/clocksource/mxs_timer.c #define HW_TIMROT_FIXED_COUNTn(n) (0x40 + (n) * 0x40) n 110 drivers/clocksource/sh_mtu2.c #define TIOC_IOCH(n) ((n) << 4) n 111 drivers/clocksource/sh_mtu2.c #define TIOC_IOCL(n) ((n) << 0) n 83 drivers/clocksource/timer-riscv.c static int __init riscv_timer_init_dt(struct device_node *n) n 87 drivers/clocksource/timer-riscv.c hartid = riscv_of_processor_hartid(n); n 90 drivers/clocksource/timer-riscv.c n, hartid); n 18 drivers/clocksource/timer-vf-pit.c #define PITn_OFFSET(n) (PIT0_OFFSET + 0x10 * (n)) n 95 drivers/connector/cn_queue.c struct cn_callback_entry *cbq, *n; n 99 drivers/connector/cn_queue.c list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) { n 132 drivers/connector/cn_queue.c struct cn_callback_entry *cbq, *n; n 135 drivers/connector/cn_queue.c list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) n 467 drivers/counter/counter.c struct 
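The zte/clk.c fragments fit a fractional divider by reducing m/n with gcd() and, if either value no longer fits its 16-bit register field, scaling both so the larger one becomes 0xffff. A standalone version of that fitting step (gcd64() stands in for the kernel's gcd(), and the guard against a zero divisor is added here):

#include <stdint.h>

static uint64_t gcd64(uint64_t a, uint64_t b)
{
        while (b) {
                uint64_t t = a % b;

                a = b;
                b = t;
        }
        return a;
}

static void fit_frac_16bit(uint64_t *m, uint64_t *n)
{
        uint64_t div = gcd64(*m, *n);

        if (!div)                       /* both zero: nothing sensible to do */
                return;

        *m /= div;
        *n /= div;

        if ((*m >> 16) || (*n >> 16)) {
                if (*m > *n) {
                        *n = *n * 0xffff / *m;
                        *m = 0xffff;
                } else {
                        *m = *m * 0xffff / *n;
                        *n = 0xffff;
                }
        }
}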
counter_device_attr *p, *n; n 469 drivers/counter/counter.c list_for_each_entry_safe(p, n, attr_list, l) { n 92 drivers/cpufreq/s3c24xx-cpufreq.c static inline int closer(unsigned int target, unsigned int n, unsigned int c) n 95 drivers/cpufreq/s3c24xx-cpufreq.c int diff_new = abs(target - n); n 150 drivers/cpuidle/coupled.c int n = dev->coupled->online_count; n 155 drivers/cpuidle/coupled.c while (atomic_read(a) < n) n 158 drivers/cpuidle/coupled.c if (atomic_inc_return(a) == n * 2) { n 163 drivers/cpuidle/coupled.c while (atomic_read(a) > n) n 749 drivers/cpuidle/cpuidle.c static inline void latency_notifier_init(struct notifier_block *n) n 751 drivers/cpuidle/cpuidle.c pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n); n 298 drivers/crypto/amcc/crypto4xx_core.c static u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n) n 303 drivers/crypto/amcc/crypto4xx_core.c if (n >= PPC4XX_NUM_GD) n 307 drivers/crypto/amcc/crypto4xx_core.c tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD; n 397 drivers/crypto/amcc/crypto4xx_core.c static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n) n 402 drivers/crypto/amcc/crypto4xx_core.c if (n >= PPC4XX_NUM_SD) n 406 drivers/crypto/amcc/crypto4xx_core.c tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD; n 2087 drivers/crypto/axis/artpec6_crypto.c struct artpec6_crypto_req_common *n; n 2101 drivers/crypto/axis/artpec6_crypto.c list_for_each_entry_safe(req, n, &ac->pending, list) { n 2141 drivers/crypto/axis/artpec6_crypto.c list_for_each_entry_safe(req, n, &complete_done, list) { n 2149 drivers/crypto/axis/artpec6_crypto.c list_for_each_entry_safe(req, n, &complete_in_progress, n 1926 drivers/crypto/caam/caamhash.c struct caam_hash_alg *t_alg, *n; n 1931 drivers/crypto/caam/caamhash.c list_for_each_entry_safe(t_alg, n, &hash_list, entry) { n 373 drivers/crypto/caam/caampkc.c pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE); n 418 drivers/crypto/caam/caampkc.c pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE); n 633 drivers/crypto/caam/caampkc.c if (unlikely(!key->n || !key->e)) n 773 drivers/crypto/caam/caampkc.c if (unlikely(!key->n || !key->d)) n 803 drivers/crypto/caam/caampkc.c kfree(key->n); n 895 drivers/crypto/caam/caampkc.c rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz); n 896 drivers/crypto/caam/caampkc.c if (!rsa_key->n) n 1001 drivers/crypto/caam/caampkc.c rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz); n 1002 drivers/crypto/caam/caampkc.c if (!rsa_key->n) n 70 drivers/crypto/caam/caampkc.h u8 *n; n 21 drivers/crypto/caam/desc_constr.h #define __DESC_JOB_IO_LEN(n) (CAAM_CMD_SZ * 5 + (n) * 3) n 145 drivers/crypto/ccp/ccp-crypto-rsa.c raw_key.n, raw_key.n_sz); n 209 drivers/crypto/ccp/ccp-dev-v5.c unsigned int head_idx, n; n 216 drivers/crypto/ccp/ccp-dev-v5.c n = head_idx + COMMANDS_PER_QUEUE - cmd_q->qidx - 1; n 218 drivers/crypto/ccp/ccp-dev-v5.c return n % COMMANDS_PER_QUEUE; /* Always one unused spot */ n 107 drivers/crypto/ccp/ccp-dev.h #define Q_SIZE(n) (COMMANDS_PER_QUEUE*(n)) n 2653 drivers/crypto/ccree/cc_aead.c struct cc_crypto_alg *t_alg, *n; n 2659 drivers/crypto/ccree/cc_aead.c list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list, n 1671 drivers/crypto/ccree/cc_cipher.c struct cc_crypto_alg *t_alg, *n; n 1676 drivers/crypto/ccree/cc_cipher.c list_for_each_entry_safe(t_alg, n, &cipher_handle->alg_list, n 201 drivers/crypto/chelsio/chcr_core.h static inline unsigned int sgl_len(unsigned int n) n 203 drivers/crypto/chelsio/chcr_core.h n--; n 204 
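Many of the connector, counter and crypto entries above use list_for_each_entry_safe(..., n, ...), where n is the lookahead that lets the current element be unlinked or freed mid-walk. The same idea on a plain singly-linked list (an illustration of the pattern, not the kernel macro itself):

#include <stdlib.h>

struct node {
        struct node *next;
        /* payload ... */
};

static void free_all(struct node *head)
{
        struct node *cur, *n;

        for (cur = head; cur; cur = n) {
                n = cur->next;  /* saved before cur is freed */
                free(cur);
        }
}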
drivers/crypto/chelsio/chcr_core.h return (3 * n) / 2 + (n & 1) + 2; n 645 drivers/crypto/chelsio/chcr_ipsec.c static inline unsigned int flits_to_desc(unsigned int n) n 647 drivers/crypto/chelsio/chcr_ipsec.c WARN_ON(n > SGE_MAX_WR_LEN / 8); n 648 drivers/crypto/chelsio/chcr_ipsec.c return DIV_ROUND_UP(n, 8); n 662 drivers/crypto/chelsio/chcr_ipsec.c static inline void txq_advance(struct sge_txq *q, unsigned int n) n 664 drivers/crypto/chelsio/chcr_ipsec.c q->in_use += n; n 665 drivers/crypto/chelsio/chcr_ipsec.c q->pidx += n; n 1033 drivers/crypto/chelsio/chtls/chtls_cm.c struct neighbour *n; n 1049 drivers/crypto/chelsio/chtls/chtls_cm.c n = dst_neigh_lookup(dst, &iph->saddr); n 1050 drivers/crypto/chelsio/chtls/chtls_cm.c if (!n) n 1053 drivers/crypto/chelsio/chtls/chtls_cm.c ndev = n->dev; n 1062 drivers/crypto/chelsio/chtls/chtls_cm.c csk->l2t_entry = cxgb4_l2t_get(cdev->lldi->l2t, n, ndev, 0); n 1101 drivers/crypto/chelsio/chtls/chtls_cm.c neigh_release(n); n 30 drivers/crypto/exynos-rng.c #define EXYNOS_RNG_SEED(n) (EXYNOS_RNG_SEED_BASE + (n * 0x4)) n 32 drivers/crypto/exynos-rng.c #define EXYNOS_RNG_OUT(n) (EXYNOS_RNG_OUT_BASE + (n * 0x4)) n 1325 drivers/crypto/hifn_795x.c unsigned int n, len; n 1327 drivers/crypto/hifn_795x.c n = nbytes; n 1328 drivers/crypto/hifn_795x.c while (n) { n 1331 drivers/crypto/hifn_795x.c len = min(src->length, n); n 1333 drivers/crypto/hifn_795x.c hifn_setup_src_desc(dev, spage, soff, len, n - len == 0); n 1336 drivers/crypto/hifn_795x.c n -= len; n 1340 drivers/crypto/hifn_795x.c n = nbytes; n 1341 drivers/crypto/hifn_795x.c while (n) { n 1353 drivers/crypto/hifn_795x.c len = min(len, n); n 1355 drivers/crypto/hifn_795x.c hifn_setup_dst_desc(dev, dpage, doff, len, n - len == 0); n 1359 drivers/crypto/hifn_795x.c n -= len; n 2419 drivers/crypto/hifn_795x.c struct hifn_crypto_alg *a, *n; n 2421 drivers/crypto/hifn_795x.c list_for_each_entry_safe(a, n, &dev->alg_list, entry) { n 16 drivers/crypto/hisilicon/sgl.c u32 n; n 21 drivers/crypto/hisilicon/sgl.c ret = kstrtou32(val, 10, &n); n 22 drivers/crypto/hisilicon/sgl.c if (ret != 0 || n > HISI_ACC_SGL_SGE_NR_MAX || n == 0) n 226 drivers/crypto/hisilicon/zip/zip_main.c u32 n, q_num; n 251 drivers/crypto/hisilicon/zip/zip_main.c ret = kstrtou32(val, 10, &n); n 252 drivers/crypto/hisilicon/zip/zip_main.c if (ret != 0 || n > q_num || n == 0) n 128 drivers/crypto/inside-secure/safexcel.h #define EIP197_HIA_DFE_CFG(n) (0x0000 + (128 * (n))) n 129 drivers/crypto/inside-secure/safexcel.h #define EIP197_HIA_DFE_THR_CTRL(n) (0x0000 + (128 * (n))) n 130 drivers/crypto/inside-secure/safexcel.h #define EIP197_HIA_DFE_THR_STAT(n) (0x0004 + (128 * (n))) n 131 drivers/crypto/inside-secure/safexcel.h #define EIP197_HIA_DSE_CFG(n) (0x0000 + (128 * (n))) n 132 drivers/crypto/inside-secure/safexcel.h #define EIP197_HIA_DSE_THR_CTRL(n) (0x0000 + (128 * (n))) n 133 drivers/crypto/inside-secure/safexcel.h #define EIP197_HIA_DSE_THR_STAT(n) (0x0004 + (128 * (n))) n 134 drivers/crypto/inside-secure/safexcel.h #define EIP197_HIA_RA_PE_CTRL(n) (0x0010 + (8 * (n))) n 147 drivers/crypto/inside-secure/safexcel.h #define EIP197_PE_IN_DBUF_THRES(n) (0x0000 + (0x2000 * (n))) n 148 drivers/crypto/inside-secure/safexcel.h #define EIP197_PE_IN_TBUF_THRES(n) (0x0100 + (0x2000 * (n))) n 149 drivers/crypto/inside-secure/safexcel.h #define EIP197_PE_ICE_SCRATCH_RAM(n) (0x0800 + (0x2000 * (n))) n 150 drivers/crypto/inside-secure/safexcel.h #define EIP197_PE_ICE_PUE_CTRL(n) (0x0c80 + (0x2000 * (n))) n 151 drivers/crypto/inside-secure/safexcel.h 
#define EIP197_PE_ICE_PUTF_CTRL(n) (0x0d00 + (0x2000 * (n))) n 152 drivers/crypto/inside-secure/safexcel.h #define EIP197_PE_ICE_SCRATCH_CTRL(n) (0x0d04 + (0x2000 * (n))) n 153 drivers/crypto/inside-secure/safexcel.h #define EIP197_PE_ICE_FPP_CTRL(n) (0x0d80 + (0x2000 * (n))) n 154 drivers/crypto/inside-secure/safexcel.h #define EIP197_PE_ICE_PPTF_CTRL(n) (0x0e00 + (0x2000 * (n))) n 155 drivers/crypto/inside-secure/safexcel.h #define EIP197_PE_ICE_RAM_CTRL(n) (0x0ff0 + (0x2000 * (n))) n 156 drivers/crypto/inside-secure/safexcel.h #define EIP197_PE_EIP96_TOKEN_CTRL(n) (0x1000 + (0x2000 * (n))) n 157 drivers/crypto/inside-secure/safexcel.h #define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n))) n 158 drivers/crypto/inside-secure/safexcel.h #define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n))) n 159 drivers/crypto/inside-secure/safexcel.h #define EIP197_PE_EIP96_CONTEXT_STAT(n) (0x100c + (0x2000 * (n))) n 160 drivers/crypto/inside-secure/safexcel.h #define EIP197_PE_EIP96_FUNCTION2_EN(n) (0x1030 + (0x2000 * (n))) n 161 drivers/crypto/inside-secure/safexcel.h #define EIP197_PE_EIP96_OPTIONS(n) (0x13f8 + (0x2000 * (n))) n 162 drivers/crypto/inside-secure/safexcel.h #define EIP197_PE_EIP96_VERSION(n) (0x13fc + (0x2000 * (n))) n 163 drivers/crypto/inside-secure/safexcel.h #define EIP197_PE_OUT_DBUF_THRES(n) (0x1c00 + (0x2000 * (n))) n 164 drivers/crypto/inside-secure/safexcel.h #define EIP197_PE_OUT_TBUF_THRES(n) (0x1d00 + (0x2000 * (n))) n 181 drivers/crypto/inside-secure/safexcel.h #define EIP197_FLUE_CACHEBASE_LO(n) (0xf6000 + (32 * (n))) n 182 drivers/crypto/inside-secure/safexcel.h #define EIP197_FLUE_CACHEBASE_HI(n) (0xf6004 + (32 * (n))) n 183 drivers/crypto/inside-secure/safexcel.h #define EIP197_FLUE_CONFIG(n) (0xf6010 + (32 * (n))) n 186 drivers/crypto/inside-secure/safexcel.h #define EIP197_FLUE_IFC_LUT(n) (0xf6820 + (4 * (n))) n 196 drivers/crypto/inside-secure/safexcel.h #define EIP197_HIA_xDR_CFG_WR_CACHE(n) (((n) & 0x7) << 25) n 197 drivers/crypto/inside-secure/safexcel.h #define EIP197_HIA_xDR_CFG_RD_CACHE(n) (((n) & 0x7) << 29) n 200 drivers/crypto/inside-secure/safexcel.h #define EIP197_HIA_CDR_THRESH_PROC_PKT(n) (n) n 203 drivers/crypto/inside-secure/safexcel.h #define EIP197_HIA_CDR_THRESH_TIMEOUT(n) ((n) << 24) /* x256 clk cycles */ n 206 drivers/crypto/inside-secure/safexcel.h #define EIP197_HIA_RDR_THRESH_PROC_PKT(n) (n) n 208 drivers/crypto/inside-secure/safexcel.h #define EIP197_HIA_RDR_THRESH_TIMEOUT(n) ((n) << 24) /* x256 clk cycles */ n 216 drivers/crypto/inside-secure/safexcel.h #define EIP197_xDR_PROC_xD_COUNT(n) ((n) << 2) n 217 drivers/crypto/inside-secure/safexcel.h #define EIP197_xDR_PROC_xD_PKT(n) ((n) << 24) n 249 drivers/crypto/inside-secure/safexcel.h #define EIP197_CDR_IRQ(n) BIT((n) * 2) n 250 drivers/crypto/inside-secure/safexcel.h #define EIP197_RDR_IRQ(n) BIT((n) * 2 + 1) n 253 drivers/crypto/inside-secure/safexcel.h #define EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(n) ((n) << 0) n 254 drivers/crypto/inside-secure/safexcel.h #define EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(n) (((n) & 0x7) << 4) n 255 drivers/crypto/inside-secure/safexcel.h #define EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(n) ((n) << 8) n 257 drivers/crypto/inside-secure/safexcel.h #define EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(n) ((n) << 16) n 258 drivers/crypto/inside-secure/safexcel.h #define EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(n) (((n) & 0x7) << 20) n 259 drivers/crypto/inside-secure/safexcel.h #define EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(n) ((n) << 24) n 269 drivers/crypto/inside-secure/safexcel.h 
#define EIP197_PE_ICE_UENG_START_OFFSET(n) ((n) << 16) n 274 drivers/crypto/inside-secure/safexcel.h #define EIP197_G_IRQ_DFE(n) BIT((n) << 1) n 275 drivers/crypto/inside-secure/safexcel.h #define EIP197_G_IRQ_DSE(n) BIT(((n) << 1) + 1) n 277 drivers/crypto/inside-secure/safexcel.h #define EIP197_G_IRQ_PE(n) BIT((n) + 20) n 284 drivers/crypto/inside-secure/safexcel.h #define EIP197_MST_CTRL_RD_CACHE(n) (((n) & 0xf) << 0) n 285 drivers/crypto/inside-secure/safexcel.h #define EIP197_MST_CTRL_WD_CACHE(n) (((n) & 0xf) << 4) n 286 drivers/crypto/inside-secure/safexcel.h #define EIP197_MST_CTRL_TX_MAX_CMD(n) (((n) & 0xf) << 20) n 292 drivers/crypto/inside-secure/safexcel.h #define EIP197_PE_IN_xBUF_THRES_MIN(n) ((n) << 8) n 293 drivers/crypto/inside-secure/safexcel.h #define EIP197_PE_IN_xBUF_THRES_MAX(n) ((n) << 12) n 296 drivers/crypto/inside-secure/safexcel.h #define EIP197_PE_OUT_DBUF_THRES_MIN(n) ((n) << 0) n 297 drivers/crypto/inside-secure/safexcel.h #define EIP197_PE_OUT_DBUF_THRES_MAX(n) ((n) << 4) n 326 drivers/crypto/inside-secure/safexcel.h #define EIP197_CONTEXT_SIZE(n) (n) n 354 drivers/crypto/inside-secure/safexcel.h #define CONTEXT_CONTROL_SIZE(n) ((n) << 8) n 415 drivers/crypto/inside-secure/safexcel.h #define EIP197_TRC_PARAMS_RC_SZ_LARGE(n) ((n) << 18) n 423 drivers/crypto/inside-secure/safexcel.h #define EIP197_TRC_PARAMS2_RC_SZ_SMALL(n) ((n) << 18) n 145 drivers/crypto/mxs-dcp.c #define MXS_DCP_CH_N_CMDPTR(n) (0x100 + ((n) * 0x40)) n 147 drivers/crypto/mxs-dcp.c #define MXS_DCP_CH_N_SEMA(n) (0x110 + ((n) * 0x40)) n 149 drivers/crypto/mxs-dcp.c #define MXS_DCP_CH_N_STAT(n) (0x120 + ((n) * 0x40)) n 150 drivers/crypto/mxs-dcp.c #define MXS_DCP_CH_N_STAT_CLR(n) (0x128 + ((n) * 0x40)) n 1700 drivers/crypto/n2_core.c struct spu_queue *p, *n; n 1702 drivers/crypto/n2_core.c list_for_each_entry_safe(p, n, list, list) { n 709 drivers/crypto/nx/nx-842-powernv.c struct nx842_coproc *coproc, *n; n 717 drivers/crypto/nx/nx-842-powernv.c list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) { n 946 drivers/crypto/nx/nx-842-powernv.c struct nx842_coproc *coproc, *n; n 961 drivers/crypto/nx/nx-842-powernv.c list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) { n 258 drivers/crypto/nx/nx-842.c int ret, n; n 293 drivers/crypto/nx/nx-842.c n = hdr->groups++; n 299 drivers/crypto/nx/nx-842.c h = !n && add_header ? 
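The EIP197 and MXS_DCP_CH_N register macros above all follow one shape: a fixed base offset plus a per-instance stride (0x2000 per EIP197 processing engine, 0x40 per DCP channel, and so on). A generic, made-up equivalent and how it might be read from a mapped register window:

#include <stdint.h>

#define REG_INSTANCE(base_off, stride, n)       ((base_off) + (stride) * (n))

/* 'regs' is assumed to point at the ioremapped register block; byte_off is a
 * byte offset into it. */
static inline uint32_t reg_read(volatile uint32_t *regs, uint32_t byte_off)
{
        return regs[byte_off / 4];
}

/* e.g. reg_read(regs, REG_INSTANCE(0x0d00, 0x2000, pe)) reads the per-engine
 * register of engine 'pe', mirroring the EIP197_PE_*(n) macros above. */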
hdrsize : 0; n 304 drivers/crypto/nx/nx-842.c ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h); n 440 drivers/crypto/nx/nx-842.c int n, ret, hdr_len; n 495 drivers/crypto/nx/nx-842.c for (n = 0; n < hdr->groups; n++) { n 497 drivers/crypto/nx/nx-842.c if (n + 1 == hdr->groups) n 500 drivers/crypto/nx/nx-842.c ret = decompress(ctx, &p, &hdr->group[n], &c, ignore); n 156 drivers/crypto/nx/nx.c unsigned int n, offset = 0, len = *src_len; n 175 drivers/crypto/nx/nx.c n = scatterwalk_clamp(&walk, len); n 176 drivers/crypto/nx/nx.c if (!n) { n 180 drivers/crypto/nx/nx.c n = scatterwalk_clamp(&walk, len); n 184 drivers/crypto/nx/nx.c nx_sg = nx_build_sg_list(nx_sg, dst, &n, sglen - (nx_sg - nx_dst)); n 185 drivers/crypto/nx/nx.c len -= n; n 188 drivers/crypto/nx/nx.c scatterwalk_advance(&walk, n); n 20 drivers/crypto/omap-crypto.c int n = sg_nents(*sg); n 24 drivers/crypto/omap-crypto.c new_sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL); n 28 drivers/crypto/omap-crypto.c sg_init_table(new_sg, n); n 643 drivers/crypto/omap-sham.c int n = sg_nents(sg); n 648 drivers/crypto/omap-sham.c n++; n 650 drivers/crypto/omap-sham.c ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL); n 654 drivers/crypto/omap-sham.c sg_init_table(ctx->sg, n); n 734 drivers/crypto/omap-sham.c int n = 0; n 758 drivers/crypto/omap-sham.c n++; n 802 drivers/crypto/omap-sham.c rctx->sg_len = n; n 148 drivers/crypto/qat/qat_common/icp_qat_hw.h #define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & (~(n - 1))) n 714 drivers/crypto/qat/qat_common/qat_algs.c int n = sg_nents(sgl); n 720 drivers/crypto/qat/qat_common/qat_algs.c size_t sz_out, sz = struct_size(bufl, bufers, n + 1); n 722 drivers/crypto/qat/qat_common/qat_algs.c if (unlikely(!n)) n 734 drivers/crypto/qat/qat_common/qat_algs.c for_each_sg(sgl, sg, n, i) { n 756 drivers/crypto/qat/qat_common/qat_algs.c n = sg_nents(sglout); n 757 drivers/crypto/qat/qat_common/qat_algs.c sz_out = struct_size(buflout, bufers, n + 1); n 767 drivers/crypto/qat/qat_common/qat_algs.c for_each_sg(sglout, sg, n, i) { n 794 drivers/crypto/qat/qat_common/qat_algs.c n = sg_nents(sglout); n 795 drivers/crypto/qat/qat_common/qat_algs.c for (i = 0; i < n; i++) n 805 drivers/crypto/qat/qat_common/qat_algs.c n = sg_nents(sgl); n 806 drivers/crypto/qat/qat_common/qat_algs.c for (i = 0; i < n; i++) n 72 drivers/crypto/qat/qat_common/qat_asym_algs.c dma_addr_t n; n 77 drivers/crypto/qat/qat_common/qat_asym_algs.c dma_addr_t n; n 104 drivers/crypto/qat/qat_common/qat_asym_algs.c char *n; n 697 drivers/crypto/qat/qat_common/qat_asym_algs.c if (unlikely(!ctx->n || !ctx->e)) n 720 drivers/crypto/qat/qat_common/qat_asym_algs.c qat_req->in.rsa.enc.n = ctx->dma_n; n 831 drivers/crypto/qat/qat_common/qat_asym_algs.c if (unlikely(!ctx->n || !ctx->d)) n 863 drivers/crypto/qat/qat_common/qat_asym_algs.c qat_req->in.rsa.dec.n = ctx->dma_n; n 992 drivers/crypto/qat/qat_common/qat_asym_algs.c ctx->n = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL); n 993 drivers/crypto/qat/qat_common/qat_asym_algs.c if (!ctx->n) n 996 drivers/crypto/qat/qat_common/qat_asym_algs.c memcpy(ctx->n, ptr, ctx->key_sz); n 1000 drivers/crypto/qat/qat_common/qat_asym_algs.c ctx->n = NULL; n 1160 drivers/crypto/qat/qat_common/qat_asym_algs.c if (ctx->n) n 1161 drivers/crypto/qat/qat_common/qat_asym_algs.c dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n); n 1189 drivers/crypto/qat/qat_common/qat_asym_algs.c ctx->n = NULL; n 1218 drivers/crypto/qat/qat_common/qat_asym_algs.c ret = qat_rsa_set_n(ctx, rsa_key.n, 
rsa_key.n_sz); n 1231 drivers/crypto/qat/qat_common/qat_asym_algs.c if (!ctx->n || !ctx->e) { n 1286 drivers/crypto/qat/qat_common/qat_asym_algs.c if (ctx->n) n 1287 drivers/crypto/qat/qat_common/qat_asym_algs.c dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n); n 1295 drivers/crypto/qat/qat_common/qat_asym_algs.c ctx->n = NULL; n 410 drivers/crypto/qce/ablkcipher.c struct qce_alg_template *tmpl, *n; n 412 drivers/crypto/qce/ablkcipher.c list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) { n 162 drivers/crypto/qce/common.c unsigned int n; n 164 drivers/crypto/qce/common.c n = len / sizeof(u32); n 165 drivers/crypto/qce/common.c for (; n > 0; n--) { n 527 drivers/crypto/qce/sha.c struct qce_alg_template *tmpl, *n; n 529 drivers/crypto/qce/sha.c list_for_each_entry_safe(tmpl, n, &ahash_algs, entry) { n 1073 drivers/crypto/s5p-sss.c unsigned int skip = ctx->skip, n = sg_nents(sg); n 1078 drivers/crypto/s5p-sss.c n++; n 1080 drivers/crypto/s5p-sss.c ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL); n 1086 drivers/crypto/s5p-sss.c sg_init_table(ctx->sg, n); n 1144 drivers/crypto/s5p-sss.c unsigned int skip = ctx->skip, nbytes = new_len, n = 0; n 1155 drivers/crypto/s5p-sss.c n++; n 1190 drivers/crypto/s5p-sss.c ctx->sg_len = n; n 1198 drivers/crypto/s5p-sss.c ctx->sg_len = n; n 1070 drivers/crypto/stm32/stm32-cryp.c unsigned int n) n 1072 drivers/crypto/stm32/stm32-cryp.c scatterwalk_advance(&cryp->out_walk, n); n 1082 drivers/crypto/stm32/stm32-cryp.c return (u32 *)((u8 *)dst + n); n 1086 drivers/crypto/stm32/stm32-cryp.c unsigned int n) n 1088 drivers/crypto/stm32/stm32-cryp.c scatterwalk_advance(&cryp->in_walk, n); n 1098 drivers/crypto/stm32/stm32-cryp.c return (u32 *)((u8 *)src + n); n 3110 drivers/crypto/talitos.c struct talitos_crypto_alg *t_alg, *n; n 3113 drivers/crypto/talitos.c list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) { n 325 drivers/dax/bus.c static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n) n 257 drivers/dax/super.c static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n) n 21 drivers/dma-buf/selftest.c #define selftest(n, f) [__idx_##n] = { .name = #n, .func = f }, n 32 drivers/dma-buf/selftest.c #define param(n) __PASTE(igt__, __PASTE(__PASTE(__LINE__, __), n)) n 33 drivers/dma-buf/selftest.c #define selftest_0(n, func, id) \ n 34 drivers/dma-buf/selftest.c module_param_named(id, selftests[__idx_##n].enabled, bool, 0400); n 35 drivers/dma-buf/selftest.c #define selftest(n, func) selftest_0(n, func, param(n)) n 323 drivers/dma/acpi-dma.c size_t n; n 338 drivers/dma/acpi-dma.c if (pdata->n++ == pdata->index) { n 170 drivers/dma/bcm2835-dma.c #define BCM2835_DMA_CHAN(n) ((n) << 8) /* Base address */ n 171 drivers/dma/bcm2835-dma.c #define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n)) n 38 drivers/dma/dma-jz4780.c #define JZ_DMA_REG_CHAN(n) (n * 0x20) n 444 drivers/dma/dmatest.c static void result(const char *err, unsigned int n, unsigned int src_off, n 448 drivers/dma/dmatest.c current->comm, n, err, src_off, dst_off, len, data); n 451 drivers/dma/dmatest.c static void dbg_result(const char *err, unsigned int n, unsigned int src_off, n 456 drivers/dma/dmatest.c current->comm, n, err, src_off, dst_off, len, data); n 459 drivers/dma/dmatest.c #define verbose_result(err, n, src_off, dst_off, len, data) ({ \ n 461 drivers/dma/dmatest.c result(err, n, src_off, dst_off, len, data); \ n 463 drivers/dma/dmatest.c dbg_result(err, n, src_off, dst_off, len, data);\ n 119 drivers/dma/dw/regs.h 
#define DW_PARAMS_DATA_WIDTH(n) (15 + 2 * (n)) n 147 drivers/dma/dw/regs.h #define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */ n 148 drivers/dma/dw/regs.h #define DWC_CTLL_SRC_WIDTH(n) ((n)<<4) n 155 drivers/dma/dw/regs.h #define DWC_CTLL_DST_MSIZE(n) ((n)<<11) /* burst, #elements */ n 156 drivers/dma/dw/regs.h #define DWC_CTLL_SRC_MSIZE(n) ((n)<<14) n 159 drivers/dma/dw/regs.h #define DWC_CTLL_FC(n) ((n) << 20) n 165 drivers/dma/dw/regs.h #define DWC_CTLL_DMS(n) ((n)<<23) /* dst master select */ n 166 drivers/dma/dw/regs.h #define DWC_CTLL_SMS(n) ((n)<<25) /* src master select */ n 57 drivers/dma/fsl-edma-common.h #define EDMAMUX_CHCFG_SOURCE(n) ((n) & 0x3F) n 908 drivers/dma/ipu/ipu_idmac.c static int idmac_desc_alloc(struct idmac_channel *ichan, int n) n 911 drivers/dma/ipu/ipu_idmac.c vmalloc(array_size(n, sizeof(struct idmac_tx_desc))); n 920 drivers/dma/ipu/ipu_idmac.c ichan->n_tx_desc = n; n 925 drivers/dma/ipu/ipu_idmac.c while (n--) { n 22 drivers/dma/lpc18xx-dmamux.c #define LPC18XX_DMAMUX_VAL(v, n) ((v) << (n * 2)) n 23 drivers/dma/lpc18xx-dmamux.c #define LPC18XX_DMAMUX_MASK(n) (0x3 << (n * 2)) n 52 drivers/dma/mediatek/mtk-uart-apdma.c #define VFF_TX_THRE(n) (n) n 54 drivers/dma/mediatek/mtk-uart-apdma.c #define VFF_RX_THRE(n) ((n) * 3 / 4) n 29 drivers/dma/mmp_pdma.c #define DSADR(n) (0x0204 + ((n) << 4)) n 30 drivers/dma/mmp_pdma.c #define DTADR(n) (0x0208 + ((n) << 4)) n 50 drivers/dma/mmp_pdma.c #define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2)) n 54 drivers/dma/mxs-dma.c #define HW_APBHX_CHn_NXTCMDAR(d, n) \ n 55 drivers/dma/mxs-dma.c (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70) n 56 drivers/dma/mxs-dma.c #define HW_APBHX_CHn_SEMA(d, n) \ n 57 drivers/dma/mxs-dma.c (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70) n 58 drivers/dma/mxs-dma.c #define HW_APBHX_CHn_BAR(d, n) \ n 59 drivers/dma/mxs-dma.c (((dma_is_apbh(d) && apbh_is_old(d)) ? 
0x070 : 0x130) + (n) * 0x70) n 60 drivers/dma/mxs-dma.c #define HW_APBX_CHn_DEBUG1(d, n) (0x150 + (n) * 0x70) n 1238 drivers/dma/nbpfaxi.c static int nbpf_chan_probe(struct nbpf_device *nbpf, int n) n 1241 drivers/dma/nbpfaxi.c struct nbpf_channel *chan = nbpf->chan + n; n 1245 drivers/dma/nbpfaxi.c chan->base = nbpf->base + NBPF_REG_CHAN_OFFSET + NBPF_REG_CHAN_SIZE * n; n 1252 drivers/dma/nbpfaxi.c dev_dbg(dma_dev->dev, "%s(): channel %d: -> %p\n", __func__, n, chan->base); n 1254 drivers/dma/nbpfaxi.c snprintf(chan->name, sizeof(chan->name), "nbpf %d", n); n 82 drivers/dma/pl330.c #define FTC(n) (_FTC + (n)*0x4) n 85 drivers/dma/pl330.c #define CS(n) (_CS + (n)*0x8) n 89 drivers/dma/pl330.c #define CPC(n) (_CPC + (n)*0x8) n 92 drivers/dma/pl330.c #define SA(n) (_SA + (n)*0x20) n 95 drivers/dma/pl330.c #define DA(n) (_DA + (n)*0x20) n 98 drivers/dma/pl330.c #define CC(n) (_CC + (n)*0x20) n 119 drivers/dma/pl330.c #define LC0(n) (_LC0 + (n)*0x20) n 122 drivers/dma/pl330.c #define LC1(n) (_LC1 + (n)*0x20) n 27 drivers/dma/pxa_dma.c #define DCSR(n) (0x0000 + ((n) << 2)) n 28 drivers/dma/pxa_dma.c #define DALGN(n) 0x00a0 n 30 drivers/dma/pxa_dma.c #define DDADR(n) (0x0200 + ((n) << 4)) n 31 drivers/dma/pxa_dma.c #define DSADR(n) (0x0204 + ((n) << 4)) n 32 drivers/dma/pxa_dma.c #define DTADR(n) (0x0208 + ((n) << 4)) n 33 drivers/dma/pxa_dma.c #define DCMD(n) (0x020c + ((n) << 4)) n 268 drivers/dma/sh/rcar-dmac.c #define RCAR_DMACHCRB_DCNT(n) ((n) << 24) n 274 drivers/dma/sh/rcar-dmac.c #define RCAR_DMACHCRB_SLM_CLK(n) ((8 | (n)) << 4) n 275 drivers/dma/sh/rcar-dmac.c #define RCAR_DMACHCRB_PRI(n) ((n) << 0) n 278 drivers/dma/sh/rcar-dmac.c #define RCAR_DMABUFCR_MBU(n) ((n) << 16) n 279 drivers/dma/sh/rcar-dmac.c #define RCAR_DMABUFCR_ULB(n) ((n) << 0) n 284 drivers/dma/sh/rcar-dmac.c #define RCAR_DMADPCR_DIPT(n) ((n) << 24) n 65 drivers/dma/st_fdma.h #define FDMA_NODE_CTRL_REQ_MAP_DREQ(n) ((n)&FDMA_NODE_CTRL_REQ_MAP_MASK) n 220 drivers/dma/st_fdma.h #define FDMA_REQ_CTRL_NUM_OPS(n) (FDMA_REQ_CTRL_NUM_OPS_MASK & \ n 221 drivers/dma/st_fdma.h ((n) << 24)) n 237 drivers/dma/st_fdma.h #define FDMA_REQ_CTRL_HOLDOFF(n) ((n) & FDMA_REQ_CTRL_HOLDOFF_MASK) n 48 drivers/dma/stm32-dma.c #define STM32_DMA_SCR_REQ(n) ((n & 0x7) << 25) n 50 drivers/dma/stm32-dma.c #define STM32_DMA_SCR_MBURST(n) ((n & 0x3) << 23) n 52 drivers/dma/stm32-dma.c #define STM32_DMA_SCR_PBURST(n) ((n & 0x3) << 21) n 54 drivers/dma/stm32-dma.c #define STM32_DMA_SCR_PL(n) ((n & 0x3) << 16) n 56 drivers/dma/stm32-dma.c #define STM32_DMA_SCR_MSIZE(n) ((n & 0x3) << 13) n 58 drivers/dma/stm32-dma.c #define STM32_DMA_SCR_PSIZE(n) ((n & 0x3) << 11) n 59 drivers/dma/stm32-dma.c #define STM32_DMA_SCR_PSIZE_GET(n) ((n & STM32_DMA_SCR_PSIZE_MASK) >> 11) n 61 drivers/dma/stm32-dma.c #define STM32_DMA_SCR_DIR(n) ((n & 0x3) << 6) n 97 drivers/dma/stm32-dma.c #define STM32_DMA_SFCR_FTH(n) (n & STM32_DMA_SFCR_FTH_MASK) n 137 drivers/dma/stm32-dma.c #define STM32_DMA_THRESHOLD_FTR_GET(n) ((n) & STM32_DMA_THRESHOLD_FTR_MASK) n 36 drivers/dma/stm32-mdma.c #define STM32_MDMA_SHIFT(n) (ffs(n) - 1) n 37 drivers/dma/stm32-mdma.c #define STM32_MDMA_SET(n, mask) (((n) << STM32_MDMA_SHIFT(mask)) & \ n 39 drivers/dma/stm32-mdma.c #define STM32_MDMA_GET(n, mask) (((n) & (mask)) >> \ n 83 drivers/dma/stm32-mdma.c #define STM32_MDMA_CCR_PL(n) STM32_MDMA_SET(n, \ n 102 drivers/dma/stm32-mdma.c #define STM32_MDMA_CTCR_TRGM(n) STM32_MDMA_SET((n), \ n 104 drivers/dma/stm32-mdma.c #define STM32_MDMA_CTCR_TRGM_GET(n) STM32_MDMA_GET((n), \ n 107 
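The channel-register macros running through the entries above (bcm2835, mxs, nbpfaxi, pl330, pxa, sun4i) all follow the same addressing convention: each DMA channel owns a fixed-stride window inside the controller's MMIO space, so a register address is base + channel * stride + offset. A small sketch with made-up stride and offsets (none of them taken from a real controller):

#include <stdio.h>

#define CHAN_STRIDE   0x40u                       /* assumed per-channel window size */
#define CHAN_BASE(n)  (0x100u + (n) * CHAN_STRIDE)
#define REG_CTRL(n)   (CHAN_BASE(n) + 0x00u)      /* illustrative register offsets */
#define REG_SRC(n)    (CHAN_BASE(n) + 0x04u)
#define REG_DST(n)    (CHAN_BASE(n) + 0x08u)

int main(void)
{
    for (unsigned int n = 0; n < 4; n++)
        printf("chan %u: ctrl=0x%03x src=0x%03x dst=0x%03x\n",
               n, REG_CTRL(n), REG_SRC(n), REG_DST(n));
    return 0;
}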
drivers/dma/stm32-mdma.c #define STM32_MDMA_CTCR_PAM(n) STM32_MDMA_SET(n, \ n 111 drivers/dma/stm32-mdma.c #define STM32_MDMA_CTCR_TLEN(n) STM32_MDMA_SET((n), \ n 113 drivers/dma/stm32-mdma.c #define STM32_MDMA_CTCR_TLEN_GET(n) STM32_MDMA_GET((n), \ n 116 drivers/dma/stm32-mdma.c #define STM32_MDMA_CTCR_LEN2(n) STM32_MDMA_SET((n), \ n 118 drivers/dma/stm32-mdma.c #define STM32_MDMA_CTCR_LEN2_GET(n) STM32_MDMA_GET((n), \ n 121 drivers/dma/stm32-mdma.c #define STM32_MDMA_CTCR_DBURST(n) STM32_MDMA_SET(n, \ n 124 drivers/dma/stm32-mdma.c #define STM32_MDMA_CTCR_SBURST(n) STM32_MDMA_SET(n, \ n 127 drivers/dma/stm32-mdma.c #define STM32_MDMA_CTCR_DINCOS(n) STM32_MDMA_SET((n), \ n 130 drivers/dma/stm32-mdma.c #define STM32_MDMA_CTCR_SINCOS(n) STM32_MDMA_SET((n), \ n 133 drivers/dma/stm32-mdma.c #define STM32_MDMA_CTCR_DSIZE(n) STM32_MDMA_SET(n, \ n 136 drivers/dma/stm32-mdma.c #define STM32_MDMA_CTCR_SSIZE(n) STM32_MDMA_SET(n, \ n 139 drivers/dma/stm32-mdma.c #define STM32_MDMA_CTCR_DINC(n) STM32_MDMA_SET((n), \ n 142 drivers/dma/stm32-mdma.c #define STM32_MDMA_CTCR_SINC(n) STM32_MDMA_SET((n), \ n 154 drivers/dma/stm32-mdma.c #define STM32_MDMA_CBNDTR_BRC(n) STM32_MDMA_SET(n, \ n 156 drivers/dma/stm32-mdma.c #define STM32_MDMA_CBNDTR_BRC_GET(n) STM32_MDMA_GET((n), \ n 162 drivers/dma/stm32-mdma.c #define STM32_MDMA_CBNDTR_BNDT(n) STM32_MDMA_SET(n, \ n 174 drivers/dma/stm32-mdma.c #define STM32_MDMA_CBRUR_DUV(n) STM32_MDMA_SET(n, \ n 177 drivers/dma/stm32-mdma.c #define STM32_MDMA_CBRUR_SUV(n) STM32_MDMA_SET(n, \ n 188 drivers/dma/stm32-mdma.c #define STM32_MDMA_CTBR_TSEL(n) STM32_MDMA_SET(n, \ n 47 drivers/dma/sun4i-dma.c #define SUN4I_NDMA_CFG_WAIT_STATE(n) ((n) << 27) n 74 drivers/dma/sun4i-dma.c #define SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(n) (((n) - 1) << 24) n 75 drivers/dma/sun4i-dma.c #define SUN4I_DDMA_PARA_DST_WAIT_CYCLES(n) (((n) - 1) << 16) n 76 drivers/dma/sun4i-dma.c #define SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(n) (((n) - 1) << 8) n 77 drivers/dma/sun4i-dma.c #define SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(n) (((n) - 1) << 0) n 86 drivers/dma/sun4i-dma.c #define SUN4I_NDMA_CHANNEL_REG_BASE(n) (0x100 + (n) * 0x20) n 93 drivers/dma/sun4i-dma.c #define SUN4I_DDMA_CHANNEL_REG_BASE(n) (0x300 + (n) * 0x20) n 77 drivers/dma/ti/cppi41.c #define QMGR_QUEUE_A(n) (0x2000 + (n) * 0x10) n 78 drivers/dma/ti/cppi41.c #define QMGR_QUEUE_B(n) (0x2004 + (n) * 0x10) n 79 drivers/dma/ti/cppi41.c #define QMGR_QUEUE_C(n) (0x2008 + (n) * 0x10) n 80 drivers/dma/ti/cppi41.c #define QMGR_QUEUE_D(n) (0x200c + (n) * 0x10) n 115 drivers/dma/xilinx/xilinx_dma.c #define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n)) n 116 drivers/dma/xilinx/xilinx_dma.c #define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n)) n 642 drivers/edac/amd64_edac.c int n; n 645 drivers/edac/amd64_edac.c n = intlv_shift_table[intlv_en]; n 646 drivers/edac/amd64_edac.c return n; n 80 drivers/edac/cpc925_edac.c #define APIMASK_ADI(n) CPC925_BIT(((n)+1)) n 121 drivers/edac/edac_mc.c int i, n, count = 0; n 125 drivers/edac/edac_mc.c n = snprintf(p, len, "%s %d ", n 128 drivers/edac/edac_mc.c p += n; n 129 drivers/edac/edac_mc.c len -= n; n 130 drivers/edac/edac_mc.c count += n; n 320 drivers/edac/edac_mc.c int i, j, row, chn, n, len, off; n 447 drivers/edac/edac_mc.c n = snprintf(p, len, "mc#%u", mc_num); n 448 drivers/edac/edac_mc.c p += n; n 449 drivers/edac/edac_mc.c len -= n; n 451 drivers/edac/edac_mc.c n = snprintf(p, len, "%s#%u", n 454 drivers/edac/edac_mc.c p += n; n 455 drivers/edac/edac_mc.c len -= n; n 855 drivers/edac/edac_mc.c int 
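The STM32_MDMA_SET/GET helpers above derive a field's shift from its mask (position of the lowest set bit via ffs), so callers pass only the value and the mask. A userspace sketch of the same idea, using the GCC/Clang __builtin_ctz in place of ffs and two invented masks:

#include <stdint.h>
#include <stdio.h>

#define FIELD_SHIFT(mask)     (__builtin_ctz(mask))                               /* lowest set bit of the mask */
#define FIELD_SET(val, mask)  (((uint32_t)(val) << FIELD_SHIFT(mask)) & (mask))
#define FIELD_GET(reg, mask)  (((uint32_t)(reg) & (mask)) >> FIELD_SHIFT(mask))

#define CTRL_BURST_MASK       0x00000070u   /* illustrative 3-bit field at bits 6:4 */
#define CTRL_PRIO_MASK        0x0000C000u   /* illustrative 2-bit field at bits 15:14 */

int main(void)
{
    uint32_t reg = 0;

    reg |= FIELD_SET(5, CTRL_BURST_MASK);   /* burst = 5 */
    reg |= FIELD_SET(2, CTRL_PRIO_MASK);    /* priority = 2 */

    printf("reg = 0x%08x, burst = %u, prio = %u\n",
           reg, FIELD_GET(reg, CTRL_BURST_MASK), FIELD_GET(reg, CTRL_PRIO_MASK));
    return 0;
}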
row, i, j, n; n 862 drivers/edac/edac_mc.c n = 0; n 865 drivers/edac/edac_mc.c n += dimm->nr_pages; n 867 drivers/edac/edac_mc.c if (n == 0) n 320 drivers/edac/i3200_edac.c int n; n 322 drivers/edac/i3200_edac.c n = drbs[channel][rank]; n 323 drivers/edac/i3200_edac.c if (!n) n 327 drivers/edac/i3200_edac.c n -= drbs[channel][rank - 1]; n 330 drivers/edac/i3200_edac.c n -= drbs[0][I3200_RANKS_PER_CHANNEL - 1]; n 332 drivers/edac/i3200_edac.c n <<= (I3200_DRB_SHIFT - PAGE_SHIFT); n 333 drivers/edac/i3200_edac.c return n; n 1039 drivers/edac/i5000_edac.c int space, n; n 1060 drivers/edac/i5000_edac.c n = snprintf(p, space, "--------------------------" n 1062 drivers/edac/i5000_edac.c p += n; n 1063 drivers/edac/i5000_edac.c space -= n; n 1068 drivers/edac/i5000_edac.c n = snprintf(p, space, "slot %2d ", slot); n 1069 drivers/edac/i5000_edac.c p += n; n 1070 drivers/edac/i5000_edac.c space -= n; n 1076 drivers/edac/i5000_edac.c n = snprintf(p, space, "%4d MB %dR| ", n 1079 drivers/edac/i5000_edac.c n = snprintf(p, space, "%4d MB | ", 0); n 1080 drivers/edac/i5000_edac.c p += n; n 1081 drivers/edac/i5000_edac.c space -= n; n 1083 drivers/edac/i5000_edac.c p += n; n 1084 drivers/edac/i5000_edac.c space -= n; n 1091 drivers/edac/i5000_edac.c n = snprintf(p, space, "--------------------------" n 1093 drivers/edac/i5000_edac.c p += n; n 1094 drivers/edac/i5000_edac.c space -= n; n 1100 drivers/edac/i5000_edac.c n = snprintf(p, space, " "); n 1101 drivers/edac/i5000_edac.c p += n; n 1102 drivers/edac/i5000_edac.c space -= n; n 1104 drivers/edac/i5000_edac.c n = snprintf(p, space, "channel %d | ", channel); n 1105 drivers/edac/i5000_edac.c p += n; n 1106 drivers/edac/i5000_edac.c space -= n; n 1112 drivers/edac/i5000_edac.c n = snprintf(p, space, " "); n 1113 drivers/edac/i5000_edac.c p += n; n 1115 drivers/edac/i5000_edac.c n = snprintf(p, space, " branch %d | ", branch); n 1116 drivers/edac/i5000_edac.c p += n; n 1117 drivers/edac/i5000_edac.c space -= n; n 864 drivers/edac/i5400_edac.c int n; n 869 drivers/edac/i5400_edac.c n = dimm; n 871 drivers/edac/i5400_edac.c if (n >= DIMMS_PER_CHANNEL) { n 878 drivers/edac/i5400_edac.c mtr = pvt->b0_mtr[n]; n 880 drivers/edac/i5400_edac.c mtr = pvt->b1_mtr[n]; n 961 drivers/edac/i5400_edac.c int space, n; n 984 drivers/edac/i5400_edac.c n = snprintf(p, space, "---------------------------" n 986 drivers/edac/i5400_edac.c p += n; n 987 drivers/edac/i5400_edac.c space -= n; n 992 drivers/edac/i5400_edac.c n = snprintf(p, space, "dimm %2d ", dimm); n 993 drivers/edac/i5400_edac.c p += n; n 994 drivers/edac/i5400_edac.c space -= n; n 999 drivers/edac/i5400_edac.c n = snprintf(p, space, "%4d MB | ", dinfo->megabytes); n 1000 drivers/edac/i5400_edac.c p += n; n 1001 drivers/edac/i5400_edac.c space -= n; n 1009 drivers/edac/i5400_edac.c n = snprintf(p, space, "---------------------------" n 1011 drivers/edac/i5400_edac.c p += n; n 1012 drivers/edac/i5400_edac.c space -= n; n 1018 drivers/edac/i5400_edac.c n = snprintf(p, space, " "); n 1019 drivers/edac/i5400_edac.c p += n; n 1020 drivers/edac/i5400_edac.c space -= n; n 1022 drivers/edac/i5400_edac.c n = snprintf(p, space, "channel %d | ", channel); n 1023 drivers/edac/i5400_edac.c p += n; n 1024 drivers/edac/i5400_edac.c space -= n; n 1027 drivers/edac/i5400_edac.c space -= n; n 1032 drivers/edac/i5400_edac.c n = snprintf(p, space, " "); n 1033 drivers/edac/i5400_edac.c p += n; n 1035 drivers/edac/i5400_edac.c n = snprintf(p, space, " branch %d | ", branch); n 1036 drivers/edac/i5400_edac.c p += n; n 1037 
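The i3200 entries above compute a rank's size by differencing cumulative DRB boundary registers and shifting the result from DRB units into pages. A sketch of that arithmetic; the 64 MiB DRB granularity and 4 KiB page size are assumptions for the example, not values read out of that driver:

#include <stdio.h>

#define DRB_SHIFT   26   /* assumed: one DRB unit == 64 MiB */
#define PAGE_SHIFT  12   /* 4 KiB pages */

static unsigned long rank_pages(const unsigned int *drbs, int rank)
{
    unsigned long n = drbs[rank];

    if (n == 0)
        return 0;                   /* rank not populated */
    if (rank > 0)
        n -= drbs[rank - 1];        /* subtract the previous cumulative boundary */

    return n << (DRB_SHIFT - PAGE_SHIFT);
}

int main(void)
{
    /* cumulative boundaries of 2, 4 and 4 DRB units -> 128 MiB, 128 MiB, empty */
    static const unsigned int drbs[] = { 2, 4, 4 };

    for (int rank = 0; rank < 3; rank++)
        printf("rank %d: %lu pages\n", rank, rank_pages(drbs, rank));
    return 0;
}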
drivers/edac/i5400_edac.c space -= n; n 687 drivers/edac/i7300_edac.c int space, n; n 693 drivers/edac/i7300_edac.c n = snprintf(p, space, " "); n 694 drivers/edac/i7300_edac.c p += n; n 695 drivers/edac/i7300_edac.c space -= n; n 697 drivers/edac/i7300_edac.c n = snprintf(p, space, "channel %d | ", channel); n 698 drivers/edac/i7300_edac.c p += n; n 699 drivers/edac/i7300_edac.c space -= n; n 704 drivers/edac/i7300_edac.c n = snprintf(p, space, "-------------------------------" n 706 drivers/edac/i7300_edac.c p += n; n 707 drivers/edac/i7300_edac.c space -= n; n 713 drivers/edac/i7300_edac.c n = snprintf(p, space, "csrow/SLOT %d ", slot); n 714 drivers/edac/i7300_edac.c p += n; n 715 drivers/edac/i7300_edac.c space -= n; n 719 drivers/edac/i7300_edac.c n = snprintf(p, space, "%4d MB | ", dinfo->megabytes); n 720 drivers/edac/i7300_edac.c p += n; n 721 drivers/edac/i7300_edac.c space -= n; n 729 drivers/edac/i7300_edac.c n = snprintf(p, space, "-------------------------------" n 731 drivers/edac/i7300_edac.c p += n; n 732 drivers/edac/i7300_edac.c space -= n; n 169 drivers/edac/ie31200_edac.c #define IE31200_PAGES(n, skl) \ n 170 drivers/edac/ie31200_edac.c (n << (28 + (2 * skl) - PAGE_SHIFT)) n 752 drivers/edac/pnd2_edac.c #define C(n) (0x10 | (n)) /* column */ n 753 drivers/edac/pnd2_edac.c #define B(n) (0x20 | (n)) /* bank */ n 754 drivers/edac/pnd2_edac.c #define R(n) (0x40 | (n)) /* row */ n 132 drivers/edac/ppc4xx_edac.c #define SDRAM_MBCF_SZ_TO_MiB(n) (SDRAM_MBCF_SZ_MiB_MIN \ n 133 drivers/edac/ppc4xx_edac.c << (SDRAM_MBCF_SZ_DECODE(n))) n 134 drivers/edac/ppc4xx_edac.c #define SDRAM_MBCF_SZ_TO_PAGES(n) (SDRAM_MBCF_SZ_MiB_MIN \ n 136 drivers/edac/ppc4xx_edac.c SDRAM_MBCF_SZ_DECODE(n))) n 316 drivers/edac/ppc4xx_edac.c int n, total = 0; n 319 drivers/edac/ppc4xx_edac.c n = snprintf(buffer, size, "%s: Banks: ", mci->dev_name); n 321 drivers/edac/ppc4xx_edac.c if (n < 0 || n >= size) n 324 drivers/edac/ppc4xx_edac.c buffer += n; n 325 drivers/edac/ppc4xx_edac.c size -= n; n 326 drivers/edac/ppc4xx_edac.c total += n; n 330 drivers/edac/ppc4xx_edac.c n = snprintf(buffer, size, "%s%u", n 333 drivers/edac/ppc4xx_edac.c if (n < 0 || n >= size) n 336 drivers/edac/ppc4xx_edac.c buffer += n; n 337 drivers/edac/ppc4xx_edac.c size -= n; n 338 drivers/edac/ppc4xx_edac.c total += n; n 342 drivers/edac/ppc4xx_edac.c n = snprintf(buffer, size, "%s; ", rows ? "" : "None"); n 344 drivers/edac/ppc4xx_edac.c if (n < 0 || n >= size) n 347 drivers/edac/ppc4xx_edac.c buffer += n; n 348 drivers/edac/ppc4xx_edac.c size -= n; n 349 drivers/edac/ppc4xx_edac.c total += n; n 436 drivers/edac/ppc4xx_edac.c int n, total = 0; n 441 drivers/edac/ppc4xx_edac.c n = snprintf(buffer, size, "; Byte Lane Errors: "); n 443 drivers/edac/ppc4xx_edac.c if (n < 0 || n >= size) n 446 drivers/edac/ppc4xx_edac.c buffer += n; n 447 drivers/edac/ppc4xx_edac.c size -= n; n 448 drivers/edac/ppc4xx_edac.c total += n; n 452 drivers/edac/ppc4xx_edac.c n = snprintf(buffer, size, n 456 drivers/edac/ppc4xx_edac.c if (n < 0 || n >= size) n 459 drivers/edac/ppc4xx_edac.c buffer += n; n 460 drivers/edac/ppc4xx_edac.c size -= n; n 461 drivers/edac/ppc4xx_edac.c total += n; n 465 drivers/edac/ppc4xx_edac.c n = snprintf(buffer, size, "%s; ", lanes ? 
"" : "None"); n 467 drivers/edac/ppc4xx_edac.c if (n < 0 || n >= size) n 470 drivers/edac/ppc4xx_edac.c buffer += n; n 471 drivers/edac/ppc4xx_edac.c size -= n; n 472 drivers/edac/ppc4xx_edac.c total += n; n 501 drivers/edac/ppc4xx_edac.c int n, total = 0; n 503 drivers/edac/ppc4xx_edac.c n = ppc4xx_edac_generate_bank_message(mci, status, buffer, size); n 505 drivers/edac/ppc4xx_edac.c if (n < 0 || n >= size) n 508 drivers/edac/ppc4xx_edac.c buffer += n; n 509 drivers/edac/ppc4xx_edac.c size -= n; n 510 drivers/edac/ppc4xx_edac.c total += n; n 512 drivers/edac/ppc4xx_edac.c n = ppc4xx_edac_generate_checkbit_message(mci, status, buffer, size); n 514 drivers/edac/ppc4xx_edac.c if (n < 0 || n >= size) n 517 drivers/edac/ppc4xx_edac.c buffer += n; n 518 drivers/edac/ppc4xx_edac.c size -= n; n 519 drivers/edac/ppc4xx_edac.c total += n; n 521 drivers/edac/ppc4xx_edac.c n = ppc4xx_edac_generate_lane_message(mci, status, buffer, size); n 523 drivers/edac/ppc4xx_edac.c if (n < 0 || n >= size) n 526 drivers/edac/ppc4xx_edac.c buffer += n; n 527 drivers/edac/ppc4xx_edac.c size -= n; n 528 drivers/edac/ppc4xx_edac.c total += n; n 598 drivers/edac/ppc4xx_edac.c int n; n 603 drivers/edac/ppc4xx_edac.c n = ppc4xx_edac_generate_ecc_message(mci, status, buffer, size); n 605 drivers/edac/ppc4xx_edac.c if (n < 0 || n >= size) n 608 drivers/edac/ppc4xx_edac.c buffer += n; n 609 drivers/edac/ppc4xx_edac.c size -= n; n 35 drivers/edac/ppc4xx_edac.h #define SDRAM_MBXCF(n) (SDRAM_MBXCF_BASE + (4 * (n))) n 65 drivers/edac/ppc4xx_edac.h #define SDRAM_BESR_M0ID_DECODE(n) PPC_REG_DECODE(3, n) n 106 drivers/edac/ppc4xx_edac.h #define SDRAM_MCOPT1_MCHK_DECODE(n) ((((u32)(n)) >> 28) & 0x3) n 304 drivers/edac/x38_edac.c int n; n 306 drivers/edac/x38_edac.c n = drbs[channel][rank]; n 308 drivers/edac/x38_edac.c n -= drbs[channel][rank - 1]; n 311 drivers/edac/x38_edac.c n -= drbs[0][X38_RANKS_PER_CHANNEL - 1]; n 314 drivers/edac/x38_edac.c n <<= (X38_DRB_SHIFT - PAGE_SHIFT); n 315 drivers/edac/x38_edac.c return n; n 18 drivers/eisa/eisa-bus.c #define SLOT_ADDRESS(r,n) (r->bus_base_addr + (0x1000 * n)) n 413 drivers/firewire/net.c struct fwnet_fragment_info *fi, *n; n 415 drivers/firewire/net.c list_for_each_entry_safe(fi, n, &old->fi_list, fi_link) n 2577 drivers/firewire/ohci.c int n, ret = 0; n 2599 drivers/firewire/ohci.c n = (node_id & 0xffc0) == LOCAL_BUS ? 
node_id & 0x3f : 63; n 2600 drivers/firewire/ohci.c if (n < 32) n 2601 drivers/firewire/ohci.c reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n); n 2603 drivers/firewire/ohci.c reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32)); n 105 drivers/firewire/ohci.h #define OHCI1394_IsoXmitContextBase(n) (0x200 + 16 * (n)) n 106 drivers/firewire/ohci.h #define OHCI1394_IsoXmitContextControlSet(n) (0x200 + 16 * (n)) n 107 drivers/firewire/ohci.h #define OHCI1394_IsoXmitContextControlClear(n) (0x204 + 16 * (n)) n 108 drivers/firewire/ohci.h #define OHCI1394_IsoXmitCommandPtr(n) (0x20C + 16 * (n)) n 111 drivers/firewire/ohci.h #define OHCI1394_IsoRcvContextBase(n) (0x400 + 32 * (n)) n 112 drivers/firewire/ohci.h #define OHCI1394_IsoRcvContextControlSet(n) (0x400 + 32 * (n)) n 113 drivers/firewire/ohci.h #define OHCI1394_IsoRcvContextControlClear(n) (0x404 + 32 * (n)) n 114 drivers/firewire/ohci.h #define OHCI1394_IsoRcvCommandPtr(n) (0x40C + 32 * (n)) n 115 drivers/firewire/ohci.h #define OHCI1394_IsoRcvContextMatch(n) (0x410 + 32 * (n)) n 1385 drivers/firewire/sbp2.c int i, n; n 1387 drivers/firewire/sbp2.c n = scsi_dma_map(orb->cmd); n 1388 drivers/firewire/sbp2.c if (n <= 0) n 1398 drivers/firewire/sbp2.c if (n == 1) { n 1408 drivers/firewire/sbp2.c for_each_sg(sg, sg, n, i) { n 1429 drivers/firewire/sbp2.c COMMAND_ORB_DATA_SIZE(n)); n 1103 drivers/firmware/dmi_scan.c int n; n 1108 drivers/firmware/dmi_scan.c for (n = 0; n < dmi_memdev_nr; n++) { n 1109 drivers/firmware/dmi_scan.c if (handle == dmi_memdev[n].handle) { n 1110 drivers/firmware/dmi_scan.c *bank = dmi_memdev[n].bank; n 1111 drivers/firmware/dmi_scan.c *device = dmi_memdev[n].device; n 1120 drivers/firmware/dmi_scan.c int n; n 1123 drivers/firmware/dmi_scan.c for (n = 0; n < dmi_memdev_nr; n++) { n 1124 drivers/firmware/dmi_scan.c if (handle == dmi_memdev[n].handle) n 1125 drivers/firmware/dmi_scan.c return dmi_memdev[n].size; n 218 drivers/firmware/efi/cper.c u32 len, n; n 223 drivers/firmware/efi/cper.c n = 0; n 226 drivers/firmware/efi/cper.c n += scnprintf(msg + n, len - n, "node: %d ", mem->node); n 228 drivers/firmware/efi/cper.c n += scnprintf(msg + n, len - n, "card: %d ", mem->card); n 230 drivers/firmware/efi/cper.c n += scnprintf(msg + n, len - n, "module: %d ", mem->module); n 232 drivers/firmware/efi/cper.c n += scnprintf(msg + n, len - n, "rank: %d ", mem->rank); n 234 drivers/firmware/efi/cper.c n += scnprintf(msg + n, len - n, "bank: %d ", mem->bank); n 236 drivers/firmware/efi/cper.c n += scnprintf(msg + n, len - n, "device: %d ", mem->device); n 238 drivers/firmware/efi/cper.c n += scnprintf(msg + n, len - n, "row: %d ", mem->row); n 240 drivers/firmware/efi/cper.c n += scnprintf(msg + n, len - n, "column: %d ", mem->column); n 242 drivers/firmware/efi/cper.c n += scnprintf(msg + n, len - n, "bit_position: %d ", n 245 drivers/firmware/efi/cper.c n += scnprintf(msg + n, len - n, "requestor_id: 0x%016llx ", n 248 drivers/firmware/efi/cper.c n += scnprintf(msg + n, len - n, "responder_id: 0x%016llx ", n 251 drivers/firmware/efi/cper.c scnprintf(msg + n, len - n, "target_id: 0x%016llx ", n 254 drivers/firmware/efi/cper.c msg[n] = '\0'; n 255 drivers/firmware/efi/cper.c return n; n 260 drivers/firmware/efi/cper.c u32 len, n; n 266 drivers/firmware/efi/cper.c n = 0; n 270 drivers/firmware/efi/cper.c n = snprintf(msg, len, "DIMM location: %s %s ", bank, device); n 272 drivers/firmware/efi/cper.c n = snprintf(msg, len, n 276 drivers/firmware/efi/cper.c msg[n] = '\0'; n 277 drivers/firmware/efi/cper.c return n; n 158 
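The EDAC and CPER entries above build log messages incrementally: each append writes at msg + n with len - n bytes of room and advances n by what was written, either guarding the snprintf return value or relying on scnprintf to clamp it. A userspace sketch that clamps the way the kernel's scnprintf does:

#include <stdarg.h>
#include <stdio.h>

static size_t append(char *buf, size_t len, size_t n, const char *fmt, ...)
{
    va_list args;
    int ret;

    if (n >= len)
        return n;                   /* buffer already full */

    va_start(args, fmt);
    ret = vsnprintf(buf + n, len - n, fmt, args);
    va_end(args);

    if (ret < 0)
        return n;
    if ((size_t)ret > len - n - 1)
        ret = len - n - 1;          /* clamp to what actually fit */
    return n + ret;
}

int main(void)
{
    char msg[64];
    size_t n = 0;

    n = append(msg, sizeof(msg), n, "node: %d ", 1);
    n = append(msg, sizeof(msg), n, "card: %d ", 0);
    n = append(msg, sizeof(msg), n, "bank: %d ", 3);
    printf("%s(%zu chars)\n", msg, n);
    return 0;
}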
drivers/firmware/efi/earlycon.c unsigned int n, x; n 165 drivers/firmware/efi/earlycon.c n = count; n 168 drivers/firmware/efi/earlycon.c while (n-- > 0) { n 171 drivers/firmware/efi/efi-pstore.c struct efivar_entry *entry, *n; n 177 drivers/firmware/efi/efi-pstore.c list_for_each_entry_safe(entry, n, head, list) { n 178 drivers/firmware/efi/efi-pstore.c efi_pstore_scan_sysfs_enter(entry, n, head); n 181 drivers/firmware/efi/efi-pstore.c ret = efi_pstore_scan_sysfs_exit(entry, n, head, n 188 drivers/firmware/efi/efi-pstore.c *pos = n; n 192 drivers/firmware/efi/efi-pstore.c list_for_each_entry_safe_from((*pos), n, head, list) { n 193 drivers/firmware/efi/efi-pstore.c efi_pstore_scan_sysfs_enter((*pos), n, head); n 196 drivers/firmware/efi/efi-pstore.c ret = efi_pstore_scan_sysfs_exit((*pos), n, head, size < 0); n 202 drivers/firmware/efi/efi-pstore.c *pos = n; n 179 drivers/firmware/efi/efi.c struct attribute *attr, int n) n 224 drivers/firmware/efi/esrt.c struct attribute *attr, int n) n 762 drivers/firmware/efi/libstub/efi-stub-helper.c static u8 *efi_utf16_to_utf8(u8 *dst, const u16 *src, int n) n 766 drivers/firmware/efi/libstub/efi-stub-helper.c while (n--) { n 768 drivers/firmware/efi/libstub/efi-stub-helper.c if (n && c >= 0xd800 && c <= 0xdbff && n 772 drivers/firmware/efi/libstub/efi-stub-helper.c n--; n 340 drivers/firmware/efi/vars.c struct efivar_entry *entry, *n; n 345 drivers/firmware/efi/vars.c list_for_each_entry_safe(entry, n, head, list) { n 817 drivers/firmware/efi/vars.c struct efivar_entry *entry, *n; n 821 drivers/firmware/efi/vars.c list_for_each_entry_safe(entry, n, head, list) { n 1089 drivers/firmware/efi/vars.c struct efivar_entry *entry, *n; n 1093 drivers/firmware/efi/vars.c list_for_each_entry_safe(entry, n, head, list) { n 1106 drivers/firmware/efi/vars.c list_for_each_entry_safe_continue((*prev), n, head, list) { n 704 drivers/firmware/google/gsmi.c u64 n = hash; n 705 drivers/firmware/google/gsmi.c n <<= 18; n 706 drivers/firmware/google/gsmi.c hash -= n; n 707 drivers/firmware/google/gsmi.c n <<= 33; n 708 drivers/firmware/google/gsmi.c hash -= n; n 709 drivers/firmware/google/gsmi.c n <<= 3; n 710 drivers/firmware/google/gsmi.c hash += n; n 711 drivers/firmware/google/gsmi.c n <<= 3; n 712 drivers/firmware/google/gsmi.c hash -= n; n 713 drivers/firmware/google/gsmi.c n <<= 4; n 714 drivers/firmware/google/gsmi.c hash += n; n 715 drivers/firmware/google/gsmi.c n <<= 2; n 716 drivers/firmware/google/gsmi.c hash += n; n 220 drivers/firmware/qcom_scm-32.c #define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \ n 223 drivers/firmware/qcom_scm-32.c (n & 0xf)) n 140 drivers/firmware/ti_sci.c #define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb) n 188 drivers/fpga/dfl-afu-error.c struct attribute *attr, int n) n 374 drivers/fpga/dfl-afu-main.c struct attribute *attr, int n) n 471 drivers/fpga/dfl-afu-main.c struct attribute *attr, int n) n 294 drivers/fpga/dfl-fme-error.c struct attribute *attr, int n) n 88 drivers/fpga/dfl.h #define FME_HDR_PORT_OFST(n) (0x38 + ((n) * 0x8)) n 146 drivers/gnss/core.c size_t n = count - written; n 148 drivers/gnss/core.c if (n > GNSS_WRITE_BUF_SIZE) n 149 drivers/gnss/core.c n = GNSS_WRITE_BUF_SIZE; n 151 drivers/gnss/core.c if (copy_from_user(gdev->write_buf, buf, n)) { n 164 drivers/gnss/core.c ret = gdev->ops->write_raw(gdev, gdev->write_buf, n); n 759 drivers/gpio/gpio-aspeed.c u64 n; n 766 drivers/gpio/gpio-aspeed.c n = rate * usecs; n 767 drivers/gpio/gpio-aspeed.c r = do_div(n, 
1000000); n 769 drivers/gpio/gpio-aspeed.c if (n >= U32_MAX) n 773 drivers/gpio/gpio-aspeed.c *cycles = n + (!!r); n 504 drivers/gpio/gpio-eic-sprd.c u32 bank, n, girq; n 532 drivers/gpio/gpio-eic-sprd.c for_each_set_bit(n, ®, SPRD_EIC_PER_BANK_NR) { n 533 drivers/gpio/gpio-eic-sprd.c u32 offset = bank * SPRD_EIC_PER_BANK_NR + n; n 56 drivers/gpio/gpio-em.c #define GIO_IDT(n) (GIO_IDT0 + ((n) * 4)) n 23 drivers/gpio/gpio-lpc18xx.c #define LPC18XX_REG_DIR(n) (0x2000 + n * sizeof(u32)) n 362 drivers/gpio/gpio-max3191x.c int n, ret; n 373 drivers/gpio/gpio-max3191x.c n = BITS_TO_LONGS(max3191x->nchips); n 374 drivers/gpio/gpio-max3191x.c max3191x->crc_error = devm_kcalloc(dev, n, sizeof(long), GFP_KERNEL); n 375 drivers/gpio/gpio-max3191x.c max3191x->undervolt1 = devm_kcalloc(dev, n, sizeof(long), GFP_KERNEL); n 376 drivers/gpio/gpio-max3191x.c max3191x->undervolt2 = devm_kcalloc(dev, n, sizeof(long), GFP_KERNEL); n 377 drivers/gpio/gpio-max3191x.c max3191x->overtemp = devm_kcalloc(dev, n, sizeof(long), GFP_KERNEL); n 378 drivers/gpio/gpio-max3191x.c max3191x->fault = devm_kcalloc(dev, n, sizeof(long), GFP_KERNEL); n 268 drivers/gpio/gpio-pmic-eic-sprd.c u32 n, girq, val; n 278 drivers/gpio/gpio-pmic-eic-sprd.c for_each_set_bit(n, &status, chip->ngpio) { n 280 drivers/gpio/gpio-pmic-eic-sprd.c sprd_pmic_eic_update(chip, n, SPRD_PMIC_EIC_IC, 1); n 282 drivers/gpio/gpio-pmic-eic-sprd.c girq = irq_find_mapping(chip->irq.domain, n); n 289 drivers/gpio/gpio-pmic-eic-sprd.c sprd_pmic_eic_toggle_trigger(chip, girq, n); n 60 drivers/gpio/gpio-pxa.c #define BANK_OFF(n) (((n) / 3) << 8) + (((n) % 3) << 2) n 446 drivers/gpio/gpio-pxa.c int loop, gpio, n, handled = 0; n 458 drivers/gpio/gpio-pxa.c for_each_set_bit(n, &gedr, BITS_PER_LONG) { n 463 drivers/gpio/gpio-pxa.c gpio + n)); n 189 drivers/gpio/gpio-sprd.c u32 bank, n, girq; n 198 drivers/gpio/gpio-sprd.c for_each_set_bit(n, ®, SPRD_GPIO_BANK_NR) { n 200 drivers/gpio/gpio-sprd.c bank * SPRD_GPIO_BANK_NR + n); n 49 drivers/gpio/gpio-vf610.c #define PORT_PCR(n) ((n) * 0x4) n 632 drivers/gpio/gpiolib-acpi.c int n; n 650 drivers/gpio/gpiolib-acpi.c if (lookup->n++ != lookup->index) n 366 drivers/gpio/gpiolib-sysfs.c int n) n 4195 drivers/gpio/gpiolib.c void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n) n 4201 drivers/gpio/gpiolib.c for (i = 0; i < n; i++) n 132 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c static void amdgpu_atif_parse_notification(struct amdgpu_atif_notifications *n, u32 mask) n 134 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED; n 135 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED; n 136 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED; n 137 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED; n 138 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED; n 139 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c n->gpu_package_power_limit = mask & ATIF_GPU_PACKAGE_POWER_LIMIT_REQUEST_SUPPORTED; n 252 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c struct amdgpu_atif_notification_cfg *n = &atif->notification_cfg; n 279 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c n->enabled = false; n 280 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c n->command_code = 0; n 282 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c n->enabled = true; n 
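The gpio-eic, pmic-eic and pxa handlers above demultiplex a bank's pending-status word with for_each_set_bit(), mapping every set bit to a per-line interrupt. A userspace sketch of that scan, with __builtin_ctzl standing in for the kernel iterator and an assumed bank width of 8 lines:

#include <stdio.h>

#define BANK_NR 8   /* assumed lines per bank */

static void handle_line(unsigned int bank, unsigned int n)
{
    printf("bank %u line %u -> virtual irq %u\n", bank, n, bank * BANK_NR + n);
}

static void demux_bank(unsigned int bank, unsigned long status)
{
    while (status) {
        unsigned int n = (unsigned int)__builtin_ctzl(status);  /* lowest pending line */

        handle_line(bank, n);
        status &= status - 1;       /* clear the bit just serviced */
    }
}

int main(void)
{
    demux_bank(0, 0x05);    /* lines 0 and 2 pending */
    demux_bank(1, 0x80);    /* line 7 pending */
    return 0;
}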
283 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c n->command_code = 0x81; n 289 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c n->enabled = true; n 290 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c n->command_code = params.command_code; n 295 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c (n->enabled ? "enabled" : "disabled"), n 296 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c n->command_code); n 53 drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c int n, cts; n 57 drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c n = 128 * freq; n 61 drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c div = gcd(n, cts); n 63 drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c n /= div; n 70 drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c mul = ((128*freq/1000) + (n-1))/n; n 72 drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c n *= mul; n 76 drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c if (n < (128*freq/1500)) n 78 drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c if (n > (128*freq/300)) n 81 drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c *N = n; n 32 drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c uint64_t saddr, uint64_t daddr, int n) n 40 drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c for (i = 0; i < n; i++) { n 61 drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c static void amdgpu_benchmark_log_results(int n, unsigned size, n 66 drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c unsigned int throughput = (n * (size >> 10)) / time; n 69 drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c kind, n, size >> 10, sdomain, ddomain, time, n 80 drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c int r, n; n 90 drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c n = AMDGPU_BENCHMARK_ITERATIONS; n 130 drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c time = amdgpu_benchmark_do_move(adev, size, saddr, daddr, n); n 134 drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c amdgpu_benchmark_log_results(n, size, time, n 628 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c unsigned n, alloc_size; n 665 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c n = copy_to_user(out, regs, min(size, alloc_size)); n 667 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c return n ? 
-EFAULT : 0; n 39 drivers/gpu/drm/amd/amdgpu/amdgpu_test.c unsigned n, size; n 47 drivers/gpu/drm/amd/amdgpu/amdgpu_test.c n = adev->gmc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024; n 50 drivers/gpu/drm/amd/amdgpu/amdgpu_test.c n -= adev->rings[i]->ring_size; n 52 drivers/gpu/drm/amd/amdgpu/amdgpu_test.c n -= AMDGPU_GPU_PAGE_SIZE; n 54 drivers/gpu/drm/amd/amdgpu/amdgpu_test.c n -= adev->irq.ih.ring_size; n 55 drivers/gpu/drm/amd/amdgpu/amdgpu_test.c n /= size; n 57 drivers/gpu/drm/amd/amdgpu/amdgpu_test.c gtt_obj = kcalloc(n, sizeof(*gtt_obj), GFP_KERNEL); n 59 drivers/gpu/drm/amd/amdgpu/amdgpu_test.c DRM_ERROR("Failed to allocate %d pointers\n", n); n 85 drivers/gpu/drm/amd/amdgpu/amdgpu_test.c for (i = 0; i < n; i++) { n 91 drivers/gpu/drm/amd/amdgpu/atom.c static void debug_print_spaces(int n) n 93 drivers/gpu/drm/amd/amdgpu/atom.c while (n--) n 216 drivers/gpu/drm/amd/amdgpu/cikd.h #define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \ n 218 drivers/gpu/drm/amd/amdgpu/cikd.h ((n) & 0x3FFF) << 16) n 225 drivers/gpu/drm/amd/amdgpu/cikd.h #define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \ n 227 drivers/gpu/drm/amd/amdgpu/cikd.h ((n) & 0x3FFF) << 16) n 229 drivers/gpu/drm/amd/amdgpu/cikd.h #define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1) n 141 drivers/gpu/drm/amd/amdgpu/kv_dpm.c u32 n = 0; n 146 drivers/gpu/drm/amd/amdgpu/kv_dpm.c sclk_voltage_mapping_table->entries[n].sclk_frequency = n 148 drivers/gpu/drm/amd/amdgpu/kv_dpm.c sclk_voltage_mapping_table->entries[n].vid_2bit = n 151 drivers/gpu/drm/amd/amdgpu/kv_dpm.c n++; n 155 drivers/gpu/drm/amd/amdgpu/kv_dpm.c sclk_voltage_mapping_table->num_max_dpm_entries = n; n 39 drivers/gpu/drm/amd/amdgpu/nvd.h #define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \ n 41 drivers/gpu/drm/amd/amdgpu/nvd.h ((n) & 0x3FFF) << 16) n 48 drivers/gpu/drm/amd/amdgpu/nvd.h #define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \ n 50 drivers/gpu/drm/amd/amdgpu/nvd.h ((n) & 0x3FFF) << 16) n 52 drivers/gpu/drm/amd/amdgpu/nvd.h #define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1) n 2351 drivers/gpu/drm/amd/amdgpu/si_dpm.c u64 pwr_efficiency_ratio, n, d; n 2356 drivers/gpu/drm/amd/amdgpu/si_dpm.c n = div64_u64((u64)1024 * curr_vddc * curr_vddc * ((u64)1000 + margin), (u64)1000); n 2358 drivers/gpu/drm/amd/amdgpu/si_dpm.c pwr_efficiency_ratio = div64_u64(n, d); n 169 drivers/gpu/drm/amd/amdgpu/si_enums.h #define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \ n 171 drivers/gpu/drm/amd/amdgpu/si_enums.h ((n) & 0x3FFF) << 16) n 172 drivers/gpu/drm/amd/amdgpu/si_enums.h #define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1) n 1649 drivers/gpu/drm/amd/amdgpu/sid.h #define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \ n 1651 drivers/gpu/drm/amd/amdgpu/sid.h ((n) & 0x3FFF) << 16) n 1658 drivers/gpu/drm/amd/amdgpu/sid.h #define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \ n 1660 drivers/gpu/drm/amd/amdgpu/sid.h ((n) & 0x3FFF) << 16) n 1662 drivers/gpu/drm/amd/amdgpu/sid.h #define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1) n 1917 drivers/gpu/drm/amd/amdgpu/sid.h #define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \ n 1921 drivers/gpu/drm/amd/amdgpu/sid.h (((n) & 0xFFFFF) << 0)) n 1923 drivers/gpu/drm/amd/amdgpu/sid.h #define DMA_IB_PACKET(cmd, vmid, n) ((((cmd) & 0xF) << 28) | \ n 1925 drivers/gpu/drm/amd/amdgpu/sid.h (((n) & 0xFFFFF) << 0)) n 1927 drivers/gpu/drm/amd/amdgpu/sid.h #define DMA_PTE_PDE_PACKET(n) ((2 << 28) | \ n 1930 drivers/gpu/drm/amd/amdgpu/sid.h (((n) & 0xFFFFF) << 0)) n 41 drivers/gpu/drm/amd/amdgpu/soc15d.h #define PACKET0(reg, n) 
((PACKET_TYPE0 << 30) | \ n 43 drivers/gpu/drm/amd/amdgpu/soc15d.h ((n) & 0x3FFF) << 16) n 50 drivers/gpu/drm/amd/amdgpu/soc15d.h #define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \ n 52 drivers/gpu/drm/amd/amdgpu/soc15d.h ((n) & 0x3FFF) << 16) n 54 drivers/gpu/drm/amd/amdgpu/soc15d.h #define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1) n 98 drivers/gpu/drm/amd/amdgpu/vid.h #define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \ n 100 drivers/gpu/drm/amd/amdgpu/vid.h ((n) & 0x3FFF) << 16) n 107 drivers/gpu/drm/amd/amdgpu/vid.h #define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \ n 109 drivers/gpu/drm/amd/amdgpu/vid.h ((n) & 0x3FFF) << 16) n 111 drivers/gpu/drm/amd/amdgpu/vid.h #define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1) n 769 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c struct device_process_node *n; n 774 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c n = kzalloc(sizeof(*n), GFP_KERNEL); n 775 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (!n) n 778 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c n->qpd = qpd; n 785 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c list_add(&n->list, &dqm->queues); n 4930 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c int n; n 4949 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c n = ARRAY_SIZE(common_modes); n 4951 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c for (i = 0; i < n; i++) { n 358 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c int n; n 376 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c n = vsnprintf(NULL, 0, msg, args); n 379 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c if (n <= 0) n 383 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c total = log_ctx->pos + n + 1; n 402 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c n = vscnprintf( n 409 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c if (n > 0) n 410 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c log_ctx->pos += n; n 230 drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c int n = 27; n 255 drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c n * (n - 1))); n 257 drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c n -= 2; n 258 drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c } while (n > 2); n 283 drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c int n = 26; n 292 drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c n * (n - 1))); n 294 drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c n -= 2; n 295 drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c } while (n != 0); n 309 drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c unsigned int n = 9; n 312 drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c n + 2, n 313 drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c n + 1); n 325 drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c n)); n 326 drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c while (--n != 1); n 58 drivers/gpu/drm/amd/display/dc/dc_helper.c uint32_t addr, int n, n 69 drivers/gpu/drm/amd/display/dc/dc_helper.c while (i < n) { n 81 drivers/gpu/drm/amd/display/dc/dc_helper.c uint32_t addr, int n, n 91 drivers/gpu/drm/amd/display/dc/dc_helper.c set_reg_field_values(&field_value_mask, addr, n, shift1, mask1, n 104 drivers/gpu/drm/amd/display/dc/dc_helper.c uint32_t addr, uint32_t reg_val, int n, n 113 drivers/gpu/drm/amd/display/dc/dc_helper.c set_reg_field_values(&field_value_mask, addr, n, shift1, mask1, n 358 drivers/gpu/drm/amd/display/dc/dc_helper.c uint32_t index, uint32_t reg_val, int n, n 371 drivers/gpu/drm/amd/display/dc/dc_helper.c while (i < n) { n 42 
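The PACKET3(op, n) macros repeated above pack a PM4 type-3 command header: packet type in bits 31:30 and the count n in bits 29:16, as the visible macro fragments show. The opcode placement in bits 15:8 used below follows the conventional PM4 layout but is elided in the listing, so treat that part as an assumption of the sketch:

#include <stdint.h>
#include <stdio.h>

#define PACKET_TYPE3      3u
#define PACKET3(op, n)    ((PACKET_TYPE3 << 30) | \
                           (((uint32_t)(op) & 0xFF) << 8) | \
                           (((uint32_t)(n) & 0x3FFF) << 16))

#define PACKET3_NOP       0x10u   /* opcode value used for illustration */

int main(void)
{
    uint32_t header = PACKET3(PACKET3_NOP, 4);  /* count field = 4 */

    printf("header = 0x%08x (type %u, count %u, op 0x%02x)\n",
           header,
           header >> 30,
           (header >> 16) & 0x3FFF,
           (header >> 8) & 0xFF);
    return 0;
}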
drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c #define CRTC_REG_UPDATE_N(reg_name, n, ...) \ n 43 drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c generic_reg_update_soc15(tg110->base.ctx, tg110->offsets.crtc, reg_name, n, __VA_ARGS__) n 45 drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c #define CRTC_REG_SET_N(reg_name, n, ...) \ n 46 drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c generic_reg_set_soc15(tg110->base.ctx, tg110->offsets.crtc, reg_name, n, __VA_ARGS__) n 1343 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c #define HPD_REG_UPDATE_N(reg_name, n, ...) \ n 1346 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c n, __VA_ARGS__) n 1375 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c #define AUX_REG_UPDATE_N(reg_name, n, ...) \ n 1378 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c n, __VA_ARGS__) n 135 drivers/gpu/drm/amd/display/dc/dm_services.h uint32_t addr, uint32_t reg_val, int n, n 139 drivers/gpu/drm/amd/display/dc/dm_services.h uint32_t addr, int n, n 165 drivers/gpu/drm/amd/display/dc/dm_services.h #define generic_reg_update_soc15(ctx, inst_offset, reg_name, n, ...)\ n 167 drivers/gpu/drm/amd/display/dc/dm_services.h n, __VA_ARGS__) n 169 drivers/gpu/drm/amd/display/dc/dm_services.h #define generic_reg_set_soc15(ctx, inst_offset, reg_name, n, ...)\ n 171 drivers/gpu/drm/amd/display/dc/dm_services.h n, __VA_ARGS__) n 54 drivers/gpu/drm/amd/display/dc/inc/reg_helper.h #define REG_SET_N(reg_name, n, initial_val, ...) \ n 58 drivers/gpu/drm/amd/display/dc/inc/reg_helper.h n, __VA_ARGS__) n 225 drivers/gpu/drm/amd/display/dc/inc/reg_helper.h #define REG_UPDATE_N(reg_name, n, ...) \ n 228 drivers/gpu/drm/amd/display/dc/inc/reg_helper.h n, __VA_ARGS__) n 446 drivers/gpu/drm/amd/display/dc/inc/reg_helper.h #define IX_REG_SET_N(index_reg_name, data_reg_name, index, n, initial_val, ...) \ n 450 drivers/gpu/drm/amd/display/dc/inc/reg_helper.h n, __VA_ARGS__) n 463 drivers/gpu/drm/amd/display/dc/inc/reg_helper.h #define IX_REG_UPDATE_N(index_reg_name, data_reg_name, index, n, ...) 
\ n 467 drivers/gpu/drm/amd/display/dc/inc/reg_helper.h n, __VA_ARGS__) n 484 drivers/gpu/drm/amd/display/dc/inc/reg_helper.h uint32_t index, uint32_t reg_val, int n, n 272 drivers/gpu/drm/amd/include/kgd_pp_interface.h int (*set_power_limit)(void *handle, uint32_t n); n 1091 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) n 1098 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c PPSMC_MSG_PkgPwrSetLimit, n<<8); n 57 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); n 1324 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) n 1330 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c PPSMC_MSG_SetPptLimit, n); n 74 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); n 32 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c int vega20_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) n 39 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c PPSMC_MSG_SetPptLimit, n); n 26 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.h int vega20_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); n 499 drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h int (*set_power_limit)(struct smu_context *smu, uint32_t n); n 343 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h int (*set_power_limit)(struct pp_hwmgr *hwmgr, uint32_t n); n 58 drivers/gpu/drm/amd/powerplay/inc/pp_debug.h #define GET_FLEXIBLE_ARRAY_MEMBER_ADDR(type, member, ptr, n) \ n 59 drivers/gpu/drm/amd/powerplay/inc/pp_debug.h (type *)((char *)&(ptr)->member + (sizeof(type) * (n))) n 1070 drivers/gpu/drm/amd/powerplay/smu_v11_0.c static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n) n 1074 drivers/gpu/drm/amd/powerplay/smu_v11_0.c if (n > smu->default_power_limit) { n 1080 drivers/gpu/drm/amd/powerplay/smu_v11_0.c if (n == 0) n 1081 drivers/gpu/drm/amd/powerplay/smu_v11_0.c n = smu->default_power_limit; n 1088 drivers/gpu/drm/amd/powerplay/smu_v11_0.c ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n); n 1093 drivers/gpu/drm/amd/powerplay/smu_v11_0.c smu->power_limit = n; n 120 drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c int i, j, n = 0; n 135 drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c for (j = n - 1; j >= 0; j--) n 140 drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c fmts[n++] = cap->fourcc; n 144 drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c *n_fmts = n; n 192 drivers/gpu/drm/arm/malidp_mw.c int n, i; n 199 drivers/gpu/drm/arm/malidp_mw.c for (n = 0, i = 0; i < map->n_pixel_formats; i++) { n 201 drivers/gpu/drm/arm/malidp_mw.c formats[n++] = map->pixel_formats[i].format; n 204 drivers/gpu/drm/arm/malidp_mw.c *n_formats = n; n 938 drivers/gpu/drm/arm/malidp_planes.c int ret, i = 0, j = 0, n; n 975 drivers/gpu/drm/arm/malidp_planes.c for (n = 0, j = 0; j < map->n_pixel_formats; j++) { n 977 drivers/gpu/drm/arm/malidp_planes.c formats[n++] = map->pixel_formats[j].format; n 987 drivers/gpu/drm/arm/malidp_planes.c &malidp_de_plane_funcs, formats, n, n 67 drivers/gpu/drm/armada/armada_drv.c int ret, n; n 69 drivers/gpu/drm/armada/armada_drv.c for (n = 0; ; n++) { n 71 drivers/gpu/drm/armada/armada_drv.c IORESOURCE_MEM, n); n 464 drivers/gpu/drm/armada/armada_gem.c static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n) n 470 drivers/gpu/drm/armada/armada_gem.c armada_gem_dmabuf_no_kunmap(struct 
dma_buf *buf, unsigned long n, void *addr) n 20 drivers/gpu/drm/armada/armada_plane.h #define armada_pitch(state, n) to_armada_plane_state(state)->pitches[n] n 18 drivers/gpu/drm/bridge/adv7511/adv7511_audio.c unsigned int *cts, unsigned int *n) n 22 drivers/gpu/drm/bridge/adv7511/adv7511_audio.c *n = 4096; n 25 drivers/gpu/drm/bridge/adv7511/adv7511_audio.c *n = 6272; n 28 drivers/gpu/drm/bridge/adv7511/adv7511_audio.c *n = 6144; n 32 drivers/gpu/drm/bridge/adv7511/adv7511_audio.c *cts = ((f_tmds * *n) / (128 * fs)) * 1000; n 38 drivers/gpu/drm/bridge/adv7511/adv7511_audio.c unsigned int n = 0; n 40 drivers/gpu/drm/bridge/adv7511/adv7511_audio.c adv7511_calc_cts_n(adv7511->f_tmds, adv7511->f_audio, &cts, &n); n 42 drivers/gpu/drm/bridge/adv7511/adv7511_audio.c regmap_write(adv7511->regmap, ADV7511_REG_N0, (n >> 16) & 0xf); n 43 drivers/gpu/drm/bridge/adv7511/adv7511_audio.c regmap_write(adv7511->regmap, ADV7511_REG_N1, (n >> 8) & 0xff); n 44 drivers/gpu/drm/bridge/adv7511/adv7511_audio.c regmap_write(adv7511->regmap, ADV7511_REG_N2, n & 0xff); n 65 drivers/gpu/drm/bridge/sii902x.c #define SII902X_REG_CHIPID(n) (0x1b + (n)) n 268 drivers/gpu/drm/bridge/sil-sii8620.c struct sii8620_mt_msg *msg, *n; n 270 drivers/gpu/drm/bridge/sil-sii8620.c list_for_each_entry_safe(msg, n, &ctx->mt_queue, node) { n 553 drivers/gpu/drm/bridge/sil-sii8620.h #define VAL_RX_HDMI_CTRL2_IDLE_CNT(n) ((n) << 4) n 513 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c unsigned int n) n 532 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c hdmi_writeb(hdmi, (n >> 16) & 0x0f, HDMI_AUD_N3); n 533 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c hdmi_writeb(hdmi, (n >> 8) & 0xff, HDMI_AUD_N2); n 534 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c hdmi_writeb(hdmi, n & 0xff, HDMI_AUD_N1); n 539 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c unsigned int n = (128 * freq) / 1000; n 550 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c n = 4576; n 552 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c n = 4096; n 554 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c n = 11648; n 556 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c n = 4096; n 557 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c n *= mult; n 562 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c n = 7007; n 564 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c n = 17836; n 566 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c n = 8918; n 568 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c n = 6272; n 569 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c n *= mult; n 574 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c n = 6864; n 576 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c n = 6144; n 578 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c n = 11648; n 580 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c n = 5824; n 582 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c n = 6144; n 583 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c n *= mult; n 590 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c return n; n 597 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c unsigned int n, cts; n 601 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c n = hdmi_compute_n(sample_rate, pixel_clk); n 614 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c tmp = (u64)ftdms * n; n 621 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c n, cts); n 627 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c hdmi->audio_n = n; n 629 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c hdmi_set_cts_n(hdmi, cts, hdmi->audio_enable ? 
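The adv7511 and dw-hdmi entries above program the HDMI audio clock-regeneration pair: N takes the recommended per-rate constants (4096, 6272, 6144 for 32/44.1/48 kHz) and CTS follows from CTS = f_TMDS * N / (128 * f_s), computed in 64-bit to avoid overflow. A worked sketch for 48 kHz audio on a 148.5 MHz TMDS clock:

#include <stdint.h>
#include <stdio.h>

static unsigned int recommended_n(unsigned int sample_rate)
{
    switch (sample_rate) {
    case 32000: return 4096;
    case 44100: return 6272;
    case 48000: return 6144;
    default:    return 128 * sample_rate / 1000;   /* generic fallback */
    }
}

int main(void)
{
    const uint64_t f_tmds = 148500000;  /* 148.5 MHz TMDS clock */
    const unsigned int fs = 48000;
    const unsigned int n = recommended_n(fs);
    const uint64_t cts = f_tmds * n / (128ull * fs);   /* = 148500 here */

    printf("fs=%u Hz: N=%u CTS=%llu\n", fs, n, (unsigned long long)cts);
    return 0;
}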
n : 0); n 192 drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c #define N_LANES(n) (((n) - 1) & 0x3) n 210 drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c #define PHY_TESTDOUT(n) (((n) & 0xff) << 8) n 211 drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c #define PHY_TESTDIN(n) ((n) & 0xff) n 430 drivers/gpu/drm/drm_blend.c int i, n = 0; n 451 drivers/gpu/drm/drm_blend.c states[n++] = plane_state; n 457 drivers/gpu/drm/drm_blend.c sort(states, n, sizeof(*states), drm_atomic_state_zpos_cmp, NULL); n 459 drivers/gpu/drm/drm_blend.c for (i = 0; i < n; i++) { n 433 drivers/gpu/drm/drm_client_modeset.c int n, int width, int height) n 442 drivers/gpu/drm/drm_client_modeset.c if (n == connector_count) n 445 drivers/gpu/drm/drm_client_modeset.c connector = connectors[n]; n 447 drivers/gpu/drm/drm_client_modeset.c best_crtcs[n] = NULL; n 449 drivers/gpu/drm/drm_client_modeset.c best_crtcs, modes, n + 1, width, height); n 450 drivers/gpu/drm/drm_client_modeset.c if (modes[n] == NULL) n 475 drivers/gpu/drm/drm_client_modeset.c for (o = 0; o < n; o++) n 479 drivers/gpu/drm/drm_client_modeset.c if (o < n) { n 484 drivers/gpu/drm/drm_client_modeset.c if (!drm_mode_equal(modes[o], modes[n])) n 488 drivers/gpu/drm/drm_client_modeset.c crtcs[n] = crtc; n 489 drivers/gpu/drm/drm_client_modeset.c memcpy(crtcs, best_crtcs, n * sizeof(*crtcs)); n 491 drivers/gpu/drm/drm_client_modeset.c crtcs, modes, n + 1, width, height); n 164 drivers/gpu/drm/drm_connector.c struct drm_connector *connector, *n; n 175 drivers/gpu/drm/drm_connector.c llist_for_each_entry_safe(connector, n, freed, free_node) { n 510 drivers/gpu/drm/drm_context.c struct drm_ctx_list *pos, *n; n 512 drivers/gpu/drm/drm_context.c list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { n 1965 drivers/gpu/drm/drm_edid.c int i, n = 0; n 1969 drivers/gpu/drm/drm_edid.c n = (127 - d) / 18; n 1970 drivers/gpu/drm/drm_edid.c for (i = 0; i < n; i++) n 1977 drivers/gpu/drm/drm_edid.c unsigned int i, n = min((int)ext[0x02], 6); n 1983 drivers/gpu/drm/drm_edid.c for (i = 0; i < n; i++) n 913 drivers/gpu/drm/drm_ioc32.c #define DRM_IOCTL32_DEF(n, f) [DRM_IOCTL_NR(n##32)] = {.fn = f, .name = #n} n 110 drivers/gpu/drm/drm_mm.c unsigned int n; n 112 drivers/gpu/drm/drm_mm.c n = stack_trace_save(entries, ARRAY_SIZE(entries), 1); n 115 drivers/gpu/drm/drm_mm.c node->stack = stack_depot_save(entries, n, GFP_NOWAIT); n 163 drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c unsigned int len, n, off; n 168 drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c n = EXTRACT(cmd, VIV_FE_LOAD_STATE_HEADER_COUNT); n 169 drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c len = ALIGN(1 + n, 2); n 175 drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c off, n)) n 180 drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c n = EXTRACT(cmd, VIV_FE_DRAW_2D_HEADER_COUNT); n 181 drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c if (n == 0) n 182 drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c n = 256; n 183 drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c len = 2 + n * 2; n 470 drivers/gpu/drm/etnaviv/etnaviv_drv.c #define ETNA_IOCTL(n, func, flags) \ n 471 drivers/gpu/drm/etnaviv/etnaviv_drv.c DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags) n 149 drivers/gpu/drm/etnaviv/etnaviv_mmu.c struct etnaviv_vram_mapping *m, *n; n 185 drivers/gpu/drm/etnaviv/etnaviv_mmu.c list_for_each_entry_safe(m, n, &list, scan_node) n 196 drivers/gpu/drm/etnaviv/etnaviv_mmu.c list_for_each_entry_safe(m, n, &list, scan_node) n 205 drivers/gpu/drm/etnaviv/etnaviv_mmu.c list_for_each_entry_safe(m, n, &list, scan_node) { n 556 
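The drm_blend entries above normalize plane zpos values by collecting the states, sorting them by requested zpos and handing out dense positions in that order. A userspace sketch with qsort() in place of the kernel's sort() and an invented plane_state struct:

#include <stdio.h>
#include <stdlib.h>

struct plane_state {
    const char *name;
    int zpos;               /* requested, possibly sparse or duplicated */
    int normalized_zpos;    /* assigned below */
};

static int zpos_cmp(const void *a, const void *b)
{
    const struct plane_state *sa = a, *sb = b;

    return sa->zpos - sb->zpos;
}

int main(void)
{
    struct plane_state states[] = {
        { "cursor",  255, 0 },
        { "primary",   0, 0 },
        { "overlay",  10, 0 },
    };
    const int n = sizeof(states) / sizeof(states[0]);

    qsort(states, n, sizeof(*states), zpos_cmp);
    for (int i = 0; i < n; i++) {
        states[i].normalized_zpos = i;
        printf("%s: zpos %d -> normalized %d\n", states[i].name, states[i].zpos, i);
    }
    return 0;
}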
drivers/gpu/drm/exynos/exynos_drm_g2d.c struct g2d_cmdlist_userptr *g2d_userptr, *n; n 558 drivers/gpu/drm/exynos/exynos_drm_g2d.c list_for_each_entry_safe(g2d_userptr, n, &file_priv->userptr_list, list) n 856 drivers/gpu/drm/exynos/exynos_drm_g2d.c struct g2d_runqueue_node *node, *n; n 861 drivers/gpu/drm/exynos/exynos_drm_g2d.c list_for_each_entry_safe(node, n, &g2d->runqueue, list) { n 1361 drivers/gpu/drm/exynos/exynos_drm_g2d.c struct g2d_cmdlist_node *node, *n; n 1387 drivers/gpu/drm/exynos/exynos_drm_g2d.c list_for_each_entry_safe(node, n, &file_priv->inuse_cmdlist, list) { n 1034 drivers/gpu/drm/exynos/exynos_hdmi.c u32 n, cts; n 1037 drivers/gpu/drm/exynos/exynos_hdmi.c n = 128 * freq / (27000000 / cts); n 1039 drivers/gpu/drm/exynos/exynos_hdmi.c hdmi_reg_writev(hdata, HDMI_ACR_N0, 3, n); n 12 drivers/gpu/drm/exynos/regs-decon5433.h #define DECON_WINCONx(n) (0x0020 + ((n) * 4)) n 13 drivers/gpu/drm/exynos/regs-decon5433.h #define DECON_VIDOSDxH(n) (0x0080 + ((n) * 4)) n 15 drivers/gpu/drm/exynos/regs-decon5433.h #define DECON_VIDOSDxA(n) (0x00B0 + ((n) * 0x20)) n 16 drivers/gpu/drm/exynos/regs-decon5433.h #define DECON_VIDOSDxB(n) (0x00B4 + ((n) * 0x20)) n 17 drivers/gpu/drm/exynos/regs-decon5433.h #define DECON_VIDOSDxC(n) (0x00B8 + ((n) * 0x20)) n 18 drivers/gpu/drm/exynos/regs-decon5433.h #define DECON_VIDOSDxD(n) (0x00BC + ((n) * 0x20)) n 19 drivers/gpu/drm/exynos/regs-decon5433.h #define DECON_VIDOSDxE(n) (0x00C0 + ((n) * 0x20)) n 20 drivers/gpu/drm/exynos/regs-decon5433.h #define DECON_VIDW0xADD0B0(n) (0x0150 + ((n) * 0x10)) n 21 drivers/gpu/drm/exynos/regs-decon5433.h #define DECON_VIDW0xADD0B1(n) (0x0154 + ((n) * 0x10)) n 22 drivers/gpu/drm/exynos/regs-decon5433.h #define DECON_VIDW0xADD0B2(n) (0x0158 + ((n) * 0x10)) n 23 drivers/gpu/drm/exynos/regs-decon5433.h #define DECON_VIDW0xADD1B0(n) (0x01A0 + ((n) * 0x10)) n 24 drivers/gpu/drm/exynos/regs-decon5433.h #define DECON_VIDW0xADD1B1(n) (0x01A4 + ((n) * 0x10)) n 25 drivers/gpu/drm/exynos/regs-decon5433.h #define DECON_VIDW0xADD1B2(n) (0x01A8 + ((n) * 0x10)) n 26 drivers/gpu/drm/exynos/regs-decon5433.h #define DECON_VIDW0xADD2(n) (0x0200 + ((n) * 4)) n 27 drivers/gpu/drm/exynos/regs-decon5433.h #define DECON_LOCALxSIZE(n) (0x0214 + ((n) * 4)) n 30 drivers/gpu/drm/exynos/regs-decon5433.h #define DECON_WxKEYCON0(n) (0x0230 + ((n - 1) * 8)) n 31 drivers/gpu/drm/exynos/regs-decon5433.h #define DECON_WxKEYCON1(n) (0x0234 + ((n - 1) * 8)) n 32 drivers/gpu/drm/exynos/regs-decon5433.h #define DECON_WxKEYALPHA(n) (0x0250 + ((n - 1) * 4)) n 33 drivers/gpu/drm/exynos/regs-decon5433.h #define DECON_WINxMAP(n) (0x0270 + ((n) * 4)) n 37 drivers/gpu/drm/exynos/regs-decon5433.h #define DECON_BLENDERQx(n) (0x0300 + ((n - 1) * 4)) n 39 drivers/gpu/drm/exynos/regs-decon5433.h #define DECON_OPE_VIDW0xADD0(n) (0x0400 + ((n) * 4)) n 40 drivers/gpu/drm/exynos/regs-decon5433.h #define DECON_OPE_VIDW0xADD1(n) (0x0414 + ((n) * 4)) n 121 drivers/gpu/drm/exynos/regs-decon5433.h #define SHADOWCON_Wx_PROTECT(n) (1 << (10 + (n))) n 127 drivers/gpu/drm/exynos/regs-decon5433.h #define VIDOSD_Wx_ALPHA_R_F(n) (((n) & 0xff) << 16) n 128 drivers/gpu/drm/exynos/regs-decon5433.h #define VIDOSD_Wx_ALPHA_G_F(n) (((n) & 0xff) << 8) n 129 drivers/gpu/drm/exynos/regs-decon5433.h #define VIDOSD_Wx_ALPHA_B_F(n) (((n) & 0xff) << 0) n 220 drivers/gpu/drm/exynos/regs-decon5433.h #define BLENDERQ_Q_FUNC_F(n) (n << 18) n 221 drivers/gpu/drm/exynos/regs-decon5433.h #define BLENDERQ_P_FUNC_F(n) (n << 12) n 222 drivers/gpu/drm/exynos/regs-decon5433.h #define 
BLENDERQ_B_FUNC_F(n) (n << 6) n 223 drivers/gpu/drm/exynos/regs-decon5433.h #define BLENDERQ_A_FUNC_F(n) (n << 0) n 201 drivers/gpu/drm/exynos/regs-gsc.h #define GSC_IN_BASE_ADDR_Y(n) (0x50 + (n) * 0x4) n 203 drivers/gpu/drm/exynos/regs-gsc.h #define GSC_IN_BASE_ADDR_Y_CUR(n) (0x60 + (n) * 0x4) n 208 drivers/gpu/drm/exynos/regs-gsc.h #define GSC_IN_BASE_ADDR_CB(n) (0x80 + (n) * 0x4) n 210 drivers/gpu/drm/exynos/regs-gsc.h #define GSC_IN_BASE_ADDR_CB_CUR(n) (0x90 + (n) * 0x4) n 215 drivers/gpu/drm/exynos/regs-gsc.h #define GSC_IN_BASE_ADDR_CR(n) (0xB0 + (n) * 0x4) n 217 drivers/gpu/drm/exynos/regs-gsc.h #define GSC_IN_BASE_ADDR_CR_CUR(n) (0xC0 + (n) * 0x4) n 228 drivers/gpu/drm/exynos/regs-gsc.h #define GSC_OUT_BASE_ADDR_Y(n) (0x110 + (n) * 0x4) n 233 drivers/gpu/drm/exynos/regs-gsc.h #define GSC_OUT_BASE_ADDR_CB(n) (0x160 + (n) * 0x4) n 238 drivers/gpu/drm/exynos/regs-gsc.h #define GSC_OUT_BASE_ADDR_CR(n) (0x1B0 + (n) * 0x4) n 247 drivers/gpu/drm/exynos/regs-gsc.h #define GSC_HCOEF(n, s, x) (0x300 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300) n 250 drivers/gpu/drm/exynos/regs-gsc.h #define GSC_VCOEF(n, s, x) (0x200 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300) n 73 drivers/gpu/drm/exynos/regs-hdmi.h #define HDMI_V13_AVI_BYTE(n) HDMI_CORE_BASE(0x0320 + 4 * (n)) n 300 drivers/gpu/drm/exynos/regs-hdmi.h #define HDMI_ACP_DATA(n) HDMI_CORE_BASE(0x0520 + 4 * (n)) n 304 drivers/gpu/drm/exynos/regs-hdmi.h #define HDMI_ISRC1_DATA(n) HDMI_CORE_BASE(0x0620 + 4 * (n)) n 305 drivers/gpu/drm/exynos/regs-hdmi.h #define HDMI_ISRC2_DATA(n) HDMI_CORE_BASE(0x06A0 + 4 * (n)) n 312 drivers/gpu/drm/exynos/regs-hdmi.h #define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0720 + 4 * (n-1)) n 319 drivers/gpu/drm/exynos/regs-hdmi.h #define HDMI_AUI_BYTE(n) HDMI_CORE_BASE(0x0820 + 4 * (n-1)) n 323 drivers/gpu/drm/exynos/regs-hdmi.h #define HDMI_MPG_DATA(n) HDMI_CORE_BASE(0x0920 + 4 * (n)) n 329 drivers/gpu/drm/exynos/regs-hdmi.h #define HDMI_SPD_DATA(n) HDMI_CORE_BASE(0x0A20 + 4 * (n)) n 335 drivers/gpu/drm/exynos/regs-hdmi.h #define HDMI_GAMUT_METADATA(n) HDMI_CORE_BASE(0x0B20 + 4 * (n)) n 341 drivers/gpu/drm/exynos/regs-hdmi.h #define HDMI_VSI_DATA(n) HDMI_CORE_BASE(0x0C20 + 4 * (n)) n 368 drivers/gpu/drm/exynos/regs-hdmi.h #define HDMI_HDCP_SHA1(n) HDMI_CORE_BASE(0x7000 + 4 * (n)) n 369 drivers/gpu/drm/exynos/regs-hdmi.h #define HDMI_HDCP_KSV_LIST(n) HDMI_CORE_BASE(0x7050 + 4 * (n)) n 376 drivers/gpu/drm/exynos/regs-hdmi.h #define HDMI_HDCP_BKSV(n) HDMI_CORE_BASE(0x70A0 + 4 * (n)) n 377 drivers/gpu/drm/exynos/regs-hdmi.h #define HDMI_HDCP_AKSV(n) HDMI_CORE_BASE(0x70C0 + 4 * (n)) n 378 drivers/gpu/drm/exynos/regs-hdmi.h #define HDMI_HDCP_AN(n) HDMI_CORE_BASE(0x70E0 + 4 * (n)) n 421 drivers/gpu/drm/exynos/regs-hdmi.h #define HDMI_I2S_CH_ST(n) HDMI_I2S_BASE(0x028 + 4 * (n)) n 40 drivers/gpu/drm/exynos/regs-rotator.h #define ROT_SRC_BUF_ADDR(n) (0x30 + ((n) << 2)) n 41 drivers/gpu/drm/exynos/regs-rotator.h #define ROT_DST_BUF_ADDR(n) (0x50 + ((n) << 2)) n 38 drivers/gpu/drm/gma500/cdv_intel_display.c .n = {.min = 2, .max = 6}, n 50 drivers/gpu/drm/gma500/cdv_intel_display.c .n = {.min = 2, .max = 6}, n 65 drivers/gpu/drm/gma500/cdv_intel_display.c .n = {.min = 1, .max = 1}, n 77 drivers/gpu/drm/gma500/cdv_intel_display.c .n = {.min = 2, .max = 6}, n 89 drivers/gpu/drm/gma500/cdv_intel_display.c .n = {.min = 1, .max = 1}, n 101 drivers/gpu/drm/gma500/cdv_intel_display.c .n = {.min = 2, .max = 6}, n 287 drivers/gpu/drm/gma500/cdv_intel_display.c n_vco |= ((clock->n) << SB_N_DIVIDER_SHIFT); n 396 drivers/gpu/drm/gma500/cdv_intel_display.c 
clock->vco = (refclk * clock->m) / clock->n; n 413 drivers/gpu/drm/gma500/cdv_intel_display.c clock.n = 1; n 419 drivers/gpu/drm/gma500/cdv_intel_display.c clock.n = 1; n 429 drivers/gpu/drm/gma500/cdv_intel_display.c clock.n = 5; n 435 drivers/gpu/drm/gma500/cdv_intel_display.c clock.n = 5; n 834 drivers/gpu/drm/gma500/cdv_intel_display.c clock->vco = refclk * clock->m / (clock->n + 2); n 873 drivers/gpu/drm/gma500/cdv_intel_display.c clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; n 685 drivers/gpu/drm/gma500/gma_display.c if (clock->n < limit->n.min || limit->n.max < clock->n) n 736 drivers/gpu/drm/gma500/gma_display.c for (clock.n = limit->n.min; n 737 drivers/gpu/drm/gma500/gma_display.c clock.n <= limit->n.max; clock.n++) { n 20 drivers/gpu/drm/gma500/gma_display.h int n; n 40 drivers/gpu/drm/gma500/gma_display.h struct gma_range_t dot, vco, n, m, m1, m2, p, p1; n 65 drivers/gpu/drm/gma500/oaktrail_crtc.c .n = {.min = 3, .max = 7}, n 119 drivers/gpu/drm/gma500/oaktrail_crtc.c clock->dot, clock->m, clock->m1, clock->m2, clock->n, n 135 drivers/gpu/drm/gma500/oaktrail_crtc.c for (clock.n = limit->n.min; clock.n <= limit->n.max; n 136 drivers/gpu/drm/gma500/oaktrail_crtc.c clock.n++) { n 151 drivers/gpu/drm/gma500/oaktrail_crtc.c (clock.n * clock.p); n 510 drivers/gpu/drm/gma500/oaktrail_crtc.c clock.n = (1L << (clock.n - 1)); n 519 drivers/gpu/drm/gma500/oaktrail_crtc.c fp = clock.n << 16 | clock.m; n 28 drivers/gpu/drm/gma500/psb_intel_display.c .n = {.min = 1, .max = 6}, n 40 drivers/gpu/drm/gma500/psb_intel_display.c .n = {.min = 1, .max = 6}, n 70 drivers/gpu/drm/gma500/psb_intel_display.c clock->vco = refclk * clock->m / (clock->n + 2); n 150 drivers/gpu/drm/gma500/psb_intel_display.c fp = clock.n << 16 | clock.m1 << 8 | clock.m2; n 332 drivers/gpu/drm/gma500/psb_intel_display.c clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; n 230 drivers/gpu/drm/i2c/ch7006_drv.c int n = 0; n 240 drivers/gpu/drm/i2c/ch7006_drv.c n++; n 243 drivers/gpu/drm/i2c/ch7006_drv.c return n; n 261 drivers/gpu/drm/i2c/ch7006_mode.c int n, best_n = 0; n 265 drivers/gpu/drm/i2c/ch7006_mode.c for (n = 0; n < CH7006_MAXN; n++) { n 267 drivers/gpu/drm/i2c/ch7006_mode.c freq = CH7006_FREQ0*(n+2)/(m+2); n 272 drivers/gpu/drm/i2c/ch7006_mode.c best_n = n; n 1005 drivers/gpu/drm/i2c/tda998x_drv.c u32 n; n 1028 drivers/gpu/drm/i2c/tda998x_drv.c n = 128 * settings->sample_rate / 1000; n 1034 drivers/gpu/drm/i2c/tda998x_drv.c buf[3] = n; n 1035 drivers/gpu/drm/i2c/tda998x_drv.c buf[4] = n >> 8; n 1036 drivers/gpu/drm/i2c/tda998x_drv.c buf[5] = n >> 16; n 1280 drivers/gpu/drm/i2c/tda998x_drv.c int n; n 1307 drivers/gpu/drm/i2c/tda998x_drv.c n = drm_add_edid_modes(connector, edid); n 1313 drivers/gpu/drm/i2c/tda998x_drv.c return n; n 241 drivers/gpu/drm/i810/i810_dma.c static int i810_wait_ring(struct drm_device *dev, int n) n 250 drivers/gpu/drm/i810/i810_dma.c while (ring->space < n) { n 263 drivers/gpu/drm/i810/i810_dma.c DRM_ERROR("space: %d wanted %d\n", ring->space, n); n 148 drivers/gpu/drm/i810/i810_drv.h #define BEGIN_LP_RING(n) do { \ n 150 drivers/gpu/drm/i810/i810_drv.h DRM_DEBUG("BEGIN_LP_RING(%d)\n", n); \ n 151 drivers/gpu/drm/i810/i810_drv.h if (dev_priv->ring.space < n*4) \ n 152 drivers/gpu/drm/i810/i810_drv.h i810_wait_ring(dev, n*4); \ n 153 drivers/gpu/drm/i810/i810_drv.h dev_priv->ring.space -= n*4; \ n 166 drivers/gpu/drm/i810/i810_drv.h #define OUT_RING(n) do { \ n 168 drivers/gpu/drm/i810/i810_drv.h DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ n 169 drivers/gpu/drm/i810/i810_drv.h *(volatile 
unsigned int *)(virt + outring) = n; \ n 72 drivers/gpu/drm/i915/display/intel_audio.c u16 n; n 78 drivers/gpu/drm/i915/display/intel_audio.c int n; n 277 drivers/gpu/drm/i915/display/intel_audio.c return hdmi_ncts_table[i].n; n 386 drivers/gpu/drm/i915/display/intel_audio.c DRM_DEBUG_KMS("using Maud %u, Naud %u\n", nm->m, nm->n); n 398 drivers/gpu/drm/i915/display/intel_audio.c tmp |= AUD_CONFIG_N(nm->n); n 426 drivers/gpu/drm/i915/display/intel_audio.c int n, rate; n 437 drivers/gpu/drm/i915/display/intel_audio.c n = audio_config_hdmi_get_n(crtc_state, rate); n 438 drivers/gpu/drm/i915/display/intel_audio.c if (n != 0) { n 439 drivers/gpu/drm/i915/display/intel_audio.c DRM_DEBUG_KMS("using N %d\n", n); n 442 drivers/gpu/drm/i915/display/intel_audio.c tmp |= AUD_CONFIG_N(n); n 1232 drivers/gpu/drm/i915/display/intel_ddi.c int n, p, r; n 1269 drivers/gpu/drm/i915/display/intel_ddi.c n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT; n 1272 drivers/gpu/drm/i915/display/intel_ddi.c return (refclk * n * 100) / (p * r); n 1662 drivers/gpu/drm/i915/display/intel_ddi.c clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT; n 152 drivers/gpu/drm/i915/display/intel_display.c } dot, vco, n, m, m1, m2, p, p1; n 229 drivers/gpu/drm/i915/display/intel_display.c .n = { .min = 2, .max = 16 }, n 242 drivers/gpu/drm/i915/display/intel_display.c .n = { .min = 2, .max = 16 }, n 255 drivers/gpu/drm/i915/display/intel_display.c .n = { .min = 2, .max = 16 }, n 268 drivers/gpu/drm/i915/display/intel_display.c .n = { .min = 1, .max = 6 }, n 281 drivers/gpu/drm/i915/display/intel_display.c .n = { .min = 1, .max = 6 }, n 295 drivers/gpu/drm/i915/display/intel_display.c .n = { .min = 1, .max = 4 }, n 310 drivers/gpu/drm/i915/display/intel_display.c .n = { .min = 1, .max = 4 }, n 323 drivers/gpu/drm/i915/display/intel_display.c .n = { .min = 1, .max = 3 }, n 337 drivers/gpu/drm/i915/display/intel_display.c .n = { .min = 1, .max = 3 }, n 352 drivers/gpu/drm/i915/display/intel_display.c .n = { .min = 3, .max = 6 }, n 366 drivers/gpu/drm/i915/display/intel_display.c .n = { .min = 3, .max = 6 }, n 384 drivers/gpu/drm/i915/display/intel_display.c .n = { .min = 1, .max = 5 }, n 397 drivers/gpu/drm/i915/display/intel_display.c .n = { .min = 1, .max = 3 }, n 410 drivers/gpu/drm/i915/display/intel_display.c .n = { .min = 1, .max = 3 }, n 424 drivers/gpu/drm/i915/display/intel_display.c .n = { .min = 1, .max = 2 }, n 437 drivers/gpu/drm/i915/display/intel_display.c .n = { .min = 1, .max = 3 }, n 456 drivers/gpu/drm/i915/display/intel_display.c .n = { .min = 1, .max = 7 }, n 472 drivers/gpu/drm/i915/display/intel_display.c .n = { .min = 1, .max = 1 }, n 483 drivers/gpu/drm/i915/display/intel_display.c .n = { .min = 1, .max = 1 }, n 537 drivers/gpu/drm/i915/display/intel_display.c if (WARN_ON(clock->n == 0 || clock->p == 0)) n 539 drivers/gpu/drm/i915/display/intel_display.c clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); n 554 drivers/gpu/drm/i915/display/intel_display.c if (WARN_ON(clock->n + 2 == 0 || clock->p == 0)) n 556 drivers/gpu/drm/i915/display/intel_display.c clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2); n 566 drivers/gpu/drm/i915/display/intel_display.c if (WARN_ON(clock->n == 0 || clock->p == 0)) n 568 drivers/gpu/drm/i915/display/intel_display.c clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); n 578 drivers/gpu/drm/i915/display/intel_display.c if (WARN_ON(clock->n == 0 || clock->p == 0)) n 581 drivers/gpu/drm/i915/display/intel_display.c clock->n 
<< 22); n 597 drivers/gpu/drm/i915/display/intel_display.c if (clock->n < limit->n.min || limit->n.max < clock->n) n 685 drivers/gpu/drm/i915/display/intel_display.c for (clock.n = limit->n.min; n 686 drivers/gpu/drm/i915/display/intel_display.c clock.n <= limit->n.max; clock.n++) { n 741 drivers/gpu/drm/i915/display/intel_display.c for (clock.n = limit->n.min; n 742 drivers/gpu/drm/i915/display/intel_display.c clock.n <= limit->n.max; clock.n++) { n 796 drivers/gpu/drm/i915/display/intel_display.c max_n = limit->n.max; n 798 drivers/gpu/drm/i915/display/intel_display.c for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { n 818 drivers/gpu/drm/i915/display/intel_display.c max_n = clock.n; n 884 drivers/gpu/drm/i915/display/intel_display.c int max_n = min(limit->n.max, refclk / 19200); n 892 drivers/gpu/drm/i915/display/intel_display.c for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { n 901 drivers/gpu/drm/i915/display/intel_display.c clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n, n 954 drivers/gpu/drm/i915/display/intel_display.c clock.n = 1, clock.m1 = 2; n 965 drivers/gpu/drm/i915/display/intel_display.c m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22, n 7503 drivers/gpu/drm/i915/display/intel_display.c static void compute_m_n(unsigned int m, unsigned int n, n 7517 drivers/gpu/drm/i915/display/intel_display.c *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); n 7519 drivers/gpu/drm/i915/display/intel_display.c *ret_m = div_u64(mul_u32_u32(m, *ret_n), n); n 7555 drivers/gpu/drm/i915/display/intel_display.c return (1 << dpll->n) << 16 | dpll->m2; n 7560 drivers/gpu/drm/i915/display/intel_display.c return dpll->n << 16 | dpll->m1 << 8 | dpll->m2; n 7758 drivers/gpu/drm/i915/display/intel_display.c bestn = pipe_config->dpll.n; n 7857 drivers/gpu/drm/i915/display/intel_display.c bestn = pipe_config->dpll.n; n 8581 drivers/gpu/drm/i915/display/intel_display.c clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf; n 8698 drivers/gpu/drm/i915/display/intel_display.c clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf; n 9528 drivers/gpu/drm/i915/display/intel_display.c return i9xx_dpll_compute_m(dpll) < factor * dpll->n; n 9559 drivers/gpu/drm/i915/display/intel_display.c if (reduced_clock->m < factor * reduced_clock->n) n 11303 drivers/gpu/drm/i915/display/intel_display.c clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; n 11306 drivers/gpu/drm/i915/display/intel_display.c clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; n 12429 drivers/gpu/drm/i915/display/intel_display.c intel_compare_m_n(unsigned int m, unsigned int n, n 12433 drivers/gpu/drm/i915/display/intel_display.c if (m == m2 && n == n2) n 12436 drivers/gpu/drm/i915/display/intel_display.c if (exact || !m || !n || !m2 || !n2) n 12441 drivers/gpu/drm/i915/display/intel_display.c if (n > n2) { n 12442 drivers/gpu/drm/i915/display/intel_display.c while (n > n2) { n 12446 drivers/gpu/drm/i915/display/intel_display.c } else if (n < n2) { n 12447 drivers/gpu/drm/i915/display/intel_display.c while (n < n2) { n 12449 drivers/gpu/drm/i915/display/intel_display.c n <<= 1; n 12453 drivers/gpu/drm/i915/display/intel_display.c if (n != n2) n 16294 drivers/gpu/drm/i915/display/intel_display.c .n = 2, n 439 drivers/gpu/drm/i915/display/intel_display_types.h int n; n 97 drivers/gpu/drm/i915/display/intel_dp.c { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } }, n 99 drivers/gpu/drm/i915/display/intel_dp.c { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } } n 104 
drivers/gpu/drm/i915/display/intel_dp.c { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } }, n 106 drivers/gpu/drm/i915/display/intel_dp.c { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } } n 111 drivers/gpu/drm/i915/display/intel_dp.c { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } }, n 113 drivers/gpu/drm/i915/display/intel_dp.c { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } } n 127 drivers/gpu/drm/i915/display/intel_dp.c { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } }, n 129 drivers/gpu/drm/i915/display/intel_dp.c { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }, n 1732 drivers/gpu/drm/i915/display/intel_dpll_mgr.c u32 n; n 1770 drivers/gpu/drm/i915/display/intel_dpll_mgr.c clk_div->n = best_clock.n; n 1842 drivers/gpu/drm/i915/display/intel_dpll_mgr.c dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n); n 1254 drivers/gpu/drm/i915/display/intel_sdvo.c clock->n = 3; n 1260 drivers/gpu/drm/i915/display/intel_sdvo.c clock->n = 6; n 61 drivers/gpu/drm/i915/display/vlv_dsi_pll.c unsigned int m, n, p; n 73 drivers/gpu/drm/i915/display/vlv_dsi_pll.c n = 4; n 78 drivers/gpu/drm/i915/display/vlv_dsi_pll.c n = 1; n 85 drivers/gpu/drm/i915/display/vlv_dsi_pll.c delta = abs(target_dsi_clk - (m_min * ref_clk) / (p_min * n)); n 93 drivers/gpu/drm/i915/display/vlv_dsi_pll.c int calc_dsi_clk = (m * ref_clk) / (p * n); n 106 drivers/gpu/drm/i915/display/vlv_dsi_pll.c (ffs(n) - 1) << DSI_PLL_N1_DIV_SHIFT | n 262 drivers/gpu/drm/i915/display/vlv_dsi_pll.c u32 m = 0, p = 0, n; n 281 drivers/gpu/drm/i915/display/vlv_dsi_pll.c n = (pll_div & DSI_PLL_N1_DIV_MASK) >> DSI_PLL_N1_DIV_SHIFT; n 282 drivers/gpu/drm/i915/display/vlv_dsi_pll.c n = 1 << n; /* register has log2(N1) */ n 311 drivers/gpu/drm/i915/display/vlv_dsi_pll.c dsi_clock = (m * refclk) / (p * n); n 1372 drivers/gpu/drm/i915/gem/i915_gem_context.c unsigned int n; n 1416 drivers/gpu/drm/i915/gem/i915_gem_context.c for (n = 0; n < num_siblings; n++) { n 1419 drivers/gpu/drm/i915/gem/i915_gem_context.c if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) { n 1424 drivers/gpu/drm/i915/gem/i915_gem_context.c siblings[n] = intel_engine_lookup_user(set->ctx->i915, n 1427 drivers/gpu/drm/i915/gem/i915_gem_context.c if (!siblings[n]) { n 1429 drivers/gpu/drm/i915/gem/i915_gem_context.c n, ci.engine_class, ci.engine_instance); n 1435 drivers/gpu/drm/i915/gem/i915_gem_context.c ce = intel_execlists_create_virtual(set->ctx, siblings, n); n 1464 drivers/gpu/drm/i915/gem/i915_gem_context.c int err, n; n 1486 drivers/gpu/drm/i915/gem/i915_gem_context.c for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) { n 1487 drivers/gpu/drm/i915/gem/i915_gem_context.c err = check_user_mbz(&ext->mbz64[n]); n 1506 drivers/gpu/drm/i915/gem/i915_gem_context.c for (n = 0; n < num_bonds; n++) { n 1509 drivers/gpu/drm/i915/gem/i915_gem_context.c if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) n 1517 drivers/gpu/drm/i915/gem/i915_gem_context.c n, ci.engine_class, ci.engine_instance); n 1549 drivers/gpu/drm/i915/gem/i915_gem_context.c unsigned int num_engines, n; n 1584 drivers/gpu/drm/i915/gem/i915_gem_context.c for (n = 0; n < num_engines; n++) { n 1589 drivers/gpu/drm/i915/gem/i915_gem_context.c if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) { n 1590 drivers/gpu/drm/i915/gem/i915_gem_context.c __free_engines(set.engines, n); n 1596 drivers/gpu/drm/i915/gem/i915_gem_context.c set.engines->engines[n] = NULL; n 1605 drivers/gpu/drm/i915/gem/i915_gem_context.c n, ci.engine_class, ci.engine_instance); n 1606 drivers/gpu/drm/i915/gem/i915_gem_context.c 
__free_engines(set.engines, n); n 1612 drivers/gpu/drm/i915/gem/i915_gem_context.c __free_engines(set.engines, n); n 1616 drivers/gpu/drm/i915/gem/i915_gem_context.c set.engines->engines[n] = ce; n 1649 drivers/gpu/drm/i915/gem/i915_gem_context.c unsigned int n; n 1656 drivers/gpu/drm/i915/gem/i915_gem_context.c for (n = 0; n < e->num_engines; n++) { n 1657 drivers/gpu/drm/i915/gem/i915_gem_context.c if (e->engines[n]) n 1658 drivers/gpu/drm/i915/gem/i915_gem_context.c copy->engines[n] = intel_context_get(e->engines[n]); n 1660 drivers/gpu/drm/i915/gem/i915_gem_context.c copy->engines[n] = NULL; n 1662 drivers/gpu/drm/i915/gem/i915_gem_context.c copy->num_engines = n; n 1673 drivers/gpu/drm/i915/gem/i915_gem_context.c size_t n, count, size; n 1722 drivers/gpu/drm/i915/gem/i915_gem_context.c for (n = 0; n < count; n++) { n 1728 drivers/gpu/drm/i915/gem/i915_gem_context.c if (e->engines[n]) { n 1729 drivers/gpu/drm/i915/gem/i915_gem_context.c ci.engine_class = e->engines[n]->engine->uabi_class; n 1730 drivers/gpu/drm/i915/gem/i915_gem_context.c ci.engine_instance = e->engines[n]->engine->uabi_instance; n 1733 drivers/gpu/drm/i915/gem/i915_gem_context.c if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) { n 1857 drivers/gpu/drm/i915/gem/i915_gem_context.c unsigned long n; n 1864 drivers/gpu/drm/i915/gem/i915_gem_context.c for (n = 0; n < e->num_engines; n++) { n 1867 drivers/gpu/drm/i915/gem/i915_gem_context.c if (!e->engines[n]) { n 1868 drivers/gpu/drm/i915/gem/i915_gem_context.c clone->engines[n] = NULL; n 1871 drivers/gpu/drm/i915/gem/i915_gem_context.c engine = e->engines[n]->engine; n 1883 drivers/gpu/drm/i915/gem/i915_gem_context.c clone->engines[n] = n 1886 drivers/gpu/drm/i915/gem/i915_gem_context.c clone->engines[n] = intel_context_create(dst, engine); n 1887 drivers/gpu/drm/i915/gem/i915_gem_context.c if (IS_ERR_OR_NULL(clone->engines[n])) { n 1888 drivers/gpu/drm/i915/gem/i915_gem_context.c __free_engines(clone, n); n 1892 drivers/gpu/drm/i915/gem/i915_gem_context.c clone->num_engines = n; n 1929 drivers/gpu/drm/i915/gem/i915_gem_context.c unsigned long n; n 1938 drivers/gpu/drm/i915/gem/i915_gem_context.c for (n = 0; n < e->num_engines; n++) { n 1939 drivers/gpu/drm/i915/gem/i915_gem_context.c struct intel_context *ce = e->engines[n]; n 1941 drivers/gpu/drm/i915/gem/i915_gem_context.c if (clone->engines[n]->engine->class != ce->engine->class) { n 1952 drivers/gpu/drm/i915/gem/i915_gem_context.c clone->engines[n]->sseu = ce->sseu; n 2348 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c __free_fence_array(struct drm_syncobj **fences, unsigned int n) n 2350 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c while (n--) n 2351 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c drm_syncobj_put(ptr_mask_bits(fences[n], 2)); n 2362 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c unsigned long n; n 2384 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c for (n = 0; n < nfences; n++) { n 2408 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c fences[n] = ptr_pack_bits(syncobj, fence.flags, 2); n 2414 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c __free_fence_array(fences, n); n 2431 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c unsigned int n; n 2434 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c for (n = 0; n < nfences; n++) { n 2439 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c syncobj = ptr_unpack_bits(fences[n], &flags, 2); n 2462 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c unsigned int n; n 2464 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c for (n = 0; n < nfences; n++) { n 2468 
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c syncobj = ptr_unpack_bits(fences[n], &flags, 2); n 216 drivers/gpu/drm/i915/gem/i915_gem_object.h unsigned int n, unsigned int *offset); n 220 drivers/gpu/drm/i915/gem/i915_gem_object.h unsigned int n); n 224 drivers/gpu/drm/i915/gem/i915_gem_object.h unsigned int n); n 228 drivers/gpu/drm/i915/gem/i915_gem_object.h unsigned long n, n 233 drivers/gpu/drm/i915/gem/i915_gem_object.h unsigned long n); n 375 drivers/gpu/drm/i915/gem/i915_gem_pages.c unsigned int n, n 383 drivers/gpu/drm/i915/gem/i915_gem_pages.c GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT); n 395 drivers/gpu/drm/i915/gem/i915_gem_pages.c if (n < READ_ONCE(iter->sg_idx)) n 409 drivers/gpu/drm/i915/gem/i915_gem_pages.c while (idx + count <= n) { n 444 drivers/gpu/drm/i915/gem/i915_gem_pages.c if (unlikely(n < idx)) /* insertion completed by another thread */ n 450 drivers/gpu/drm/i915/gem/i915_gem_pages.c while (idx + count <= n) { n 456 drivers/gpu/drm/i915/gem/i915_gem_pages.c *offset = n - idx; n 462 drivers/gpu/drm/i915/gem/i915_gem_pages.c sg = radix_tree_lookup(&iter->radix, n); n 478 drivers/gpu/drm/i915/gem/i915_gem_pages.c *offset = n - base; n 487 drivers/gpu/drm/i915/gem/i915_gem_pages.c i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n) n 494 drivers/gpu/drm/i915/gem/i915_gem_pages.c sg = i915_gem_object_get_sg(obj, n, &offset); n 501 drivers/gpu/drm/i915/gem/i915_gem_pages.c unsigned int n) n 505 drivers/gpu/drm/i915/gem/i915_gem_pages.c page = i915_gem_object_get_page(obj, n); n 514 drivers/gpu/drm/i915/gem/i915_gem_pages.c unsigned long n, n 520 drivers/gpu/drm/i915/gem/i915_gem_pages.c sg = i915_gem_object_get_sg(obj, n, &offset); n 530 drivers/gpu/drm/i915/gem/i915_gem_pages.c unsigned long n) n 532 drivers/gpu/drm/i915/gem/i915_gem_pages.c return i915_gem_object_get_dma_address_len(obj, n, NULL); n 184 drivers/gpu/drm/i915/gem/i915_gem_wait.c static inline unsigned long nsecs_to_jiffies_timeout(const u64 n) n 188 drivers/gpu/drm/i915/gem/i915_gem_wait.c div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ) n 191 drivers/gpu/drm/i915/gem/i915_gem_wait.c return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1); n 31 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c unsigned long n; n 43 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c for (n = 0; n < nreal; n++) { n 903 drivers/gpu/drm/i915/gem/selftests/huge_pages.c unsigned long n; n 910 drivers/gpu/drm/i915/gem/selftests/huge_pages.c for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) { n 911 drivers/gpu/drm/i915/gem/selftests/huge_pages.c u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n)); n 918 drivers/gpu/drm/i915/gem/selftests/huge_pages.c n, dword, ptr[dword], val); n 1002 drivers/gpu/drm/i915/gem/selftests/huge_pages.c int i, n; n 1014 drivers/gpu/drm/i915/gem/selftests/huge_pages.c n = 0; n 1021 drivers/gpu/drm/i915/gem/selftests/huge_pages.c engines[n++] = engine; n 1024 drivers/gpu/drm/i915/gem/selftests/huge_pages.c if (!n) n 1032 drivers/gpu/drm/i915/gem/selftests/huge_pages.c order = i915_random_order(n * I915_NUM_ENGINES, &prng); n 1047 drivers/gpu/drm/i915/gem/selftests/huge_pages.c engine = engines[order[i] % n]; n 1048 drivers/gpu/drm/i915/gem/selftests/huge_pages.c i = (i + 1) % (n * I915_NUM_ENGINES); n 1091 drivers/gpu/drm/i915/gem/selftests/huge_pages.c int n, i; n 1102 drivers/gpu/drm/i915/gem/selftests/huge_pages.c n = 0; n 1104 drivers/gpu/drm/i915/gem/selftests/huge_pages.c pages[n++] = BIT(i); n 1106 drivers/gpu/drm/i915/gem/selftests/huge_pages.c for 
(size_mask = 2; size_mask < BIT(n); size_mask++) { n 1109 drivers/gpu/drm/i915/gem/selftests/huge_pages.c for (i = 0; i < n; i++) { n 1121 drivers/gpu/drm/i915/gem/selftests/huge_pages.c for (i = 0; i < n; i++) { n 1323 drivers/gpu/drm/i915/gem/selftests/huge_pages.c unsigned int n; n 1421 drivers/gpu/drm/i915/gem/selftests/huge_pages.c n = 0; n 1426 drivers/gpu/drm/i915/gem/selftests/huge_pages.c err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf); n 1430 drivers/gpu/drm/i915/gem/selftests/huge_pages.c while (n--) { n 1431 drivers/gpu/drm/i915/gem/selftests/huge_pages.c err = cpu_check(obj, n, 0xdeadbeaf); n 1514 drivers/gpu/drm/i915/gem/selftests/huge_pages.c unsigned int n; n 1550 drivers/gpu/drm/i915/gem/selftests/huge_pages.c n = 0; n 1555 drivers/gpu/drm/i915/gem/selftests/huge_pages.c err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf); n 1583 drivers/gpu/drm/i915/gem/selftests/huge_pages.c while (n--) { n 1584 drivers/gpu/drm/i915/gem/selftests/huge_pages.c err = cpu_check(obj, n, 0xdeadbeaf); n 284 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c unsigned long count, n; n 333 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c for (n = 0; n < count; n++) n 334 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c values[n] = prandom_u32_state(&prng); n 336 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c for (n = 0; n < count; n++) { n 337 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c err = over->set(obj, offsets[n], ~values[n]); n 340 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c n, count, over->name, err); n 345 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c for (n = 0; n < count; n++) { n 346 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c err = write->set(obj, offsets[n], values[n]); n 349 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c n, count, write->name, err); n 354 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c for (n = 0; n < count; n++) { n 357 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c err = read->get(obj, offsets[n], &found); n 360 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c n, count, read->name, err); n 364 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c if (found != values[n]) { n 366 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c n, count, over->name, n 367 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c write->name, values[n], n 369 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c ~values[n], offsets[n]); n 37 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c unsigned long n; n 63 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c for (n = 0; n < nctx; n++) { n 64 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c ctx[n] = live_context(i915, file); n 65 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c if (IS_ERR(ctx[n])) { n 66 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c err = PTR_ERR(ctx[n]); n 77 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c for (n = 0; n < nctx; n++) { n 78 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c rq = igt_request_alloc(ctx[n], engine); n 105 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c for (n = 0; n < prime; n++) { n 106 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c rq = igt_request_alloc(ctx[n % nctx], engine); n 218 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c unsigned int n, m, need_flush; n 225 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c for (n = 0; n < real_page_count(obj); n++) { n 228 
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c map = kmap_atomic(i915_gem_object_get_page(obj, n)); n 245 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c unsigned int n, m, needs_flush; n 252 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c for (n = 0; n < real_page_count(obj); n++) { n 255 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c map = kmap_atomic(i915_gem_object_get_page(obj, n)); n 263 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c n, real_page_count(obj), m, max, n 273 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c __builtin_return_address(0), idx, n, m, n 115 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c unsigned int n; n 129 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c n = page - view.partial.offset; n 130 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c GEM_BUG_ON(n >= view.partial.size); n 140 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c iowrite32(page, io + n * PAGE_SIZE / sizeof(*io)); n 154 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c page, n, n 39 drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c unsigned int n; n 57 drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c for (n = 0; n < obj->base.size / PAGE_SIZE; n++) { n 58 drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c if (i915_gem_object_get_page(obj, n) != n 59 drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c i915_gem_object_get_page(obj, n % nreal)) { n 61 drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c n, n % nreal); n 46 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c unsigned long n, size; n 65 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c for (n = 0; n < count; n++) { n 233 drivers/gpu/drm/i915/gt/intel_engine.h u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n); n 21 drivers/gpu/drm/i915/gt/intel_engine_pool.c int n; n 28 drivers/gpu/drm/i915/gt/intel_engine_pool.c n = fls(sz >> PAGE_SHIFT) - 1; n 29 drivers/gpu/drm/i915/gt/intel_engine_pool.c if (n >= ARRAY_SIZE(pool->cache_list)) n 30 drivers/gpu/drm/i915/gt/intel_engine_pool.c n = ARRAY_SIZE(pool->cache_list) - 1; n 32 drivers/gpu/drm/i915/gt/intel_engine_pool.c return &pool->cache_list[n]; n 151 drivers/gpu/drm/i915/gt/intel_engine_pool.c int n; n 154 drivers/gpu/drm/i915/gt/intel_engine_pool.c for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) n 155 drivers/gpu/drm/i915/gt/intel_engine_pool.c INIT_LIST_HEAD(&pool->cache_list[n]); n 160 drivers/gpu/drm/i915/gt/intel_engine_pool.c int n; n 162 drivers/gpu/drm/i915/gt/intel_engine_pool.c for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) { n 163 drivers/gpu/drm/i915/gt/intel_engine_pool.c struct list_head *list = &pool->cache_list[n]; n 175 drivers/gpu/drm/i915/gt/intel_engine_pool.c int n; n 177 drivers/gpu/drm/i915/gt/intel_engine_pool.c for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) n 178 drivers/gpu/drm/i915/gt/intel_engine_pool.c GEM_BUG_ON(!list_empty(&pool->cache_list[n])); n 66 drivers/gpu/drm/i915/gt/intel_gpu_commands.h #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) n 764 drivers/gpu/drm/i915/gt/intel_lrc.c unsigned int n; n 784 drivers/gpu/drm/i915/gt/intel_lrc.c for (n = execlists_num_ports(execlists); n--; ) { n 785 drivers/gpu/drm/i915/gt/intel_lrc.c struct i915_request *rq = execlists->pending[n]; n 789 drivers/gpu/drm/i915/gt/intel_lrc.c n); n 1218 drivers/gpu/drm/i915/gt/intel_lrc.c unsigned int n; n 1233 drivers/gpu/drm/i915/gt/intel_lrc.c for (n = 1; n < ve->num_siblings; n++) { n 1234 drivers/gpu/drm/i915/gt/intel_lrc.c if (ve->siblings[n] == engine) { n 1235 
drivers/gpu/drm/i915/gt/intel_lrc.c swap(ve->siblings[n], n 3422 drivers/gpu/drm/i915/gt/intel_lrc.c unsigned int n; n 3428 drivers/gpu/drm/i915/gt/intel_lrc.c for (n = 0; n < ve->num_siblings; n++) { n 3429 drivers/gpu/drm/i915/gt/intel_lrc.c struct intel_engine_cs *sibling = ve->siblings[n]; n 3496 drivers/gpu/drm/i915/gt/intel_lrc.c unsigned int n; n 3498 drivers/gpu/drm/i915/gt/intel_lrc.c for (n = 0; n < ve->num_siblings; n++) n 3499 drivers/gpu/drm/i915/gt/intel_lrc.c intel_engine_pm_get(ve->siblings[n]); n 3507 drivers/gpu/drm/i915/gt/intel_lrc.c unsigned int n; n 3511 drivers/gpu/drm/i915/gt/intel_lrc.c for (n = 0; n < ve->num_siblings; n++) n 3512 drivers/gpu/drm/i915/gt/intel_lrc.c intel_engine_pm_put(ve->siblings[n]); n 3555 drivers/gpu/drm/i915/gt/intel_lrc.c unsigned int n; n 3564 drivers/gpu/drm/i915/gt/intel_lrc.c for (n = 0; READ_ONCE(ve->request) && n < ve->num_siblings; n++) { n 3565 drivers/gpu/drm/i915/gt/intel_lrc.c struct intel_engine_cs *sibling = ve->siblings[n]; n 3712 drivers/gpu/drm/i915/gt/intel_lrc.c unsigned int n; n 3770 drivers/gpu/drm/i915/gt/intel_lrc.c for (n = 0; n < count; n++) { n 3771 drivers/gpu/drm/i915/gt/intel_lrc.c struct intel_engine_cs *sibling = siblings[n]; n 3884 drivers/gpu/drm/i915/gt/intel_lrc.c int n; n 3887 drivers/gpu/drm/i915/gt/intel_lrc.c for (n = 0; n < ve->num_siblings; n++) n 3888 drivers/gpu/drm/i915/gt/intel_lrc.c if (sibling == ve->siblings[n]) n 3890 drivers/gpu/drm/i915/gt/intel_lrc.c if (n == ve->num_siblings) n 49 drivers/gpu/drm/i915/gt/intel_lrc_reg.h #define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \ n 51 drivers/gpu/drm/i915/gt/intel_lrc_reg.h const u64 addr__ = i915_page_dir_dma_addr((ppgtt), (n)); \ n 52 drivers/gpu/drm/i915/gt/intel_lrc_reg.h (reg_state__)[CTX_PDP ## n ## _UDW + 1] = upper_32_bits(addr__); \ n 53 drivers/gpu/drm/i915/gt/intel_lrc_reg.h (reg_state__)[CTX_PDP ## n ## _LDW + 1] = lower_32_bits(addr__); \ n 186 drivers/gpu/drm/i915/gt/selftest_lrc.c int err, i, n = 0; n 188 drivers/gpu/drm/i915/gt/selftest_lrc.c head = semaphore_queue(outer, vma, n++); n 197 drivers/gpu/drm/i915/gt/selftest_lrc.c rq = semaphore_queue(engine, vma, n++); n 205 drivers/gpu/drm/i915/gt/selftest_lrc.c err = release_queue(outer, vma, n); n 213 drivers/gpu/drm/i915/gt/selftest_lrc.c count, n); n 1599 drivers/gpu/drm/i915/gt/selftest_lrc.c int n; n 1624 drivers/gpu/drm/i915/gt/selftest_lrc.c for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++) n 1625 drivers/gpu/drm/i915/gt/selftest_lrc.c cs[n] = MI_ARB_CHECK; n 1626 drivers/gpu/drm/i915/gt/selftest_lrc.c cs[n] = MI_BATCH_BUFFER_END; n 1635 drivers/gpu/drm/i915/gt/selftest_lrc.c for (n = 0; n < smoke.ncontext; n++) { n 1636 drivers/gpu/drm/i915/gt/selftest_lrc.c smoke.contexts[n] = kernel_context(smoke.i915); n 1637 drivers/gpu/drm/i915/gt/selftest_lrc.c if (!smoke.contexts[n]) n 1641 drivers/gpu/drm/i915/gt/selftest_lrc.c for (n = 0; n < ARRAY_SIZE(phase); n++) { n 1642 drivers/gpu/drm/i915/gt/selftest_lrc.c err = smoke_crescendo(&smoke, phase[n]); n 1646 drivers/gpu/drm/i915/gt/selftest_lrc.c err = smoke_random(&smoke, phase[n]); n 1655 drivers/gpu/drm/i915/gt/selftest_lrc.c for (n = 0; n < smoke.ncontext; n++) { n 1656 drivers/gpu/drm/i915/gt/selftest_lrc.c if (!smoke.contexts[n]) n 1658 drivers/gpu/drm/i915/gt/selftest_lrc.c kernel_context_close(smoke.contexts[n]); n 1682 drivers/gpu/drm/i915/gt/selftest_lrc.c unsigned long n, prime, nc; n 1689 drivers/gpu/drm/i915/gt/selftest_lrc.c for (n = 0; n < nctx; n++) { n 1690 drivers/gpu/drm/i915/gt/selftest_lrc.c ctx[n] = 
kernel_context(i915); n 1691 drivers/gpu/drm/i915/gt/selftest_lrc.c if (!ctx[n]) { n 1693 drivers/gpu/drm/i915/gt/selftest_lrc.c nctx = n; n 1697 drivers/gpu/drm/i915/gt/selftest_lrc.c ve[n] = intel_execlists_create_virtual(ctx[n], n 1699 drivers/gpu/drm/i915/gt/selftest_lrc.c if (IS_ERR(ve[n])) { n 1700 drivers/gpu/drm/i915/gt/selftest_lrc.c kernel_context_close(ctx[n]); n 1701 drivers/gpu/drm/i915/gt/selftest_lrc.c err = PTR_ERR(ve[n]); n 1702 drivers/gpu/drm/i915/gt/selftest_lrc.c nctx = n; n 1706 drivers/gpu/drm/i915/gt/selftest_lrc.c err = intel_context_pin(ve[n]); n 1708 drivers/gpu/drm/i915/gt/selftest_lrc.c intel_context_put(ve[n]); n 1709 drivers/gpu/drm/i915/gt/selftest_lrc.c kernel_context_close(ctx[n]); n 1710 drivers/gpu/drm/i915/gt/selftest_lrc.c nctx = n; n 1724 drivers/gpu/drm/i915/gt/selftest_lrc.c for (n = 0; n < prime; n++) { n 1736 drivers/gpu/drm/i915/gt/selftest_lrc.c for (n = 0; n < prime; n++) { n 1820 drivers/gpu/drm/i915/gt/selftest_lrc.c int nsibling, n; n 1832 drivers/gpu/drm/i915/gt/selftest_lrc.c for (n = 1; n <= nsibling + 1; n++) { n 1834 drivers/gpu/drm/i915/gt/selftest_lrc.c n, 0); n 1839 drivers/gpu/drm/i915/gt/selftest_lrc.c err = nop_virtual_engine(i915, siblings, nsibling, n, CHAIN); n 1857 drivers/gpu/drm/i915/gt/selftest_lrc.c unsigned int n; n 1883 drivers/gpu/drm/i915/gt/selftest_lrc.c for (n = 0; n < nsibling; n++) { n 1884 drivers/gpu/drm/i915/gt/selftest_lrc.c request[n] = i915_request_create(ve); n 1885 drivers/gpu/drm/i915/gt/selftest_lrc.c if (IS_ERR(request[n])) { n 1886 drivers/gpu/drm/i915/gt/selftest_lrc.c err = PTR_ERR(request[n]); n 1887 drivers/gpu/drm/i915/gt/selftest_lrc.c nsibling = n; n 1892 drivers/gpu/drm/i915/gt/selftest_lrc.c request[n]->execution_mask = siblings[nsibling - n - 1]->mask; n 1894 drivers/gpu/drm/i915/gt/selftest_lrc.c i915_request_get(request[n]); n 1895 drivers/gpu/drm/i915/gt/selftest_lrc.c i915_request_add(request[n]); n 1898 drivers/gpu/drm/i915/gt/selftest_lrc.c for (n = 0; n < nsibling; n++) { n 1899 drivers/gpu/drm/i915/gt/selftest_lrc.c if (i915_request_wait(request[n], 0, HZ / 10) < 0) { n 1902 drivers/gpu/drm/i915/gt/selftest_lrc.c request[n]->fence.context, n 1903 drivers/gpu/drm/i915/gt/selftest_lrc.c request[n]->fence.seqno); n 1907 drivers/gpu/drm/i915/gt/selftest_lrc.c request[n]->fence.context, n 1908 drivers/gpu/drm/i915/gt/selftest_lrc.c request[n]->fence.seqno); n 1915 drivers/gpu/drm/i915/gt/selftest_lrc.c if (request[n]->engine != siblings[nsibling - n - 1]) { n 1917 drivers/gpu/drm/i915/gt/selftest_lrc.c request[n]->engine->name, n 1918 drivers/gpu/drm/i915/gt/selftest_lrc.c siblings[nsibling - n - 1]->name); n 1932 drivers/gpu/drm/i915/gt/selftest_lrc.c for (n = 0; n < nsibling; n++) n 1933 drivers/gpu/drm/i915/gt/selftest_lrc.c i915_request_put(request[n]); n 1991 drivers/gpu/drm/i915/gt/selftest_lrc.c unsigned long n; n 2027 drivers/gpu/drm/i915/gt/selftest_lrc.c for (n = 0; n < nsibling; n++) { n 2041 drivers/gpu/drm/i915/gt/selftest_lrc.c siblings[n]); n 2055 drivers/gpu/drm/i915/gt/selftest_lrc.c rq[n + 1] = i915_request_create(ve); n 2057 drivers/gpu/drm/i915/gt/selftest_lrc.c if (IS_ERR(rq[n + 1])) { n 2058 drivers/gpu/drm/i915/gt/selftest_lrc.c err = PTR_ERR(rq[n + 1]); n 2062 drivers/gpu/drm/i915/gt/selftest_lrc.c i915_request_get(rq[n + 1]); n 2064 drivers/gpu/drm/i915/gt/selftest_lrc.c err = i915_request_await_execution(rq[n + 1], n 2067 drivers/gpu/drm/i915/gt/selftest_lrc.c i915_request_add(rq[n + 1]); n 2082 drivers/gpu/drm/i915/gt/selftest_lrc.c for (n = 0; n < nsibling; n++) 
{ n 2083 drivers/gpu/drm/i915/gt/selftest_lrc.c if (i915_request_wait(rq[n + 1], 0, n 2089 drivers/gpu/drm/i915/gt/selftest_lrc.c if (rq[n + 1]->engine != siblings[n]) { n 2091 drivers/gpu/drm/i915/gt/selftest_lrc.c siblings[n]->name, n 2092 drivers/gpu/drm/i915/gt/selftest_lrc.c rq[n + 1]->engine->name, n 2099 drivers/gpu/drm/i915/gt/selftest_lrc.c for (n = 0; !IS_ERR(rq[n]); n++) n 2100 drivers/gpu/drm/i915/gt/selftest_lrc.c i915_request_put(rq[n]); n 2105 drivers/gpu/drm/i915/gt/selftest_lrc.c for (n = 0; !IS_ERR(rq[n]); n++) n 2106 drivers/gpu/drm/i915/gt/selftest_lrc.c i915_request_put(rq[n]); n 504 drivers/gpu/drm/i915/gt/selftest_timeline.c unsigned long count, n; n 526 drivers/gpu/drm/i915/gt/selftest_timeline.c for (n = 0; n < NUM_TIMELINES; n++) { n 551 drivers/gpu/drm/i915/gt/selftest_timeline.c for (n = 0; n < count; n++) { n 552 drivers/gpu/drm/i915/gt/selftest_timeline.c struct intel_timeline *tl = timelines[n]; n 554 drivers/gpu/drm/i915/gt/selftest_timeline.c if (!err && *tl->hwsp_seqno != n) { n 556 drivers/gpu/drm/i915/gt/selftest_timeline.c n, *tl->hwsp_seqno); n 579 drivers/gpu/drm/i915/gt/selftest_timeline.c unsigned long count, n; n 598 drivers/gpu/drm/i915/gt/selftest_timeline.c for (n = 0; n < NUM_TIMELINES; n++) { n 627 drivers/gpu/drm/i915/gt/selftest_timeline.c for (n = 0; n < count; n++) { n 628 drivers/gpu/drm/i915/gt/selftest_timeline.c struct intel_timeline *tl = timelines[n]; n 630 drivers/gpu/drm/i915/gt/selftest_timeline.c if (!err && *tl->hwsp_seqno != n) { n 632 drivers/gpu/drm/i915/gt/selftest_timeline.c n, *tl->hwsp_seqno); n 39 drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h #define SOFT_SCRATCH(n) _MMIO(0xc180 + (n) * 4) n 42 drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h #define GEN11_SOFT_SCRATCH(n) _MMIO(0x190240 + (n) * 4) n 540 drivers/gpu/drm/i915/gvt/dmabuf.c struct list_head *pos, *n; n 544 drivers/gpu/drm/i915/gvt/dmabuf.c list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) { n 1571 drivers/gpu/drm/i915/gvt/gtt.c struct list_head *pos, *n; n 1578 drivers/gpu/drm/i915/gvt/gtt.c list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) { n 1684 drivers/gpu/drm/i915/gvt/gtt.c struct list_head *pos, *n; n 1690 drivers/gpu/drm/i915/gvt/gtt.c list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) { n 2017 drivers/gpu/drm/i915/gvt/gtt.c struct list_head *pos, *n; n 2021 drivers/gpu/drm/i915/gvt/gtt.c list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) { n 2219 drivers/gpu/drm/i915/gvt/gtt.c struct intel_gvt_partial_pte *partial_pte, *pos, *n; n 2242 drivers/gpu/drm/i915/gvt/gtt.c list_for_each_entry_safe(pos, n, n 2482 drivers/gpu/drm/i915/gvt/gtt.c struct list_head *pos, *n; n 2485 drivers/gpu/drm/i915/gvt/gtt.c list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) { n 2534 drivers/gpu/drm/i915/gvt/gtt.c struct list_head *pos, *n; n 2540 drivers/gpu/drm/i915/gvt/gtt.c list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) { n 2755 drivers/gpu/drm/i915/gvt/gtt.c struct list_head *pos, *n; n 2758 drivers/gpu/drm/i915/gvt/gtt.c list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) { n 890 drivers/gpu/drm/i915/gvt/scheduler.c struct intel_vgpu_workload *pos, *n; n 895 drivers/gpu/drm/i915/gvt/scheduler.c list_for_each_entry_safe(pos, n, n 126 drivers/gpu/drm/i915/i915_active.c struct active_node *it, *n; n 146 drivers/gpu/drm/i915/i915_active.c rbtree_postorder_for_each_entry_safe(it, n, &root, node) { n 408 drivers/gpu/drm/i915/i915_active.c struct active_node *it, *n; n 426 drivers/gpu/drm/i915/i915_active.c
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { n 462 drivers/gpu/drm/i915/i915_active.c struct active_node *it, *n; n 474 drivers/gpu/drm/i915/i915_active.c rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { n 1167 drivers/gpu/drm/i915/i915_cmd_parser.c int offset, n; n 1183 drivers/gpu/drm/i915/i915_cmd_parser.c for (n = batch_start_offset >> PAGE_SHIFT; batch_len; n++) { n 1186 drivers/gpu/drm/i915/i915_cmd_parser.c src = kmap_atomic(i915_gem_object_get_page(src_obj, n)); n 1854 drivers/gpu/drm/i915/i915_drv.h #define IS_GEN(dev_priv, n) \ n 1855 drivers/gpu/drm/i915/i915_drv.h (BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \ n 1856 drivers/gpu/drm/i915/i915_drv.h INTEL_INFO(dev_priv)->gen == (n)) n 518 drivers/gpu/drm/i915/i915_gem_gtt.h const unsigned short n) n 520 drivers/gpu/drm/i915/i915_gem_gtt.h return pd->entry[n]; n 525 drivers/gpu/drm/i915/i915_gem_gtt.h const unsigned short n) n 527 drivers/gpu/drm/i915/i915_gem_gtt.h return pdp->entry[n]; n 531 drivers/gpu/drm/i915/i915_gem_gtt.h i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n) n 533 drivers/gpu/drm/i915/i915_gem_gtt.h struct i915_page_dma *pt = ppgtt->pd->entry[n]; n 482 drivers/gpu/drm/i915/i915_gpu_error.c int n; n 542 drivers/gpu/drm/i915/i915_gpu_error.c for (n = 0; n < ee->num_ports; n++) { n 543 drivers/gpu/drm/i915/i915_gpu_error.c err_printf(m, " ELSP[%d]:", n); n 544 drivers/gpu/drm/i915/i915_gpu_error.c error_print_request(m, " ", &ee->execlist[n], epoch); n 1241 drivers/gpu/drm/i915/i915_gpu_error.c unsigned int n = 0; n 1244 drivers/gpu/drm/i915/i915_gpu_error.c record_request(*port++, &ee->execlist[n++]); n 1246 drivers/gpu/drm/i915/i915_gpu_error.c ee->num_ports = n; n 421 drivers/gpu/drm/i915/i915_reg.h #define GEN8_RING_PDP_UDW(base, n) _MMIO((base) + 0x270 + (n) * 8 + 4) n 422 drivers/gpu/drm/i915/i915_reg.h #define GEN8_RING_PDP_LDW(base, n) _MMIO((base) + 0x270 + (n) * 8) n 561 drivers/gpu/drm/i915/i915_reg.h #define BCS_GPR(n) _MMIO(0x22600 + (n) * 8) n 562 drivers/gpu/drm/i915/i915_reg.h #define BCS_GPR_UDW(n) _MMIO(0x22600 + (n) * 8 + 4) n 590 drivers/gpu/drm/i915/i915_reg.h #define GEN7_SO_NUM_PRIMS_WRITTEN(n) _MMIO(0x5200 + (n) * 8) n 591 drivers/gpu/drm/i915/i915_reg.h #define GEN7_SO_NUM_PRIMS_WRITTEN_UDW(n) _MMIO(0x5200 + (n) * 8 + 4) n 593 drivers/gpu/drm/i915/i915_reg.h #define GEN7_SO_PRIM_STORAGE_NEEDED(n) _MMIO(0x5240 + (n) * 8) n 594 drivers/gpu/drm/i915/i915_reg.h #define GEN7_SO_PRIM_STORAGE_NEEDED_UDW(n) _MMIO(0x5240 + (n) * 8 + 4) n 608 drivers/gpu/drm/i915/i915_reg.h #define HSW_CS_GPR(n) _MMIO(0x2600 + (n) * 8) n 609 drivers/gpu/drm/i915/i915_reg.h #define HSW_CS_GPR_UDW(n) _MMIO(0x2600 + (n) * 8 + 4) n 8593 drivers/gpu/drm/i915/i915_reg.h #define FORCEWAKE_MEDIA_VDBOX_GEN11(n) _MMIO(0xa540 + (n) * 4) n 8594 drivers/gpu/drm/i915/i915_reg.h #define FORCEWAKE_MEDIA_VEBOX_GEN11(n) _MMIO(0xa560 + (n) * 4) n 8598 drivers/gpu/drm/i915/i915_reg.h #define FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(n) _MMIO(0x0D50 + (n) * 4) n 8599 drivers/gpu/drm/i915/i915_reg.h #define FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(n) _MMIO(0x0D70 + (n) * 4) n 9037 drivers/gpu/drm/i915/i915_reg.h #define GEN7_SO_WRITE_OFFSET(n) _MMIO(0x5280 + (n) * 4) n 9056 drivers/gpu/drm/i915/i915_reg.h #define AUD_CONFIG_N(n) \ n 9057 drivers/gpu/drm/i915/i915_reg.h (((((n) >> 12) & 0xff) << AUD_CONFIG_UPPER_N_SHIFT) | \ n 9058 drivers/gpu/drm/i915/i915_reg.h (((n) & 0xfff) << AUD_CONFIG_LOWER_N_SHIFT)) n 9473 drivers/gpu/drm/i915/i915_reg.h #define DDI_BUF_TRANS_SELECT(n) ((n) << 24) n 
11189 drivers/gpu/drm/i915/i915_reg.h #define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */ n 11190 drivers/gpu/drm/i915/i915_reg.h #define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n))) n 11194 drivers/gpu/drm/i915/i915_reg.h #define MIPI_READ_DATA_RETURN(port, n) _MMIO(_MIPI(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */ n 11199 drivers/gpu/drm/i915/i915_reg.h #define READ_DATA_VALID(n) (1 << (n)) n 20 drivers/gpu/drm/i915/i915_scheduler.h #define priolist_for_each_request_consume(it, n, plist, idx) \ n 24 drivers/gpu/drm/i915/i915_scheduler.h list_for_each_entry_safe(it, n, \ n 140 drivers/gpu/drm/i915/i915_utils.h #define check_struct_size(p, member, n, sz) \ n 143 drivers/gpu/drm/i915/i915_utils.h n, sz)) n 145 drivers/gpu/drm/i915/i915_utils.h #define ptr_mask_bits(ptr, n) ({ \ n 147 drivers/gpu/drm/i915/i915_utils.h (typeof(ptr))(__v & -BIT(n)); \ n 150 drivers/gpu/drm/i915/i915_utils.h #define ptr_unmask_bits(ptr, n) ((unsigned long)(ptr) & (BIT(n) - 1)) n 152 drivers/gpu/drm/i915/i915_utils.h #define ptr_unpack_bits(ptr, bits, n) ({ \ n 154 drivers/gpu/drm/i915/i915_utils.h *(bits) = __v & (BIT(n) - 1); \ n 155 drivers/gpu/drm/i915/i915_utils.h (typeof(ptr))(__v & -BIT(n)); \ n 158 drivers/gpu/drm/i915/i915_utils.h #define ptr_pack_bits(ptr, bits, n) ({ \ n 160 drivers/gpu/drm/i915/i915_utils.h GEM_BUG_ON(__bits & -BIT(n)); \ n 236 drivers/gpu/drm/i915/i915_utils.h static inline bool is_power_of_2_u64(u64 n) n 238 drivers/gpu/drm/i915/i915_utils.h return (n != 0 && ((n & (n - 1)) == 0)); n 63 drivers/gpu/drm/i915/intel_runtime_pm.c unsigned int n; n 65 drivers/gpu/drm/i915/intel_runtime_pm.c n = stack_trace_save(entries, ARRAY_SIZE(entries), 1); n 66 drivers/gpu/drm/i915/intel_runtime_pm.c return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN); n 120 drivers/gpu/drm/i915/intel_runtime_pm.c unsigned long flags, n; n 127 drivers/gpu/drm/i915/intel_runtime_pm.c for (n = rpm->debug.count; n--; ) { n 128 drivers/gpu/drm/i915/intel_runtime_pm.c if (rpm->debug.owners[n] == stack) { n 129 drivers/gpu/drm/i915/intel_runtime_pm.c memmove(rpm->debug.owners + n, n 130 drivers/gpu/drm/i915/intel_runtime_pm.c rpm->debug.owners + n + 1, n 131 drivers/gpu/drm/i915/intel_runtime_pm.c (--rpm->debug.count - n) * sizeof(stack)); n 349 drivers/gpu/drm/i915/intel_uncore.c u32 n; n 354 drivers/gpu/drm/i915/intel_uncore.c n = fifo_free_entries(uncore); n 356 drivers/gpu/drm/i915/intel_uncore.c n = uncore->fifo_count; n 358 drivers/gpu/drm/i915/intel_uncore.c if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) { n 359 drivers/gpu/drm/i915/intel_uncore.c if (wait_for_atomic((n = fifo_free_entries(uncore)) > n 362 drivers/gpu/drm/i915/intel_uncore.c DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n); n 367 drivers/gpu/drm/i915/intel_uncore.c uncore->fifo_count = n - 1; n 230 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c unsigned int *order, count, n; n 275 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c for (n = 0; n < count; n++) { n 276 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c u64 addr = hole_start + order[n] * BIT_ULL(size); n 283 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c __func__, n, count)) { n 300 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c count = n; n 303 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c for (n = 0; n < count; n++) { n 304 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c u64 addr = hole_start + order[n] * BIT_ULL(size); n 730 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c unsigned int *order, count, n; n 774 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 
for (n = 0; n < count; n++) { n 775 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c u64 addr = hole_start + order[n] * BIT_ULL(size); n 803 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c __func__, n, count)) { n 1148 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c unsigned int *order, n; n 1174 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c for (n = 0; n < count; n++) { n 1175 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c u64 offset = tmp.start + n * PAGE_SIZE; n 1188 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c for (n = 0; n < count; n++) { n 1189 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c u64 offset = tmp.start + order[n] * PAGE_SIZE; n 1193 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c iowrite32(n, vaddr + n); n 1199 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c for (n = 0; n < count; n++) { n 1200 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c u64 offset = tmp.start + order[n] * PAGE_SIZE; n 1205 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c val = ioread32(vaddr + n); n 1208 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c if (val != n) { n 1210 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c val, n); n 315 drivers/gpu/drm/i915/selftests/i915_request.c unsigned int n, count; n 334 drivers/gpu/drm/i915/selftests/i915_request.c for (n = 0; n < count; n++) { n 336 drivers/gpu/drm/i915/selftests/i915_request.c t->contexts[order[n] % t->ncontexts]; n 349 drivers/gpu/drm/i915/selftests/i915_request.c count = n; n 357 drivers/gpu/drm/i915/selftests/i915_request.c requests[n] = i915_request_get(rq); n 370 drivers/gpu/drm/i915/selftests/i915_request.c count = n; n 395 drivers/gpu/drm/i915/selftests/i915_request.c for (n = 0; n < count; n++) { n 396 drivers/gpu/drm/i915/selftests/i915_request.c struct i915_request *rq = requests[n]; n 440 drivers/gpu/drm/i915/selftests/i915_request.c unsigned int n; n 461 drivers/gpu/drm/i915/selftests/i915_request.c for (n = 0; n < t.ncontexts; n++) { n 462 drivers/gpu/drm/i915/selftests/i915_request.c t.contexts[n] = mock_context(t.engine->i915, "mock"); n 463 drivers/gpu/drm/i915/selftests/i915_request.c if (!t.contexts[n]) { n 470 drivers/gpu/drm/i915/selftests/i915_request.c for (n = 0; n < ncpus; n++) { n 471 drivers/gpu/drm/i915/selftests/i915_request.c threads[n] = kthread_run(__igt_breadcrumbs_smoketest, n 472 drivers/gpu/drm/i915/selftests/i915_request.c &t, "igt/%d", n); n 473 drivers/gpu/drm/i915/selftests/i915_request.c if (IS_ERR(threads[n])) { n 474 drivers/gpu/drm/i915/selftests/i915_request.c ret = PTR_ERR(threads[n]); n 475 drivers/gpu/drm/i915/selftests/i915_request.c ncpus = n; n 479 drivers/gpu/drm/i915/selftests/i915_request.c get_task_struct(threads[n]); n 484 drivers/gpu/drm/i915/selftests/i915_request.c for (n = 0; n < ncpus; n++) { n 487 drivers/gpu/drm/i915/selftests/i915_request.c err = kthread_stop(threads[n]); n 491 drivers/gpu/drm/i915/selftests/i915_request.c put_task_struct(threads[n]); n 500 drivers/gpu/drm/i915/selftests/i915_request.c for (n = 0; n < t.ncontexts; n++) { n 501 drivers/gpu/drm/i915/selftests/i915_request.c if (!t.contexts[n]) n 503 drivers/gpu/drm/i915/selftests/i915_request.c mock_context_close(t.contexts[n]); n 557 drivers/gpu/drm/i915/selftests/i915_request.c unsigned long n, prime; n 568 drivers/gpu/drm/i915/selftests/i915_request.c for (n = 0; n < prime; n++) { n 707 drivers/gpu/drm/i915/selftests/i915_request.c unsigned long n, prime; n 725 drivers/gpu/drm/i915/selftests/i915_request.c for (n = 0; n < prime; n++) { n 1114 drivers/gpu/drm/i915/selftests/i915_request.c unsigned int n; n 1153 
drivers/gpu/drm/i915/selftests/i915_request.c for (n = 0; n < t[0].ncontexts; n++) { n 1154 drivers/gpu/drm/i915/selftests/i915_request.c t[0].contexts[n] = live_context(i915, file); n 1155 drivers/gpu/drm/i915/selftests/i915_request.c if (!t[0].contexts[n]) { n 1179 drivers/gpu/drm/i915/selftests/i915_request.c for (n = 0; n < ncpus; n++) { n 1183 drivers/gpu/drm/i915/selftests/i915_request.c &t[id], "igt/%d.%d", id, n); n 1191 drivers/gpu/drm/i915/selftests/i915_request.c threads[id * ncpus + n] = tsk; n 1202 drivers/gpu/drm/i915/selftests/i915_request.c for (n = 0; n < ncpus; n++) { n 1203 drivers/gpu/drm/i915/selftests/i915_request.c struct task_struct *tsk = threads[id * ncpus + n]; n 68 drivers/gpu/drm/i915/selftests/i915_selftest.c #define selftest(n, f) [mock_##n] = { .name = #n, { .mock = f } }, n 74 drivers/gpu/drm/i915/selftests/i915_selftest.c #define selftest(n, f) [live_##n] = { .name = #n, { .live = f } }, n 81 drivers/gpu/drm/i915/selftests/i915_selftest.c #define selftest(n, func) selftest_0(n, func, param(n)) n 82 drivers/gpu/drm/i915/selftests/i915_selftest.c #define param(n) __PASTE(igt__, __PASTE(__LINE__, __mock_##n)) n 83 drivers/gpu/drm/i915/selftests/i915_selftest.c #define selftest_0(n, func, id) \ n 84 drivers/gpu/drm/i915/selftests/i915_selftest.c module_param_named(id, mock_selftests[mock_##n].enabled, bool, 0400); n 89 drivers/gpu/drm/i915/selftests/i915_selftest.c #define param(n) __PASTE(igt__, __PASTE(__LINE__, __live_##n)) n 90 drivers/gpu/drm/i915/selftests/i915_selftest.c #define selftest_0(n, func, id) \ n 91 drivers/gpu/drm/i915/selftests/i915_selftest.c module_param_named(id, live_selftests[live_##n].enabled, bool, 0400); n 357 drivers/gpu/drm/i915/selftests/i915_vma.c unsigned int n, n 361 drivers/gpu/drm/i915/selftests/i915_vma.c return (r->plane[n].stride * (r->plane[n].height - y - 1) + n 362 drivers/gpu/drm/i915/selftests/i915_vma.c r->plane[n].offset + x); n 367 drivers/gpu/drm/i915/selftests/i915_vma.c const struct intel_rotation_info *r, unsigned int n, n 372 drivers/gpu/drm/i915/selftests/i915_vma.c for (x = 0; x < r->plane[n].width; x++) { n 373 drivers/gpu/drm/i915/selftests/i915_vma.c for (y = 0; y < r->plane[n].height; y++) { n 379 drivers/gpu/drm/i915/selftests/i915_vma.c n, x, y); n 383 drivers/gpu/drm/i915/selftests/i915_vma.c src_idx = rotated_index(r, n, x, y); n 407 drivers/gpu/drm/i915/selftests/i915_vma.c unsigned int n, n 411 drivers/gpu/drm/i915/selftests/i915_vma.c return (r->plane[n].stride * y + n 412 drivers/gpu/drm/i915/selftests/i915_vma.c r->plane[n].offset + x); n 417 drivers/gpu/drm/i915/selftests/i915_vma.c const struct intel_remapped_info *r, unsigned int n, n 424 drivers/gpu/drm/i915/selftests/i915_vma.c for (y = 0; y < r->plane[n].height; y++) { n 425 drivers/gpu/drm/i915/selftests/i915_vma.c for (x = 0; x < r->plane[n].width; x++) { n 431 drivers/gpu/drm/i915/selftests/i915_vma.c n, x, y); n 439 drivers/gpu/drm/i915/selftests/i915_vma.c src_idx = remapped_index(r, n, x, y); n 516 drivers/gpu/drm/i915/selftests/i915_vma.c unsigned int n, max_offset; n 581 drivers/gpu/drm/i915/selftests/i915_vma.c for (n = 0; n < ARRAY_SIZE(view.rotated.plane); n++) { n 583 drivers/gpu/drm/i915/selftests/i915_vma.c sg = assert_rotated(obj, &view.rotated, n, sg); n 585 drivers/gpu/drm/i915/selftests/i915_vma.c sg = assert_remapped(obj, &view.remapped, n, sg); n 589 drivers/gpu/drm/i915/selftests/i915_vma.c "rotated" : "remapped", n, n 37 drivers/gpu/drm/i915/selftests/scatterlist.c typedef unsigned int (*npages_fn_t)(unsigned long n, 
n 48 drivers/gpu/drm/i915/selftests/scatterlist.c unsigned long pfn, n; n 51 drivers/gpu/drm/i915/selftests/scatterlist.c for_each_sg(pt->st.sgl, sg, pt->st.nents, n) { n 53 drivers/gpu/drm/i915/selftests/scatterlist.c unsigned int npages = npages_fn(n, pt->st.nents, rnd); n 165 drivers/gpu/drm/i915/selftests/scatterlist.c static unsigned int one(unsigned long n, n 172 drivers/gpu/drm/i915/selftests/scatterlist.c static unsigned int grow(unsigned long n, n 176 drivers/gpu/drm/i915/selftests/scatterlist.c return n + 1; n 179 drivers/gpu/drm/i915/selftests/scatterlist.c static unsigned int shrink(unsigned long n, n 183 drivers/gpu/drm/i915/selftests/scatterlist.c return count - n; n 186 drivers/gpu/drm/i915/selftests/scatterlist.c static unsigned int random(unsigned long n, n 193 drivers/gpu/drm/i915/selftests/scatterlist.c static unsigned int random_page_size_pages(unsigned long n, n 221 drivers/gpu/drm/i915/selftests/scatterlist.c unsigned long n, pfn; n 237 drivers/gpu/drm/i915/selftests/scatterlist.c for (n = 0; n < count; n++) { n 238 drivers/gpu/drm/i915/selftests/scatterlist.c unsigned long npages = npages_fn(n, count, rnd); n 248 drivers/gpu/drm/i915/selftests/scatterlist.c if (n) n 259 drivers/gpu/drm/i915/selftests/scatterlist.c pt->st.nents = n; n 160 drivers/gpu/drm/lima/lima_pp.c int i, j, n = 0; n 167 drivers/gpu/drm/lima/lima_pp.c writel(wb[n++], ip->iomem + LIMA_PP_WB(i) + j * 4); n 24 drivers/gpu/drm/mediatek/mtk_disp_ovl.c #define DISP_REG_OVL_CON(n) (0x0030 + 0x20 * (n)) n 25 drivers/gpu/drm/mediatek/mtk_disp_ovl.c #define DISP_REG_OVL_SRC_SIZE(n) (0x0038 + 0x20 * (n)) n 26 drivers/gpu/drm/mediatek/mtk_disp_ovl.c #define DISP_REG_OVL_OFFSET(n) (0x003c + 0x20 * (n)) n 27 drivers/gpu/drm/mediatek/mtk_disp_ovl.c #define DISP_REG_OVL_PITCH(n) (0x0044 + 0x20 * (n)) n 28 drivers/gpu/drm/mediatek/mtk_disp_ovl.c #define DISP_REG_OVL_RDMA_CTRL(n) (0x00c0 + 0x20 * (n)) n 29 drivers/gpu/drm/mediatek/mtk_disp_ovl.c #define DISP_REG_OVL_RDMA_GMC(n) (0x00c8 + 0x20 * (n)) n 32 drivers/gpu/drm/mediatek/mtk_disp_ovl.c #define DISP_REG_OVL_ADDR(ovl, n) ((ovl)->data->addr + 0x20 * (n)) n 36 drivers/gpu/drm/mediatek/mtk_drm_ddp.c #define DISP_REG_MUTEX_EN(n) (0x20 + 0x20 * (n)) n 37 drivers/gpu/drm/mediatek/mtk_drm_ddp.c #define DISP_REG_MUTEX(n) (0x24 + 0x20 * (n)) n 38 drivers/gpu/drm/mediatek/mtk_drm_ddp.c #define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n)) n 39 drivers/gpu/drm/mediatek/mtk_drm_ddp.c #define DISP_REG_MUTEX_MOD(n) (0x2c + 0x20 * (n)) n 40 drivers/gpu/drm/mediatek/mtk_drm_ddp.c #define DISP_REG_MUTEX_SOF(n) (0x30 + 0x20 * (n)) n 41 drivers/gpu/drm/mediatek/mtk_drm_ddp.c #define DISP_REG_MUTEX_MOD2(n) (0x34 + 0x20 * (n)) n 143 drivers/gpu/drm/mediatek/mtk_dsi.c #define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 
1 : 0)) n 650 drivers/gpu/drm/mediatek/mtk_hdmi.c unsigned int n[3]; n 682 drivers/gpu/drm/mediatek/mtk_hdmi.c return recommended->n[0]; n 684 drivers/gpu/drm/mediatek/mtk_hdmi.c return recommended->n[1]; n 686 drivers/gpu/drm/mediatek/mtk_hdmi.c return recommended->n[2]; n 688 drivers/gpu/drm/mediatek/mtk_hdmi.c return recommended->n[1] * 2; n 690 drivers/gpu/drm/mediatek/mtk_hdmi.c return recommended->n[2] * 2; n 692 drivers/gpu/drm/mediatek/mtk_hdmi.c return recommended->n[1] * 4; n 694 drivers/gpu/drm/mediatek/mtk_hdmi.c return recommended->n[2] * 4; n 717 drivers/gpu/drm/mediatek/mtk_hdmi.c unsigned int tmds_clock, unsigned int n) n 719 drivers/gpu/drm/mediatek/mtk_hdmi.c return DIV_ROUND_CLOSEST_ULL((u64)hdmi_mode_clock_to_hz(tmds_clock) * n, n 723 drivers/gpu/drm/mediatek/mtk_hdmi.c static void do_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi, unsigned int n, n 739 drivers/gpu/drm/mediatek/mtk_hdmi.c val[4] = (n >> 16) & 0xff; n 740 drivers/gpu/drm/mediatek/mtk_hdmi.c val[5] = (n >> 8) & 0xff; n 741 drivers/gpu/drm/mediatek/mtk_hdmi.c val[6] = n & 0xff; n 751 drivers/gpu/drm/mediatek/mtk_hdmi.c unsigned int n, cts; n 753 drivers/gpu/drm/mediatek/mtk_hdmi.c n = hdmi_recommended_n(sample_rate, clock); n 754 drivers/gpu/drm/mediatek/mtk_hdmi.c cts = hdmi_expected_cts(sample_rate, clock, n); n 757 drivers/gpu/drm/mediatek/mtk_hdmi.c __func__, sample_rate, clock, n, cts); n 761 drivers/gpu/drm/mediatek/mtk_hdmi.c do_hdmi_hw_aud_set_ncts(hdmi, n, cts); n 284 drivers/gpu/drm/mga/mga_drv.h #define BEGIN_DMA(n) \ n 287 drivers/gpu/drm/mga/mga_drv.h DRM_INFO("BEGIN_DMA(%d)\n", (n)); \ n 289 drivers/gpu/drm/mga/mga_drv.h dev_priv->prim.space, (n) * DMA_BLOCK_SIZE); \ n 155 drivers/gpu/drm/mga/mga_ioc32.c #define DRM_IOCTL32_DEF(n, f)[DRM_##n] = {.fn = f, .name = #n} n 107 drivers/gpu/drm/mgag200/mgag200_mode.c unsigned int p, m, n; n 115 drivers/gpu/drm/mgag200/mgag200_mode.c m = n = p = 0; n 140 drivers/gpu/drm/mgag200/mgag200_mode.c n = testn - 1; n 149 drivers/gpu/drm/mgag200/mgag200_mode.c m = n = p = 0; n 183 drivers/gpu/drm/mgag200/mgag200_mode.c n = testn - 1; n 190 drivers/gpu/drm/mgag200/mgag200_mode.c fvv = pllreffreq * (n + 1) / (m + 1); n 208 drivers/gpu/drm/mgag200/mgag200_mode.c WREG_DAC(MGA1064_PIX_PLLC_N, n); n 226 drivers/gpu/drm/mgag200/mgag200_mode.c unsigned int p, m, n; n 232 drivers/gpu/drm/mgag200/mgag200_mode.c m = n = p = 0; n 262 drivers/gpu/drm/mgag200/mgag200_mode.c n = (testn & 0xFF); n 293 drivers/gpu/drm/mgag200/mgag200_mode.c n = testn - 1; n 295 drivers/gpu/drm/mgag200/mgag200_mode.c ((n >> 1) & 0x80); n 343 drivers/gpu/drm/mgag200/mgag200_mode.c WREG_DAC(MGA1064_WB_PIX_PLLC_N, n); n 405 drivers/gpu/drm/mgag200/mgag200_mode.c unsigned int p, m, n; n 409 drivers/gpu/drm/mgag200/mgag200_mode.c m = n = p = 0; n 432 drivers/gpu/drm/mgag200/mgag200_mode.c n = testn - 1; n 459 drivers/gpu/drm/mgag200/mgag200_mode.c WREG_DAC(MGA1064_EV_PIX_PLLC_N, n); n 498 drivers/gpu/drm/mgag200/mgag200_mode.c unsigned int p, m, n; n 504 drivers/gpu/drm/mgag200/mgag200_mode.c m = n = p = 0; n 528 drivers/gpu/drm/mgag200/mgag200_mode.c n = testn; n 562 drivers/gpu/drm/mgag200/mgag200_mode.c n = testn - 1; n 590 drivers/gpu/drm/mgag200/mgag200_mode.c WREG_DAC(MGA1064_EH_PIX_PLLC_N, n); n 628 drivers/gpu/drm/mgag200/mgag200_mode.c unsigned int p, m, n; n 633 drivers/gpu/drm/mgag200/mgag200_mode.c m = n = p = 0; n 664 drivers/gpu/drm/mgag200/mgag200_mode.c n = testn; n 694 drivers/gpu/drm/mgag200/mgag200_mode.c WREG_DAC(MGA1064_ER_PIX_PLLC_N, n); n 90 
drivers/gpu/drm/msm/adreno/a5xx_debugfs.c #define ENT(n) { .name = #n, .show = show, .data = n ##_print } n 12 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h #define REG_MASK(n) ((BIT(n)) - 1) n 185 drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h #define NAME(n) [n] = #n n 197 drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h #define NAME(n) [SSPP_ ## n] = #n n 149 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c int n, fetch_stride, cpp; n 154 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size); n 158 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c n = roundup_pow_of_two(n); n 160 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c blkcfg |= (n << (8 * i)); n 175 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c int n = blkcfg & 0xff; n 177 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c if (!n) n 180 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n); n 181 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c ret = smp_request_block(smp, state, cid, n); n 184 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c n, ret); n 48 drivers/gpu/drm/msm/disp/mdp_kms.c struct mdp_irq *handler, *n; n 53 drivers/gpu/drm/msm/disp/mdp_kms.c list_for_each_entry_safe(handler, n, &mdp_kms->irq_list, node) { n 10 drivers/gpu/drm/msm/dsi/phy/dsi_phy.c #define S_DIV_ROUND_UP(n, d) \ n 11 drivers/gpu/drm/msm/dsi/phy/dsi_phy.c (((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d))) n 116 drivers/gpu/drm/msm/edp/edp_ctrl.c u32 n; n 842 drivers/gpu/drm/msm/edp/edp_ctrl.c static int edp_sw_mvid_nvid(struct edp_ctrl *ctrl, u32 m, u32 n) n 857 drivers/gpu/drm/msm/edp/edp_ctrl.c edp_write(ctrl->base + REG_EDP_SOFTWARE_NVID, n * n_multi); n 896 drivers/gpu/drm/msm/edp/edp_ctrl.c u32 m, n; n 911 drivers/gpu/drm/msm/edp/edp_ctrl.c msm_edp_ctrl_pixel_clock_valid(ctrl, ctrl->pixel_rate, &m, &n); n 912 drivers/gpu/drm/msm/edp/edp_ctrl.c edp_sw_mvid_nvid(ctrl, m, n); n 1338 drivers/gpu/drm/msm/edp/edp_ctrl.c *pn = divs[i].n; n 25 drivers/gpu/drm/msm/hdmi/hdmi_audio.c uint32_t n; /* N parameter for clock regeneration */ n 114 drivers/gpu/drm/msm/hdmi/hdmi_audio.c uint32_t n, cts, multiplier; n 118 drivers/gpu/drm/msm/hdmi/hdmi_audio.c n = arcs->lut[audio->rate].n; n 124 drivers/gpu/drm/msm/hdmi/hdmi_audio.c n >>= 2; /* divide N by 4 and use multiplier */ n 128 drivers/gpu/drm/msm/hdmi/hdmi_audio.c n >>= 1; /* divide N by 2 and use multiplier */ n 133 drivers/gpu/drm/msm/hdmi/hdmi_audio.c DBG("n=%u, cts=%u, multiplier=%u", n, cts, multiplier); n 155 drivers/gpu/drm/msm/hdmi/hdmi_audio.c HDMI_ACR_1_N(n)); n 90 drivers/gpu/drm/msm/msm_drv.c char n[32]; n 92 drivers/gpu/drm/msm/msm_drv.c snprintf(n, sizeof(n), "%s_clk", name); n 95 drivers/gpu/drm/msm/msm_drv.c if (!strcmp(bulk[i].id, name) || !strcmp(bulk[i].id, n)) n 36 drivers/gpu/drm/msm/msm_fb.c int i, n = fb->format->num_planes; n 42 drivers/gpu/drm/msm/msm_fb.c for (i = 0; i < n; i++) { n 58 drivers/gpu/drm/msm/msm_fb.c int ret, i, n = fb->format->num_planes; n 61 drivers/gpu/drm/msm/msm_fb.c for (i = 0; i < n; i++) { n 74 drivers/gpu/drm/msm/msm_fb.c int i, n = fb->format->num_planes; n 76 drivers/gpu/drm/msm/msm_fb.c for (i = 0; i < n; i++) n 106 drivers/gpu/drm/msm/msm_fb.c int ret, i, n = info->num_planes; n 108 drivers/gpu/drm/msm/msm_fb.c for (i = 0; i < n; i++) { n 125 drivers/gpu/drm/msm/msm_fb.c for (i = 0; i < n; i++) n 140 drivers/gpu/drm/msm/msm_fb.c int ret, i, n; n 146 drivers/gpu/drm/msm/msm_fb.c n = info->num_planes; n 166 drivers/gpu/drm/msm/msm_fb.c if (n > ARRAY_SIZE(fb->obj)) { n 171 drivers/gpu/drm/msm/msm_fb.c for (i = 0; i < n; 
i++) { n 558 drivers/gpu/drm/msm/msm_gpu.c int i, n = min(ncntrs, gpu->num_perfcntrs); n 565 drivers/gpu/drm/msm/msm_gpu.c for (i = 0; i < n; i++) n 572 drivers/gpu/drm/msm/msm_gpu.c return n; n 64 drivers/gpu/drm/msm/msm_perf.c int i, n; n 68 drivers/gpu/drm/msm/msm_perf.c n = snprintf(ptr, rem, "%%BUSY"); n 69 drivers/gpu/drm/msm/msm_perf.c ptr += n; n 70 drivers/gpu/drm/msm/msm_perf.c rem -= n; n 74 drivers/gpu/drm/msm/msm_perf.c n = snprintf(ptr, rem, "\t%s", perfcntr->name); n 75 drivers/gpu/drm/msm/msm_perf.c ptr += n; n 76 drivers/gpu/drm/msm/msm_perf.c rem -= n; n 96 drivers/gpu/drm/msm/msm_perf.c n = snprintf(ptr, rem, "%3d.%d%%", val / 10, val % 10); n 97 drivers/gpu/drm/msm/msm_perf.c ptr += n; n 98 drivers/gpu/drm/msm/msm_perf.c rem -= n; n 103 drivers/gpu/drm/msm/msm_perf.c n = snprintf(ptr, rem, "\t%5d.%02d", n 105 drivers/gpu/drm/msm/msm_perf.c ptr += n; n 106 drivers/gpu/drm/msm/msm_perf.c rem -= n; n 110 drivers/gpu/drm/msm/msm_perf.c n = snprintf(ptr, rem, "\n"); n 111 drivers/gpu/drm/msm/msm_perf.c ptr += n; n 112 drivers/gpu/drm/msm/msm_perf.c rem -= n; n 124 drivers/gpu/drm/msm/msm_perf.c int n = 0, ret = 0; n 134 drivers/gpu/drm/msm/msm_perf.c n = min((int)sz, perf->buftot - perf->bufpos); n 135 drivers/gpu/drm/msm/msm_perf.c if (copy_to_user(buf, &perf->buf[perf->bufpos], n)) { n 140 drivers/gpu/drm/msm/msm_perf.c perf->bufpos += n; n 141 drivers/gpu/drm/msm/msm_perf.c *ppos += n; n 147 drivers/gpu/drm/msm/msm_perf.c return n; n 108 drivers/gpu/drm/msm/msm_rd.c int n; n 118 drivers/gpu/drm/msm/msm_rd.c n = min(sz, circ_space_to_end(&rd->fifo)); n 119 drivers/gpu/drm/msm/msm_rd.c memcpy(fptr, ptr, n); n 121 drivers/gpu/drm/msm/msm_rd.c smp_store_release(&fifo->head, (fifo->head + n) & (BUF_SZ - 1)); n 122 drivers/gpu/drm/msm/msm_rd.c sz -= n; n 123 drivers/gpu/drm/msm/msm_rd.c ptr += n; n 143 drivers/gpu/drm/msm/msm_rd.c int n = 0, ret = 0; n 156 drivers/gpu/drm/msm/msm_rd.c n = min_t(int, sz, circ_count_to_end(&rd->fifo)); n 157 drivers/gpu/drm/msm/msm_rd.c if (copy_to_user(buf, fptr, n)) { n 162 drivers/gpu/drm/msm/msm_rd.c smp_store_release(&fifo->tail, (fifo->tail + n) & (BUF_SZ - 1)); n 163 drivers/gpu/drm/msm/msm_rd.c *ppos += n; n 171 drivers/gpu/drm/msm/msm_rd.c return n; n 349 drivers/gpu/drm/msm/msm_rd.c int i, n; n 363 drivers/gpu/drm/msm/msm_rd.c n = vscnprintf(msg, sizeof(msg), fmt, args); n 366 drivers/gpu/drm/msm/msm_rd.c rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4)); n 372 drivers/gpu/drm/msm/msm_rd.c n = scnprintf(msg, sizeof(msg), "%.*s/%d: fence=%u", n 376 drivers/gpu/drm/msm/msm_rd.c n = scnprintf(msg, sizeof(msg), "???/%d: fence=%u", n 381 drivers/gpu/drm/msm/msm_rd.c rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4)); n 205 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c int n = 0; n 224 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c n++; n 227 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c return n; n 250 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c int i, n = 0; n 286 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c n++; n 289 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c return n; n 86 drivers/gpu/drm/nouveau/dispnv50/disp.c int ret, i, n; n 90 drivers/gpu/drm/nouveau/dispnv50/disp.c ret = n = nvif_object_sclass_get(disp, &sclass); n 95 drivers/gpu/drm/nouveau/dispnv50/disp.c for (i = 0; i < n; i++) { n 61 drivers/gpu/drm/nouveau/include/nvif/cl0080.h #define NV_DEVICE_INFO(n) ((n) | (0x00000000ULL << 32)) n 62 drivers/gpu/drm/nouveau/include/nvif/cl0080.h #define NV_DEVICE_FIFO(n) ((n) | (0x00000001ULL << 32)) n 93 drivers/gpu/drm/nouveau/include/nvif/cl0080.h #define 
NV_DEVICE_FIFO_RUNLIST_ENGINES(n) ((n) + NV_DEVICE_FIFO(0x00000010)) n 31 drivers/gpu/drm/nouveau/include/nvif/device.h #define nvif_nsec(d,n,cond...) ({ \ n 33 drivers/gpu/drm/nouveau/include/nvif/device.h u64 _nsecs = (n), _time0 = nvif_device_time(_device); \ n 32 drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h #define NVKM_I2C_BUS_CCB(n) /* 'n' is ccb index */ (n) n 33 drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h #define NVKM_I2C_BUS_EXT(n) /* 'n' is dcb external encoder type */ ((n) + 0x100) n 54 drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h #define NVKM_I2C_AUX_CCB(n) /* 'n' is ccb index */ (n) n 55 drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h #define NVKM_I2C_AUX_EXT(n) /* 'n' is dcb external encoder type */ ((n) + 0x100) n 52 drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h #define nvkm_nsec(d,n,cond...) ({ \ n 57 drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h nvkm_timer_wait_init((d), (n), &_wait); \ n 69 drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h #define nvkm_wait_nsec(d,n,addr,mask,data) \ n 70 drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h nvkm_nsec(d, n, \ n 1232 drivers/gpu/drm/nouveau/nouveau_bios.c static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len) n 1236 drivers/gpu/drm/nouveau/nouveau_bios.c for (i = 0; i <= (n - len); i++) { n 342 drivers/gpu/drm/nouveau/nouveau_bo.c set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags) n 344 drivers/gpu/drm/nouveau/nouveau_bo.c *n = 0; n 347 drivers/gpu/drm/nouveau/nouveau_bo.c pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags; n 349 drivers/gpu/drm/nouveau/nouveau_bo.c pl[(*n)++].flags = TTM_PL_FLAG_TT | flags; n 351 drivers/gpu/drm/nouveau/nouveau_bo.c pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags; n 328 drivers/gpu/drm/nouveau/nouveau_display.c #define PROP_ENUM(p,gen,n,list) do { \ n 337 drivers/gpu/drm/nouveau/nouveau_display.c p = drm_property_create(dev, DRM_MODE_PROP_ENUM, n, c); \ n 427 drivers/gpu/drm/nouveau/nouveau_drm.c int ret, i, n; n 440 drivers/gpu/drm/nouveau/nouveau_drm.c ret = n = nvif_object_sclass_get(&device->object, &sclass); n 444 drivers/gpu/drm/nouveau/nouveau_drm.c for (ret = -ENOSYS, i = 0; i < n; i++) { n 643 drivers/gpu/drm/nouveau/nouveau_reg.h #define NV50_AUXCH_DATA_OUT(i, n) ((n) * 4 + (i) * 0x50 + 0x0000e4c0) n 645 drivers/gpu/drm/nouveau/nouveau_reg.h #define NV50_AUXCH_DATA_IN(i, n) ((n) * 4 + (i) * 0x50 + 0x0000e4d0) n 708 drivers/gpu/drm/nouveau/nouveau_reg.h #define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(n) (1 << ((n) + 2)) n 717 drivers/gpu/drm/nouveau/nouveau_reg.h #define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(n) (1 << ((n) + 2)) n 73 drivers/gpu/drm/nouveau/nvkm/core/client.c struct nvkm_notify n; n 82 drivers/gpu/drm/nouveau/nvkm/core/client.c nvkm_client_notify(struct nvkm_notify *n) n 84 drivers/gpu/drm/nouveau/nvkm/core/client.c struct nvkm_client_notify *notify = container_of(n, typeof(*notify), n); n 86 drivers/gpu/drm/nouveau/nvkm/core/client.c return client->ntfy(¬ify->rep, notify->size, n->data, n->size); n 94 drivers/gpu/drm/nouveau/nvkm/core/client.c nvkm_notify_put(&client->notify[index]->n); n 106 drivers/gpu/drm/nouveau/nvkm/core/client.c nvkm_notify_get(&client->notify[index]->n); n 118 drivers/gpu/drm/nouveau/nvkm/core/client.c nvkm_notify_fini(&client->notify[index]->n); n 166 drivers/gpu/drm/nouveau/nvkm/core/client.c false, data, size, reply, ¬ify->n); n 2640 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c #define _(n,p,m) case NVKM_SUBDEV_##n: if (p) return (m); break n 2683 
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c #define _(n,p,m) case NVKM_ENGINE_##n: if (p) return (m); break n 921 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c int gpc, ppc, n = 0; n 930 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) { n 933 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c const u32 u = 0x418ea0 + (n * 0x04); n 56 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c int gpc, ppc, b, n = 0; n 73 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) { n 76 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c const u32 u = 0x418ea0 + (n * 0x04); n 52 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c int gpc, ppc, b, n = 0; n 69 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) { n 73 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c const u32 u = 0x418ea0 + (n * 0x04); n 74 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c int gpc, ppc, b, n = 0; n 90 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) { n 94 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgv100.c const u32 u = 0x418ea0 + (n * 0x04); n 92 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h #define cp_bra(c, f, s, n) _cp_bra((c), 0, CP_FLAG_##f, CP_FLAG_##f##_##s, n) n 93 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h #define cp_cal(c, f, s, n) _cp_bra((c), 1, CP_FLAG_##f, CP_FLAG_##f##_##s, n) n 72 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c pll->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH); n 83 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c val |= (pll->n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT; n 94 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c rate = clk->parent_rate * pll->n; n 140 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c u32 m, n, n2; n 152 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c n = (target_vco_f * m) / ref_clk_f; n 155 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c if (n > clk->params->max_n) n 158 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c for (; n <= n2; n++) { n 161 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c if (n < clk->params->min_n) n 163 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c if (n > clk->params->max_n) n 166 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c vco_f = ref_clk_f * n / m; n 178 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c best_n = n; n 198 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c pll->n = best_n; n 205 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c target_freq / KHZ, pll->m, pll->n, pll->pl, n 211 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c gk20a_pllg_slide(struct gk20a_clk *clk, u32 n) n 221 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c if (n == pll.n) n 230 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c pll.n = n; n 345 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c return gk20a_pllg_slide(clk, pll->n); n 348 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll); n 349 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c ret = gk20a_pllg_slide(clk, cur_pll.n); n 356 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll); n 362 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c return gk20a_pllg_slide(clk, pll->n); n 112 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h u32 n; n 229 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c u32 n; n 241 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c n = (n_eff << DFS_DET_RANGE) - det_delta; n 243 
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c if (n <= 0) { n 245 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c n = 1 << DFS_DET_RANGE; n 247 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c if (n >> DFS_DET_RANGE > p->max_n) { n 249 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c n = p->max_n << DFS_DET_RANGE; n 251 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c *n_int = n >> DFS_DET_RANGE; n 254 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c rem = ((u32)n) & MASK(DFS_DET_RANGE); n 266 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c gm20b_pllg_slide(struct gm20b_clk *clk, u32 n) n 275 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c gm20b_dvfs_calc_ndiv(clk, n, &n_int, &sdm_din); n 280 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c if (n_int == pll.base.n && sdm_din == pll.sdm_din) n 292 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c pll.base.n = n_int; n 370 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c gm20b_dvfs_calc_ndiv(clk, pll->n, &n_int, &sdm_din); n 372 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c pdiv_only = cur_pll.base.n == n_int && cur_pll.sdm_din == sdm_din && n 411 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c cur_pll.base.n = n_int; n 443 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c return gm20b_pllg_slide(clk, pll->n); n 446 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll); n 447 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c ret = gm20b_pllg_slide(clk, cur_pll.n); n 454 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll); n 460 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c return gm20b_pllg_slide(clk, pll->n); n 509 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c pll->n = nsafe; n 14 drivers/gpu/drm/nouveau/nvkm/subdev/clk/seq.h #define clk_nsec(s,n) hwsq_nsec(&(s)->base, (n)) n 102 drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c int n = (device->chipset == 0x31 ? 
2 : 4); n 105 drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c for (i = 0; i < n; i++) { n 171 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h #define ram_wait(s,r,m,d,n) ramfuc_wait(&(s)->base, (r), (m), (d), (n)) n 172 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h #define ram_nsec(s,n) ramfuc_nsec(&(s)->base, (n)) n 1444 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c struct nvbios_ramcfg *p, *n; n 1452 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c n = &cfg->bios; n 1484 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c d->rammap_11_0a_03fe |= p->rammap_11_0a_03fe != n->rammap_11_0a_03fe; n 1485 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c d->rammap_11_09_01ff |= p->rammap_11_09_01ff != n->rammap_11_09_01ff; n 1486 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c d->rammap_11_0a_0400 |= p->rammap_11_0a_0400 != n->rammap_11_0a_0400; n 1487 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c d->rammap_11_0a_0800 |= p->rammap_11_0a_0800 != n->rammap_11_0a_0800; n 1488 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c d->rammap_11_0b_01f0 |= p->rammap_11_0b_01f0 != n->rammap_11_0b_01f0; n 1489 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c d->rammap_11_0b_0200 |= p->rammap_11_0b_0200 != n->rammap_11_0b_0200; n 1490 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c d->rammap_11_0d |= p->rammap_11_0d != n->rammap_11_0d; n 1491 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c d->rammap_11_0f |= p->rammap_11_0f != n->rammap_11_0f; n 1492 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c d->rammap_11_0e |= p->rammap_11_0e != n->rammap_11_0e; n 1493 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c d->rammap_11_0b_0800 |= p->rammap_11_0b_0800 != n->rammap_11_0b_0800; n 1494 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c d->rammap_11_0b_0400 |= p->rammap_11_0b_0400 != n->rammap_11_0b_0400; n 1495 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c d->ramcfg_11_01_01 |= p->ramcfg_11_01_01 != n->ramcfg_11_01_01; n 1496 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c d->ramcfg_11_01_02 |= p->ramcfg_11_01_02 != n->ramcfg_11_01_02; n 1497 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c d->ramcfg_11_01_10 |= p->ramcfg_11_01_10 != n->ramcfg_11_01_10; n 1498 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c d->ramcfg_11_02_03 |= p->ramcfg_11_02_03 != n->ramcfg_11_02_03; n 1499 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c d->ramcfg_11_08_20 |= p->ramcfg_11_08_20 != n->ramcfg_11_08_20; n 1500 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c d->timing_20_30_07 |= p->timing_20_30_07 != n->timing_20_30_07; n 16 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h #define ram_nsec(s,n) hwsq_nsec(&(s)->base, (n)) n 9 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h #define NVKM_I2C_PAD_HYBRID(n) /* 'n' is hw pad index */ (n) n 10 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h #define NVKM_I2C_PAD_CCB(n) /* 'n' is ccb index */ ((n) + 0x100) n 11 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h #define NVKM_I2C_PAD_EXT(n) /* 'n' is dcb external encoder type */ ((n) + 0x200) n 96 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c u32 n, d; n 100 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c n = f; n 103 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c n = nvkm_rd32(device, NV04_PTIMER_NUMERATOR); n 105 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c if (!n || !d) { n 106 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c n = 1; n 113 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c while (((n % 5) == 0) && ((d % 5) == 0)) { n 114 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c n /= 5; n 118 
drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c while (((n % 2) == 0) && ((d % 2) == 0)) { n 119 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c n /= 2; n 123 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c while (n > 0xffff || d > 0xffff) { n 124 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c n >>= 1; n 129 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c nvkm_debug(subdev, "numerator : %08x\n", n); n 131 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c nvkm_debug(subdev, "timer frequency : %dHz\n", f * d / n); n 133 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c nvkm_wr32(device, NV04_PTIMER_NUMERATOR, n); n 33 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c u32 n, d; n 37 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c n = f; n 40 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c n = nvkm_rd32(device, NV04_PTIMER_NUMERATOR); n 42 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c if (!n || !d) { n 43 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c n = 1; n 50 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c while (((n % 5) == 0) && ((d % 5) == 0)) { n 51 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c n /= 5; n 55 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c while (((n % 2) == 0) && ((d % 2) == 0)) { n 56 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c n /= 2; n 60 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c while (n > 0xffff || d > 0xffff) { n 61 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c n >>= 1; n 66 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c nvkm_debug(subdev, "numerator : %08x\n", n); n 68 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c nvkm_debug(subdev, "timer frequency : %dHz\n", f * d / n); n 70 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c nvkm_wr32(device, NV04_PTIMER_NUMERATOR, n); n 33 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c u32 m = 1, n, d; n 37 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c n = f; n 39 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c while (n < (d * 2)) { n 40 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c n += (n / m); n 45 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c while (((n % 5) == 0) && ((d % 5) == 0)) { n 46 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c n /= 5; n 50 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c while (((n % 2) == 0) && ((d % 2) == 0)) { n 51 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c n /= 2; n 55 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c while (n > 0xffff || d > 0xffff) { n 56 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c n >>= 1; n 62 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c nvkm_debug(subdev, "numerator : %08x\n", n); n 64 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c nvkm_debug(subdev, "timer frequency : %dHz\n", (f * m) * d / n); n 67 drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c nvkm_wr32(device, NV04_PTIMER_NUMERATOR, n); n 147 drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c int n = 0; n 150 drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c if (info->engine >= 0 && info->runlist >= 0 && n++ == index) { n 317 drivers/gpu/drm/omapdrm/dss/base.c struct device_node *n; n 337 drivers/gpu/drm/omapdrm/dss/base.c n = of_get_child_by_name(node, "ports"); n 338 drivers/gpu/drm/omapdrm/dss/base.c if (!n) n 339 drivers/gpu/drm/omapdrm/dss/base.c n = of_get_child_by_name(node, "port"); n 340 drivers/gpu/drm/omapdrm/dss/base.c if (!n) n 343 drivers/gpu/drm/omapdrm/dss/base.c of_node_put(n); n 345 drivers/gpu/drm/omapdrm/dss/base.c n = NULL; n 346 drivers/gpu/drm/omapdrm/dss/base.c while ((n = of_graph_get_next_endpoint(node, n)) != NULL) { n 347 
drivers/gpu/drm/omapdrm/dss/base.c struct device_node *pn = of_graph_get_remote_port_parent(n); n 37 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_BA0(n) (DISPC_OVL_BASE(n) + \ n 38 drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_BA0_OFFSET(n)) n 39 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_BA1(n) (DISPC_OVL_BASE(n) + \ n 40 drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_BA1_OFFSET(n)) n 41 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_BA0_UV(n) (DISPC_OVL_BASE(n) + \ n 42 drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_BA0_UV_OFFSET(n)) n 43 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_BA1_UV(n) (DISPC_OVL_BASE(n) + \ n 44 drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_BA1_UV_OFFSET(n)) n 45 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_POSITION(n) (DISPC_OVL_BASE(n) + \ n 46 drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_POS_OFFSET(n)) n 47 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_SIZE(n) (DISPC_OVL_BASE(n) + \ n 48 drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_SIZE_OFFSET(n)) n 49 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_ATTRIBUTES(n) (DISPC_OVL_BASE(n) + \ n 50 drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_ATTR_OFFSET(n)) n 51 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_ATTRIBUTES2(n) (DISPC_OVL_BASE(n) + \ n 52 drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_ATTR2_OFFSET(n)) n 53 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_FIFO_THRESHOLD(n) (DISPC_OVL_BASE(n) + \ n 54 drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_FIFO_THRESH_OFFSET(n)) n 55 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_FIFO_SIZE_STATUS(n) (DISPC_OVL_BASE(n) + \ n 56 drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_FIFO_SIZE_STATUS_OFFSET(n)) n 57 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_ROW_INC(n) (DISPC_OVL_BASE(n) + \ n 58 drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_ROW_INC_OFFSET(n)) n 59 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_PIXEL_INC(n) (DISPC_OVL_BASE(n) + \ n 60 drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_PIX_INC_OFFSET(n)) n 61 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_WINDOW_SKIP(n) (DISPC_OVL_BASE(n) + \ n 62 drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_WINDOW_SKIP_OFFSET(n)) n 63 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_TABLE_BA(n) (DISPC_OVL_BASE(n) + \ n 64 drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_TABLE_BA_OFFSET(n)) n 65 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_FIR(n) (DISPC_OVL_BASE(n) + \ n 66 drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_FIR_OFFSET(n)) n 67 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_FIR2(n) (DISPC_OVL_BASE(n) + \ n 68 drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_FIR2_OFFSET(n)) n 69 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_PICTURE_SIZE(n) (DISPC_OVL_BASE(n) + \ n 70 drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_PIC_SIZE_OFFSET(n)) n 71 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_ACCU0(n) (DISPC_OVL_BASE(n) + \ n 72 drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_ACCU0_OFFSET(n)) n 73 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_ACCU1(n) (DISPC_OVL_BASE(n) + \ n 74 drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_ACCU1_OFFSET(n)) n 75 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_ACCU2_0(n) (DISPC_OVL_BASE(n) + \ n 76 drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_ACCU2_0_OFFSET(n)) n 77 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_ACCU2_1(n) (DISPC_OVL_BASE(n) + \ n 78 drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_ACCU2_1_OFFSET(n)) n 79 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_FIR_COEF_H(n, i) (DISPC_OVL_BASE(n) + \ n 80 
drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_FIR_COEF_H_OFFSET(n, i)) n 81 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_FIR_COEF_HV(n, i) (DISPC_OVL_BASE(n) + \ n 82 drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_FIR_COEF_HV_OFFSET(n, i)) n 83 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_FIR_COEF_H2(n, i) (DISPC_OVL_BASE(n) + \ n 84 drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_FIR_COEF_H2_OFFSET(n, i)) n 85 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_FIR_COEF_HV2(n, i) (DISPC_OVL_BASE(n) + \ n 86 drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_FIR_COEF_HV2_OFFSET(n, i)) n 87 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_CONV_COEF(n, i) (DISPC_OVL_BASE(n) + \ n 88 drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_CONV_COEF_OFFSET(n, i)) n 89 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_FIR_COEF_V(n, i) (DISPC_OVL_BASE(n) + \ n 90 drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_FIR_COEF_V_OFFSET(n, i)) n 91 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_FIR_COEF_V2(n, i) (DISPC_OVL_BASE(n) + \ n 92 drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_FIR_COEF_V2_OFFSET(n, i)) n 93 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_PRELOAD(n) (DISPC_OVL_BASE(n) + \ n 94 drivers/gpu/drm/omapdrm/dss/dispc.h DISPC_PRELOAD_OFFSET(n)) n 95 drivers/gpu/drm/omapdrm/dss/dispc.h #define DISPC_OVL_MFLAG_THRESHOLD(n) DISPC_MFLAG_THRESHOLD_OFFSET(n) n 190 drivers/gpu/drm/omapdrm/dss/dpi.c static bool dpi_calc_pll_cb(int n, int m, unsigned long fint, n 196 drivers/gpu/drm/omapdrm/dss/dpi.c ctx->pll_cinfo.n = n; n 79 drivers/gpu/drm/omapdrm/dss/dsi.c #define DSI_VC_CTRL(n) DSI_REG(DSI_PROTO, 0x0100 + (n * 0x20)) n 80 drivers/gpu/drm/omapdrm/dss/dsi.c #define DSI_VC_TE(n) DSI_REG(DSI_PROTO, 0x0104 + (n * 0x20)) n 81 drivers/gpu/drm/omapdrm/dss/dsi.c #define DSI_VC_LONG_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0108 + (n * 0x20)) n 82 drivers/gpu/drm/omapdrm/dss/dsi.c #define DSI_VC_LONG_PACKET_PAYLOAD(n) DSI_REG(DSI_PROTO, 0x010C + (n * 0x20)) n 83 drivers/gpu/drm/omapdrm/dss/dsi.c #define DSI_VC_SHORT_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0110 + (n * 0x20)) n 84 drivers/gpu/drm/omapdrm/dss/dsi.c #define DSI_VC_IRQSTATUS(n) DSI_REG(DSI_PROTO, 0x0118 + (n * 0x20)) n 85 drivers/gpu/drm/omapdrm/dss/dsi.c #define DSI_VC_IRQENABLE(n) DSI_REG(DSI_PROTO, 0x011C + (n * 0x20)) n 1400 drivers/gpu/drm/omapdrm/dss/dsi.c seq_printf(s, "Fint\t\t%-16lun %u\n", cinfo->fint, cinfo->n); n 4331 drivers/gpu/drm/omapdrm/dss/dsi.c static bool dsi_cm_calc_pll_cb(int n, int m, unsigned long fint, n 4337 drivers/gpu/drm/omapdrm/dss/dsi.c ctx->dsi_cinfo.n = n; n 4632 drivers/gpu/drm/omapdrm/dss/dsi.c static bool dsi_vm_calc_pll_cb(int n, int m, unsigned long fint, n 4638 drivers/gpu/drm/omapdrm/dss/dsi.c ctx->dsi_cinfo.n = n; n 136 drivers/gpu/drm/omapdrm/dss/dss.h u16 n; n 441 drivers/gpu/drm/omapdrm/dss/dss.h typedef bool (*dss_pll_calc_func)(int n, int m, unsigned long fint, n 213 drivers/gpu/drm/omapdrm/dss/hdmi.h u32 n; n 335 drivers/gpu/drm/omapdrm/dss/hdmi.h int hdmi_compute_acr(u32 pclk, u32 sample_freq, u32 *n, u32 *cts); n 525 drivers/gpu/drm/omapdrm/dss/hdmi4_core.c REG_FLD_MOD(av_base, HDMI_CORE_AV_N_SVAL1, cfg->n, 7, 0); n 526 drivers/gpu/drm/omapdrm/dss/hdmi4_core.c REG_FLD_MOD(av_base, HDMI_CORE_AV_N_SVAL2, cfg->n >> 8, 7, 0); n 527 drivers/gpu/drm/omapdrm/dss/hdmi4_core.c REG_FLD_MOD(av_base, HDMI_CORE_AV_N_SVAL3, cfg->n >> 16, 7, 0); n 678 drivers/gpu/drm/omapdrm/dss/hdmi4_core.c int err, n, cts, channel_count; n 740 drivers/gpu/drm/omapdrm/dss/hdmi4_core.c err = hdmi_compute_acr(pclk, fs_nr, &n, &cts); n 
743 drivers/gpu/drm/omapdrm/dss/hdmi4_core.c acore.n = n; n 142 drivers/gpu/drm/omapdrm/dss/hdmi4_core.h #define HDMI_CORE_AV_AVI_DBYTE(n) (n * 4 + 0x110) n 147 drivers/gpu/drm/omapdrm/dss/hdmi4_core.h #define HDMI_CORE_AV_SPD_DBYTE(n) (n * 4 + 0x190) n 152 drivers/gpu/drm/omapdrm/dss/hdmi4_core.h #define HDMI_CORE_AV_AUD_DBYTE(n) (n * 4 + 0x210) n 157 drivers/gpu/drm/omapdrm/dss/hdmi4_core.h #define HDMI_CORE_AV_MPEG_DBYTE(n) (n * 4 + 0x290) n 158 drivers/gpu/drm/omapdrm/dss/hdmi4_core.h #define HDMI_CORE_AV_GEN_DBYTE(n) (n * 4 + 0x300) n 160 drivers/gpu/drm/omapdrm/dss/hdmi4_core.h #define HDMI_CORE_AV_GEN2_DBYTE(n) (n * 4 + 0x380) n 189 drivers/gpu/drm/omapdrm/dss/hdmi5_core.c int r, n, i; n 201 drivers/gpu/drm/omapdrm/dss/hdmi5_core.c n = edid[0x7e]; n 203 drivers/gpu/drm/omapdrm/dss/hdmi5_core.c if (n > max_ext_blocks) n 204 drivers/gpu/drm/omapdrm/dss/hdmi5_core.c n = max_ext_blocks; n 206 drivers/gpu/drm/omapdrm/dss/hdmi5_core.c for (i = 1; i <= n; i++) { n 652 drivers/gpu/drm/omapdrm/dss/hdmi5_core.c REG_FLD_MOD(base, HDMI_CORE_AUD_N1, cfg->n, 7, 0); n 653 drivers/gpu/drm/omapdrm/dss/hdmi5_core.c REG_FLD_MOD(base, HDMI_CORE_AUD_N2, cfg->n >> 8, 7, 0); n 654 drivers/gpu/drm/omapdrm/dss/hdmi5_core.c REG_FLD_MOD(base, HDMI_CORE_AUD_N3, cfg->n >> 16, 3, 0); n 801 drivers/gpu/drm/omapdrm/dss/hdmi5_core.c int err, n, cts, channel_count; n 844 drivers/gpu/drm/omapdrm/dss/hdmi5_core.c err = hdmi_compute_acr(pclk, fs_nr, &n, &cts); n 845 drivers/gpu/drm/omapdrm/dss/hdmi5_core.c core_cfg.n = n; n 104 drivers/gpu/drm/omapdrm/dss/hdmi5_core.h #define HDMI_CORE_FC_VSDPAYLOAD(n) (n * 4 + 0x040C8) n 105 drivers/gpu/drm/omapdrm/dss/hdmi5_core.h #define HDMI_CORE_FC_SPDVENDORNAME(n) (n * 4 + 0x04128) n 106 drivers/gpu/drm/omapdrm/dss/hdmi5_core.h #define HDMI_CORE_FC_SPDPRODUCTNAME(n) (n * 4 + 0x04148) n 112 drivers/gpu/drm/omapdrm/dss/hdmi5_core.h #define HDMI_CORE_FC_AUDSCHNLS(n) (n * 4 + 0x0419C) n 116 drivers/gpu/drm/omapdrm/dss/hdmi5_core.h #define HDMI_CORE_FC_ACP(n) ((16-n) * 4 + 0x04208) n 118 drivers/gpu/drm/omapdrm/dss/hdmi5_core.h #define HDMI_CORE_FC_ISCR1(n) ((16-n) * 4 + 0x0424C) n 119 drivers/gpu/drm/omapdrm/dss/hdmi5_core.h #define HDMI_CORE_FC_ISCR2(n) ((15-n) * 4 + 0x0428C) n 125 drivers/gpu/drm/omapdrm/dss/hdmi5_core.h #define HDMI_CORE_FC_RDRB(n) (n * 4 + 0x042E0) n 144 drivers/gpu/drm/omapdrm/dss/hdmi5_core.h #define HDMI_CORE_FC_GMD_PB(n) (n * 4 + 0x04414) n 52 drivers/gpu/drm/omapdrm/dss/hdmi_common.c int hdmi_compute_acr(u32 pclk, u32 sample_freq, u32 *n, u32 *cts) n 57 drivers/gpu/drm/omapdrm/dss/hdmi_common.c if (n == NULL || cts == NULL) n 95 drivers/gpu/drm/omapdrm/dss/hdmi_common.c *n = 8192; n 98 drivers/gpu/drm/omapdrm/dss/hdmi_common.c *n = 12544; n 101 drivers/gpu/drm/omapdrm/dss/hdmi_common.c *n = 8192; n 104 drivers/gpu/drm/omapdrm/dss/hdmi_common.c *n = 25088; n 107 drivers/gpu/drm/omapdrm/dss/hdmi_common.c *n = 16384; n 110 drivers/gpu/drm/omapdrm/dss/hdmi_common.c *n = 50176; n 113 drivers/gpu/drm/omapdrm/dss/hdmi_common.c *n = 32768; n 121 drivers/gpu/drm/omapdrm/dss/hdmi_common.c *n = 4096; n 124 drivers/gpu/drm/omapdrm/dss/hdmi_common.c *n = 6272; n 127 drivers/gpu/drm/omapdrm/dss/hdmi_common.c *n = 6144; n 130 drivers/gpu/drm/omapdrm/dss/hdmi_common.c *n = 12544; n 133 drivers/gpu/drm/omapdrm/dss/hdmi_common.c *n = 12288; n 136 drivers/gpu/drm/omapdrm/dss/hdmi_common.c *n = 25088; n 139 drivers/gpu/drm/omapdrm/dss/hdmi_common.c *n = 24576; n 146 drivers/gpu/drm/omapdrm/dss/hdmi_common.c *cts = (pclk/1000) * (*n / 128) * deep_color / (sample_freq / 10); n 
111 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c struct dss_conv_node *n = kmalloc(sizeof(*n), GFP_KERNEL); n 112 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c if (n) { n 113 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c n->node = node; n 114 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c n->root = root; n 115 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c list_add(&n->list, &dss_conv_list); n 121 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c struct dss_conv_node *n; n 123 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c list_for_each_entry(n, &dss_conv_list, list) { n 124 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c if (n->node == node) n 133 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c struct device_node *n; n 141 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c n = of_get_child_by_name(node, "ports"); n 142 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c if (!n) n 143 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c n = of_get_child_by_name(node, "port"); n 144 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c if (!n) n 147 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c of_node_put(n); n 149 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c n = NULL; n 150 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c while ((n = of_graph_get_next_endpoint(node, n)) != NULL) { n 153 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c pn = of_graph_get_remote_port_parent(n); n 207 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c struct dss_conv_node *n; n 209 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c n = list_first_entry(&dss_conv_list, struct dss_conv_node, n 212 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c if (of_match_node(omapdss_of_fixups_whitelist, n->node)) n 213 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c omapdss_omapify_node(n->node); n 215 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c list_del(&n->list); n 216 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c of_node_put(n->node); n 217 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c kfree(n); n 210 drivers/gpu/drm/omapdrm/dss/pll.c int n, n_start, n_stop, n_inc; n 232 drivers/gpu/drm/omapdrm/dss/pll.c for (n = n_start; n != n_stop; n += n_inc) { n 233 drivers/gpu/drm/omapdrm/dss/pll.c fint = clkin / n; n 250 drivers/gpu/drm/omapdrm/dss/pll.c if (func(n, m, fint, clkdco, data)) n 272 drivers/gpu/drm/omapdrm/dss/pll.c unsigned int n, m, mf, m2, sd; n 278 drivers/gpu/drm/omapdrm/dss/pll.c n = DIV_ROUND_UP(clkin, hw->fint_max); n 279 drivers/gpu/drm/omapdrm/dss/pll.c fint = clkin / n; n 307 drivers/gpu/drm/omapdrm/dss/pll.c n, m, mf, m2, sd); n 310 drivers/gpu/drm/omapdrm/dss/pll.c cinfo->n = n; n 401 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, cinfo->n - 1, hw->n_msb, hw->n_lsb); /* PLL_REGN */ n 527 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, cinfo->n - 1, 8, 1); /* PLL_REGN */ n 173 drivers/gpu/drm/omapdrm/omap_connector.c int n; n 190 drivers/gpu/drm/omapdrm/omap_connector.c n = drm_add_edid_modes(connector, edid); n 195 drivers/gpu/drm/omapdrm/omap_connector.c return n; n 385 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c int n = i + roll; n 386 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c if (n >= npages) n 387 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c n -= npages; n 388 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c data[i] = (pages && pages[n]) ? 
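The HDMI audio hits in this listing (mtk_hdmi.c hdmi_expected_cts() above, msm hdmi_audio.c above, omapdrm hdmi_common.c hdmi_compute_acr() above, and radeon_audio.c further below) all program the same Audio Clock Regeneration pair: the sink recovers 128 * fs from f_TMDS * N / CTS, so the source picks the HDMI-recommended N for the sample rate and sets CTS = f_TMDS * N / (128 * fs). What follows is a minimal user-space sketch of that shared arithmetic, not any driver's code; the helper names are illustrative, and plain round-to-nearest division stands in for the kernel's DIV_ROUND_CLOSEST_ULL() seen in the mtk excerpt.

#include <stdint.h>
#include <stdio.h>

/* HDMI-recommended N for the common audio sample rates (matches the
 * non-deep-colour table visible in hdmi_compute_acr() above). */
static uint32_t acr_recommended_n(uint32_t sample_rate)
{
	switch (sample_rate) {
	case 32000:  return 4096;
	case 44100:  return 6272;
	case 48000:  return 6144;
	case 88200:  return 6272 * 2;
	case 96000:  return 6144 * 2;
	case 176400: return 6272 * 4;
	case 192000: return 6144 * 4;
	default:     return 6144; /* fall back to the 48 kHz value */
	}
}

/* CTS = f_TMDS * N / (128 * fs), rounded to nearest. */
static uint32_t acr_expected_cts(uint32_t tmds_hz, uint32_t sample_rate, uint32_t n)
{
	uint64_t num = (uint64_t)tmds_hz * n;
	uint64_t den = 128ull * sample_rate;

	return (uint32_t)((num + den / 2) / den);
}

int main(void)
{
	const uint32_t tmds = 148500000; /* 1080p60 TMDS clock */
	const uint32_t rates[] = { 32000, 44100, 48000 };

	for (size_t i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
		uint32_t n = acr_recommended_n(rates[i]);

		printf("fs=%u Hz  N=%u  CTS=%u\n",
		       (unsigned)rates[i], (unsigned)n,
		       (unsigned)acr_expected_cts(tmds, rates[i], n));
	}
	return 0;
}

With tmds = 148500000 this prints the familiar pairs N=4096/CTS=148500, N=6272/CTS=165000 and N=6144/CTS=148500. The drivers listed here additionally handle deep-colour TMDS rates and the x2/x4 sample-rate multiples; see the alternate N table in hdmi_compute_acr() and the n[1] * 2 / n[2] * 4 returns in the mtk_hdmi.c hits above.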
n 389 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c page_to_phys(pages[n]) : engine->dmm->dummy_pa; n 83 drivers/gpu/drm/omapdrm/omap_fb.c const struct drm_format_info *format, int n, int x, int y) n 86 drivers/gpu/drm/omapdrm/omap_fb.c struct plane *plane = &omap_fb->planes[n]; n 89 drivers/gpu/drm/omapdrm/omap_fb.c offset = fb->offsets[n] n 90 drivers/gpu/drm/omapdrm/omap_fb.c + (x * format->cpp[n] / (n == 0 ? 1 : format->hsub)) n 91 drivers/gpu/drm/omapdrm/omap_fb.c + (y * fb->pitches[n] / (n == 0 ? 1 : format->vsub)); n 230 drivers/gpu/drm/omapdrm/omap_fb.c int ret, i, n = fb->format->num_planes; n 240 drivers/gpu/drm/omapdrm/omap_fb.c for (i = 0; i < n; i++) { n 270 drivers/gpu/drm/omapdrm/omap_fb.c int i, n = fb->format->num_planes; n 281 drivers/gpu/drm/omapdrm/omap_fb.c for (i = 0; i < n; i++) { n 293 drivers/gpu/drm/omapdrm/omap_fb.c int i, n = fb->format->num_planes; n 298 drivers/gpu/drm/omapdrm/omap_fb.c for (i = 0; i < n; i++) { n 300 drivers/gpu/drm/omapdrm/omap_fb.c i, fb->offsets[n], fb->pitches[i]); n 171 drivers/gpu/drm/omapdrm/omap_gem.c int n = priv->usergart[fmt].height; n 172 drivers/gpu/drm/omapdrm/omap_gem.c size_t size = PAGE_SIZE * n; n 180 drivers/gpu/drm/omapdrm/omap_gem.c for (i = n; i > 0; i--) { n 391 drivers/gpu/drm/omapdrm/omap_gem.c const int n = priv->usergart[fmt].height; n 433 drivers/gpu/drm/omapdrm/omap_gem.c slots = min(slots - (off << n_shift), n); n 448 drivers/gpu/drm/omapdrm/omap_gem.c sizeof(struct page *) * (n - slots)); n 462 drivers/gpu/drm/omapdrm/omap_gem.c for (i = n; i > 0; i--) { n 211 drivers/gpu/drm/omapdrm/omap_irq.c struct omap_irq_wait *wait, *n; n 242 drivers/gpu/drm/omapdrm/omap_irq.c list_for_each_entry_safe(wait, n, &priv->wait_list, node) { n 26 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_RAMCTRL_EPF(n) (((n) & 3) << 4) n 30 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_RGBCTRL_RCM(n) (((n) & 3) << 5) n 34 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_RGBCTRL_VBP(n) ((n) & 0x7f) n 35 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_RGBCTRL_HBP(n) ((n) & 0x1f) n 38 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_PORCTRL_IDLE_BP(n) (((n) & 0xf) << 4) n 39 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_PORCTRL_IDLE_FP(n) ((n) & 0xf) n 40 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_PORCTRL_PARTIAL_BP(n) (((n) & 0xf) << 4) n 41 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_PORCTRL_PARTIAL_FP(n) ((n) & 0xf) n 44 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_GCTRL_VGHS(n) (((n) & 7) << 4) n 45 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_GCTRL_VGLS(n) ((n) & 7) n 65 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_PWCTRL1_AVDD(n) (((n) & 3) << 6) n 66 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_PWCTRL1_AVCL(n) (((n) & 3) << 4) n 67 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_PWCTRL1_VDS(n) ((n) & 3) n 70 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_PVGAMCTRL_JP0(n) (((n) & 3) << 4) n 71 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_PVGAMCTRL_JP1(n) (((n) & 3) << 4) n 72 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_PVGAMCTRL_VP0(n) ((n) & 0xf) n 73 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_PVGAMCTRL_VP1(n) ((n) & 0x3f) n 74 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_PVGAMCTRL_VP2(n) ((n) & 0x3f) n 75 
drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_PVGAMCTRL_VP4(n) ((n) & 0x1f) n 76 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_PVGAMCTRL_VP6(n) ((n) & 0x1f) n 77 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_PVGAMCTRL_VP13(n) ((n) & 0xf) n 78 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_PVGAMCTRL_VP20(n) ((n) & 0x7f) n 79 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_PVGAMCTRL_VP27(n) ((n) & 7) n 80 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_PVGAMCTRL_VP36(n) (((n) & 7) << 4) n 81 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_PVGAMCTRL_VP43(n) ((n) & 0x7f) n 82 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_PVGAMCTRL_VP50(n) ((n) & 0xf) n 83 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_PVGAMCTRL_VP57(n) ((n) & 0x1f) n 84 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_PVGAMCTRL_VP59(n) ((n) & 0x1f) n 85 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_PVGAMCTRL_VP61(n) ((n) & 0x3f) n 86 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_PVGAMCTRL_VP62(n) ((n) & 0x3f) n 87 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_PVGAMCTRL_VP63(n) (((n) & 0xf) << 4) n 90 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_NVGAMCTRL_JN0(n) (((n) & 3) << 4) n 91 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_NVGAMCTRL_JN1(n) (((n) & 3) << 4) n 92 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_NVGAMCTRL_VN0(n) ((n) & 0xf) n 93 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_NVGAMCTRL_VN1(n) ((n) & 0x3f) n 94 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_NVGAMCTRL_VN2(n) ((n) & 0x3f) n 95 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_NVGAMCTRL_VN4(n) ((n) & 0x1f) n 96 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_NVGAMCTRL_VN6(n) ((n) & 0x1f) n 97 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_NVGAMCTRL_VN13(n) ((n) & 0xf) n 98 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_NVGAMCTRL_VN20(n) ((n) & 0x7f) n 99 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_NVGAMCTRL_VN27(n) ((n) & 7) n 100 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_NVGAMCTRL_VN36(n) (((n) & 7) << 4) n 101 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_NVGAMCTRL_VN43(n) ((n) & 0x7f) n 102 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_NVGAMCTRL_VN50(n) ((n) & 0xf) n 103 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_NVGAMCTRL_VN57(n) ((n) & 0x1f) n 104 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_NVGAMCTRL_VN59(n) ((n) & 0x1f) n 105 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_NVGAMCTRL_VN61(n) ((n) & 0x3f) n 106 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_NVGAMCTRL_VN62(n) ((n) & 0x3f) n 107 drivers/gpu/drm/panel/panel-sitronix-st7789v.c #define ST7789V_NVGAMCTRL_VN63(n) (((n) & 0xf) << 4) n 535 drivers/gpu/drm/panfrost/panfrost_drv.c #define PANFROST_IOCTL(n, func, flags) \ n 536 drivers/gpu/drm/panfrost/panfrost_drv.c DRM_IOCTL_DEF_DRV(PANFROST_##n, panfrost_ioctl_##func, flags) n 79 drivers/gpu/drm/panfrost/panfrost_regs.h #define GPU_TEXTURE_FEATURES(n) (0x0B0 + ((n) * 4)) n 80 drivers/gpu/drm/panfrost/panfrost_regs.h #define GPU_JS_FEATURES(n) (0x0C0 + ((n) * 4)) n 222 drivers/gpu/drm/panfrost/panfrost_regs.h #define JS_HEAD_LO(n) (JS_BASE + ((n) * 
0x80) + 0x00) n 223 drivers/gpu/drm/panfrost/panfrost_regs.h #define JS_HEAD_HI(n) (JS_BASE + ((n) * 0x80) + 0x04) n 224 drivers/gpu/drm/panfrost/panfrost_regs.h #define JS_TAIL_LO(n) (JS_BASE + ((n) * 0x80) + 0x08) n 225 drivers/gpu/drm/panfrost/panfrost_regs.h #define JS_TAIL_HI(n) (JS_BASE + ((n) * 0x80) + 0x0c) n 226 drivers/gpu/drm/panfrost/panfrost_regs.h #define JS_AFFINITY_LO(n) (JS_BASE + ((n) * 0x80) + 0x10) n 227 drivers/gpu/drm/panfrost/panfrost_regs.h #define JS_AFFINITY_HI(n) (JS_BASE + ((n) * 0x80) + 0x14) n 228 drivers/gpu/drm/panfrost/panfrost_regs.h #define JS_CONFIG(n) (JS_BASE + ((n) * 0x80) + 0x18) n 229 drivers/gpu/drm/panfrost/panfrost_regs.h #define JS_XAFFINITY(n) (JS_BASE + ((n) * 0x80) + 0x1c) n 230 drivers/gpu/drm/panfrost/panfrost_regs.h #define JS_COMMAND(n) (JS_BASE + ((n) * 0x80) + 0x20) n 231 drivers/gpu/drm/panfrost/panfrost_regs.h #define JS_STATUS(n) (JS_BASE + ((n) * 0x80) + 0x24) n 232 drivers/gpu/drm/panfrost/panfrost_regs.h #define JS_HEAD_NEXT_LO(n) (JS_BASE + ((n) * 0x80) + 0x40) n 233 drivers/gpu/drm/panfrost/panfrost_regs.h #define JS_HEAD_NEXT_HI(n) (JS_BASE + ((n) * 0x80) + 0x44) n 234 drivers/gpu/drm/panfrost/panfrost_regs.h #define JS_AFFINITY_NEXT_LO(n) (JS_BASE + ((n) * 0x80) + 0x50) n 235 drivers/gpu/drm/panfrost/panfrost_regs.h #define JS_AFFINITY_NEXT_HI(n) (JS_BASE + ((n) * 0x80) + 0x54) n 236 drivers/gpu/drm/panfrost/panfrost_regs.h #define JS_CONFIG_NEXT(n) (JS_BASE + ((n) * 0x80) + 0x58) n 237 drivers/gpu/drm/panfrost/panfrost_regs.h #define JS_COMMAND_NEXT(n) (JS_BASE + ((n) * 0x80) + 0x60) n 238 drivers/gpu/drm/panfrost/panfrost_regs.h #define JS_FLUSH_ID_NEXT(n) (JS_BASE + ((n) * 0x80) + 0x70) n 249 drivers/gpu/drm/panfrost/panfrost_regs.h #define JS_CONFIG_THREAD_PRI(n) ((n) << 16) n 306 drivers/gpu/drm/qxl/qxl_object.c struct qxl_bo *bo, *n; n 311 drivers/gpu/drm/qxl/qxl_object.c list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) { n 872 drivers/gpu/drm/r128/r128_cce.c int r128_wait_ring(drm_r128_private_t *dev_priv, int n) n 879 drivers/gpu/drm/r128/r128_cce.c if (ring->space >= n) n 162 drivers/gpu/drm/r128/r128_drv.h extern int r128_wait_ring(drm_r128_private_t *dev_priv, int n); n 418 drivers/gpu/drm/r128/r128_drv.h #define CCE_PACKET0(reg, n) (R128_CCE_PACKET0 | \ n 419 drivers/gpu/drm/r128/r128_drv.h ((n) << 16) | ((reg) >> 2)) n 423 drivers/gpu/drm/r128/r128_drv.h #define CCE_PACKET3(pkt, n) (R128_CCE_PACKET3 | \ n 424 drivers/gpu/drm/r128/r128_drv.h (pkt) | ((n) << 16)) n 489 drivers/gpu/drm/r128/r128_drv.h #define BEGIN_RING(n) do { \ n 491 drivers/gpu/drm/r128/r128_drv.h DRM_INFO("BEGIN_RING(%d)\n", (n)); \ n 492 drivers/gpu/drm/r128/r128_drv.h if (dev_priv->ring.space <= (n) * sizeof(u32)) { \ n 494 drivers/gpu/drm/r128/r128_drv.h r128_wait_ring(dev_priv, (n) * sizeof(u32)); \ n 496 drivers/gpu/drm/r128/r128_drv.h _nr = n; dev_priv->ring.space -= (n) * sizeof(u32); \ n 101 drivers/gpu/drm/r128/r128_ioc32.c int n; n 118 drivers/gpu/drm/r128/r128_ioc32.c depth.n = depth32.n; n 901 drivers/gpu/drm/r128/r128_state.c count = depth->n; n 910 drivers/gpu/drm/r128/r128_state.c buffer_size = depth->n * sizeof(u32); n 915 drivers/gpu/drm/r128/r128_state.c mask_size = depth->n; n 986 drivers/gpu/drm/r128/r128_state.c count = depth->n; n 1000 drivers/gpu/drm/r128/r128_state.c buffer_size = depth->n * sizeof(u32); n 1009 drivers/gpu/drm/r128/r128_state.c mask_size = depth->n; n 1080 drivers/gpu/drm/r128/r128_state.c count = depth->n; n 1122 drivers/gpu/drm/r128/r128_state.c count = depth->n; n 94 
drivers/gpu/drm/radeon/atom.c static void debug_print_spaces(int n) n 96 drivers/gpu/drm/radeon/atom.c while (n--) n 184 drivers/gpu/drm/radeon/ci_dpm.c static int ci_set_power_limit(struct radeon_device *rdev, u32 n); n 1698 drivers/gpu/drm/radeon/ci_dpm.c static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n) n 1704 drivers/gpu/drm/radeon/ci_dpm.c ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n); n 1712 drivers/gpu/drm/radeon/ci_dpm.c static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n) n 1718 drivers/gpu/drm/radeon/ci_dpm.c ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n); n 1726 drivers/gpu/drm/radeon/ci_dpm.c static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n) n 1732 drivers/gpu/drm/radeon/ci_dpm.c ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n); n 1740 drivers/gpu/drm/radeon/ci_dpm.c static int ci_set_power_limit(struct radeon_device *rdev, u32 n) n 1746 drivers/gpu/drm/radeon/ci_dpm.c ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n); n 1682 drivers/gpu/drm/radeon/cikd.h #define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \ n 1684 drivers/gpu/drm/radeon/cikd.h ((n) & 0x3FFF) << 16) n 1691 drivers/gpu/drm/radeon/cikd.h #define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \ n 1693 drivers/gpu/drm/radeon/cikd.h ((n) & 0x3FFF) << 16) n 1695 drivers/gpu/drm/radeon/cikd.h #define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1) n 1416 drivers/gpu/drm/radeon/evergreend.h #define DMA_PACKET(cmd, sub_cmd, n) ((((cmd) & 0xF) << 28) | \ n 1418 drivers/gpu/drm/radeon/evergreend.h (((n) & 0xFFFFF) << 0)) n 1534 drivers/gpu/drm/radeon/evergreend.h #define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \ n 1536 drivers/gpu/drm/radeon/evergreend.h ((n) & 0x3FFF) << 16) n 1543 drivers/gpu/drm/radeon/evergreend.h #define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \ n 1545 drivers/gpu/drm/radeon/evergreend.h ((n) & 0x3FFF) << 16) n 146 drivers/gpu/drm/radeon/mkregtable.c unsigned nlloop, i, j, n, c, id; n 153 drivers/gpu/drm/radeon/mkregtable.c n = 4; n 154 drivers/gpu/drm/radeon/mkregtable.c if (n > c) n 155 drivers/gpu/drm/radeon/mkregtable.c n = c; n 156 drivers/gpu/drm/radeon/mkregtable.c c -= n; n 157 drivers/gpu/drm/radeon/mkregtable.c for (j = 0; j < n; j++) { n 1397 drivers/gpu/drm/radeon/ni_dpm.c u64 tmp, n, d; n 1422 drivers/gpu/drm/radeon/ni_dpm.c n = ((u64)near_tdp_limit * ((u64)std_vddc_med * (u64)std_vddc_med) * 90); n 1424 drivers/gpu/drm/radeon/ni_dpm.c tmp = div64_u64(n, d); n 1148 drivers/gpu/drm/radeon/nid.h #define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \ n 1150 drivers/gpu/drm/radeon/nid.h ((n) & 0x3FFF) << 16) n 1157 drivers/gpu/drm/radeon/nid.h #define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \ n 1159 drivers/gpu/drm/radeon/nid.h ((n) & 0x3FFF) << 16) n 1337 drivers/gpu/drm/radeon/nid.h #define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \ n 1340 drivers/gpu/drm/radeon/nid.h (((n) & 0xFFFFF) << 0)) n 1342 drivers/gpu/drm/radeon/nid.h #define DMA_IB_PACKET(cmd, vmid, n) ((((cmd) & 0xF) << 28) | \ n 1344 drivers/gpu/drm/radeon/nid.h (((n) & 0xFFFFF) << 0)) n 1346 drivers/gpu/drm/radeon/nid.h #define DMA_PTE_PDE_PACKET(n) ((2 << 28) | \ n 1349 drivers/gpu/drm/radeon/nid.h (((n) & 0xFFFFF) << 0)) n 1367 drivers/gpu/drm/radeon/r100.c const unsigned *auth, unsigned n, n 1382 drivers/gpu/drm/radeon/r100.c if ((reg >> 7) > n) { n 1386 drivers/gpu/drm/radeon/r100.c if (((reg + (pkt->count << 2)) >> 7) > n) { n 2469 
drivers/gpu/drm/radeon/r100.c static int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n) n 2476 drivers/gpu/drm/radeon/r100.c if (tmp >= n) { n 59 drivers/gpu/drm/radeon/r100d.h #define PACKET0(reg, n) (CP_PACKET0 | \ n 61 drivers/gpu/drm/radeon/r100d.h REG_SET(PACKET0_COUNT, (n))) n 63 drivers/gpu/drm/radeon/r100d.h #define PACKET3(op, n) (CP_PACKET3 | \ n 65 drivers/gpu/drm/radeon/r100d.h REG_SET(PACKET3_COUNT, (n))) n 60 drivers/gpu/drm/radeon/r300d.h #define PACKET0(reg, n) (CP_PACKET0 | \ n 62 drivers/gpu/drm/radeon/r300d.h REG_SET(PACKET0_COUNT, (n))) n 64 drivers/gpu/drm/radeon/r300d.h #define PACKET3(op, n) (CP_PACKET3 | \ n 66 drivers/gpu/drm/radeon/r300d.h REG_SET(PACKET3_COUNT, (n))) n 645 drivers/gpu/drm/radeon/r600d.h #define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \ n 648 drivers/gpu/drm/radeon/r600d.h (((n) & 0xFFFF) << 0)) n 1584 drivers/gpu/drm/radeon/r600d.h #define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \ n 1586 drivers/gpu/drm/radeon/r600d.h ((n) & 0x3FFF) << 16) n 1587 drivers/gpu/drm/radeon/r600d.h #define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \ n 1589 drivers/gpu/drm/radeon/r600d.h ((n) & 0x3FFF) << 16) n 2958 drivers/gpu/drm/radeon/radeon.h unsigned n); n 162 drivers/gpu/drm/radeon/radeon_acpi.c static void radeon_atif_parse_notification(struct radeon_atif_notifications *n, u32 mask) n 164 drivers/gpu/drm/radeon/radeon_acpi.c n->display_switch = mask & ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED; n 165 drivers/gpu/drm/radeon/radeon_acpi.c n->expansion_mode_change = mask & ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED; n 166 drivers/gpu/drm/radeon/radeon_acpi.c n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED; n 167 drivers/gpu/drm/radeon/radeon_acpi.c n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED; n 168 drivers/gpu/drm/radeon/radeon_acpi.c n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED; n 169 drivers/gpu/drm/radeon/radeon_acpi.c n->display_conf_change = mask & ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED; n 170 drivers/gpu/drm/radeon/radeon_acpi.c n->px_gfx_switch = mask & ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED; n 171 drivers/gpu/drm/radeon/radeon_acpi.c n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED; n 172 drivers/gpu/drm/radeon/radeon_acpi.c n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED; n 258 drivers/gpu/drm/radeon/radeon_acpi.c struct radeon_atif_notification_cfg *n) n 286 drivers/gpu/drm/radeon/radeon_acpi.c n->enabled = false; n 287 drivers/gpu/drm/radeon/radeon_acpi.c n->command_code = 0; n 289 drivers/gpu/drm/radeon/radeon_acpi.c n->enabled = true; n 290 drivers/gpu/drm/radeon/radeon_acpi.c n->command_code = 0x81; n 296 drivers/gpu/drm/radeon/radeon_acpi.c n->enabled = true; n 297 drivers/gpu/drm/radeon/radeon_acpi.c n->command_code = params.command_code; n 302 drivers/gpu/drm/radeon/radeon_acpi.c (n->enabled ? 
"enabled" : "disabled"), n 303 drivers/gpu/drm/radeon/radeon_acpi.c n->command_code); n 126 drivers/gpu/drm/radeon/radeon_asic.h const unsigned *auth, unsigned n, n 551 drivers/gpu/drm/radeon/radeon_audio.c int n, cts; n 555 drivers/gpu/drm/radeon/radeon_audio.c n = 128 * freq; n 559 drivers/gpu/drm/radeon/radeon_audio.c div = gcd(n, cts); n 561 drivers/gpu/drm/radeon/radeon_audio.c n /= div; n 568 drivers/gpu/drm/radeon/radeon_audio.c mul = ((128*freq/1000) + (n-1))/n; n 570 drivers/gpu/drm/radeon/radeon_audio.c n *= mul; n 574 drivers/gpu/drm/radeon/radeon_audio.c if (n < (128*freq/1500)) n 576 drivers/gpu/drm/radeon/radeon_audio.c if (n > (128*freq/300)) n 579 drivers/gpu/drm/radeon/radeon_audio.c *N = n; n 37 drivers/gpu/drm/radeon/radeon_benchmark.c int flag, int n, n 46 drivers/gpu/drm/radeon/radeon_benchmark.c for (i = 0; i < n; i++) { n 75 drivers/gpu/drm/radeon/radeon_benchmark.c static void radeon_benchmark_log_results(int n, unsigned size, n 80 drivers/gpu/drm/radeon/radeon_benchmark.c unsigned int throughput = (n * (size >> 10)) / time; n 83 drivers/gpu/drm/radeon/radeon_benchmark.c kind, n, size >> 10, sdomain, ddomain, time, n 93 drivers/gpu/drm/radeon/radeon_benchmark.c int r, n; n 96 drivers/gpu/drm/radeon/radeon_benchmark.c n = RADEON_BENCHMARK_ITERATIONS; n 124 drivers/gpu/drm/radeon/radeon_benchmark.c RADEON_BENCHMARK_COPY_DMA, n, n 129 drivers/gpu/drm/radeon/radeon_benchmark.c radeon_benchmark_log_results(n, size, time, n 135 drivers/gpu/drm/radeon/radeon_benchmark.c RADEON_BENCHMARK_COPY_BLIT, n, n 140 drivers/gpu/drm/radeon/radeon_benchmark.c radeon_benchmark_log_results(n, size, time, n 1339 drivers/gpu/drm/radeon/radeon_combios.c int i, n; n 1348 drivers/gpu/drm/radeon/radeon_combios.c n = RBIOS8(tmds_info + 5) + 1; n 1349 drivers/gpu/drm/radeon/radeon_combios.c if (n > 4) n 1350 drivers/gpu/drm/radeon/radeon_combios.c n = 4; n 1351 drivers/gpu/drm/radeon/radeon_combios.c for (i = 0; i < n; i++) { n 1362 drivers/gpu/drm/radeon/radeon_combios.c n = RBIOS8(tmds_info + 5) + 1; n 1363 drivers/gpu/drm/radeon/radeon_combios.c if (n > 4) n 1364 drivers/gpu/drm/radeon/radeon_combios.c n = 4; n 1365 drivers/gpu/drm/radeon/radeon_combios.c for (i = 0; i < n; i++) { n 1098 drivers/gpu/drm/radeon/radeon_display.c static inline uint32_t radeon_div(uint64_t n, uint32_t d) n 1102 drivers/gpu/drm/radeon/radeon_display.c n += d / 2; n 1104 drivers/gpu/drm/radeon/radeon_display.c mod = do_div(n, d); n 1105 drivers/gpu/drm/radeon/radeon_display.c return n; n 244 drivers/gpu/drm/radeon/radeon_i2c.c u8 n, m, loop; n 266 drivers/gpu/drm/radeon/radeon_i2c.c n = loop - 1; n 268 drivers/gpu/drm/radeon/radeon_i2c.c prescale = m | (n << 8); n 570 drivers/gpu/drm/radeon/radeon_irq_kms.c bool enable, const char *name, unsigned n) n 579 drivers/gpu/drm/radeon/radeon_irq_kms.c DRM_DEBUG("%s%d interrupts enabled\n", name, n); n 582 drivers/gpu/drm/radeon/radeon_irq_kms.c DRM_DEBUG("%s%d interrupts disabled\n", name, n); n 542 drivers/gpu/drm/radeon/radeon_legacy_tv.c uint32_t m, n, p; n 687 drivers/gpu/drm/radeon/radeon_legacy_tv.c n = NTSC_TV_PLL_N_27; n 691 drivers/gpu/drm/radeon/radeon_legacy_tv.c n = NTSC_TV_PLL_N_14; n 697 drivers/gpu/drm/radeon/radeon_legacy_tv.c n = PAL_TV_PLL_N_27; n 701 drivers/gpu/drm/radeon/radeon_legacy_tv.c n = PAL_TV_PLL_N_14; n 708 drivers/gpu/drm/radeon/radeon_legacy_tv.c ((n & RADEON_TV_N0LO_MASK) << RADEON_TV_N0LO_SHIFT) | n 709 drivers/gpu/drm/radeon/radeon_legacy_tv.c (((n >> 9) & RADEON_TV_N0HI_MASK) << RADEON_TV_N0HI_SHIFT) | n 437 
drivers/gpu/drm/radeon/radeon_object.c struct radeon_bo *bo, *n; n 443 drivers/gpu/drm/radeon/radeon_object.c list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { n 151 drivers/gpu/drm/radeon/radeon_pm.c struct radeon_bo *bo, *n; n 156 drivers/gpu/drm/radeon/radeon_pm.c list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { n 40 drivers/gpu/drm/radeon/radeon_test.c unsigned n, size; n 60 drivers/gpu/drm/radeon/radeon_test.c n = rdev->mc.gtt_size - rdev->gart_pin_size; n 61 drivers/gpu/drm/radeon/radeon_test.c n /= size; n 63 drivers/gpu/drm/radeon/radeon_test.c gtt_obj = kcalloc(n, sizeof(*gtt_obj), GFP_KERNEL); n 65 drivers/gpu/drm/radeon/radeon_test.c DRM_ERROR("Failed to allocate %d pointers\n", n); n 84 drivers/gpu/drm/radeon/radeon_test.c for (i = 0; i < n; i++) { n 200 drivers/gpu/drm/radeon/rv515d.h #define PACKET0(reg, n) (CP_PACKET0 | \ n 202 drivers/gpu/drm/radeon/rv515d.h REG_SET(PACKET0_COUNT, (n))) n 204 drivers/gpu/drm/radeon/rv515d.h #define PACKET3(op, n) (CP_PACKET3 | \ n 206 drivers/gpu/drm/radeon/rv515d.h REG_SET(PACKET3_COUNT, (n))) n 663 drivers/gpu/drm/radeon/rv770d.h #define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \ n 666 drivers/gpu/drm/radeon/rv770d.h (((n) & 0xFFFF) << 0)) n 985 drivers/gpu/drm/radeon/rv770d.h #define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \ n 987 drivers/gpu/drm/radeon/rv770d.h ((n) & 0x3FFF) << 16) n 988 drivers/gpu/drm/radeon/rv770d.h #define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \ n 990 drivers/gpu/drm/radeon/rv770d.h ((n) & 0x3FFF) << 16) n 2261 drivers/gpu/drm/radeon/si_dpm.c u64 pwr_efficiency_ratio, n, d; n 2266 drivers/gpu/drm/radeon/si_dpm.c n = div64_u64((u64)1024 * curr_vddc * curr_vddc * ((u64)1000 + margin), (u64)1000); n 2268 drivers/gpu/drm/radeon/si_dpm.c pwr_efficiency_ratio = div64_u64(n, d); n 1586 drivers/gpu/drm/radeon/sid.h #define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \ n 1588 drivers/gpu/drm/radeon/sid.h ((n) & 0x3FFF) << 16) n 1595 drivers/gpu/drm/radeon/sid.h #define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \ n 1597 drivers/gpu/drm/radeon/sid.h ((n) & 0x3FFF) << 16) n 1599 drivers/gpu/drm/radeon/sid.h #define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1) n 1853 drivers/gpu/drm/radeon/sid.h #define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \ n 1857 drivers/gpu/drm/radeon/sid.h (((n) & 0xFFFFF) << 0)) n 1859 drivers/gpu/drm/radeon/sid.h #define DMA_IB_PACKET(cmd, vmid, n) ((((cmd) & 0xF) << 28) | \ n 1861 drivers/gpu/drm/radeon/sid.h (((n) & 0xFFFFF) << 0)) n 1863 drivers/gpu/drm/radeon/sid.h #define DMA_PTE_PDE_PACKET(n) ((2 << 28) | \ n 1866 drivers/gpu/drm/radeon/sid.h (((n) & 0xFFFFF) << 0)) n 1597 drivers/gpu/drm/radeon/sumo_dpm.c u32 n = 0; n 1602 drivers/gpu/drm/radeon/sumo_dpm.c sclk_voltage_mapping_table->entries[n].sclk_frequency = n 1604 drivers/gpu/drm/radeon/sumo_dpm.c sclk_voltage_mapping_table->entries[n].vid_2bit = n 1607 drivers/gpu/drm/radeon/sumo_dpm.c n++; n 1611 drivers/gpu/drm/radeon/sumo_dpm.c sclk_voltage_mapping_table->num_max_dpm_entries = n; n 125 drivers/gpu/drm/radeon/trinity_dpm.h int trinity_dpm_force_state(struct radeon_device *rdev, u32 n); n 126 drivers/gpu/drm/radeon/trinity_dpm.h int trinity_dpm_n_levels_disabled(struct radeon_device *rdev, u32 n); n 73 drivers/gpu/drm/radeon/trinity_smc.c int trinity_dpm_force_state(struct radeon_device *rdev, u32 n) n 75 drivers/gpu/drm/radeon/trinity_smc.c WREG32_SMC(SMU_SCRATCH0, n); n 80 drivers/gpu/drm/radeon/trinity_smc.c int trinity_dpm_n_levels_disabled(struct radeon_device *rdev, 
u32 n) n 82 drivers/gpu/drm/radeon/trinity_smc.c WREG32_SMC(SMU_SCRATCH0, n); n 78 drivers/gpu/drm/rcar-du/rcar_du_crtc.c unsigned int n; n 91 drivers/gpu/drm/rcar-du/rcar_du_crtc.c unsigned int n; n 122 drivers/gpu/drm/rcar-du/rcar_du_crtc.c for (n = 119; n > 38; n--) { n 132 drivers/gpu/drm/rcar-du/rcar_du_crtc.c unsigned long fout = input * (n + 1) / (m + 1); n 147 drivers/gpu/drm/rcar-du/rcar_du_crtc.c dpll->n = n; n 162 drivers/gpu/drm/rcar-du/rcar_du_crtc.c dpll->output, dpll->fdpll, dpll->n, dpll->m, best_diff); n 249 drivers/gpu/drm/rcar-du/rcar_du_crtc.c | DPLLCR_N(dpll.n) | DPLLCR_M(dpll.m) n 72 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DSSR_DFB(n) (1 << ((n)+15)) n 78 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DSSR_ADC(n) (1 << ((n)-1)) n 86 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DSRCR_ADCL(n) (1 << ((n)-1)) n 95 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DIER_ADCE(n) (1 << ((n)-1)) n 104 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DPPR_DPE(n) (1 << ((n)*4-1)) n 105 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DPPR_DPS(n, p) (((p)-1) << DPPR_DPS_SHIFT(n)) n 106 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DPPR_DPS_SHIFT(n) (((n)-1)*4) n 154 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DVCSR_VCnFB2_DSA0(n) (0 << ((n)*2+16)) n 155 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DVCSR_VCnFB2_DSA1(n) (1 << ((n)*2+16)) n 156 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DVCSR_VCnFB2_DSA2(n) (2 << ((n)*2+16)) n 157 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DVCSR_VCnFB2_INIT(n) (3 << ((n)*2+16)) n 158 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DVCSR_VCnFB2_MASK(n) (3 << ((n)*2+16)) n 159 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DVCSR_VCnFB_DSA0(n) (0 << ((n)*2)) n 160 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DVCSR_VCnFB_DSA1(n) (1 << ((n)*2)) n 161 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DVCSR_VCnFB_DSA2(n) (2 << ((n)*2)) n 162 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DVCSR_VCnFB_INIT(n) (3 << ((n)*2)) n 163 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DVCSR_VCnFB_MASK(n) (3 << ((n)*2)) n 211 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DD1SSR_ADC(n) (1 << ((n)-1)) n 220 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DD1SRCR_ADC(n) (1 << ((n)-1)) n 229 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DD1IER_ADC(n) (1 << ((n)-1)) n 234 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DEFR8_DRGBS_DU(n) ((n) << 4) n 255 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DIDSR_LCDS_DCLKIN(n) (0 << (8 + (n) * 2)) n 256 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DIDSR_LCDS_LVDS0(n) (2 << (8 + (n) * 2)) n 257 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DIDSR_LCDS_LVDS1(n) (3 << (8 + (n) * 2)) n 258 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DIDSR_LCDS_MASK(n) (3 << (8 + (n) * 2)) n 259 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DIDSR_PDCS_CLK(n, clk) (clk << ((n) * 2)) n 260 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DIDSR_PDCS_MASK(n) (3 << ((n) * 2)) n 287 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DPLLCR_FDPLL(n) ((n) << 12) n 288 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DPLLCR_N(n) ((n) << 5) n 289 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DPLLCR_M(n) ((n) << 3) n 297 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DPLLC2R_M(n) ((n) << 8) n 298 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DPLLC2R_FDPLL(n) ((n) << 0) n 529 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DPTSR_PnDK(n) (1 << ((n) + 16)) n 530 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DPTSR_PnTS(n) (1 << (n)) n 533 
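
The rcar_du_crtc.c entries just above step n down from 119 to 39 and keep the (n, m) pair whose output frequency lands closest to the requested rate. A minimal standalone sketch of that exhaustive divider search, with illustrative ranges and without the driver's fdpll post-divider and clock-limit handling (dpll_search and struct dpll_cfg are hypothetical names):

#include <stdio.h>

struct dpll_cfg {
	unsigned int n;
	unsigned int m;
	unsigned long output;
};

static void dpll_search(unsigned long input, unsigned long target,
			struct dpll_cfg *dpll)
{
	unsigned long best_diff = ~0UL;
	unsigned int n, m;

	for (n = 119; n > 38; n--) {
		for (m = 0; m < 4; m++) {
			/* same frequency model as the snippet above */
			unsigned long fout = input * (n + 1) / (m + 1);
			unsigned long diff = fout > target ? fout - target
							   : target - fout;

			if (diff < best_diff) {
				best_diff = diff;
				dpll->n = n;
				dpll->m = m;
				dpll->output = fout;
			}
		}
	}
}

int main(void)
{
	struct dpll_cfg dpll = { 0 };

	dpll_search(33000000UL, 1485000000UL, &dpll);
	printf("n=%u m=%u fout=%lu\n", dpll.n, dpll.m, dpll.output);
	return 0;
}

An exhaustive scan is cheap here because the search space is only a few hundred pairs, which is presumably why both rcar_du_crtc.c and the rcar_lvds.c PLL code nearby take the same approach.
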
drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DAPTSR_APnDK(n) (1 << ((n) + 16)) n 534 drivers/gpu/drm/rcar-du/rcar_du_regs.h #define DAPTSR_APnTS(n) (1 << (n)) n 252 drivers/gpu/drm/rcar-du/rcar_lvds.c unsigned int n; n 265 drivers/gpu/drm/rcar-du/rcar_lvds.c for (n = n_min; n < n_max; ++n) { n 277 drivers/gpu/drm/rcar-du/rcar_lvds.c fvco = fpfd * n; n 297 drivers/gpu/drm/rcar-du/rcar_lvds.c pll->pll_n = n; n 26 drivers/gpu/drm/rcar-du/rcar_lvds_regs.h #define LVDCR1_CHSTBY(n) (3 << (2 + (n) * 2)) n 52 drivers/gpu/drm/rcar-du/rcar_lvds_regs.h #define LVDPLLCR_CKSEL_DU_DOTCLKIN(n) ((5 + (n) * 2) << 17) n 57 drivers/gpu/drm/rcar-du/rcar_lvds_regs.h #define LVDPLLCR_PLLE(n) ((n) << 10) n 58 drivers/gpu/drm/rcar-du/rcar_lvds_regs.h #define LVDPLLCR_PLLN(n) ((n) << 3) n 59 drivers/gpu/drm/rcar-du/rcar_lvds_regs.h #define LVDPLLCR_PLLM(n) ((n) << 0) n 86 drivers/gpu/drm/rcar-du/rcar_lvds_regs.h #define LVDCHCR_CHSEL_CH(n, c) ((((c) - (n)) & 3) << ((n) * 4)) n 87 drivers/gpu/drm/rcar-du/rcar_lvds_regs.h #define LVDCHCR_CHSEL_MASK(n) (3 << ((n) * 4)) n 98 drivers/gpu/drm/rcar-du/rcar_lvds_regs.h #define LVDSCR_DEPTH(n) (((n) - 1) << 29) n 100 drivers/gpu/drm/rcar-du/rcar_lvds_regs.h #define LVDSCR_TWGCNT(n) ((((n) - 256) / 16) << 24) n 101 drivers/gpu/drm/rcar-du/rcar_lvds_regs.h #define LVDSCR_SDIV(n) ((n) << 22) n 109 drivers/gpu/drm/rcar-du/rcar_lvds_regs.h #define LVDDIV_DIV(n) ((n) << 0) n 38 drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c #define N_LANES(n) ((((n) - 1) & 0x3) << 0) n 54 drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c #define PHY_TESTDOUT(n) (((n) & 0xff) << 8) n 55 drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c #define PHY_TESTDIN(n) (((n) & 0xff) << 0) n 48 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_VIDEO_INPUT_FORMAT(n) (n << 1) n 61 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_VIDEO_OUTPUT_COLOR(n) (((n) & 0x3) << 6) n 62 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_VIDEO_INPUT_BITS(n) (n << 4) n 63 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_VIDEO_INPUT_CSP(n) (n << 0) n 73 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_VIDEO_AUTO_CSC(n) (n << 7) n 75 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_VIDEO_C0_C2_SWAP(n) (n << 0) n 88 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_COLOR_DEPTH_NOT_INDICATED(n) ((n) << 4) n 101 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_AVMUTE_CLEAR(n) (n << 7) n 102 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_AVMUTE_ENABLE(n) (n << 6) n 103 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_AUDIO_MUTE(n) (n << 1) n 104 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_VIDEO_MUTE(n) (n << 0) n 107 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_HSYNC_POLARITY(n) (n << 3) n 108 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_VSYNC_POLARITY(n) (n << 2) n 109 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_INETLACE(n) (n << 1) n 110 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_EXTERANL_VIDEO(n) (n << 0) n 133 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_CTS_SOURCE(n) (n << 7) n 140 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_DOWN_SAMPLE(n) (n << 5) n 146 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_AUDIO_SOURCE(n) (n << 3) n 148 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_MCLK_ENABLE(n) (n << 2) n 155 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_MCLK_RATIO(n) (n) n 175 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_I2S_CHANNEL(n) ((n) << 2) n 181 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_I2S_MODE(n) (n) n 185 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_SPIDF_FREQ(n) (n) n 201 drivers/gpu/drm/rockchip/inno_hdmi.h 
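
The inno_hdmi.h and dw-mipi-dsi register macros in this stretch pack a field with the same mask-then-shift idiom, ((n) & mask) << shift, so an oversized argument cannot spill into neighbouring bits. A small self-contained illustration (the EXAMPLE_* names and field layout are invented for the sketch):

#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_FIELD_A(n)	(((n) & 0xff) << 8)	/* bits 15:8 */
#define EXAMPLE_FIELD_B(n)	(((n) & 0x0f) << 4)	/* bits 7:4  */
#define EXAMPLE_FIELD_C(n)	((n) & 0x0f)		/* bits 3:0  */

int main(void)
{
	/* compose one register word from three independent fields */
	uint32_t reg = EXAMPLE_FIELD_A(0x3c) | EXAMPLE_FIELD_B(2) | EXAMPLE_FIELD_C(9);

	printf("reg = 0x%08x\n", reg);	/* prints 0x00003c29 */
	return 0;
}

The masked, fully parenthesised form is the defensive variant; several of the inno_hdmi.h macros above use the bare (n << shift) form and rely on callers passing in-range values.
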
#define v_AUDIO_STATUS_NLPCM(n) ((n & 1) << 7) n 224 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_PACKET_GCP_EN(n) ((n & 1) << 7) n 225 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_PACKET_MSI_EN(n) ((n & 1) << 6) n 226 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_PACKET_SDI_EN(n) ((n & 1) << 5) n 227 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_PACKET_VSI_EN(n) ((n & 1) << 4) n 260 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_HDMI_DVI(n) (n << 1) n 277 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_MASK_INT_HOTPLUG(n) ((n & 0x1) << 5) n 303 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_CLK_CHG_PWR(n) ((n & 1) << 3) n 304 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_DATA_CHG_PWR(n) ((n & 7) << 0) n 307 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_CLK_MAIN_DRIVER(n) (n << 4) n 308 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_DATA_MAIN_DRIVER(n) (n << 0) n 311 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_PRE_EMPHASIS(n) ((n & 7) << 4) n 312 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_CLK_PRE_DRIVER(n) ((n & 3) << 2) n 313 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_DATA_PRE_DRIVER(n) ((n & 3) << 0) n 316 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_FEEDBACK_DIV_LOW(n) (n & 0xff) n 318 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_FEEDBACK_DIV_HIGH(n) (n & 1) n 321 drivers/gpu/drm/rockchip/inno_hdmi.h #define v_PRE_DIV_RATIO(n) (n & 0x1f) n 47 drivers/gpu/drm/savage/savage_bci.c savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n) n 55 drivers/gpu/drm/savage/savage_bci.c if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold) n 57 drivers/gpu/drm/savage/savage_bci.c "(more than guaranteed space in COB)\n", n); n 76 drivers/gpu/drm/savage/savage_bci.c savage_bci_wait_fifo_s3d(drm_savage_private_t * dev_priv, unsigned int n) n 78 drivers/gpu/drm/savage/savage_bci.c uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n; n 97 drivers/gpu/drm/savage/savage_bci.c savage_bci_wait_fifo_s4(drm_savage_private_t * dev_priv, unsigned int n) n 99 drivers/gpu/drm/savage/savage_bci.c uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n; n 372 drivers/gpu/drm/savage/savage_bci.c uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv, unsigned int n) n 377 drivers/gpu/drm/savage/savage_bci.c unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) / n 383 drivers/gpu/drm/savage/savage_bci.c cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages); n 388 drivers/gpu/drm/savage/savage_bci.c if (n < rest) n 389 drivers/gpu/drm/savage/savage_bci.c rest = n; n 391 drivers/gpu/drm/savage/savage_bci.c n -= rest; n 396 drivers/gpu/drm/savage/savage_bci.c (n + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE; n 412 drivers/gpu/drm/savage/savage_bci.c if (n > SAVAGE_DMA_PAGE_SIZE) n 415 drivers/gpu/drm/savage/savage_bci.c dev_priv->dma_pages[i].used = n; n 416 drivers/gpu/drm/savage/savage_bci.c n -= SAVAGE_DMA_PAGE_SIZE; n 421 drivers/gpu/drm/savage/savage_bci.c i, dev_priv->dma_pages[i].used, n); n 193 drivers/gpu/drm/savage/savage_drv.h int (*wait_fifo) (struct drm_savage_private * dev_priv, unsigned int n); n 213 drivers/gpu/drm/savage/savage_drv.h unsigned int n); n 461 drivers/gpu/drm/savage/savage_drv.h #define BCI_SET_REGISTERS( first, n ) \ n 463 drivers/gpu/drm/savage/savage_drv.h ((uint32_t)(n) & 0xff) << 16 | \ n 465 drivers/gpu/drm/savage/savage_drv.h #define DMA_SET_REGISTERS( first, n ) \ n 467 drivers/gpu/drm/savage/savage_drv.h ((uint32_t)(n) & 0xff) << 16 | \ n 470 drivers/gpu/drm/savage/savage_drv.h #define 
BCI_DRAW_PRIMITIVE(n, type, skip) \ n 472 drivers/gpu/drm/savage/savage_drv.h ((n) << 16)) n 473 drivers/gpu/drm/savage/savage_drv.h #define DMA_DRAW_PRIMITIVE(n, type, skip) \ n 475 drivers/gpu/drm/savage/savage_drv.h ((n) << 16)) n 477 drivers/gpu/drm/savage/savage_drv.h #define BCI_DRAW_INDICES_S3D(n, type, i0) \ n 479 drivers/gpu/drm/savage/savage_drv.h ((n) << 16) | (i0)) n 481 drivers/gpu/drm/savage/savage_drv.h #define BCI_DRAW_INDICES_S4(n, type, skip) \ n 483 drivers/gpu/drm/savage/savage_drv.h (skip) | ((n) << 16)) n 485 drivers/gpu/drm/savage/savage_drv.h #define BCI_DMA(n) \ n 486 drivers/gpu/drm/savage/savage_drv.h BCI_WRITE(BCI_CMD_DMA | (((n) >> 1) - 1)) n 503 drivers/gpu/drm/savage/savage_drv.h #define BEGIN_BCI( n ) do { \ n 504 drivers/gpu/drm/savage/savage_drv.h dev_priv->wait_fifo(dev_priv, (n)); \ n 517 drivers/gpu/drm/savage/savage_drv.h #define BEGIN_DMA( n ) do { \ n 521 drivers/gpu/drm/savage/savage_drv.h if ((n) > rest) { \ n 522 drivers/gpu/drm/savage/savage_drv.h dma_ptr = savage_dma_alloc(dev_priv, (n)); \ n 529 drivers/gpu/drm/savage/savage_drv.h dev_priv->dma_pages[cur].used += (n); \ n 535 drivers/gpu/drm/savage/savage_drv.h #define DMA_COPY(src, n) do { \ n 536 drivers/gpu/drm/savage/savage_drv.h memcpy(dma_ptr, (src), (n)*4); \ n 537 drivers/gpu/drm/savage/savage_drv.h dma_ptr += n; \ n 267 drivers/gpu/drm/savage/savage_state.c unsigned int n = count < 255 ? count : 255; n 268 drivers/gpu/drm/savage/savage_state.c DMA_SET_REGISTERS(start, n); n 269 drivers/gpu/drm/savage/savage_state.c DMA_COPY(regs, n); n 270 drivers/gpu/drm/savage/savage_state.c count -= n; n 271 drivers/gpu/drm/savage/savage_state.c start += n; n 272 drivers/gpu/drm/savage/savage_state.c regs += n; n 292 drivers/gpu/drm/savage/savage_state.c unsigned int n = cmd_header->prim.count; n 302 drivers/gpu/drm/savage/savage_state.c if (!n) n 311 drivers/gpu/drm/savage/savage_state.c if (n % 3 != 0) { n 313 drivers/gpu/drm/savage/savage_state.c n); n 319 drivers/gpu/drm/savage/savage_state.c if (n < 3) { n 322 drivers/gpu/drm/savage/savage_state.c n); n 350 drivers/gpu/drm/savage/savage_state.c if (start + n > dmabuf->total / 32) { n 352 drivers/gpu/drm/savage/savage_state.c start, start + n - 1, dmabuf->total / 32); n 380 drivers/gpu/drm/savage/savage_state.c while (n != 0) { n 382 drivers/gpu/drm/savage/savage_state.c unsigned int count = n > 255 ? 255 : n; n 418 drivers/gpu/drm/savage/savage_state.c n -= count; n 434 drivers/gpu/drm/savage/savage_state.c unsigned int n = cmd_header->prim.count; n 440 drivers/gpu/drm/savage/savage_state.c if (!n) n 449 drivers/gpu/drm/savage/savage_state.c if (n % 3 != 0) { n 451 drivers/gpu/drm/savage/savage_state.c n); n 457 drivers/gpu/drm/savage/savage_state.c if (n < 3) { n 460 drivers/gpu/drm/savage/savage_state.c n); n 493 drivers/gpu/drm/savage/savage_state.c if (start + n > vb_size / (vb_stride * 4)) { n 495 drivers/gpu/drm/savage/savage_state.c start, start + n - 1, vb_size / (vb_stride * 4)); n 500 drivers/gpu/drm/savage/savage_state.c while (n != 0) { n 502 drivers/gpu/drm/savage/savage_state.c unsigned int count = n > 255 ? 
255 : n; n 537 drivers/gpu/drm/savage/savage_state.c n -= count; n 553 drivers/gpu/drm/savage/savage_state.c unsigned int n = cmd_header->idx.count; n 562 drivers/gpu/drm/savage/savage_state.c if (!n) n 571 drivers/gpu/drm/savage/savage_state.c if (n % 3 != 0) { n 572 drivers/gpu/drm/savage/savage_state.c DRM_ERROR("wrong number of indices %u in TRILIST\n", n); n 578 drivers/gpu/drm/savage/savage_state.c if (n < 3) { n 580 drivers/gpu/drm/savage/savage_state.c ("wrong number of indices %u in TRIFAN/STRIP\n", n); n 632 drivers/gpu/drm/savage/savage_state.c while (n != 0) { n 634 drivers/gpu/drm/savage/savage_state.c unsigned int count = n > 255 ? 255 : n; n 679 drivers/gpu/drm/savage/savage_state.c n -= count; n 696 drivers/gpu/drm/savage/savage_state.c unsigned int n = cmd_header->idx.count; n 701 drivers/gpu/drm/savage/savage_state.c if (!n) n 710 drivers/gpu/drm/savage/savage_state.c if (n % 3 != 0) { n 711 drivers/gpu/drm/savage/savage_state.c DRM_ERROR("wrong number of indices %u in TRILIST\n", n); n 717 drivers/gpu/drm/savage/savage_state.c if (n < 3) { n 719 drivers/gpu/drm/savage/savage_state.c ("wrong number of indices %u in TRIFAN/STRIP\n", n); n 753 drivers/gpu/drm/savage/savage_state.c while (n != 0) { n 755 drivers/gpu/drm/savage/savage_state.c unsigned int count = n > 255 ? 255 : n; n 794 drivers/gpu/drm/savage/savage_state.c n -= count; n 32 drivers/gpu/drm/selftests/drm_selftest.c #define selftest(n, f) [__idx_##n] = { .name = #n, .func = f }, n 43 drivers/gpu/drm/selftests/drm_selftest.c #define param(n) __PASTE(igt__, __PASTE(__PASTE(__LINE__, __), n)) n 44 drivers/gpu/drm/selftests/drm_selftest.c #define selftest_0(n, func, id) \ n 45 drivers/gpu/drm/selftests/drm_selftest.c module_param_named(id, selftests[__idx_##n].enabled, bool, 0400); n 46 drivers/gpu/drm/selftests/drm_selftest.c #define selftest(n, func) selftest_0(n, func, param(n)) n 109 drivers/gpu/drm/selftests/test-drm_mm.c unsigned long n; n 115 drivers/gpu/drm/selftests/test-drm_mm.c n = 0; n 120 drivers/gpu/drm/selftests/test-drm_mm.c n, addr, node->start); n 126 drivers/gpu/drm/selftests/test-drm_mm.c n, size, node->size); n 131 drivers/gpu/drm/selftests/test-drm_mm.c pr_err("node[%ld] is followed by a hole!\n", n); n 151 drivers/gpu/drm/selftests/test-drm_mm.c n++; n 355 drivers/gpu/drm/selftests/test-drm_mm.c int n; n 357 drivers/gpu/drm/selftests/test-drm_mm.c for (n = 0; n < ARRAY_SIZE(boundaries); n++) { n 360 drivers/gpu/drm/selftests/test-drm_mm.c boundaries[n].start, n 361 drivers/gpu/drm/selftests/test-drm_mm.c boundaries[n].size))) { n 363 drivers/gpu/drm/selftests/test-drm_mm.c n, boundaries[n].name, count, size); n 376 drivers/gpu/drm/selftests/test-drm_mm.c unsigned int *order, n, m, o = 0; n 403 drivers/gpu/drm/selftests/test-drm_mm.c for (n = 0; n < count; n++) { n 404 drivers/gpu/drm/selftests/test-drm_mm.c nodes[n].start = order[n] * size; n 405 drivers/gpu/drm/selftests/test-drm_mm.c nodes[n].size = size; n 407 drivers/gpu/drm/selftests/test-drm_mm.c err = drm_mm_reserve_node(&mm, &nodes[n]); n 410 drivers/gpu/drm/selftests/test-drm_mm.c n, nodes[n].start); n 415 drivers/gpu/drm/selftests/test-drm_mm.c if (!drm_mm_node_allocated(&nodes[n])) { n 417 drivers/gpu/drm/selftests/test-drm_mm.c n, nodes[n].start); n 421 drivers/gpu/drm/selftests/test-drm_mm.c if (!expect_reserve_fail(&mm, &nodes[n])) n 431 drivers/gpu/drm/selftests/test-drm_mm.c for (n = 0; n < count; n++) { n 433 drivers/gpu/drm/selftests/test-drm_mm.c set_node(&tmp, order[n] * size, 1))) n 437 
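
The savage_state.c loops above split a large vertex or register count into chunks because the command word carries the count in 8 bits, so each pass emits at most 255 elements and advances the cursor. A compact sketch of that batching idiom, with emit_chunk() standing in for the driver's DMA_SET_REGISTERS/DMA_COPY macros:

#include <stdio.h>

static void emit_chunk(unsigned int start, unsigned int count)
{
	/* stand-in for building and queueing one hardware command */
	printf("chunk: start=%u count=%u\n", start, count);
}

static void emit_all(unsigned int start, unsigned int n)
{
	while (n != 0) {
		unsigned int count = n > 255 ? 255 : n;

		emit_chunk(start, count);
		start += count;
		n -= count;
	}
}

int main(void)
{
	emit_all(0, 600);	/* emits chunks of 255, 255 and 90 */
	return 0;
}
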
drivers/gpu/drm/selftests/test-drm_mm.c drm_mm_remove_node(&nodes[order[n]]); n 438 drivers/gpu/drm/selftests/test-drm_mm.c err = drm_mm_reserve_node(&mm, &nodes[order[n]]); n 441 drivers/gpu/drm/selftests/test-drm_mm.c n, nodes[n].start); n 451 drivers/gpu/drm/selftests/test-drm_mm.c for (n = 0; n < count; n++) { n 455 drivers/gpu/drm/selftests/test-drm_mm.c for (n = 0; n < count; n++) { n 458 drivers/gpu/drm/selftests/test-drm_mm.c size * n, n 459 drivers/gpu/drm/selftests/test-drm_mm.c size * (count - n)))) n 464 drivers/gpu/drm/selftests/test-drm_mm.c for_each_prime_number(n, min(max_prime, count)) { n 465 drivers/gpu/drm/selftests/test-drm_mm.c for (m = 0; m < n; m++) { n 470 drivers/gpu/drm/selftests/test-drm_mm.c for (m = 0; m < n; m++) { n 475 drivers/gpu/drm/selftests/test-drm_mm.c m, n, node->start); n 481 drivers/gpu/drm/selftests/test-drm_mm.c o += n; n 502 drivers/gpu/drm/selftests/test-drm_mm.c int n, ret; n 504 drivers/gpu/drm/selftests/test-drm_mm.c for_each_prime_number_from(n, 1, 54) { n 505 drivers/gpu/drm/selftests/test-drm_mm.c u64 size = BIT_ULL(n); n 574 drivers/gpu/drm/selftests/test-drm_mm.c unsigned int *order, n, m, o = 0; n 595 drivers/gpu/drm/selftests/test-drm_mm.c for (n = 0; n < count; n++) { n 598 drivers/gpu/drm/selftests/test-drm_mm.c node = replace ? &tmp : &nodes[n]; n 600 drivers/gpu/drm/selftests/test-drm_mm.c if (!expect_insert(&mm, node, size, 0, n, mode)) { n 602 drivers/gpu/drm/selftests/test-drm_mm.c mode->name, size, n); n 607 drivers/gpu/drm/selftests/test-drm_mm.c drm_mm_replace_node(&tmp, &nodes[n]); n 610 drivers/gpu/drm/selftests/test-drm_mm.c n); n 614 drivers/gpu/drm/selftests/test-drm_mm.c if (!assert_node(&nodes[n], &mm, size, 0, n)) { n 616 drivers/gpu/drm/selftests/test-drm_mm.c size, n); n 620 drivers/gpu/drm/selftests/test-drm_mm.c if (tmp.start != nodes[n].start) { n 623 drivers/gpu/drm/selftests/test-drm_mm.c nodes[n].start, nodes[n].size); n 638 drivers/gpu/drm/selftests/test-drm_mm.c for (n = 0; n < count; n++) { n 639 drivers/gpu/drm/selftests/test-drm_mm.c u64 addr = nodes[n].start; n 641 drivers/gpu/drm/selftests/test-drm_mm.c drm_mm_remove_node(&nodes[n]); n 642 drivers/gpu/drm/selftests/test-drm_mm.c if (!expect_insert(&mm, &nodes[n], size, 0, n, mode)) { n 644 drivers/gpu/drm/selftests/test-drm_mm.c mode->name, size, n); n 648 drivers/gpu/drm/selftests/test-drm_mm.c if (nodes[n].start != addr) { n 650 drivers/gpu/drm/selftests/test-drm_mm.c mode->name, n, addr, nodes[n].start); n 659 drivers/gpu/drm/selftests/test-drm_mm.c for_each_prime_number(n, min(max_prime, count)) { n 660 drivers/gpu/drm/selftests/test-drm_mm.c for (m = 0; m < n; m++) { n 665 drivers/gpu/drm/selftests/test-drm_mm.c for (m = 0; m < n; m++) { n 667 drivers/gpu/drm/selftests/test-drm_mm.c if (!expect_insert(&mm, node, size, 0, n, mode)) { n 669 drivers/gpu/drm/selftests/test-drm_mm.c mode->name, size, n); n 674 drivers/gpu/drm/selftests/test-drm_mm.c o += n; n 705 drivers/gpu/drm/selftests/test-drm_mm.c unsigned int n; n 708 drivers/gpu/drm/selftests/test-drm_mm.c for_each_prime_number_from(n, 1, 54) { n 709 drivers/gpu/drm/selftests/test-drm_mm.c u64 size = BIT_ULL(n); n 732 drivers/gpu/drm/selftests/test-drm_mm.c unsigned int n; n 741 drivers/gpu/drm/selftests/test-drm_mm.c for_each_prime_number_from(n, 1, 54) { n 742 drivers/gpu/drm/selftests/test-drm_mm.c u64 size = BIT_ULL(n); n 821 drivers/gpu/drm/selftests/test-drm_mm.c unsigned int n; n 826 drivers/gpu/drm/selftests/test-drm_mm.c n = div64_u64(start + size - 1, size); n 830 
drivers/gpu/drm/selftests/test-drm_mm.c n, node->start, node->start + node->size, start, end); n 834 drivers/gpu/drm/selftests/test-drm_mm.c if (node->start != n * size) { n 836 drivers/gpu/drm/selftests/test-drm_mm.c n, n * size, node->start); n 842 drivers/gpu/drm/selftests/test-drm_mm.c n, size, node->size); n 848 drivers/gpu/drm/selftests/test-drm_mm.c pr_err("node %d is followed by a hole!\n", n); n 852 drivers/gpu/drm/selftests/test-drm_mm.c n++; n 881 drivers/gpu/drm/selftests/test-drm_mm.c unsigned int n, start_n, end_n; n 904 drivers/gpu/drm/selftests/test-drm_mm.c for (n = start_n; n <= end_n; n++) { n 905 drivers/gpu/drm/selftests/test-drm_mm.c if (!expect_insert_in_range(&mm, &nodes[n], n 906 drivers/gpu/drm/selftests/test-drm_mm.c size, size, n, n 909 drivers/gpu/drm/selftests/test-drm_mm.c mode->name, size, n, n 923 drivers/gpu/drm/selftests/test-drm_mm.c for (n = start_n; n <= end_n; n++) { n 924 drivers/gpu/drm/selftests/test-drm_mm.c u64 addr = nodes[n].start; n 926 drivers/gpu/drm/selftests/test-drm_mm.c drm_mm_remove_node(&nodes[n]); n 927 drivers/gpu/drm/selftests/test-drm_mm.c if (!expect_insert_in_range(&mm, &nodes[n], n 928 drivers/gpu/drm/selftests/test-drm_mm.c size, size, n, n 930 drivers/gpu/drm/selftests/test-drm_mm.c pr_err("%s reinsert failed, step %d\n", mode->name, n); n 934 drivers/gpu/drm/selftests/test-drm_mm.c if (nodes[n].start != addr) { n 936 drivers/gpu/drm/selftests/test-drm_mm.c mode->name, n, addr, nodes[n].start); n 994 drivers/gpu/drm/selftests/test-drm_mm.c unsigned int n; n 1002 drivers/gpu/drm/selftests/test-drm_mm.c for_each_prime_number_from(n, 1, 50) { n 1003 drivers/gpu/drm/selftests/test-drm_mm.c const u64 size = BIT_ULL(n); n 1242 drivers/gpu/drm/selftests/test-drm_mm.c unsigned int n; n 1245 drivers/gpu/drm/selftests/test-drm_mm.c for (n = 0; n < total_size; n++) { n 1246 drivers/gpu/drm/selftests/test-drm_mm.c e = &nodes[n]; n 1253 drivers/gpu/drm/selftests/test-drm_mm.c for (n = 0; n < total_size; n++) { n 1254 drivers/gpu/drm/selftests/test-drm_mm.c e = &nodes[n]; n 1257 drivers/gpu/drm/selftests/test-drm_mm.c pr_err("node[%d] no longer allocated!\n", n); n 1269 drivers/gpu/drm/selftests/test-drm_mm.c for (n = 0; n < total_size; n++) { n 1270 drivers/gpu/drm/selftests/test-drm_mm.c e = &nodes[n]; n 1273 drivers/gpu/drm/selftests/test-drm_mm.c pr_err("node[%d] no longer connected!\n", n); n 1288 drivers/gpu/drm/selftests/test-drm_mm.c unsigned int n; n 1292 drivers/gpu/drm/selftests/test-drm_mm.c for (n = 0; n < total_size; n++) { n 1293 drivers/gpu/drm/selftests/test-drm_mm.c e = &nodes[n]; n 1409 drivers/gpu/drm/selftests/test-drm_mm.c unsigned int *order, n; n 1430 drivers/gpu/drm/selftests/test-drm_mm.c for (n = 0; n < size; n++) { n 1431 drivers/gpu/drm/selftests/test-drm_mm.c err = drm_mm_insert_node(&mm, &nodes[n].node, 1); n 1433 drivers/gpu/drm/selftests/test-drm_mm.c pr_err("insert failed, step %d\n", n); n 1450 drivers/gpu/drm/selftests/test-drm_mm.c for (n = 1; n <= size; n <<= 1) { n 1454 drivers/gpu/drm/selftests/test-drm_mm.c n, 1, n 1458 drivers/gpu/drm/selftests/test-drm_mm.c mode->name, n); n 1464 drivers/gpu/drm/selftests/test-drm_mm.c for (n = 1; n < size; n <<= 1) { n 1468 drivers/gpu/drm/selftests/test-drm_mm.c size/2, n, n 1472 drivers/gpu/drm/selftests/test-drm_mm.c mode->name, size/2, n); n 1478 drivers/gpu/drm/selftests/test-drm_mm.c for_each_prime_number_from(n, 1, min(size, max_prime)) { n 1479 drivers/gpu/drm/selftests/test-drm_mm.c unsigned int nsize = (size - n + 1) / 2; n 1486 
drivers/gpu/drm/selftests/test-drm_mm.c nsize, n, n 1490 drivers/gpu/drm/selftests/test-drm_mm.c mode->name, nsize, n); n 1522 drivers/gpu/drm/selftests/test-drm_mm.c unsigned int *order, n; n 1540 drivers/gpu/drm/selftests/test-drm_mm.c for (n = 0; n < size; n++) { n 1541 drivers/gpu/drm/selftests/test-drm_mm.c err = drm_mm_insert_node(&mm, &nodes[n].node, 1); n 1543 drivers/gpu/drm/selftests/test-drm_mm.c pr_err("insert failed, step %d\n", n); n 1550 drivers/gpu/drm/selftests/test-drm_mm.c for (n = 1; n <= range_size; n <<= 1) { n 1554 drivers/gpu/drm/selftests/test-drm_mm.c n, 1, n 1558 drivers/gpu/drm/selftests/test-drm_mm.c mode->name, n, range_start, range_end); n 1563 drivers/gpu/drm/selftests/test-drm_mm.c for (n = 1; n <= range_size; n <<= 1) { n 1567 drivers/gpu/drm/selftests/test-drm_mm.c range_size/2, n, n 1571 drivers/gpu/drm/selftests/test-drm_mm.c mode->name, range_size/2, n, range_start, range_end); n 1576 drivers/gpu/drm/selftests/test-drm_mm.c for_each_prime_number_from(n, 1, min(range_size, max_prime)) { n 1577 drivers/gpu/drm/selftests/test-drm_mm.c unsigned int nsize = (range_size - n + 1) / 2; n 1584 drivers/gpu/drm/selftests/test-drm_mm.c nsize, n, n 1588 drivers/gpu/drm/selftests/test-drm_mm.c mode->name, nsize, n, range_start, range_end); n 1622 drivers/gpu/drm/selftests/test-drm_mm.c unsigned int *order, n, m, o = 0; n 1646 drivers/gpu/drm/selftests/test-drm_mm.c for (n = 0; n < count; n++) { n 1647 drivers/gpu/drm/selftests/test-drm_mm.c if (!expect_insert(&mm, &nodes[n], n 1648 drivers/gpu/drm/selftests/test-drm_mm.c size, 0, n, n 1650 drivers/gpu/drm/selftests/test-drm_mm.c pr_err("insert failed, size %u step %d\n", size, n); n 1654 drivers/gpu/drm/selftests/test-drm_mm.c if (drm_mm_hole_follows(&nodes[n])) { n 1656 drivers/gpu/drm/selftests/test-drm_mm.c n, nodes[n].start, size); n 1660 drivers/gpu/drm/selftests/test-drm_mm.c if (!assert_one_hole(&mm, 0, size*(count - n - 1))) n 1668 drivers/gpu/drm/selftests/test-drm_mm.c for_each_prime_number_from(n, 1, min(count, max_prime)) { n 1669 drivers/gpu/drm/selftests/test-drm_mm.c for (m = 0; m < n; m++) { n 1675 drivers/gpu/drm/selftests/test-drm_mm.c for (m = 0; m < n; m++) { n 1682 drivers/gpu/drm/selftests/test-drm_mm.c pr_err("insert failed, step %d/%d\n", m, n); n 1688 drivers/gpu/drm/selftests/test-drm_mm.c m, n, node->start); n 1695 drivers/gpu/drm/selftests/test-drm_mm.c m, n, size, last, node_index(node)); n 1704 drivers/gpu/drm/selftests/test-drm_mm.c o += n; n 1736 drivers/gpu/drm/selftests/test-drm_mm.c unsigned int *order, n, m, o = 0; n 1759 drivers/gpu/drm/selftests/test-drm_mm.c for (n = 0; n < count; n++) { n 1760 drivers/gpu/drm/selftests/test-drm_mm.c if (!expect_insert(&mm, &nodes[n], n 1761 drivers/gpu/drm/selftests/test-drm_mm.c size, 0, n, n 1763 drivers/gpu/drm/selftests/test-drm_mm.c pr_err("bottomup insert failed, size %u step %d\n", size, n); n 1767 drivers/gpu/drm/selftests/test-drm_mm.c if (!assert_one_hole(&mm, size*(n + 1), size*count)) n 1775 drivers/gpu/drm/selftests/test-drm_mm.c for_each_prime_number_from(n, 1, min(count, max_prime)) { n 1776 drivers/gpu/drm/selftests/test-drm_mm.c for (m = 0; m < n; m++) { n 1782 drivers/gpu/drm/selftests/test-drm_mm.c for (m = 0; m < n; m++) { n 1789 drivers/gpu/drm/selftests/test-drm_mm.c pr_err("insert failed, step %d/%d\n", m, n); n 1796 drivers/gpu/drm/selftests/test-drm_mm.c m, n, first, node_index(node)); n 1804 drivers/gpu/drm/selftests/test-drm_mm.c o += n; n 1932 drivers/gpu/drm/selftests/test-drm_mm.c unsigned int n; n 1944 
drivers/gpu/drm/selftests/test-drm_mm.c for (n = 1; n <= count; n++) { n 1952 drivers/gpu/drm/selftests/test-drm_mm.c n, 0, n, n 1954 drivers/gpu/drm/selftests/test-drm_mm.c pr_err("insert failed, step %d\n", n); n 1995 drivers/gpu/drm/selftests/test-drm_mm.c for (n = 1; n <= count; n++) { n 2005 drivers/gpu/drm/selftests/test-drm_mm.c node->size = n + count; n 2011 drivers/gpu/drm/selftests/test-drm_mm.c n, err); n 2015 drivers/gpu/drm/selftests/test-drm_mm.c node->start += n + 1; n 2016 drivers/gpu/drm/selftests/test-drm_mm.c rem = misalignment(node, n + count); n 2017 drivers/gpu/drm/selftests/test-drm_mm.c node->start += n + count - rem; n 2021 drivers/gpu/drm/selftests/test-drm_mm.c pr_err("reserve %d failed, err=%d\n", n, err); n 2029 drivers/gpu/drm/selftests/test-drm_mm.c for (n = 1; n <= count; n++) { n 2037 drivers/gpu/drm/selftests/test-drm_mm.c n, n, n, n 2040 drivers/gpu/drm/selftests/test-drm_mm.c mode->name, n); n 2161 drivers/gpu/drm/selftests/test-drm_mm.c unsigned int *order, n; n 2182 drivers/gpu/drm/selftests/test-drm_mm.c for (n = 0; n < total_size; n++) { n 2183 drivers/gpu/drm/selftests/test-drm_mm.c if (!expect_insert(&mm, &nodes[n].node, n 2186 drivers/gpu/drm/selftests/test-drm_mm.c pr_err("insert failed, step %d\n", n); n 2192 drivers/gpu/drm/selftests/test-drm_mm.c for (n = 1; n <= total_size; n <<= 1) { n 2196 drivers/gpu/drm/selftests/test-drm_mm.c n, 1, color++, n 2200 drivers/gpu/drm/selftests/test-drm_mm.c mode->name, n); n 2205 drivers/gpu/drm/selftests/test-drm_mm.c for (n = 1; n < total_size; n <<= 1) { n 2209 drivers/gpu/drm/selftests/test-drm_mm.c total_size/2, n, color++, n 2213 drivers/gpu/drm/selftests/test-drm_mm.c mode->name, total_size/2, n); n 2218 drivers/gpu/drm/selftests/test-drm_mm.c for_each_prime_number_from(n, 1, min(total_size, max_prime)) { n 2219 drivers/gpu/drm/selftests/test-drm_mm.c unsigned int nsize = (total_size - n + 1) / 2; n 2226 drivers/gpu/drm/selftests/test-drm_mm.c nsize, n, color++, n 2230 drivers/gpu/drm/selftests/test-drm_mm.c mode->name, nsize, n); n 2264 drivers/gpu/drm/selftests/test-drm_mm.c unsigned int *order, n; n 2283 drivers/gpu/drm/selftests/test-drm_mm.c for (n = 0; n < total_size; n++) { n 2284 drivers/gpu/drm/selftests/test-drm_mm.c if (!expect_insert(&mm, &nodes[n].node, n 2287 drivers/gpu/drm/selftests/test-drm_mm.c pr_err("insert failed, step %d\n", n); n 2293 drivers/gpu/drm/selftests/test-drm_mm.c for (n = 1; n <= range_size; n <<= 1) { n 2297 drivers/gpu/drm/selftests/test-drm_mm.c n, 1, color++, n 2301 drivers/gpu/drm/selftests/test-drm_mm.c mode->name, n, range_start, range_end); n 2306 drivers/gpu/drm/selftests/test-drm_mm.c for (n = 1; n < range_size; n <<= 1) { n 2310 drivers/gpu/drm/selftests/test-drm_mm.c range_size/2, n, color++, n 2314 drivers/gpu/drm/selftests/test-drm_mm.c mode->name, total_size/2, n, range_start, range_end); n 2319 drivers/gpu/drm/selftests/test-drm_mm.c for_each_prime_number_from(n, 1, min(range_size, max_prime)) { n 2320 drivers/gpu/drm/selftests/test-drm_mm.c unsigned int nsize = (range_size - n + 1) / 2; n 2327 drivers/gpu/drm/selftests/test-drm_mm.c nsize, n, color++, n 2331 drivers/gpu/drm/selftests/test-drm_mm.c mode->name, nsize, n, range_start, range_end); n 153 drivers/gpu/drm/shmobile/shmob_drm_regs.h #define LDBCR_UPC(n) (1 << ((n) + 16)) n 154 drivers/gpu/drm/shmobile/shmob_drm_regs.h #define LDBCR_UPF(n) (1 << ((n) + 8)) n 155 drivers/gpu/drm/shmobile/shmob_drm_regs.h #define LDBCR_UPD(n) (1 << ((n) + 0)) n 156 drivers/gpu/drm/shmobile/shmob_drm_regs.h 
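
The shmob_drm_regs.h LDBnB* macros that follow address one register block per plane by stride, base + n * block_size + offset, the same shape as the V3D_CLE_CTN* macros later in this listing. A tiny sketch with a made-up register map:

#include <stdio.h>

/* hypothetical per-plane register block: 0x20 bytes per plane at 0xb20 */
#define PLANE_BASE(n)		(0xb20u + (n) * 0x20u)
#define PLANE_FORMAT(n)		(PLANE_BASE(n) + 0x00u)
#define PLANE_SIZE(n)		(PLANE_BASE(n) + 0x04u)
#define PLANE_STRIDE(n)		(PLANE_BASE(n) + 0x0cu)

int main(void)
{
	unsigned int n;

	for (n = 0; n < 3; n++)
		printf("plane %u: fmt=0x%03x size=0x%03x stride=0x%03x\n",
		       n, PLANE_FORMAT(n), PLANE_SIZE(n), PLANE_STRIDE(n));
	return 0;
}
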
#define LDBnBSIFR(n) (0xb20 + (n) * 0x20 + 0x00) n 187 drivers/gpu/drm/shmobile/shmob_drm_regs.h #define LDBnBSSZR(n) (0xb20 + (n) * 0x20 + 0x04) n 192 drivers/gpu/drm/shmobile/shmob_drm_regs.h #define LDBnBLOCR(n) (0xb20 + (n) * 0x20 + 0x08) n 197 drivers/gpu/drm/shmobile/shmob_drm_regs.h #define LDBnBSMWR(n) (0xb20 + (n) * 0x20 + 0x0c) n 202 drivers/gpu/drm/shmobile/shmob_drm_regs.h #define LDBnBSAYR(n) (0xb20 + (n) * 0x20 + 0x10) n 211 drivers/gpu/drm/shmobile/shmob_drm_regs.h #define LDBnBSACR(n) (0xb20 + (n) * 0x20 + 0x14) n 220 drivers/gpu/drm/shmobile/shmob_drm_regs.h #define LDBnBSAAR(n) (0xb20 + (n) * 0x20 + 0x18) n 229 drivers/gpu/drm/shmobile/shmob_drm_regs.h #define LDBnBPPCR(n) (0xb20 + (n) * 0x20 + 0x1c) n 238 drivers/gpu/drm/shmobile/shmob_drm_regs.h #define LDBnBBGCL(n) (0xb10 + (n) * 0x04) n 94 drivers/gpu/drm/sti/sti_hdmi.c #define HDMI_IFRAME_CFG_DI_N(x, n) ((x) << ((n-1)*4)) /* n from 1 to 6 */ n 795 drivers/gpu/drm/sti/sti_hdmi.c unsigned int n; n 799 drivers/gpu/drm/sti/sti_hdmi.c n = 4096; n 802 drivers/gpu/drm/sti/sti_hdmi.c n = 6272; n 805 drivers/gpu/drm/sti/sti_hdmi.c n = 6144; n 808 drivers/gpu/drm/sti/sti_hdmi.c n = 6272 * 2; n 811 drivers/gpu/drm/sti/sti_hdmi.c n = 6144 * 2; n 814 drivers/gpu/drm/sti/sti_hdmi.c n = 6272 * 4; n 817 drivers/gpu/drm/sti/sti_hdmi.c n = 6144 * 4; n 821 drivers/gpu/drm/sti/sti_hdmi.c n = (audio_fs * 128) / 1000; n 824 drivers/gpu/drm/sti/sti_hdmi.c return n; n 829 drivers/gpu/drm/sti/sti_hdmi.c int audio_cfg, n; n 839 drivers/gpu/drm/sti/sti_hdmi.c n = sti_hdmi_audio_get_non_coherent_n(params->sample_rate); n 842 drivers/gpu/drm/sti/sti_hdmi.c params->sample_rate, hdmi->mode.clock * 1000, n); n 843 drivers/gpu/drm/sti/sti_hdmi.c hdmi_write(hdmi, n, HDMI_AUDN); n 145 drivers/gpu/drm/stm/dw_mipi_dsi-stm.c int i, o, n, n_min, n_max; n 173 drivers/gpu/drm/stm/dw_mipi_dsi-stm.c n = DIV_ROUND_CLOSEST(i * o * clkout_khz, clkin_khz); n 175 drivers/gpu/drm/stm/dw_mipi_dsi-stm.c if (n < n_min || n > n_max) n 178 drivers/gpu/drm/stm/dw_mipi_dsi-stm.c delta = dsi_pll_get_clkout_khz(clkin_khz, i, n, o) - n 184 drivers/gpu/drm/stm/dw_mipi_dsi-stm.c *ndiv = n; n 45 drivers/gpu/drm/sun4i/sun4i_hdmi.h #define SUN4I_HDMI_AVI_INFOFRAME_REG(n) (0x080 + (n)) n 67 drivers/gpu/drm/sun4i/sun4i_hdmi.h #define SUN4I_HDMI_PAD_CTRL1_REG_EMP(n) (((n) & 7) << 10) n 69 drivers/gpu/drm/sun4i/sun4i_hdmi.h #define SUN4I_HDMI_PAD_CTRL1_REG_AMP(n) (((n) & 7) << 3) n 83 drivers/gpu/drm/sun4i/sun4i_hdmi.h #define SUN4I_HDMI_PLL_CTRL_VCO_GAIN(n) (((n) & 7) << 20) n 84 drivers/gpu/drm/sun4i/sun4i_hdmi.h #define SUN4I_HDMI_PLL_CTRL_S(n) (((n) & 7) << 17) n 85 drivers/gpu/drm/sun4i/sun4i_hdmi.h #define SUN4I_HDMI_PLL_CTRL_CP_S(n) (((n) & 0x1f) << 12) n 86 drivers/gpu/drm/sun4i/sun4i_hdmi.h #define SUN4I_HDMI_PLL_CTRL_CS(n) (((n) & 0xf) << 8) n 87 drivers/gpu/drm/sun4i/sun4i_hdmi.h #define SUN4I_HDMI_PLL_CTRL_DIV(n) (((n) & 0xf) << 4) n 89 drivers/gpu/drm/sun4i/sun4i_hdmi.h #define SUN4I_HDMI_PLL_CTRL_VCO_S(n) ((n) & 0xf) n 92 drivers/gpu/drm/sun4i/sun4i_hdmi.h #define SUN4I_HDMI_PLL_DBG0_TMDS_PARENT(n) (((n) & 1) << 21) n 101 drivers/gpu/drm/sun4i/sun4i_hdmi.h #define SUN4I_HDMI_PKT_CTRL_REG(n) (0x2f0 + (4 * (n))) n 102 drivers/gpu/drm/sun4i/sun4i_hdmi.h #define SUN4I_HDMI_PKT_CTRL_TYPE(n, t) ((t) << (((n) % 4) * 4)) n 133 drivers/gpu/drm/sun4i/sun4i_hdmi.h #define SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES(n) (((n) & 0xf) << 4) n 136 drivers/gpu/drm/sun4i/sun4i_hdmi.h #define SUN4I_HDMI_DDC_FIFO_CTRL_TX_THRES(n) ((n) & 0xf) n 152 drivers/gpu/drm/sun4i/sun4i_hdmi.h #define 
SUN4I_HDMI_DDC_CLK_N(n) ((n) & 0x7) n 31 drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c u8 *m, u8 *n) n 54 drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c if (m && n) { n 56 drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c *n = best_n; n 76 drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c u8 m, n; n 80 drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c n = reg & 0x7; n 82 drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c return (((parent_rate / ddc->pre_div) / 10) >> n) / n 39 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_BASIC_CTL_TRAIL_INV(n) (((n) & 0xf) << 4) n 52 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_BASIC_CTL1_VIDEO_ST_DELAY(n) (((n) & 0x1fff) << 4) n 58 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_BASIC_SIZE0_VBP(n) (((n) & 0xfff) << 16) n 59 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_BASIC_SIZE0_VSA(n) ((n) & 0xfff) n 62 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_BASIC_SIZE1_VT(n) (((n) & 0xfff) << 16) n 63 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_BASIC_SIZE1_VACT(n) ((n) & 0xfff) n 65 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_INST_FUNC_REG(n) (0x020 + (n) * 0x04) n 66 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_INST_FUNC_INST_MODE(n) (((n) & 0xf) << 28) n 67 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_INST_FUNC_ESCAPE_ENTRY(n) (((n) & 0xf) << 24) n 68 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_INST_FUNC_TRANS_PACKET(n) (((n) & 0xf) << 20) n 70 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_INST_FUNC_LANE_DEN(n) ((n) & 0xf) n 74 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_INST_LOOP_NUM_REG(n) (0x044 + (n) * 0x10) n 75 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_INST_LOOP_NUM_N1(n) (((n) & 0xfff) << 16) n 76 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_INST_LOOP_NUM_N0(n) ((n) & 0xfff) n 80 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_INST_JUMP_CFG_REG(n) (0x04c + (n) * 0x04) n 81 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_INST_JUMP_CFG_TO(n) (((n) & 0xf) << 20) n 82 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_INST_JUMP_CFG_POINT(n) (((n) & 0xf) << 16) n 83 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_INST_JUMP_CFG_NUM(n) ((n) & 0xffff) n 91 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_TCON_DRQ_SET(n) ((n) & 0x3ff) n 95 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_PIXEL_CTL0_FORMAT(n) ((n) & 0xf) n 100 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_PIXEL_PH_ECC(n) (((n) & 0xff) << 24) n 101 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_PIXEL_PH_WC(n) (((n) & 0xffff) << 8) n 102 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_PIXEL_PH_VC(n) (((n) & 3) << 6) n 103 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_PIXEL_PH_DT(n) ((n) & 0x3f) n 106 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_PIXEL_PF0_CRC_FORCE(n) ((n) & 0xffff) n 109 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_PIXEL_PF1_CRC_INIT_LINEN(n) (((n) & 0xffff) << 16) n 110 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_PIXEL_PF1_CRC_INIT_LINE0(n) ((n) & 0xffff) n 123 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_BLK_PF(n) (((n) & 0xffff) << 16) n 124 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_BLK_PD(n) ((n) & 0xff) n 143 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_BURST_LINE_SYNC_POINT(n) (((n) & 0xffff) << 16) n 144 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_BURST_LINE_NUM(n) 
((n) & 0xffff) n 147 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_BURST_DRQ_EDGE1(n) (((n) & 0xffff) << 16) n 148 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_BURST_DRQ_EDGE0(n) ((n) & 0xffff) n 155 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_CMD_RX_REG(n) (0x240 + (n) * 0x04) n 159 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c #define SUN6I_DSI_CMD_TX_REG(n) (0x300 + (n) * 0x04) n 68 drivers/gpu/drm/sun4i/sun8i_mixer.h #define SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(n) (0xf << ((n) << 2)) n 69 drivers/gpu/drm/sun4i/sun8i_mixer.h #define SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(n) ((n) << 2) n 38 drivers/gpu/drm/tegra/dsi.h #define DSI_CONTROL_LANES(n) (((n) & 0x3) << 4) n 121 drivers/gpu/drm/tegra/hdmi.c unsigned int n; n 373 drivers/gpu/drm/tegra/hdmi.c int n; n 376 drivers/gpu/drm/tegra/hdmi.c config->n = -1; n 378 drivers/gpu/drm/tegra/hdmi.c for (n = min_n; n <= max_n; n++) { n 384 drivers/gpu/drm/tegra/hdmi.c aval_f = ((int64_t)24000000 << 16) * n; n 391 drivers/gpu/drm/tegra/hdmi.c cts_f = ((int64_t)pix_clock << 16) * n; n 396 drivers/gpu/drm/tegra/hdmi.c delta = abs(n - ideal_n); n 401 drivers/gpu/drm/tegra/hdmi.c config->n = n; n 409 drivers/gpu/drm/tegra/hdmi.c return config->n != -1 ? 0 : -EINVAL; n 552 drivers/gpu/drm/tegra/hdmi.c hdmi->pixel_clock, config.n, config.cts, config.aval); n 557 drivers/gpu/drm/tegra/hdmi.c AUDIO_N_VALUE(config.n - 1); n 560 drivers/gpu/drm/tegra/hdmi.c tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config.n) | ACR_ENABLE, n 445 drivers/gpu/drm/tiny/repaper.c size_t n = line * epd->width / 8; n 447 drivers/gpu/drm/tiny/repaper.c repaper_one_line(epd, line, &image[n], 0, &mask[n], n 557 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c const char *n[] = {"wc", "uc", "cached", " dma32", "huge"}; n 609 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c "%s", n[i]); n 278 drivers/gpu/drm/v3d/v3d_drv.h static inline unsigned long nsecs_to_jiffies_timeout(const u64 n) n 282 drivers/gpu/drm/v3d/v3d_drv.h div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ) n 285 drivers/gpu/drm/v3d/v3d_drv.h return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1); n 281 drivers/gpu/drm/v3d/v3d_regs.h #define V3D_CLE_CTNCS(n) (V3D_CLE_CT0CS + 4 * n) n 284 drivers/gpu/drm/v3d/v3d_regs.h #define V3D_CLE_CTNEA(n) (V3D_CLE_CT0EA + 4 * n) n 287 drivers/gpu/drm/v3d/v3d_regs.h #define V3D_CLE_CTNCA(n) (V3D_CLE_CT0CA + 4 * n) n 290 drivers/gpu/drm/v3d/v3d_regs.h #define V3D_CLE_CTNRA(n) (V3D_CLE_CT0RA + 4 * n) n 310 drivers/gpu/drm/v3d/v3d_regs.h #define V3D_CLE_CTNQBA(n) (V3D_CLE_CT0QBA + 4 * n) n 313 drivers/gpu/drm/v3d/v3d_regs.h #define V3D_CLE_CTNQEA(n) (V3D_CLE_CT0QEA + 4 * n) n 718 drivers/gpu/drm/vc4/vc4_hdmi.c unsigned long n, m; n 725 drivers/gpu/drm/vc4/vc4_hdmi.c &n, &m); n 728 drivers/gpu/drm/vc4/vc4_hdmi.c VC4_SET_FIELD(n, VC4_HD_MAI_SMP_N) | n 740 drivers/gpu/drm/vc4/vc4_hdmi.c u32 n, cts; n 743 drivers/gpu/drm/vc4/vc4_hdmi.c n = 128 * samplerate / 1000; n 744 drivers/gpu/drm/vc4/vc4_hdmi.c tmp = (u64)(mode->clock * 1000) * n; n 750 drivers/gpu/drm/vc4/vc4_hdmi.c VC4_SET_FIELD(n, VC4_HDMI_CRP_CFG_N)); n 72 drivers/gpu/drm/vc4/vc4_regs.h #define V3D_CTNCS(n) (V3D_CT0CS + 4 * n) n 83 drivers/gpu/drm/vc4/vc4_regs.h #define V3D_CTNEA(n) (V3D_CT0EA + 4 * (n)) n 86 drivers/gpu/drm/vc4/vc4_regs.h #define V3D_CTNCA(n) (V3D_CT0CA + 4 * (n)) n 89 drivers/gpu/drm/vc4/vc4_regs.h #define V3D_CTNRA0(n) (V3D_CT00RA0 + 4 * (n)) n 92 drivers/gpu/drm/vc4/vc4_regs.h #define V3D_CTNLC(n) (V3D_CT0LC + 4 * (n)) n 95 drivers/gpu/drm/vc4/vc4_regs.h #define V3D_CTNPC(n) (V3D_CT0PC + 4 * (n)) n 245 
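
The sti_hdmi.c, zx_hdmi.c and vc4_hdmi.c audio snippets nearby all derive the HDMI audio clock-regeneration N value: fixed values for the 32/44.1/48 kHz families and 128 * fs / 1000 as the fallback, with CTS then following from CTS = f_TMDS * N / (128 * fs). A standalone sketch of that arithmetic, assuming the conventional rate-to-N mapping (function names are mine, the table mirrors the sti_hdmi.c switch):

#include <stdio.h>
#include <stdint.h>

static unsigned int hdmi_audio_n(unsigned int fs)
{
	switch (fs) {
	case 32000:
		return 4096;
	case 44100:
		return 6272;
	case 48000:
		return 6144;
	case 88200:
		return 6272 * 2;
	case 96000:
		return 6144 * 2;
	case 176400:
		return 6272 * 4;
	case 192000:
		return 6144 * 4;
	default:
		return 128 * fs / 1000;	/* fallback used by the drivers above */
	}
}

/* CTS = f_TMDS * N / (128 * fs), the relation the vc4 snippet computes */
static uint64_t hdmi_audio_cts(uint64_t tmds_hz, unsigned int fs, unsigned int n)
{
	return tmds_hz * n / (128ULL * fs);
}

int main(void)
{
	unsigned int fs = 48000;
	unsigned int n = hdmi_audio_n(fs);
	uint64_t tmds = 148500000ULL;	/* 1080p60 pixel clock */

	printf("fs=%u N=%u CTS=%llu\n", fs, n,
	       (unsigned long long)hdmi_audio_cts(tmds, fs, n));
	return 0;
}
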
drivers/gpu/drm/vmwgfx/vmwgfx_blit.c void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n) n 247 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c memcpy(dest, src, n); n 288 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c size_t n) n 292 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c if (WARN_ON_ONCE(round_down(n, diff->cpp) != n)) n 296 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c csize = vmw_find_first_diff(dest, src, n, diff->cpp); n 297 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c if (csize < n) { n 308 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c n -= csize; n 309 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c csize = vmw_find_last_diff(dest, src, n, diff->cpp); n 316 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c diff->line_offset += n; n 1356 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h size_t n); n 1376 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h size_t n); n 1378 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n); n 382 drivers/gpu/drm/zte/zx_hdmi.c unsigned int n; n 385 drivers/gpu/drm/zte/zx_hdmi.c n = 6272 * (fs / 44100); n 387 drivers/gpu/drm/zte/zx_hdmi.c n = fs * 128 / 1000; n 389 drivers/gpu/drm/zte/zx_hdmi.c return n; n 400 drivers/gpu/drm/zte/zx_hdmi.c int n; n 428 drivers/gpu/drm/zte/zx_hdmi.c n = zx_hdmi_audio_get_n(params->sample_rate); n 429 drivers/gpu/drm/zte/zx_hdmi.c hdmi_writeb(hdmi, N_SVAL1, n & 0xff); n 430 drivers/gpu/drm/zte/zx_hdmi.c hdmi_writeb(hdmi, N_SVAL2, (n >> 8) & 0xff); n 431 drivers/gpu/drm/zte/zx_hdmi.c hdmi_writeb(hdmi, N_SVAL3, (n >> 16) & 0xf); n 316 drivers/gpu/host1x/cdma.c struct host1x_job *job, *n; n 326 drivers/gpu/host1x/cdma.c list_for_each_entry_safe(job, n, &cdma->sync_queue, list) { n 44 drivers/gpu/ipu-v3/ipu-csi.c #define CSI_CPD_RC(n) (0x002c + ((n)*4)) n 45 drivers/gpu/ipu-v3/ipu-csi.c #define CSI_CPD_RS(n) (0x004c + ((n)*4)) n 46 drivers/gpu/ipu-v3/ipu-csi.c #define CSI_CPD_GRC(n) (0x005c + ((n)*4)) n 47 drivers/gpu/ipu-v3/ipu-csi.c #define CSI_CPD_GRS(n) (0x007c + ((n)*4)) n 48 drivers/gpu/ipu-v3/ipu-csi.c #define CSI_CPD_GBC(n) (0x008c + ((n)*4)) n 49 drivers/gpu/ipu-v3/ipu-csi.c #define CSI_CPD_GBS(n) (0x00Ac + ((n)*4)) n 50 drivers/gpu/ipu-v3/ipu-csi.c #define CSI_CPD_BC(n) (0x00Bc + ((n)*4)) n 51 drivers/gpu/ipu-v3/ipu-csi.c #define CSI_CPD_BS(n) (0x00Dc + ((n)*4)) n 18 drivers/gpu/ipu-v3/ipu-dc.c #define DC_MAP_CONF_PTR(n) (0x108 + ((n) & ~0x1) * 2) n 19 drivers/gpu/ipu-v3/ipu-dc.c #define DC_MAP_CONF_VAL(n) (0x144 + ((n) & ~0x1) * 2) n 66 drivers/gpu/ipu-v3/ipu-prv.h #define IPU_INT_CTRL(n) IPU_CM_REG(0x003C + 4 * (n)) n 67 drivers/gpu/ipu-v3/ipu-prv.h #define IPU_INT_STAT(n) IPU_CM_REG(0x0200 + 4 * (n)) n 990 drivers/gpu/vga/vgaarb.c int n; n 994 drivers/gpu/vga/vgaarb.c n = sscanf(buf, "PCI:%x:%x:%x.%x", domain, bus, &slot, &func); n 995 drivers/gpu/vga/vgaarb.c if (n != 4) n 621 drivers/greybus/interface.c struct attribute *attr, int n) n 636 drivers/greybus/interface.c struct attribute *attr, int n) n 650 drivers/greybus/interface.c struct attribute *attr, int n) n 260 drivers/hid/hid-bigbenff.c int n; n 268 drivers/hid/hid-bigbenff.c for (n = 0; n < NUM_LEDS; n++) { n 269 drivers/hid/hid-bigbenff.c if (led == bigben->leds[n]) { n 271 drivers/hid/hid-bigbenff.c work = (bigben->led_state & BIT(n)); n 272 drivers/hid/hid-bigbenff.c bigben->led_state &= ~BIT(n); n 274 drivers/hid/hid-bigbenff.c work = !(bigben->led_state & BIT(n)); n 275 drivers/hid/hid-bigbenff.c bigben->led_state |= BIT(n); n 292 drivers/hid/hid-bigbenff.c int n; n 299 drivers/hid/hid-bigbenff.c for (n = 0; n < NUM_LEDS; n++) { n 300 drivers/hid/hid-bigbenff.c 
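
The hid-core.c helpers a little further down this listing (__extract and snto32) pull an n-bit little-endian field out of a report buffer and sign-extend it when the logical range is signed. A simplified bit-by-bit version of the same idea (extract_bits is a hypothetical stand-in for hid_field_extract, and n < 32 is assumed throughout):

#include <stdio.h>
#include <stdint.h>

static uint32_t extract_bits(const uint8_t *report, unsigned int offset,
			     unsigned int n)
{
	uint32_t value = 0;
	unsigned int i;

	/* bit-by-bit form for clarity; the kernel copies whole bytes */
	for (i = 0; i < n; i++)
		value |= (uint32_t)((report[(offset + i) / 8] >> ((offset + i) % 8)) & 1) << i;

	return value;
}

static int32_t snto32(uint32_t value, unsigned int n)
{
	/* if the sign bit (bit n-1) is set, fill the upper bits with ones */
	return value & (1U << (n - 1)) ? (int32_t)(value | (~0U << n)) : (int32_t)value;
}

int main(void)
{
	uint8_t report[] = { 0x34, 0xf2 };		/* arbitrary example data */
	uint32_t raw = extract_bits(report, 4, 12);	/* 12-bit field at bit 4 */

	printf("raw=0x%03x signed=%d\n", raw, snto32(raw, 12));	/* 0xf23 -> -221 */
	return 0;
}
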
if (led == bigben->leds[n]) n 301 drivers/hid/hid-bigbenff.c return (bigben->led_state & BIT(n)) ? LED_ON : LED_OFF; n 325 drivers/hid/hid-bigbenff.c int n, error; n 362 drivers/hid/hid-bigbenff.c for (n = 0; n < NUM_LEDS; n++) { n 375 drivers/hid/hid-bigbenff.c dev_name(&hid->dev), n + 1 n 378 drivers/hid/hid-bigbenff.c led->brightness = (n == 0) ? LED_ON : LED_OFF; n 382 drivers/hid/hid-bigbenff.c bigben->leds[n] = led; n 204 drivers/hid/hid-core.c int n; n 206 drivers/hid/hid-core.c for (n = parser->collection_stack_ptr - 1; n >= 0; n--) { n 207 drivers/hid/hid-core.c unsigned index = parser->collection_stack[n]; n 476 drivers/hid/hid-core.c unsigned n; n 550 drivers/hid/hid-core.c for (n = parser->local.usage_minimum; n <= data; n++) n 551 drivers/hid/hid-core.c if (hid_add_usage(parser, n, item->size)) { n 657 drivers/hid/hid-core.c unsigned n; n 659 drivers/hid/hid-core.c for (n = 0; n < report->maxfield; n++) n 660 drivers/hid/hid-core.c kfree(report->field[n]); n 1301 drivers/hid/hid-core.c static s32 snto32(__u32 value, unsigned n) n 1303 drivers/hid/hid-core.c switch (n) { n 1308 drivers/hid/hid-core.c return value & (1 << (n - 1)) ? value | (~0U << n) : value; n 1311 drivers/hid/hid-core.c s32 hid_snto32(__u32 value, unsigned n) n 1313 drivers/hid/hid-core.c return snto32(value, n); n 1321 drivers/hid/hid-core.c static u32 s32ton(__s32 value, unsigned n) n 1323 drivers/hid/hid-core.c s32 a = value >> (n - 1); n 1325 drivers/hid/hid-core.c return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1; n 1326 drivers/hid/hid-core.c return value & ((1 << n) - 1); n 1341 drivers/hid/hid-core.c static u32 __extract(u8 *report, unsigned offset, int n) n 1348 drivers/hid/hid-core.c u32 mask = n < 32 ? (1U << n) - 1 : ~0U; n 1350 drivers/hid/hid-core.c while (n > 0) { n 1352 drivers/hid/hid-core.c n -= bits_to_copy; n 1363 drivers/hid/hid-core.c unsigned offset, unsigned n) n 1365 drivers/hid/hid-core.c if (n > 32) { n 1367 drivers/hid/hid-core.c __func__, n, current->comm); n 1368 drivers/hid/hid-core.c n = 32; n 1371 drivers/hid/hid-core.c return __extract(report, offset, n); n 1384 drivers/hid/hid-core.c static void __implement(u8 *report, unsigned offset, int n, u32 value) n 1390 drivers/hid/hid-core.c while (n - bits_to_set >= 0) { n 1394 drivers/hid/hid-core.c n -= bits_to_set; n 1401 drivers/hid/hid-core.c if (n) { n 1402 drivers/hid/hid-core.c u8 bit_mask = ((1U << n) - 1); n 1409 drivers/hid/hid-core.c unsigned offset, unsigned n, u32 value) n 1411 drivers/hid/hid-core.c if (unlikely(n > 32)) { n 1413 drivers/hid/hid-core.c __func__, n, current->comm); n 1414 drivers/hid/hid-core.c n = 32; n 1415 drivers/hid/hid-core.c } else if (n < 32) { n 1416 drivers/hid/hid-core.c u32 m = (1U << n) - 1; n 1421 drivers/hid/hid-core.c __func__, value, n, current->comm); n 1427 drivers/hid/hid-core.c __implement(report, offset, n, value); n 1434 drivers/hid/hid-core.c static int search(__s32 *array, __s32 value, unsigned n) n 1436 drivers/hid/hid-core.c while (n--) { n 1526 drivers/hid/hid-core.c unsigned n; n 1538 drivers/hid/hid-core.c for (n = 0; n < count; n++) { n 1540 drivers/hid/hid-core.c value[n] = min < 0 ? 
n 1541 drivers/hid/hid-core.c snto32(hid_field_extract(hid, data, offset + n * size, n 1543 drivers/hid/hid-core.c hid_field_extract(hid, data, offset + n * size, size); n 1547 drivers/hid/hid-core.c value[n] >= min && value[n] <= max && n 1548 drivers/hid/hid-core.c value[n] - min < field->maxusage && n 1549 drivers/hid/hid-core.c field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1) n 1553 drivers/hid/hid-core.c for (n = 0; n < count; n++) { n 1556 drivers/hid/hid-core.c hid_process_event(hid, field, &field->usage[n], value[n], interrupt); n 1560 drivers/hid/hid-core.c if (field->value[n] >= min && field->value[n] <= max n 1561 drivers/hid/hid-core.c && field->value[n] - min < field->maxusage n 1562 drivers/hid/hid-core.c && field->usage[field->value[n] - min].hid n 1563 drivers/hid/hid-core.c && search(value, field->value[n], count)) n 1564 drivers/hid/hid-core.c hid_process_event(hid, field, &field->usage[field->value[n] - min], 0, interrupt); n 1566 drivers/hid/hid-core.c if (value[n] >= min && value[n] <= max n 1567 drivers/hid/hid-core.c && value[n] - min < field->maxusage n 1568 drivers/hid/hid-core.c && field->usage[value[n] - min].hid n 1569 drivers/hid/hid-core.c && search(field->value, value[n], count)) n 1570 drivers/hid/hid-core.c hid_process_event(hid, field, &field->usage[value[n] - min], 1, interrupt); n 1588 drivers/hid/hid-core.c unsigned n; n 1590 drivers/hid/hid-core.c for (n = 0; n < count; n++) { n 1592 drivers/hid/hid-core.c implement(hid, data, offset + n * size, size, n 1593 drivers/hid/hid-core.c s32ton(field->value[n], size)); n 1595 drivers/hid/hid-core.c implement(hid, data, offset + n * size, size, n 1596 drivers/hid/hid-core.c field->value[n]); n 1607 drivers/hid/hid-core.c unsigned n; n 1613 drivers/hid/hid-core.c for (n = 0; n < report->maxfield; n++) n 1614 drivers/hid/hid-core.c hid_output_field(report->device, report->field[n], data); n 1671 drivers/hid/hid-core.c unsigned int n = 0; /* Normally report number is 0 */ n 1675 drivers/hid/hid-core.c n = *data; n 1677 drivers/hid/hid-core.c report = report_enum->report_id_hash[n]; n 1679 drivers/hid/hid-core.c dbg_hid("undefined report_id %u received\n", n); n 2161 drivers/hid/hid-core.c struct hid_dynid *dynid, *n; n 2164 drivers/hid/hid-core.c list_for_each_entry_safe(dynid, n, &hdrv->dyn_list, list) { n 518 drivers/hid/hid-debug.c static void tab(int n, struct seq_file *f) { n 519 drivers/hid/hid-debug.c seq_printf(f, "%*s", n, ""); n 522 drivers/hid/hid-debug.c void hid_dump_field(struct hid_field *field, int n, struct seq_file *f) { n 526 drivers/hid/hid-debug.c tab(n, f); n 531 drivers/hid/hid-debug.c tab(n, f); n 536 drivers/hid/hid-debug.c tab(n, f); n 540 drivers/hid/hid-debug.c tab(n, f); seq_printf(f, "Usage(%d)\n", field->maxusage); n 542 drivers/hid/hid-debug.c tab(n+2, f); hid_resolv_usage(field->usage[j].hid, f); seq_printf(f, "\n"); n 545 drivers/hid/hid-debug.c tab(n, f); seq_printf(f, "Logical Minimum(%d)\n", field->logical_minimum); n 546 drivers/hid/hid-debug.c tab(n, f); seq_printf(f, "Logical Maximum(%d)\n", field->logical_maximum); n 549 drivers/hid/hid-debug.c tab(n, f); seq_printf(f, "Physical Minimum(%d)\n", field->physical_minimum); n 550 drivers/hid/hid-debug.c tab(n, f); seq_printf(f, "Physical Maximum(%d)\n", field->physical_maximum); n 553 drivers/hid/hid-debug.c tab(n, f); seq_printf(f, "Unit Exponent(%d)\n", field->unit_exponent); n 574 drivers/hid/hid-debug.c tab(n, f); seq_printf(f, "Unit(Invalid)\n"); n 579 drivers/hid/hid-debug.c tab(n, f); seq_printf(f, "Unit(%s : ", 
systems[sys]); n 601 drivers/hid/hid-debug.c tab(n, f); seq_printf(f, "Report Size(%u)\n", field->report_size); n 602 drivers/hid/hid-debug.c tab(n, f); seq_printf(f, "Report Count(%u)\n", field->report_count); n 603 drivers/hid/hid-debug.c tab(n, f); seq_printf(f, "Report Offset(%u)\n", field->report_offset); n 605 drivers/hid/hid-debug.c tab(n, f); seq_printf(f, "Flags( "); n 3222 drivers/hid/hid-logitech-hidpp.c unsigned long n; n 3272 drivers/hid/hid-logitech-hidpp.c n = atomic_inc_return(&battery_no) - 1; n 3276 drivers/hid/hid-logitech-hidpp.c sprintf(battery->name, "hidpp_battery_%ld", n); n 594 drivers/hid/hid-multitouch.c int r, n; n 614 drivers/hid/hid-multitouch.c for (n = 0; n < field->report_count; n++) { n 615 drivers/hid/hid-multitouch.c if (field->usage[n].hid == HID_DG_CONTACTID) n 1154 drivers/hid/hid-multitouch.c int r, n; n 1205 drivers/hid/hid-multitouch.c for (n = 0; n < count; n++) n 1207 drivers/hid/hid-multitouch.c &field->usage[n], field->value[n], n 227 drivers/hid/hid-picolcd_fb.c int chip, tile, n; n 246 drivers/hid/hid-picolcd_fb.c n = 0; n 253 drivers/hid/hid-picolcd_fb.c n += 2; n 254 drivers/hid/hid-picolcd_fb.c if (n >= HID_OUTPUT_FIFO_SIZE / 2) { n 263 drivers/hid/hid-picolcd_fb.c n = 0; n 273 drivers/hid/hid-picolcd_fb.c if (n) { n 1147 drivers/hid/hid-quirks.c int n = 0, m; n 1153 drivers/hid/hid-quirks.c for (; n < count && quirks_param[n]; n++) { n 1155 drivers/hid/hid-quirks.c m = sscanf(quirks_param[n], "0x%hx:0x%hx:0x%x", n 1164 drivers/hid/hid-quirks.c quirks_param[n]); n 946 drivers/hid/hid-sony.c int n, m, offset, num_touch_data, max_touch_data; n 1027 drivers/hid/hid-sony.c for (n = 0; n < 6; n++) { n 1030 drivers/hid/hid-sony.c struct ds4_calibration_data *calib = &sc->ds4_calib_data[n]; n 1103 drivers/hid/hid-sony.c for (n = 0; n < 2; n++) { n 1111 drivers/hid/hid-sony.c input_mt_slot(sc->touchpad, n); n 1128 drivers/hid/hid-sony.c int n, offset, relx, rely; n 1158 drivers/hid/hid-sony.c for (n = 0; n < 2; n++) { n 1165 drivers/hid/hid-sony.c input_mt_slot(sc->touchpad, n); n 1185 drivers/hid/hid-sony.c if ((n == 0) || ((n == 1) && (active & 0x01))) { n 1859 drivers/hid/hid-sony.c int n; n 1877 drivers/hid/hid-sony.c for (n = 0; n < drv_data->led_count; n++) { n 1878 drivers/hid/hid-sony.c if (led == drv_data->leds[n] && (force_update || n 1879 drivers/hid/hid-sony.c (value != drv_data->led_state[n] || n 1880 drivers/hid/hid-sony.c drv_data->led_delay_on[n] || n 1881 drivers/hid/hid-sony.c drv_data->led_delay_off[n]))) { n 1883 drivers/hid/hid-sony.c drv_data->led_state[n] = value; n 1886 drivers/hid/hid-sony.c drv_data->led_delay_on[n] = 0; n 1887 drivers/hid/hid-sony.c drv_data->led_delay_off[n] = 0; n 1901 drivers/hid/hid-sony.c int n; n 1909 drivers/hid/hid-sony.c for (n = 0; n < drv_data->led_count; n++) { n 1910 drivers/hid/hid-sony.c if (led == drv_data->leds[n]) n 1911 drivers/hid/hid-sony.c return drv_data->led_state[n]; n 1923 drivers/hid/hid-sony.c int n; n 1944 drivers/hid/hid-sony.c for (n = 0; n < drv_data->led_count; n++) { n 1945 drivers/hid/hid-sony.c if (led == drv_data->leds[n]) n 1950 drivers/hid/hid-sony.c if (n >= drv_data->led_count) n 1954 drivers/hid/hid-sony.c if (new_on != drv_data->led_delay_on[n] || n 1955 drivers/hid/hid-sony.c new_off != drv_data->led_delay_off[n]) { n 1956 drivers/hid/hid-sony.c drv_data->led_delay_on[n] = new_on; n 1957 drivers/hid/hid-sony.c drv_data->led_delay_off[n] = new_off; n 1967 drivers/hid/hid-sony.c int n, ret = 0; n 2031 drivers/hid/hid-sony.c for (n = 0; n < sc->led_count; n++) { n 
2034 drivers/hid/hid-sony.c name_sz = strlen(dev_name(&hdev->dev)) + strlen(ds4_name_str[n]) + 2; n 2038 drivers/hid/hid-sony.c hid_err(hdev, "Couldn't allocate memory for LED %d\n", n); n 2045 drivers/hid/hid-sony.c ds4_name_str[n]); n 2047 drivers/hid/hid-sony.c snprintf(name, name_sz, name_fmt, dev_name(&hdev->dev), n + 1); n 2049 drivers/hid/hid-sony.c led->brightness = sc->led_state[n]; n 2050 drivers/hid/hid-sony.c led->max_brightness = max_brightness[n]; n 2055 drivers/hid/hid-sony.c if (use_hw_blink[n]) n 2058 drivers/hid/hid-sony.c sc->leds[n] = led; n 2062 drivers/hid/hid-sony.c hid_err(hdev, "Failed to register LED %d\n", n); n 2086 drivers/hid/hid-sony.c int n; n 2114 drivers/hid/hid-sony.c for (n = 0; n < 4; n++) { n 2115 drivers/hid/hid-sony.c if (sc->led_delay_on[n] || sc->led_delay_off[n]) { n 2116 drivers/hid/hid-sony.c report->led[3 - n].duty_off = sc->led_delay_off[n]; n 2117 drivers/hid/hid-sony.c report->led[3 - n].duty_on = sc->led_delay_on[n]; n 2460 drivers/hid/hid-sony.c int n, ret; n 2525 drivers/hid/hid-sony.c for (n = 0; n < 6; n++) n 2526 drivers/hid/hid-sony.c sc->mac_address[5-n] = buf[4+n]; n 199 drivers/hid/i2c-hid/i2c-hid-core.c int n; n 201 drivers/hid/i2c-hid/i2c-hid-core.c for (n = 0; i2c_hid_quirks[n].idVendor; n++) n 202 drivers/hid/i2c-hid/i2c-hid-core.c if (i2c_hid_quirks[n].idVendor == idVendor && n 203 drivers/hid/i2c-hid/i2c-hid-core.c (i2c_hid_quirks[n].idProduct == (__u16)HID_ANY_ID || n 204 drivers/hid/i2c-hid/i2c-hid-core.c i2c_hid_quirks[n].idProduct == idProduct)) n 205 drivers/hid/i2c-hid/i2c-hid-core.c quirks = i2c_hid_quirks[n].quirks; n 739 drivers/hid/intel-ish-hid/ishtp/bus.c struct ishtp_cl_device *cl_device, *n; n 774 drivers/hid/intel-ish-hid/ishtp/bus.c list_for_each_entry_safe(cl_device, n, &ishtp_dev->device_list, n 990 drivers/hid/usbhid/hid-core.c int ret, n; n 1025 drivers/hid/usbhid/hid-core.c for (n = 0; n < num_descriptors; n++) n 1026 drivers/hid/usbhid/hid-core.c if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT) n 1027 drivers/hid/usbhid/hid-core.c rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength); n 1068 drivers/hid/usbhid/hid-core.c unsigned int n, insize = 0; n 1093 drivers/hid/usbhid/hid-core.c for (n = 0; n < interface->desc.bNumEndpoints; n++) { n 1098 drivers/hid/usbhid/hid-core.c endpoint = &interface->endpoint[n].desc; n 1326 drivers/hid/usbhid/hid-core.c unsigned int n, has_in = 0; n 1333 drivers/hid/usbhid/hid-core.c for (n = 0; n < interface->desc.bNumEndpoints; n++) n 1334 drivers/hid/usbhid/hid-core.c if (usb_endpoint_is_int_in(&interface->endpoint[n].desc)) n 491 drivers/hid/usbhid/hid-pidff.c static void pidff_playback_pid(struct pidff_device *pidff, int pid_id, int n) n 495 drivers/hid/usbhid/hid-pidff.c if (n == 0) { n 501 drivers/hid/usbhid/hid-pidff.c pidff->effect_operation[PID_LOOP_COUNT].value[0] = n; n 210 drivers/hid/wacom.h static inline __u32 wacom_s32tou(s32 value, __u8 n) n 212 drivers/hid/wacom.h switch (n) { n 217 drivers/hid/wacom.h return value & (1 << (n - 1)) ? 
value & (~(~0U << n)) : value; n 303 drivers/hid/wacom_sys.c u32 n; n 315 drivers/hid/wacom_sys.c n = hid_report_len(field->report); n 321 drivers/hid/wacom_sys.c data, n, WAC_CMD_RETRIES); n 322 drivers/hid/wacom_sys.c if (ret == n && features->type == HID_GENERIC) { n 324 drivers/hid/wacom_sys.c HID_FEATURE_REPORT, data, n, 0); n 376 drivers/hid/wacom_sys.c n = hid_report_len(field->report); n 382 drivers/hid/wacom_sys.c data, n, WAC_CMD_RETRIES); n 383 drivers/hid/wacom_sys.c if (ret == n) { n 385 drivers/hid/wacom_sys.c data, n, 0); n 1736 drivers/hid/wacom_sys.c unsigned long n; n 1744 drivers/hid/wacom_sys.c n = atomic_inc_return(&battery_no) - 1; n 1749 drivers/hid/wacom_sys.c sprintf(battery->bat_name, "wacom_battery_%ld", n); n 40 drivers/hid/wacom_wac.c static int wacom_numbered_button_to_key(int n); n 2740 drivers/hid/wacom_wac.c unsigned count, n; n 2748 drivers/hid/wacom_wac.c for (n = 0 ; n < count; n++) { n 2749 drivers/hid/wacom_wac.c if (field->usage[n].collection_index == collection_index) n 2750 drivers/hid/wacom_wac.c wacom_wac_event(hdev, field, &field->usage[n], n 2751 drivers/hid/wacom_wac.c field->value[n]); n 3871 drivers/hid/wacom_wac.c static int wacom_numbered_button_to_key(int n) n 3873 drivers/hid/wacom_wac.c if (n < 10) n 3874 drivers/hid/wacom_wac.c return BTN_0 + n; n 3875 drivers/hid/wacom_wac.c else if (n < 16) n 3876 drivers/hid/wacom_wac.c return BTN_A + (n-10); n 3877 drivers/hid/wacom_wac.c else if (n < 18) n 3878 drivers/hid/wacom_wac.c return BTN_BASE + (n-16); n 783 drivers/hv/vmbus_drv.c struct vmbus_dynid *dynid, *n; n 786 drivers/hv/vmbus_drv.c list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) { n 828 drivers/hv/vmbus_drv.c struct vmbus_dynid *dynid, *n; n 838 drivers/hv/vmbus_drv.c list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) { n 264 drivers/hwmon/abx500.c struct attribute *attr, int n) n 270 drivers/hwmon/abx500.c return data->ops.is_visible(attr, n); n 187 drivers/hwmon/adm1026.c #define INS_TO_REG(n, val) \ n 188 drivers/hwmon/adm1026.c SCALE(clamp_val(val, 0, 255 * adm1026_scaling[n] / 192), \ n 189 drivers/hwmon/adm1026.c adm1026_scaling[n], 192) n 190 drivers/hwmon/adm1026.c #define INS_FROM_REG(n, val) (SCALE(val, 192, adm1026_scaling[n])) n 81 drivers/hwmon/adm9240.c static inline unsigned int IN_FROM_REG(u8 reg, int n) n 83 drivers/hwmon/adm9240.c return SCALE(reg, nom_mv[n], 192); n 86 drivers/hwmon/adm9240.c static inline u8 IN_TO_REG(unsigned long val, int n) n 88 drivers/hwmon/adm9240.c val = clamp_val(val, 0, nom_mv[n] * 255 / 192); n 89 drivers/hwmon/adm9240.c return SCALE(val, 192, nom_mv[n]); n 799 drivers/hwmon/asc7621.c #define PREAD(name, n, pri, rm, rl, m, s, r) \ n 800 drivers/hwmon/asc7621.c {.sda = SENSOR_ATTR(name, S_IRUGO, show_##r, NULL, n), \ n 804 drivers/hwmon/asc7621.c #define PWRITE(name, n, pri, rm, rl, m, s, r) \ n 805 drivers/hwmon/asc7621.c {.sda = SENSOR_ATTR(name, S_IRUGO | S_IWUSR, show_##r, store_##r, n), \ n 813 drivers/hwmon/asc7621.c #define PWRITEM(name, n, pri, rm, rl, m, s, r) \ n 814 drivers/hwmon/asc7621.c {.sda = SENSOR_ATTR(name, S_IRUGO | S_IWUSR, show_##r, store_##r, n), \ n 296 drivers/hwmon/fam15h_power.c int n = FAM15H_MIN_NUM_ATTRS; n 303 drivers/hwmon/fam15h_power.c n += 1; n 307 drivers/hwmon/fam15h_power.c n += 2; n 309 drivers/hwmon/fam15h_power.c fam15h_power_attrs = devm_kcalloc(&pdev->dev, n, n 316 drivers/hwmon/fam15h_power.c n = 0; n 317 drivers/hwmon/fam15h_power.c fam15h_power_attrs[n++] = &dev_attr_power1_crit.attr; n 321 drivers/hwmon/fam15h_power.c 
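The adm9240/gl520sm/smsc47m192 entries in this listing convert between a raw 8-bit voltage register and millivolts, with register full scale 192 mapped to a per-channel nominal voltage and the input clamped before writing. A rough stand-alone sketch of that scaling, assuming a local round-to-nearest scale() helper rather than the drivers' exact SCALE() macro, is:

#include <stdio.h>

/* x * num / den with round-to-nearest; a local stand-in, not the
 * kernel macro verbatim. */
static unsigned long scale(unsigned long x, unsigned long num,
			   unsigned long den)
{
	return (x * num + den / 2) / den;
}

/* register -> millivolts: register 192 corresponds to nom_mv */
static unsigned int in_from_reg(unsigned char reg, unsigned int nom_mv)
{
	return scale(reg, nom_mv, 192);
}

/* millivolts -> register, clamped to the representable range */
static unsigned char in_to_reg(unsigned long mv, unsigned int nom_mv)
{
	unsigned long max_mv = (unsigned long)nom_mv * 255 / 192;

	if (mv > max_mv)		/* like clamp_val() in the drivers */
		mv = max_mv;
	return scale(mv, 192, nom_mv);
}

int main(void)
{
	unsigned int nom_mv = 3300;	/* nominal 3.3 V channel */
	unsigned char reg = in_to_reg(3300, nom_mv);

	printf("reg=%u -> %u mV\n", reg, in_from_reg(reg, nom_mv));
	return 0;
}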
fam15h_power_attrs[n++] = &dev_attr_power1_input.attr; n 324 drivers/hwmon/fam15h_power.c fam15h_power_attrs[n++] = &dev_attr_power1_average.attr; n 325 drivers/hwmon/fam15h_power.c fam15h_power_attrs[n++] = &dev_attr_power1_average_interval.attr; n 208 drivers/hwmon/gl520sm.c int n = to_sensor_dev_attr(attr)->index; n 210 drivers/hwmon/gl520sm.c u8 r = data->in_input[n]; n 212 drivers/hwmon/gl520sm.c if (n == 0) n 221 drivers/hwmon/gl520sm.c int n = to_sensor_dev_attr(attr)->index; n 223 drivers/hwmon/gl520sm.c u8 r = data->in_min[n]; n 225 drivers/hwmon/gl520sm.c if (n == 0) n 234 drivers/hwmon/gl520sm.c int n = to_sensor_dev_attr(attr)->index; n 236 drivers/hwmon/gl520sm.c u8 r = data->in_max[n]; n 238 drivers/hwmon/gl520sm.c if (n == 0) n 249 drivers/hwmon/gl520sm.c int n = to_sensor_dev_attr(attr)->index; n 260 drivers/hwmon/gl520sm.c if (n == 0) n 265 drivers/hwmon/gl520sm.c data->in_min[n] = r; n 267 drivers/hwmon/gl520sm.c if (n < 4) n 268 drivers/hwmon/gl520sm.c gl520_write_value(client, GL520_REG_IN_MIN[n], n 269 drivers/hwmon/gl520sm.c (gl520_read_value(client, GL520_REG_IN_MIN[n]) n 272 drivers/hwmon/gl520sm.c gl520_write_value(client, GL520_REG_IN_MIN[n], r); n 283 drivers/hwmon/gl520sm.c int n = to_sensor_dev_attr(attr)->index; n 292 drivers/hwmon/gl520sm.c if (n == 0) n 299 drivers/hwmon/gl520sm.c data->in_max[n] = r; n 301 drivers/hwmon/gl520sm.c if (n < 4) n 302 drivers/hwmon/gl520sm.c gl520_write_value(client, GL520_REG_IN_MAX[n], n 303 drivers/hwmon/gl520sm.c (gl520_read_value(client, GL520_REG_IN_MAX[n]) n 306 drivers/hwmon/gl520sm.c gl520_write_value(client, GL520_REG_IN_MAX[n], r); n 341 drivers/hwmon/gl520sm.c int n = to_sensor_dev_attr(attr)->index; n 344 drivers/hwmon/gl520sm.c return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_input[n], n 345 drivers/hwmon/gl520sm.c data->fan_div[n])); n 351 drivers/hwmon/gl520sm.c int n = to_sensor_dev_attr(attr)->index; n 354 drivers/hwmon/gl520sm.c return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[n], n 355 drivers/hwmon/gl520sm.c data->fan_div[n])); n 361 drivers/hwmon/gl520sm.c int n = to_sensor_dev_attr(attr)->index; n 364 drivers/hwmon/gl520sm.c return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[n])); n 380 drivers/hwmon/gl520sm.c int n = to_sensor_dev_attr(attr)->index; n 390 drivers/hwmon/gl520sm.c r = FAN_TO_REG(v, data->fan_div[n]); n 391 drivers/hwmon/gl520sm.c data->fan_min[n] = r; n 393 drivers/hwmon/gl520sm.c if (n == 0) n 403 drivers/hwmon/gl520sm.c if (data->fan_min[n] == 0) n 404 drivers/hwmon/gl520sm.c data->alarm_mask &= (n == 0) ? ~0x20 : ~0x40; n 406 drivers/hwmon/gl520sm.c data->alarm_mask |= (n == 0) ? 
0x20 : 0x40; n 420 drivers/hwmon/gl520sm.c int n = to_sensor_dev_attr(attr)->index; n 449 drivers/hwmon/gl520sm.c data->fan_div[n] = r; n 451 drivers/hwmon/gl520sm.c if (n == 0) n 504 drivers/hwmon/gl520sm.c int n = to_sensor_dev_attr(attr)->index; n 507 drivers/hwmon/gl520sm.c return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_input[n])); n 513 drivers/hwmon/gl520sm.c int n = to_sensor_dev_attr(attr)->index; n 516 drivers/hwmon/gl520sm.c return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[n])); n 522 drivers/hwmon/gl520sm.c int n = to_sensor_dev_attr(attr)->index; n 525 drivers/hwmon/gl520sm.c return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max_hyst[n])); n 534 drivers/hwmon/gl520sm.c int n = to_sensor_dev_attr(attr)->index; n 543 drivers/hwmon/gl520sm.c data->temp_max[n] = TEMP_TO_REG(v); n 544 drivers/hwmon/gl520sm.c gl520_write_value(client, GL520_REG_TEMP_MAX[n], data->temp_max[n]); n 555 drivers/hwmon/gl520sm.c int n = to_sensor_dev_attr(attr)->index; n 564 drivers/hwmon/gl520sm.c data->temp_max_hyst[n] = TEMP_TO_REG(v); n 565 drivers/hwmon/gl520sm.c gl520_write_value(client, GL520_REG_TEMP_MAX_HYST[n], n 566 drivers/hwmon/gl520sm.c data->temp_max_hyst[n]); n 79 drivers/hwmon/hwmon.c struct attribute *attr, int n) n 503 drivers/hwmon/hwmon.c int i, n; n 505 drivers/hwmon/hwmon.c for (i = n = 0; info->config[i]; i++) n 506 drivers/hwmon/hwmon.c n += hweight32(info->config[i]); n 508 drivers/hwmon/hwmon.c return n; n 331 drivers/hwmon/ibmpex.c char *n; n 333 drivers/hwmon/ibmpex.c n = kmalloc(32, GFP_KERNEL); n 334 drivers/hwmon/ibmpex.c if (!n) n 338 drivers/hwmon/ibmpex.c sprintf(n, "temp%d_input%s", n 341 drivers/hwmon/ibmpex.c sprintf(n, "power%d_average%s", n 345 drivers/hwmon/ibmpex.c data->sensors[sensor].attr[func].dev_attr.attr.name = n; n 355 drivers/hwmon/ibmpex.c kfree(n); n 187 drivers/hwmon/ibmpowernv.c size_t n; n 189 drivers/hwmon/ibmpowernv.c n = snprintf(sdata->label, sizeof(sdata->label), "%s", label); n 202 drivers/hwmon/ibmpowernv.c n += snprintf(sdata->label + n, n 203 drivers/hwmon/ibmpowernv.c sizeof(sdata->label) - n, " %d", n 206 drivers/hwmon/ibmpowernv.c n += snprintf(sdata->label + n, n 207 drivers/hwmon/ibmpowernv.c sizeof(sdata->label) - n, " phy%d", id); n 214 drivers/hwmon/ibmpowernv.c n += snprintf(sdata->label + n, sizeof(sdata->label) - n, n 101 drivers/hwmon/iio_hwmon.c int n; n 114 drivers/hwmon/iio_hwmon.c n = in_i++; n 118 drivers/hwmon/iio_hwmon.c n = temp_i++; n 122 drivers/hwmon/iio_hwmon.c n = curr_i++; n 126 drivers/hwmon/iio_hwmon.c n = power_i++; n 130 drivers/hwmon/iio_hwmon.c n = humidity_i++; n 139 drivers/hwmon/iio_hwmon.c prefix, n); n 126 drivers/hwmon/lm85.c #define INS_TO_REG(n, val) \ n 127 drivers/hwmon/lm85.c SCALE(clamp_val(val, 0, 255 * lm85_scaling[n] / 192), \ n 128 drivers/hwmon/lm85.c lm85_scaling[n], 192) n 130 drivers/hwmon/lm85.c #define INSEXT_FROM_REG(n, val, ext) \ n 131 drivers/hwmon/lm85.c SCALE(((val) << 4) + (ext), 192 << 4, lm85_scaling[n]) n 133 drivers/hwmon/lm85.c #define INS_FROM_REG(n, val) SCALE((val), 192, lm85_scaling[n]) n 154 drivers/hwmon/ltc2990.c struct attribute *a, int n) n 455 drivers/hwmon/max16065.c struct attribute *a, int n) n 459 drivers/hwmon/max16065.c int index = n / 4; n 322 drivers/hwmon/max6650.c int n) n 44 drivers/hwmon/max6697.c #define MAX6697_REG_STAT(n) (0x44 + (n)) n 19 drivers/hwmon/npcm750-pwm-fan.c #define NPCM7XX_PWM_REG_BASE(base, n) ((base) + ((n) * 0x1000L)) n 21 drivers/hwmon/npcm750-pwm-fan.c #define NPCM7XX_PWM_REG_PR(base, n) (NPCM7XX_PWM_REG_BASE(base, n) 
+ 0x00) n 22 drivers/hwmon/npcm750-pwm-fan.c #define NPCM7XX_PWM_REG_CSR(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x04) n 23 drivers/hwmon/npcm750-pwm-fan.c #define NPCM7XX_PWM_REG_CR(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x08) n 24 drivers/hwmon/npcm750-pwm-fan.c #define NPCM7XX_PWM_REG_CNRx(base, n, ch) \ n 25 drivers/hwmon/npcm750-pwm-fan.c (NPCM7XX_PWM_REG_BASE(base, n) + 0x0C + (12 * (ch))) n 26 drivers/hwmon/npcm750-pwm-fan.c #define NPCM7XX_PWM_REG_CMRx(base, n, ch) \ n 27 drivers/hwmon/npcm750-pwm-fan.c (NPCM7XX_PWM_REG_BASE(base, n) + 0x10 + (12 * (ch))) n 28 drivers/hwmon/npcm750-pwm-fan.c #define NPCM7XX_PWM_REG_PDRx(base, n, ch) \ n 29 drivers/hwmon/npcm750-pwm-fan.c (NPCM7XX_PWM_REG_BASE(base, n) + 0x14 + (12 * (ch))) n 30 drivers/hwmon/npcm750-pwm-fan.c #define NPCM7XX_PWM_REG_PIER(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x3C) n 31 drivers/hwmon/npcm750-pwm-fan.c #define NPCM7XX_PWM_REG_PIIR(base, n) (NPCM7XX_PWM_REG_BASE(base, n) + 0x40) n 80 drivers/hwmon/npcm750-pwm-fan.c #define NPCM7XX_FAN_REG_BASE(base, n) ((base) + ((n) * 0x1000L)) n 82 drivers/hwmon/npcm750-pwm-fan.c #define NPCM7XX_FAN_REG_TCNT1(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x00) n 83 drivers/hwmon/npcm750-pwm-fan.c #define NPCM7XX_FAN_REG_TCRA(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x02) n 84 drivers/hwmon/npcm750-pwm-fan.c #define NPCM7XX_FAN_REG_TCRB(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x04) n 85 drivers/hwmon/npcm750-pwm-fan.c #define NPCM7XX_FAN_REG_TCNT2(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x06) n 86 drivers/hwmon/npcm750-pwm-fan.c #define NPCM7XX_FAN_REG_TPRSC(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x08) n 87 drivers/hwmon/npcm750-pwm-fan.c #define NPCM7XX_FAN_REG_TCKC(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x0A) n 88 drivers/hwmon/npcm750-pwm-fan.c #define NPCM7XX_FAN_REG_TMCTRL(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x0C) n 89 drivers/hwmon/npcm750-pwm-fan.c #define NPCM7XX_FAN_REG_TICTRL(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x0E) n 90 drivers/hwmon/npcm750-pwm-fan.c #define NPCM7XX_FAN_REG_TICLR(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x10) n 91 drivers/hwmon/npcm750-pwm-fan.c #define NPCM7XX_FAN_REG_TIEN(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x12) n 92 drivers/hwmon/npcm750-pwm-fan.c #define NPCM7XX_FAN_REG_TCPA(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x14) n 93 drivers/hwmon/npcm750-pwm-fan.c #define NPCM7XX_FAN_REG_TCPB(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x16) n 94 drivers/hwmon/npcm750-pwm-fan.c #define NPCM7XX_FAN_REG_TCPCFG(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x18) n 95 drivers/hwmon/npcm750-pwm-fan.c #define NPCM7XX_FAN_REG_TINASEL(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x1A) n 96 drivers/hwmon/npcm750-pwm-fan.c #define NPCM7XX_FAN_REG_TINBSEL(base, n) (NPCM7XX_FAN_REG_BASE(base, n) + 0x1C) n 449 drivers/hwmon/ntc_thermistor.c u64 n, puo, pdo; n 461 drivers/hwmon/ntc_thermistor.c n = div_u64(pdo * (puv - uv), uv); n 463 drivers/hwmon/ntc_thermistor.c n = div_u64(puo * uv, puv - uv); n 465 drivers/hwmon/ntc_thermistor.c n = div64_u64_safe(pdo * puo * (puv - uv), n 468 drivers/hwmon/ntc_thermistor.c n = div64_u64_safe(pdo * puo * uv, pdo * (puv - uv) - puo * uv); n 470 drivers/hwmon/ntc_thermistor.c if (n > INT_MAX) n 471 drivers/hwmon/ntc_thermistor.c n = INT_MAX; n 472 drivers/hwmon/ntc_thermistor.c return n; n 148 drivers/hwmon/pwm-fan.c int n) n 154 drivers/hwmon/pwm-fan.c if (n == 1 && ctx->irq <= 0) n 60 drivers/hwmon/smsc47m192.c static inline unsigned int IN_FROM_REG(u8 reg, int n) n 62 drivers/hwmon/smsc47m192.c return 
SCALE(reg, nom_mv[n], 192); n 65 drivers/hwmon/smsc47m192.c static inline u8 IN_TO_REG(unsigned long val, int n) n 67 drivers/hwmon/smsc47m192.c val = clamp_val(val, 0, nom_mv[n] * 255 / 192); n 68 drivers/hwmon/smsc47m192.c return SCALE(val, 192, nom_mv[n]); n 37 drivers/hwtracing/coresight/coresight-etm.h #define ETMACVRn(n) (0x040 + (n * 4)) n 38 drivers/hwtracing/coresight/coresight-etm.h #define ETMACTRn(n) (0x080 + (n * 4)) n 39 drivers/hwtracing/coresight/coresight-etm.h #define ETMCNTRLDVRn(n) (0x140 + (n * 4)) n 40 drivers/hwtracing/coresight/coresight-etm.h #define ETMCNTENRn(n) (0x150 + (n * 4)) n 41 drivers/hwtracing/coresight/coresight-etm.h #define ETMCNTRLDEVRn(n) (0x160 + (n * 4)) n 42 drivers/hwtracing/coresight/coresight-etm.h #define ETMCNTVRn(n) (0x170 + (n * 4)) n 50 drivers/hwtracing/coresight/coresight-etm.h #define ETMEXTOUTEVRn(n) (0x1a0 + (n * 4)) n 51 drivers/hwtracing/coresight/coresight-etm.h #define ETMCIDCVRn(n) (0x1b0 + (n * 4)) n 47 drivers/hwtracing/coresight/coresight-etm4x.h #define TRCSEQEVRn(n) (0x100 + (n * 4)) n 51 drivers/hwtracing/coresight/coresight-etm4x.h #define TRCCNTRLDVRn(n) (0x140 + (n * 4)) n 52 drivers/hwtracing/coresight/coresight-etm4x.h #define TRCCNTCTLRn(n) (0x150 + (n * 4)) n 53 drivers/hwtracing/coresight/coresight-etm4x.h #define TRCCNTVRn(n) (0x160 + (n * 4)) n 62 drivers/hwtracing/coresight/coresight-etm4x.h #define TRCIMSPECn(n) (0x1C0 + (n * 4)) n 72 drivers/hwtracing/coresight/coresight-etm4x.h #define TRCRSCTLRn(n) (0x200 + (n * 4)) n 74 drivers/hwtracing/coresight/coresight-etm4x.h #define TRCSSCCRn(n) (0x280 + (n * 4)) n 75 drivers/hwtracing/coresight/coresight-etm4x.h #define TRCSSCSRn(n) (0x2A0 + (n * 4)) n 76 drivers/hwtracing/coresight/coresight-etm4x.h #define TRCSSPCICRn(n) (0x2C0 + (n * 4)) n 84 drivers/hwtracing/coresight/coresight-etm4x.h #define TRCACVRn(n) (0x400 + (n * 8)) n 85 drivers/hwtracing/coresight/coresight-etm4x.h #define TRCACATRn(n) (0x480 + (n * 8)) n 86 drivers/hwtracing/coresight/coresight-etm4x.h #define TRCDVCVRn(n) (0x500 + (n * 16)) n 87 drivers/hwtracing/coresight/coresight-etm4x.h #define TRCDVCMRn(n) (0x580 + (n * 16)) n 88 drivers/hwtracing/coresight/coresight-etm4x.h #define TRCCIDCVRn(n) (0x600 + (n * 8)) n 89 drivers/hwtracing/coresight/coresight-etm4x.h #define TRCVMIDCVRn(n) (0x640 + (n * 8)) n 441 drivers/hwtracing/coresight/coresight-platform.c int i, n; n 459 drivers/hwtracing/coresight/coresight-platform.c n = nr_graphs->integer.value; n 461 drivers/hwtracing/coresight/coresight-platform.c if (n != 1) n 465 drivers/hwtracing/coresight/coresight-platform.c if (graph->package.count != (n + 2)) n 472 drivers/hwtracing/coresight/coresight-platform.c for (i = 2; i < n + 2; i++) { n 546 drivers/hwtracing/stm/core.c int err, n; n 554 drivers/hwtracing/stm/core.c for (n = 0, pn = NULL; ids[n] && !pn; n++) n 555 drivers/hwtracing/stm/core.c pn = stp_policy_node_lookup(stm, ids[n]); n 109 drivers/i2c/busses/i2c-efm32.c #define REG_ROUTE_LOCATION(n) MASK_VAL(REG_ROUTE_LOCATION__MASK, (n)) n 108 drivers/i2c/busses/i2c-jz4780.c #define JZ4780_I2CSHCNT_ADJUST(n) (((n) - 8) < 6 ? 6 : ((n) - 8)) n 109 drivers/i2c/busses/i2c-jz4780.c #define JZ4780_I2CSLCNT_ADJUST(n) (((n) - 1) < 8 ? 8 : ((n) - 1)) n 110 drivers/i2c/busses/i2c-jz4780.c #define JZ4780_I2CFHCNT_ADJUST(n) (((n) - 8) < 6 ? 6 : ((n) - 8)) n 111 drivers/i2c/busses/i2c-jz4780.c #define JZ4780_I2CFLCNT_ADJUST(n) (((n) - 1) < 8 ? 
8 : ((n) - 1)) n 761 drivers/i2c/busses/i2c-mv64xxx.c const int tclk, const int n, const int m) n 764 drivers/i2c/busses/i2c-mv64xxx.c return tclk / (10 * (m + 1) * (1 << n)); n 766 drivers/i2c/busses/i2c-mv64xxx.c return tclk / (10 * (m + 1) * (2 << n)); n 774 drivers/i2c/busses/i2c-mv64xxx.c int m, n; n 776 drivers/i2c/busses/i2c-mv64xxx.c for (n = 0; n <= 7; n++) n 778 drivers/i2c/busses/i2c-mv64xxx.c freq = mv64xxx_calc_freq(drv_data, tclk, n, m); n 782 drivers/i2c/busses/i2c-mv64xxx.c drv_data->freq_n = n; n 802 drivers/i2c/busses/i2c-sh_mobile.c resource_size_t n; n 806 drivers/i2c/busses/i2c-sh_mobile.c for (n = res->start; n <= res->end; n++) { n 807 drivers/i2c/busses/i2c-sh_mobile.c ret = devm_request_irq(&dev->dev, n, sh_mobile_i2c_isr, n 810 drivers/i2c/busses/i2c-sh_mobile.c dev_err(&dev->dev, "cannot request IRQ %pa\n", &n); n 52 drivers/i2c/busses/i2c-stm32f4.c #define STM32F4_I2C_CR2_FREQ(n) ((n) & STM32F4_I2C_CR2_FREQ_MASK) n 83 drivers/i2c/busses/i2c-stm32f4.c #define STM32F4_I2C_CCR_CCR(n) ((n) & STM32F4_I2C_CCR_CCR_MASK) n 89 drivers/i2c/busses/i2c-stm32f4.c #define STM32F4_I2C_TRISE_VALUE(n) ((n) & STM32F4_I2C_TRISE_VALUE_MASK) n 80 drivers/i2c/busses/i2c-stm32f7.c #define STM32F7_I2C_CR2_NBYTES(n) (((n) & 0xff) << 16) n 88 drivers/i2c/busses/i2c-stm32f7.c #define STM32F7_I2C_CR2_SADD10(n) (((n) & \ n 91 drivers/i2c/busses/i2c-stm32f7.c #define STM32F7_I2C_CR2_SADD7(n) (((n) & 0x7f) << 1) n 97 drivers/i2c/busses/i2c-stm32f7.c #define STM32F7_I2C_OAR1_OA1_10(n) (((n) & \ n 100 drivers/i2c/busses/i2c-stm32f7.c #define STM32F7_I2C_OAR1_OA1_7(n) (((n) & 0x7f) << 1) n 109 drivers/i2c/busses/i2c-stm32f7.c #define STM32F7_I2C_OAR2_OA2MSK(n) (((n) & 0x7) << 8) n 111 drivers/i2c/busses/i2c-stm32f7.c #define STM32F7_I2C_OAR2_OA2_7(n) (((n) & 0x7f) << 1) n 118 drivers/i2c/busses/i2c-stm32f7.c #define STM32F7_I2C_ISR_ADDCODE_GET(n) \ n 119 drivers/i2c/busses/i2c-stm32f7.c (((n) & STM32F7_I2C_ISR_ADDCODE_MASK) >> 17) n 143 drivers/i2c/busses/i2c-stm32f7.c #define STM32F7_I2C_TIMINGR_PRESC(n) (((n) & 0xf) << 28) n 144 drivers/i2c/busses/i2c-stm32f7.c #define STM32F7_I2C_TIMINGR_SCLDEL(n) (((n) & 0xf) << 20) n 145 drivers/i2c/busses/i2c-stm32f7.c #define STM32F7_I2C_TIMINGR_SDADEL(n) (((n) & 0xf) << 16) n 146 drivers/i2c/busses/i2c-stm32f7.c #define STM32F7_I2C_TIMINGR_SCLH(n) (((n) & 0xff) << 8) n 147 drivers/i2c/busses/i2c-stm32f7.c #define STM32F7_I2C_TIMINGR_SCLL(n) ((n) & 0xff) n 22 drivers/i2c/busses/i2c-synquacer.c #define WAIT_PCLK(n, rate) \ n 23 drivers/i2c/busses/i2c-synquacer.c ndelay(DIV_ROUND_UP(DIV_ROUND_UP(1000000000, rate), n) + 10) n 38 drivers/i2c/i2c-core-acpi.c int n; n 82 drivers/i2c/i2c-core-acpi.c if (lookup->index != -1 && lookup->n++ != lookup->index) n 280 drivers/ide/ide-disk.c static unsigned long long sectors_to_MB(unsigned long long n) n 282 drivers/ide/ide-disk.c n <<= 9; /* make it bytes */ n 283 drivers/ide/ide-disk.c do_div(n, 1000000); /* make it MB */ n 284 drivers/ide/ide-disk.c return n; n 288 drivers/ide/ide-proc.c unsigned long n; n 325 drivers/ide/ide-proc.c n = count; n 326 drivers/ide/ide-proc.c while (n > 0) { n 330 drivers/ide/ide-proc.c while (n > 0 && *p != ':') { n 331 drivers/ide/ide-proc.c --n; n 341 drivers/ide/ide-proc.c if (n > 0) { n 342 drivers/ide/ide-proc.c --n; n 348 drivers/ide/ide-proc.c n -= q - p; n 350 drivers/ide/ide-proc.c if (n > 0 && !isspace(*p)) n 352 drivers/ide/ide-proc.c while (n > 0 && isspace(*p)) { n 353 drivers/ide/ide-proc.c --n; n 93 drivers/ide/ide-scan-pci.c struct list_head *l, *n; n 104 
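The ide-disk.c entries above (sectors_to_MB, source lines 280-284) convert a 512-byte sector count to decimal megabytes by shifting left 9 bits and dividing by 1,000,000 via do_div(). A plain user-space sketch of the same arithmetic, using ordinary 64-bit division in place of the kernel's do_div() helper, is:

#include <stdio.h>

/*
 * Sketch of the sectors_to_MB() conversion listed above: 512-byte
 * sectors -> decimal megabytes.  Plain 64-bit division stands in
 * for do_div(), which exists mainly so 32-bit targets avoid a
 * generic 64/64 divide.
 */
static unsigned long long sectors_to_mb(unsigned long long sectors)
{
	unsigned long long bytes = sectors << 9;	/* * 512 */

	return bytes / 1000000;				/* bytes -> MB */
}

int main(void)
{
	/* e.g. a disk advertised as 976773168 sectors (~500 GB) */
	printf("%llu MB\n", sectors_to_mb(976773168ULL));
	return 0;
}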
drivers/ide/ide-scan-pci.c list_for_each_safe(l, n, &ide_pci_drivers) { n 95 drivers/ide/ide-sysfs.c const char *buf, size_t n) n 99 drivers/ide/ide-sysfs.c if (strncmp(buf, "1", n)) n 104 drivers/ide/ide-sysfs.c return n; n 111 drivers/ide/ide-sysfs.c const char *buf, size_t n) n 115 drivers/ide/ide-sysfs.c if (strncmp(buf, "1", n)) n 121 drivers/ide/ide-sysfs.c return n; n 154 drivers/iio/accel/adxl345_core.c s64 n; n 166 drivers/iio/accel/adxl345_core.c n = div_s64(val * NHZ_PER_HZ + val2, ADXL345_BASE_RATE_NANO_HZ); n 170 drivers/iio/accel/adxl345_core.c clamp_val(ilog2(n), 0, n 410 drivers/iio/accel/bma180.c static ssize_t bma180_show_avail(char *buf, const int *vals, unsigned int n, n 416 drivers/iio/accel/bma180.c for (i = 0; i < n; i++) { n 257 drivers/iio/accel/mma8452.c int n) n 261 drivers/iio/accel/mma8452.c while (n-- > 0) n 263 drivers/iio/accel/mma8452.c vals[n][0], vals[n][1]); n 271 drivers/iio/accel/mma8452.c static int mma8452_get_int_plus_micros_index(const int (*vals)[2], int n, n 274 drivers/iio/accel/mma8452.c while (n-- > 0) n 275 drivers/iio/accel/mma8452.c if (val == vals[n][0] && val2 == vals[n][1]) n 276 drivers/iio/accel/mma8452.c return n; n 201 drivers/iio/adc/ad7606.c unsigned int n, bool micros) n 206 drivers/iio/adc/ad7606.c for (i = 0; i < n; i++) { n 255 drivers/iio/adc/ad_sigma_delta.c const struct ad_sd_calib_data *cb, unsigned int n) n 260 drivers/iio/adc/ad_sigma_delta.c for (i = 0; i < n; i++) { n 71 drivers/iio/adc/at91_adc.c #define AT91_ADC_CH(n) (1 << (n)) /* Channel Number */ n 74 drivers/iio/adc/at91_adc.c #define AT91_ADC_EOC(n) (1 << (n)) /* End of Conversion on Channel N */ n 75 drivers/iio/adc/at91_adc.c #define AT91_ADC_OVRE(n) (1 << ((n) + 8))/* Overrun Error on Channel N */ n 99 drivers/iio/adc/at91_adc.c #define AT91_ADC_CHR(n) (0x30 + ((n) * 4)) /* Channel Data Register N */ n 80 drivers/iio/adc/berlin2-adc.c #define BERLIN2_ADC_CHANNEL(n, t) \ n 82 drivers/iio/adc/berlin2-adc.c .channel = n, \ n 83 drivers/iio/adc/berlin2-adc.c .datasheet_name = "channel"#n, \ n 37 drivers/iio/adc/max1027.c #define MAX1027_CHAN(n) ((n) << 3) n 696 drivers/iio/adc/mxs-lradc-adc.c int ret, irq, virq, i, s, n; n 735 drivers/iio/adc/mxs-lradc-adc.c n = ARRAY_SIZE(mx23_lradc_adc_irq_names); n 740 drivers/iio/adc/mxs-lradc-adc.c n = ARRAY_SIZE(mx28_lradc_adc_irq_names); n 747 drivers/iio/adc/mxs-lradc-adc.c for (i = 0; i < n; i++) { n 65 drivers/iio/adc/sc27xx_adc.c #define SC27XX_VOLT_RATIO(n, d) \ n 66 drivers/iio/adc/sc27xx_adc.c (((n) << SC27XX_RATIO_NUMERATOR_OFFSET) | (d)) n 109 drivers/iio/adc/spear_adc.c static void spear_adc_set_ctrl(struct spear_adc_state *st, int n, n 112 drivers/iio/adc/spear_adc.c __raw_writel(val, &st->adc_base_spear6xx->ch_ctrl[n]); n 138 drivers/iio/adc/xilinx-xadc-core.c unsigned int n) n 142 drivers/iio/adc/xilinx-xadc-core.c for (i = 0; i < n; i++) n 588 drivers/iio/adc/xilinx-xadc-core.c unsigned int n; n 590 drivers/iio/adc/xilinx-xadc-core.c n = bitmap_weight(mask, indio_dev->masklength); n 593 drivers/iio/adc/xilinx-xadc-core.c xadc->data = kcalloc(n, sizeof(*xadc->data), GFP_KERNEL); n 482 drivers/iio/buffer/industrialio-buffer-dma.c int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n, n 489 drivers/iio/buffer/industrialio-buffer-dma.c if (n < buffer->bytes_per_datum) n 506 drivers/iio/buffer/industrialio-buffer-dma.c n = rounddown(n, buffer->bytes_per_datum); n 507 drivers/iio/buffer/industrialio-buffer-dma.c if (n > block->bytes_used - queue->fileio.pos) n 508 drivers/iio/buffer/industrialio-buffer-dma.c n 
= block->bytes_used - queue->fileio.pos; n 510 drivers/iio/buffer/industrialio-buffer-dma.c if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) { n 515 drivers/iio/buffer/industrialio-buffer-dma.c queue->fileio.pos += n; n 522 drivers/iio/buffer/industrialio-buffer-dma.c ret = n; n 131 drivers/iio/buffer/industrialio-hw-consumer.c struct hw_consumer_buffer *buf, *n; n 134 drivers/iio/buffer/industrialio-hw-consumer.c list_for_each_entry_safe(buf, n, &hwc->buffers, head) n 102 drivers/iio/buffer/kfifo_buf.c size_t n, char __user *buf) n 110 drivers/iio/buffer/kfifo_buf.c if (!kfifo_initialized(&kf->kf) || n < kfifo_esize(&kf->kf)) n 113 drivers/iio/buffer/kfifo_buf.c ret = kfifo_to_user(&kf->kf, buf, n, &copied); n 332 drivers/iio/common/ssp_sensors/ssp_spi.c struct ssp_msg *msg, *n; n 358 drivers/iio/common/ssp_sensors/ssp_spi.c list_for_each_entry_safe(msg, n, &data->pending_list, list) { n 443 drivers/iio/common/ssp_sensors/ssp_spi.c struct ssp_msg *msg, *n; n 446 drivers/iio/common/ssp_sensors/ssp_spi.c list_for_each_entry_safe(msg, n, &data->pending_list, list) { n 612 drivers/iio/common/st_sensors/st_sensors_core.c int i, n; n 615 drivers/iio/common/st_sensors/st_sensors_core.c for (n = 0; n < ST_SENSORS_MAX_4WAI; n++) { n 616 drivers/iio/common/st_sensors/st_sensors_core.c if (strcmp(name, list[i].sensors_supported[n]) == 0) n 256 drivers/iio/dac/ad5449.c static const char *ad5449_vref_name(struct ad5449 *st, int n) n 261 drivers/iio/dac/ad5449.c if (n == 0) n 46 drivers/iio/iio_core.h size_t n, loff_t *f_ps); n 34 drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c #define ST_LSM6DSX_SLV_ADDR(n, base) ((base) + (n) * 3) n 35 drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c #define ST_LSM6DSX_SLV_SUB_ADDR(n, base) ((base) + 1 + (n) * 3) n 36 drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c #define ST_LSM6DSX_SLV_CONFIG(n, base) ((base) + 2 + (n) * 3) n 103 drivers/iio/industrialio-buffer.c size_t n, loff_t *f_ps) n 130 drivers/iio/industrialio-buffer.c to_wait = min_t(size_t, n / datum_size, rb->watermark); n 139 drivers/iio/industrialio-buffer.c if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) { n 150 drivers/iio/industrialio-buffer.c ret = rb->access->read_first_n(rb, n, buf); n 1222 drivers/iio/industrialio-core.c struct iio_dev_attr *p, *n; n 1224 drivers/iio/industrialio-core.c list_for_each_entry_safe(p, n, attr_list, l) { n 140 drivers/iio/light/cm32181.c int ret, i, n; n 142 drivers/iio/light/cm32181.c n = ARRAY_SIZE(als_it_value); n 143 drivers/iio/light/cm32181.c for (i = 0; i < n; i++) n 146 drivers/iio/light/cm32181.c if (i >= n) n 147 drivers/iio/light/cm32181.c i = n - 1; n 261 drivers/iio/light/cm32181.c int i, n, len; n 263 drivers/iio/light/cm32181.c n = ARRAY_SIZE(als_it_value); n 264 drivers/iio/light/cm32181.c for (i = 0, len = 0; i < n; i++) n 57 drivers/iio/light/opt3001.c #define OPT3001_REG_EXPONENT(n) ((n) >> 12) n 58 drivers/iio/light/opt3001.c #define OPT3001_REG_MANTISSA(n) ((n) & 0xfff) n 41 drivers/iio/light/tsl2563.c #define CALIB_FRAC(n, b) (((n) << CALIB_FRAC_BITS) / (b)) n 74 drivers/iio/light/tsl2563.c #define TSL2563_INT_PERSIST(n) ((n) & 0x0F) n 556 drivers/iio/light/tsl2583.c unsigned int n; n 568 drivers/iio/light/tsl2583.c n = value[0]; n 569 drivers/iio/light/tsl2583.c if ((n % 3) || n < 6 || n > max_ints) { n 575 drivers/iio/light/tsl2583.c if ((value[n - 2] | value[n - 1] | value[n]) != 0) { n 1014 drivers/iio/light/tsl2772.c int n, ret; n 1024 drivers/iio/light/tsl2772.c n = value[0]; n 1025 drivers/iio/light/tsl2772.c if ((n 
% 2) || n < 4 || n 1026 drivers/iio/light/tsl2772.c n > ((ARRAY_SIZE(chip->tsl2772_device_lux) - 1) * 2)) n 1029 drivers/iio/light/tsl2772.c if ((value[(n - 1)] | value[n]) != 0) n 113 drivers/iio/magnetometer/mag3110.c const int (*vals)[2], int n) n 117 drivers/iio/magnetometer/mag3110.c while (n-- > 0) n 119 drivers/iio/magnetometer/mag3110.c "%d.%06d ", vals[n][0], vals[n][1]); n 127 drivers/iio/magnetometer/mag3110.c static int mag3110_get_int_plus_micros_index(const int (*vals)[2], int n, n 130 drivers/iio/magnetometer/mag3110.c while (n-- > 0) n 131 drivers/iio/magnetometer/mag3110.c if (val == vals[n][0] && val2 == vals[n][1]) n 132 drivers/iio/magnetometer/mag3110.c return n; n 489 drivers/iio/pressure/bmp280-core.c const int n = data->chip_info->num_oversampling_humid_avail; n 491 drivers/iio/pressure/bmp280-core.c for (i = 0; i < n; i++) { n 506 drivers/iio/pressure/bmp280-core.c const int n = data->chip_info->num_oversampling_temp_avail; n 508 drivers/iio/pressure/bmp280-core.c for (i = 0; i < n; i++) { n 523 drivers/iio/pressure/bmp280-core.c const int n = data->chip_info->num_oversampling_press_avail; n 525 drivers/iio/pressure/bmp280-core.c for (i = 0; i < n; i++) { n 571 drivers/iio/pressure/bmp280-core.c static ssize_t bmp280_show_avail(char *buf, const int *vals, const int n) n 576 drivers/iio/pressure/bmp280-core.c for (i = 0; i < n; i++) n 329 drivers/infiniband/core/addr.c struct neighbour *n; n 332 drivers/infiniband/core/addr.c n = dst_neigh_lookup(dst, daddr); n 333 drivers/infiniband/core/addr.c if (!n) n 336 drivers/infiniband/core/addr.c if (!(n->nud_state & NUD_VALID)) { n 337 drivers/infiniband/core/addr.c neigh_event_send(n, NULL); n 340 drivers/infiniband/core/addr.c neigh_ha_snapshot(dev_addr->dst_dev_addr, n, dst->dev); n 343 drivers/infiniband/core/addr.c neigh_release(n); n 71 drivers/infiniband/core/cq.c int i, n, completed = 0; n 78 drivers/infiniband/core/cq.c while ((n = ib_poll_cq(cq, min_t(u32, batch, n 80 drivers/infiniband/core/cq.c for (i = 0; i < n; i++) { n 89 drivers/infiniband/core/cq.c completed += n; n 91 drivers/infiniband/core/cq.c if (n != batch || (budget != -1 && completed >= budget)) n 350 drivers/infiniband/core/umem.c int i, n = 0; n 354 drivers/infiniband/core/umem.c n += sg_dma_len(sg) >> PAGE_SHIFT; n 356 drivers/infiniband/core/umem.c return n; n 437 drivers/infiniband/hw/cxgb3/iwch_provider.c int shift, n, i; n 463 drivers/infiniband/hw/cxgb3/iwch_provider.c n = ib_umem_num_pages(mhp->umem); n 465 drivers/infiniband/hw/cxgb3/iwch_provider.c err = iwch_alloc_pbl(mhp, n); n 475 drivers/infiniband/hw/cxgb3/iwch_provider.c i = n = 0; n 480 drivers/infiniband/hw/cxgb3/iwch_provider.c err = iwch_write_pbl(mhp, pages, i, n); n 483 drivers/infiniband/hw/cxgb3/iwch_provider.c n += i; n 489 drivers/infiniband/hw/cxgb3/iwch_provider.c err = iwch_write_pbl(mhp, pages, i, n); n 2072 drivers/infiniband/hw/cxgb4/cm.c struct neighbour *n; n 2076 drivers/infiniband/hw/cxgb4/cm.c n = dst_neigh_lookup(dst, peer_ip); n 2077 drivers/infiniband/hw/cxgb4/cm.c if (!n) n 2082 drivers/infiniband/hw/cxgb4/cm.c if (n->dev->flags & IFF_LOOPBACK) { n 2100 drivers/infiniband/hw/cxgb4/cm.c n, pdev, rt_tos2priority(tos)); n 2119 drivers/infiniband/hw/cxgb4/cm.c pdev = get_real_dev(n->dev); n 2121 drivers/infiniband/hw/cxgb4/cm.c n, pdev, rt_tos2priority(tos)); n 2146 drivers/infiniband/hw/cxgb4/cm.c neigh_release(n); n 512 drivers/infiniband/hw/cxgb4/mem.c int shift, n, i; n 552 drivers/infiniband/hw/cxgb4/mem.c n = ib_umem_num_pages(mhp->umem); n 553 
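The drivers/infiniband/core/cq.c entries above (source lines 71-91) poll completions in fixed-size batches: a short batch means the queue is drained, and an optional budget bounds the work done per pass. A generic sketch of that loop, with a hypothetical poll_items() producer standing in for ib_poll_cq(), is:

#include <stdio.h>

#define BATCH 8

struct item { int id; };

/* Pretend producer: hands out '*avail' items in total, BATCH at a time. */
static int poll_items(struct item *buf, int max, int *avail)
{
	int n = *avail < max ? *avail : max;
	int i;

	for (i = 0; i < n; i++)
		buf[i].id = --(*avail);
	return n;
}

/* Poll until a short batch (drained) or the budget is exhausted. */
static int process_batches(int *avail, int budget)
{
	struct item items[BATCH];
	int i, n, completed = 0;

	while ((n = poll_items(items, BATCH, avail)) > 0) {
		for (i = 0; i < n; i++)
			(void)items[i].id;	/* handle completion i here */
		completed += n;
		if (n != BATCH || (budget != -1 && completed >= budget))
			break;
	}
	return completed;
}

int main(void)
{
	int avail = 21;

	printf("completed %d of 21\n", process_batches(&avail, -1));
	return 0;
}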
drivers/infiniband/hw/cxgb4/mem.c err = alloc_pbl(mhp, n); n 563 drivers/infiniband/hw/cxgb4/mem.c i = n = 0; n 569 drivers/infiniband/hw/cxgb4/mem.c mhp->attr.pbl_addr + (n << 3), i, n 573 drivers/infiniband/hw/cxgb4/mem.c n += i; n 580 drivers/infiniband/hw/cxgb4/mem.c mhp->attr.pbl_addr + (n << 3), i, n 10485 drivers/infiniband/hw/hfi1/chip.c int n = ilog2(state); n 10500 drivers/infiniband/hw/hfi1/chip.c name = n < ARRAY_SIZE(names) ? names[n] : NULL; n 13149 drivers/infiniband/hw/hfi1/chip.c int m, n; n 13153 drivers/infiniband/hw/hfi1/chip.c n = isrc % 64; n 13155 drivers/infiniband/hw/hfi1/chip.c dd->gi_mask[m] &= ~((u64)1 << n); n 13163 drivers/infiniband/hw/hfi1/chip.c n = isrc % 8; n 13165 drivers/infiniband/hw/hfi1/chip.c reg &= ~((u64)0xff << (8 * n)); n 13166 drivers/infiniband/hw/hfi1/chip.c reg |= ((u64)msix_intr & 0xff) << (8 * n); n 14198 drivers/infiniband/hw/hfi1/chip.c unsigned int m, n; n 14216 drivers/infiniband/hw/hfi1/chip.c n = ilog2(__roundup_pow_of_two(num_vls)); n 14219 drivers/infiniband/hw/hfi1/chip.c if ((m + n) > 7) n 14225 drivers/infiniband/hw/hfi1/chip.c *np = n; n 14227 drivers/infiniband/hw/hfi1/chip.c return 1 << (m + n); n 14254 drivers/infiniband/hw/hfi1/chip.c unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m; n 14260 drivers/infiniband/hw/hfi1/chip.c rmt_entries = qos_rmt_entries(dd, &m, &n); n 14266 drivers/infiniband/hw/hfi1/chip.c rmt_entries = 1 << (m + n); n 14279 drivers/infiniband/hw/hfi1/chip.c idx = rmt->used + ((qpn << n) ^ i); n 14299 drivers/infiniband/hw/hfi1/chip.c rrd.index1_width = n; n 14301 drivers/infiniband/hw/hfi1/chip.c rrd.index2_width = m + n; n 14314 drivers/infiniband/hw/hfi1/chip.c dd->qos_shift = n + 1; n 264 drivers/infiniband/hw/hfi1/debugfs.c loff_t n = *pos; n 279 drivers/infiniband/hw/hfi1/debugfs.c } while (n--); n 240 drivers/infiniband/hw/hfi1/eprom.c #define DIRECTORY_SIZE(n) (sizeof(struct hfi1_eprom_footer) + \ n 241 drivers/infiniband/hw/hfi1/eprom.c (sizeof(struct hfi1_eprom_table_entry) * (n))) n 458 drivers/infiniband/hw/hfi1/iowait.h static inline void iowait_inc_wait_count(struct iowait_work *w, u16 n) n 463 drivers/infiniband/hw/hfi1/iowait.h w->iow->count += n; n 2662 drivers/infiniband/hw/hfi1/mad.c int n = LINK_WIDTH_DEFAULT; n 2663 drivers/infiniband/hw/hfi1/mad.c u16 tx_width = n; n 2665 drivers/infiniband/hw/hfi1/mad.c while (link_width && n) { n 2666 drivers/infiniband/hw/hfi1/mad.c if (link_width & (1 << (n - 1))) { n 2667 drivers/infiniband/hw/hfi1/mad.c tx_width = n; n 2670 drivers/infiniband/hw/hfi1/mad.c n--; n 431 drivers/infiniband/hw/hfi1/mad.h #define COUNTER_MASK(q, n) (q << ((9 - n) * 3)) n 1617 drivers/infiniband/hw/hfi1/pio.c uint i, n = 0, top_idx = 0; n 1633 drivers/infiniband/hw/hfi1/pio.c if (n == ARRAY_SIZE(qps)) n 1641 drivers/infiniband/hw/hfi1/pio.c if (n) { n 1645 drivers/infiniband/hw/hfi1/pio.c n, top_idx); n 1649 drivers/infiniband/hw/hfi1/pio.c qps[n++] = qp; n 1655 drivers/infiniband/hw/hfi1/pio.c if (n) { n 1663 drivers/infiniband/hw/hfi1/pio.c if (n) n 1666 drivers/infiniband/hw/hfi1/pio.c for (i = 0; i < n; i++) n 189 drivers/infiniband/hw/hfi1/pio_copy.c static inline void jcopy(u8 *dest, const u8 *src, u32 n) n 191 drivers/infiniband/hw/hfi1/pio_copy.c switch (n) { n 668 drivers/infiniband/hw/hfi1/qp.c iter->n, n 767 drivers/infiniband/hw/hfi1/qp.c int n; n 770 drivers/infiniband/hw/hfi1/qp.c for (n = 0; n < dd->num_pports; n++) { n 771 drivers/infiniband/hw/hfi1/qp.c struct hfi1_ibport *ibp = &dd->pport[n].ibport_data; n 1503 drivers/infiniband/hw/hfi1/rc.c u32 n = 
qp->s_acked; n 1504 drivers/infiniband/hw/hfi1/rc.c struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n); n 1509 drivers/infiniband/hw/hfi1/rc.c qp->s_cur = n; n 1528 drivers/infiniband/hw/hfi1/rc.c if (++n == qp->s_size) n 1529 drivers/infiniband/hw/hfi1/rc.c n = 0; n 1530 drivers/infiniband/hw/hfi1/rc.c if (n == qp->s_tail) n 1532 drivers/infiniband/hw/hfi1/rc.c wqe = rvt_get_swqe_ptr(qp, n); n 1539 drivers/infiniband/hw/hfi1/rc.c qp->s_cur = n; n 1681 drivers/infiniband/hw/hfi1/rc.c u32 n = qp->s_last; n 1686 drivers/infiniband/hw/hfi1/rc.c wqe = rvt_get_swqe_ptr(qp, n); n 1696 drivers/infiniband/hw/hfi1/rc.c if (++n == qp->s_size) n 1697 drivers/infiniband/hw/hfi1/rc.c n = 0; n 1698 drivers/infiniband/hw/hfi1/rc.c if (n == qp->s_tail) n 13 drivers/infiniband/hw/hfi1/rc.h static inline void update_ack_queue(struct rvt_qp *qp, unsigned int n) n 17 drivers/infiniband/hw/hfi1/rc.h next = n + 1; n 1769 drivers/infiniband/hw/hfi1/sdma.c uint i, n = 0, seq, tidx = 0; n 1792 drivers/infiniband/hw/hfi1/sdma.c if (n == ARRAY_SIZE(waits)) n 1800 drivers/infiniband/hw/hfi1/sdma.c if (n) { n 1805 drivers/infiniband/hw/hfi1/sdma.c n, n 1809 drivers/infiniband/hw/hfi1/sdma.c waits[n++] = wait; n 1817 drivers/infiniband/hw/hfi1/sdma.c if (n) n 1820 drivers/infiniband/hw/hfi1/sdma.c for (i = 0; i < n; i++) n 1100 drivers/infiniband/hw/hfi1/tid_rdma.c if (++sge->n >= RVT_SEGSZ) { n 1102 drivers/infiniband/hw/hfi1/tid_rdma.c sge->n = 0; n 1104 drivers/infiniband/hw/hfi1/tid_rdma.c sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; n 1105 drivers/infiniband/hw/hfi1/tid_rdma.c sge->length = sge->mr->map[sge->m]->segs[sge->n].length; n 1729 drivers/infiniband/hw/hfi1/tid_rdma.c wpriv->ss.sge.n = 0; n 2574 drivers/infiniband/hw/hfi1/tid_rdma.c u32 n = qp->s_acked; n 2581 drivers/infiniband/hw/hfi1/tid_rdma.c while (n != qp->s_tail) { n 2582 drivers/infiniband/hw/hfi1/tid_rdma.c wqe = rvt_get_swqe_ptr(qp, n); n 2588 drivers/infiniband/hw/hfi1/tid_rdma.c if (++n == qp->s_size) n 2589 drivers/infiniband/hw/hfi1/tid_rdma.c n = 0; n 3890 drivers/infiniband/hw/hfi1/tid_rdma.c epriv->ss.sge.n = 0; n 1664 drivers/infiniband/hw/hfi1/verbs.c int i, n; n 1666 drivers/infiniband/hw/hfi1/verbs.c n = 0; n 1669 drivers/infiniband/hw/hfi1/verbs.c n++; n 1671 drivers/infiniband/hw/hfi1/verbs.c names_out = kmalloc((n + num_extra_names) * sizeof(char *) + names_len, n 1679 drivers/infiniband/hw/hfi1/verbs.c p = names_out + (n + num_extra_names) * sizeof(char *); n 1683 drivers/infiniband/hw/hfi1/verbs.c for (i = 0; i < n; i++) { n 1689 drivers/infiniband/hw/hfi1/verbs.c *num_cntrs = n; n 1242 drivers/infiniband/hw/hns/hns_roce_device.h void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n); n 1243 drivers/infiniband/hw/hns/hns_roce_device.h void *get_send_wqe(struct hns_roce_qp *hr_qp, int n); n 1244 drivers/infiniband/hw/hns/hns_roce_device.h void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n); n 1977 drivers/infiniband/hw/hns/hns_roce_hw_v1.c static void *get_cqe(struct hns_roce_cq *hr_cq, int n) n 1980 drivers/infiniband/hw/hns/hns_roce_hw_v1.c n * HNS_ROCE_V1_CQE_ENTRY_SIZE); n 1983 drivers/infiniband/hw/hns/hns_roce_hw_v1.c static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n) n 1985 drivers/infiniband/hw/hns/hns_roce_hw_v1.c struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe); n 1989 drivers/infiniband/hw/hns/hns_roce_hw_v1.c !!(n & (hr_cq->ib_cq.cqe + 1))) ? 
hr_cqe : NULL; n 2451 drivers/infiniband/hw/hns/hns_roce_hw_v2.c static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n) n 2454 drivers/infiniband/hw/hns/hns_roce_hw_v2.c n * HNS_ROCE_V2_CQE_ENTRY_SIZE); n 2457 drivers/infiniband/hw/hns/hns_roce_hw_v2.c static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n) n 2459 drivers/infiniband/hw/hns/hns_roce_hw_v2.c struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe); n 2463 drivers/infiniband/hw/hns/hns_roce_hw_v2.c !!(n & (hr_cq->ib_cq.cqe + 1))) ? cqe : NULL; n 2471 drivers/infiniband/hw/hns/hns_roce_hw_v2.c static void *get_srq_wqe(struct hns_roce_srq *srq, int n) n 2473 drivers/infiniband/hw/hns/hns_roce_hw_v2.c return hns_roce_buf_offset(&srq->buf, n << srq->wqe_shift); n 1031 drivers/infiniband/hw/hns/hns_roce_mr.c u32 n; n 1058 drivers/infiniband/hw/hns/hns_roce_mr.c i = n = 0; n 1074 drivers/infiniband/hw/hns/hns_roce_mr.c ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages); n 1077 drivers/infiniband/hw/hns/hns_roce_mr.c n += i; n 1083 drivers/infiniband/hw/hns/hns_roce_mr.c ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages); n 1139 drivers/infiniband/hw/hns/hns_roce_mr.c int n; n 1152 drivers/infiniband/hw/hns/hns_roce_mr.c n = ib_umem_page_count(mr->umem); n 1155 drivers/infiniband/hw/hns/hns_roce_mr.c if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) { n 1169 drivers/infiniband/hw/hns/hns_roce_mr.c if (n > pbl_size) { n 1181 drivers/infiniband/hw/hns/hns_roce_mr.c access_flags, n, mr); n 1271 drivers/infiniband/hw/hns/hns_roce_qp.c void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n) n 1273 drivers/infiniband/hw/hns/hns_roce_qp.c return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift)); n 1276 drivers/infiniband/hw/hns/hns_roce_qp.c void *get_send_wqe(struct hns_roce_qp *hr_qp, int n) n 1278 drivers/infiniband/hw/hns/hns_roce_qp.c return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift)); n 1281 drivers/infiniband/hw/hns/hns_roce_qp.c void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n) n 1284 drivers/infiniband/hw/hns/hns_roce_qp.c (n << hr_qp->sge.sge_shift)); n 69 drivers/infiniband/hw/mlx4/cq.c static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n) n 71 drivers/infiniband/hw/mlx4/cq.c return mlx4_buf_offset(&buf->buf, n * buf->entry_size); n 74 drivers/infiniband/hw/mlx4/cq.c static void *get_cqe(struct mlx4_ib_cq *cq, int n) n 76 drivers/infiniband/hw/mlx4/cq.c return get_cqe_from_buf(&cq->buf, n); n 79 drivers/infiniband/hw/mlx4/cq.c static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n) n 81 drivers/infiniband/hw/mlx4/cq.c struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe); n 85 drivers/infiniband/hw/mlx4/cq.c !!(n & (cq->ibcq.cqe + 1))) ? 
NULL : cqe; n 145 drivers/infiniband/hw/mlx4/cq.c int n; n 152 drivers/infiniband/hw/mlx4/cq.c n = ib_umem_page_count(*umem); n 153 drivers/infiniband/hw/mlx4/cq.c shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n); n 154 drivers/infiniband/hw/mlx4/cq.c err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt); n 750 drivers/infiniband/hw/mlx4/mcg.c struct mcast_group *group = NULL, *cur_group, *n; n 754 drivers/infiniband/hw/mlx4/mcg.c list_for_each_entry_safe(group, n, &ctx->mcg_mgid0_list, mgid0_list) { n 412 drivers/infiniband/hw/mlx4/mr.c int n; n 424 drivers/infiniband/hw/mlx4/mr.c n = ib_umem_page_count(mr->umem); n 425 drivers/infiniband/hw/mlx4/mr.c shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n); n 428 drivers/infiniband/hw/mlx4/mr.c convert_access(access_flags), n, shift, &mr->mmr); n 503 drivers/infiniband/hw/mlx4/mr.c int n; n 515 drivers/infiniband/hw/mlx4/mr.c n = ib_umem_page_count(mmr->umem); n 519 drivers/infiniband/hw/mlx4/mr.c virt_addr, length, n, shift, n 197 drivers/infiniband/hw/mlx4/qp.c static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n) n 199 drivers/infiniband/hw/mlx4/qp.c return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); n 202 drivers/infiniband/hw/mlx4/qp.c static void *get_send_wqe(struct mlx4_ib_qp *qp, int n) n 204 drivers/infiniband/hw/mlx4/qp.c return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift)); n 212 drivers/infiniband/hw/mlx4/qp.c static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n) n 220 drivers/infiniband/hw/mlx4/qp.c buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); n 872 drivers/infiniband/hw/mlx4/qp.c int n; n 925 drivers/infiniband/hw/mlx4/qp.c n = ib_umem_page_count(qp->umem); n 926 drivers/infiniband/hw/mlx4/qp.c shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n); n 927 drivers/infiniband/hw/mlx4/qp.c err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt); n 1080 drivers/infiniband/hw/mlx4/qp.c int n; n 1120 drivers/infiniband/hw/mlx4/qp.c n = ib_umem_page_count(qp->umem); n 1121 drivers/infiniband/hw/mlx4/qp.c shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n); n 1122 drivers/infiniband/hw/mlx4/qp.c err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt); n 42 drivers/infiniband/hw/mlx4/srq.c static void *get_wqe(struct mlx4_ib_srq *srq, int n) n 44 drivers/infiniband/hw/mlx4/srq.c return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift); n 68 drivers/infiniband/hw/mlx5/cq.c static void *get_cqe(struct mlx5_ib_cq *cq, int n) n 70 drivers/infiniband/hw/mlx5/cq.c return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n); n 73 drivers/infiniband/hw/mlx5/cq.c static u8 sw_ownership_bit(int n, int nent) n 75 drivers/infiniband/hw/mlx5/cq.c return (n & nent) ? 
1 : 0; n 78 drivers/infiniband/hw/mlx5/cq.c static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n) n 80 drivers/infiniband/hw/mlx5/cq.c void *cqe = get_cqe(cq, n & cq->ibcq.cqe); n 86 drivers/infiniband/hw/mlx5/cq.c !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) { n 2035 drivers/infiniband/hw/mlx5/mr.c int n = 0; n 2039 drivers/infiniband/hw/mlx5/mr.c n++; n 2046 drivers/infiniband/hw/mlx5/mr.c n++; n 2058 drivers/infiniband/hw/mlx5/mr.c return n; n 2158 drivers/infiniband/hw/mlx5/mr.c int n; n 2169 drivers/infiniband/hw/mlx5/mr.c n = ib_sg_to_pages(&pi_mr->ibmr, data_sg, data_sg_nents, data_sg_offset, n 2171 drivers/infiniband/hw/mlx5/mr.c if (n != data_sg_nents) n 2172 drivers/infiniband/hw/mlx5/mr.c return n; n 2183 drivers/infiniband/hw/mlx5/mr.c n += ib_sg_to_pages(&pi_mr->ibmr, meta_sg, meta_sg_nents, n 2212 drivers/infiniband/hw/mlx5/mr.c return n; n 2223 drivers/infiniband/hw/mlx5/mr.c int n; n 2233 drivers/infiniband/hw/mlx5/mr.c n = mlx5_ib_sg_to_klms(pi_mr, data_sg, data_sg_nents, data_sg_offset, n 2246 drivers/infiniband/hw/mlx5/mr.c return n; n 2256 drivers/infiniband/hw/mlx5/mr.c int n; n 2271 drivers/infiniband/hw/mlx5/mr.c n = mlx5_ib_map_pa_mr_sg_pi(ibmr, data_sg, data_sg_nents, n 2274 drivers/infiniband/hw/mlx5/mr.c if (n == data_sg_nents + meta_sg_nents) n 2285 drivers/infiniband/hw/mlx5/mr.c n = mlx5_ib_map_mtt_mr_sg_pi(ibmr, data_sg, data_sg_nents, n 2288 drivers/infiniband/hw/mlx5/mr.c if (n == data_sg_nents + meta_sg_nents) n 2292 drivers/infiniband/hw/mlx5/mr.c n = mlx5_ib_map_klm_mr_sg_pi(ibmr, data_sg, data_sg_nents, n 2295 drivers/infiniband/hw/mlx5/mr.c if (unlikely(n != data_sg_nents + meta_sg_nents)) n 2314 drivers/infiniband/hw/mlx5/mr.c int n; n 2323 drivers/infiniband/hw/mlx5/mr.c n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0, n 2326 drivers/infiniband/hw/mlx5/mr.c n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, n 2333 drivers/infiniband/hw/mlx5/mr.c return n; n 590 drivers/infiniband/hw/mlx5/qp.c int n; n 592 drivers/infiniband/hw/mlx5/qp.c n = max_bfregs(dev, bfregi) - bfregi->num_low_latency_bfregs - n 595 drivers/infiniband/hw/mlx5/qp.c return n >= 0 ? n : 0; n 4052 drivers/infiniband/hw/mlx5/qp.c size_t n) n 4054 drivers/infiniband/hw/mlx5/qp.c while (likely(n)) { n 4056 drivers/infiniband/hw/mlx5/qp.c size_t copysz = min_t(size_t, leftlen, n); n 4061 drivers/infiniband/hw/mlx5/qp.c n -= copysz; n 4063 drivers/infiniband/hw/mlx5/qp.c stride = !n ? 
ALIGN(copysz, 16) : copysz; n 14 drivers/infiniband/hw/mlx5/srq.c static void *get_wqe(struct mlx5_ib_srq *srq, int n) n 16 drivers/infiniband/hw/mlx5/srq.c return mlx5_frag_buf_get_wqe(&srq->fbc, n); n 864 drivers/infiniband/hw/mthca/mthca_provider.c int n, i; n 891 drivers/infiniband/hw/mthca/mthca_provider.c n = ib_umem_num_pages(mr->umem); n 893 drivers/infiniband/hw/mthca/mthca_provider.c mr->mtt = mthca_alloc_mtt(dev, n); n 905 drivers/infiniband/hw/mthca/mthca_provider.c i = n = 0; n 917 drivers/infiniband/hw/mthca/mthca_provider.c err = mthca_write_mtt(dev, mr->mtt, n, pages, i); n 920 drivers/infiniband/hw/mthca/mthca_provider.c n += i; n 926 drivers/infiniband/hw/mthca/mthca_provider.c err = mthca_write_mtt(dev, mr->mtt, n, pages, i); n 208 drivers/infiniband/hw/mthca/mthca_qp.c static void *get_recv_wqe(struct mthca_qp *qp, int n) n 211 drivers/infiniband/hw/mthca/mthca_qp.c return qp->queue.direct.buf + (n << qp->rq.wqe_shift); n 213 drivers/infiniband/hw/mthca/mthca_qp.c return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf + n 214 drivers/infiniband/hw/mthca/mthca_qp.c ((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1)); n 217 drivers/infiniband/hw/mthca/mthca_qp.c static void *get_send_wqe(struct mthca_qp *qp, int n) n 221 drivers/infiniband/hw/mthca/mthca_qp.c (n << qp->sq.wqe_shift); n 224 drivers/infiniband/hw/mthca/mthca_qp.c (n << qp->sq.wqe_shift)) >> n 226 drivers/infiniband/hw/mthca/mthca_qp.c ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) & n 74 drivers/infiniband/hw/mthca/mthca_srq.c static void *get_wqe(struct mthca_srq *srq, int n) n 77 drivers/infiniband/hw/mthca/mthca_srq.c return srq->queue.direct.buf + (n << srq->wqe_shift); n 79 drivers/infiniband/hw/mthca/mthca_srq.c return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf + n 80 drivers/infiniband/hw/mthca/mthca_srq.c ((n << srq->wqe_shift) & (PAGE_SIZE - 1)); n 185 drivers/infiniband/hw/qib/qib_debugfs.c loff_t n = *pos; n 200 drivers/infiniband/hw/qib/qib_debugfs.c } while (n--); n 4230 drivers/infiniband/hw/qib/qib_iba7220.c unsigned i, n; n 4244 drivers/infiniband/hw/qib/qib_iba7220.c n = dd->piobcnt2k + dd->piobcnt4k; n 4245 drivers/infiniband/hw/qib/qib_iba7220.c i = n - dd->cspec->sdmabufcnt; n 4247 drivers/infiniband/hw/qib/qib_iba7220.c for (; i < n; ++i) { n 4258 drivers/infiniband/hw/qib/qib_iba7220.c ppd->sdma_state.last_sendbuf = n; n 1372 drivers/infiniband/hw/qib/qib_iba7322.c int took, multi, n = 0; n 1380 drivers/infiniband/hw/qib/qib_iba7322.c if (n++) { n 1411 drivers/infiniband/hw/qib/qib_iba7322.c snprintf(msg, len, "%sMORE:%llX", n ? 
"," : "", n 2790 drivers/infiniband/hw/qib/qib_iba7322.c struct qib_irq_notify *n = n 2794 drivers/infiniband/hw/qib/qib_iba7322.c if (n->rcv) { n 2795 drivers/infiniband/hw/qib/qib_iba7322.c struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg; n 2799 drivers/infiniband/hw/qib/qib_iba7322.c struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg; n 2807 drivers/infiniband/hw/qib/qib_iba7322.c struct qib_irq_notify *n = n 2811 drivers/infiniband/hw/qib/qib_iba7322.c if (n->rcv) { n 2812 drivers/infiniband/hw/qib/qib_iba7322.c struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg; n 2816 drivers/infiniband/hw/qib/qib_iba7322.c struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg; n 2821 drivers/infiniband/hw/qib/qib_iba7322.c "release on HCA notify 0x%p n 0x%p\n", ref, n); n 2822 drivers/infiniband/hw/qib/qib_iba7322.c kfree(n); n 3341 drivers/infiniband/hw/qib/qib_iba7322.c struct qib_irq_notify *n; n 3345 drivers/infiniband/hw/qib/qib_iba7322.c n = kzalloc(sizeof(*n), GFP_KERNEL); n 3346 drivers/infiniband/hw/qib/qib_iba7322.c if (n) { n 3349 drivers/infiniband/hw/qib/qib_iba7322.c m->notifier = n; n 3350 drivers/infiniband/hw/qib/qib_iba7322.c n->notify.irq = pci_irq_vector(dd->pcidev, msixnum); n 3351 drivers/infiniband/hw/qib/qib_iba7322.c n->notify.notify = qib_irq_notifier_notify; n 3352 drivers/infiniband/hw/qib/qib_iba7322.c n->notify.release = qib_irq_notifier_release; n 3353 drivers/infiniband/hw/qib/qib_iba7322.c n->arg = m->arg; n 3354 drivers/infiniband/hw/qib/qib_iba7322.c n->rcv = m->rcv; n 3357 drivers/infiniband/hw/qib/qib_iba7322.c n->notify.irq, n->rcv, &n->notify); n 3359 drivers/infiniband/hw/qib/qib_iba7322.c n->notify.irq, n 3360 drivers/infiniband/hw/qib/qib_iba7322.c &n->notify); n 3363 drivers/infiniband/hw/qib/qib_iba7322.c kfree(n); n 6141 drivers/infiniband/hw/qib/qib_iba7322.c char *n; n 6147 drivers/infiniband/hw/qib/qib_iba7322.c val = simple_strtoul(str, &n, 0); n 6148 drivers/infiniband/hw/qib/qib_iba7322.c if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + n 6169 drivers/infiniband/hw/qib/qib_iba7322.c int ret = 0, n; n 6185 drivers/infiniband/hw/qib/qib_iba7322.c n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS; n 6186 drivers/infiniband/hw/qib/qib_iba7322.c qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL); n 6188 drivers/infiniband/hw/qib/qib_iba7322.c qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL); n 6202 drivers/infiniband/hw/qib/qib_iba7322.c for (n = 0; n < dd->num_pports; ++n) { n 6203 drivers/infiniband/hw/qib/qib_iba7322.c struct qib_pportdata *ppd = dd->pport + n; n 6293 drivers/infiniband/hw/qib/qib_iba7322.c unsigned n, regno; n 6311 drivers/infiniband/hw/qib/qib_iba7322.c n = dd->first_user_ctxt / dd->num_pports; n 6313 drivers/infiniband/hw/qib/qib_iba7322.c n = dd->first_user_ctxt - 1; n 6318 drivers/infiniband/hw/qib/qib_iba7322.c ctxt = (i % n) * dd->num_pports + pidx; n 6319 drivers/infiniband/hw/qib/qib_iba7322.c else if (i % n) n 6320 drivers/infiniband/hw/qib/qib_iba7322.c ctxt = (i % n) + 1; n 6878 drivers/infiniband/hw/qib/qib_iba7322.c int n, ret = 0; n 6888 drivers/infiniband/hw/qib/qib_iba7322.c n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */ n 6890 drivers/infiniband/hw/qib/qib_iba7322.c n = dd->cspec->sdmabufcnt; /* failsafe for init */ n 6892 drivers/infiniband/hw/qib/qib_iba7322.c ((dd->num_pports == 1 || ppd->port == 2) ? 
n : n 6894 drivers/infiniband/hw/qib/qib_iba7322.c lastbuf = erstbuf + n; n 368 drivers/infiniband/hw/qib/qib_mad.c static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n) n 371 drivers/infiniband/hw/qib/qib_mad.c (u32)n); n 387 drivers/infiniband/hw/qib/qib_mad.c static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n) n 390 drivers/infiniband/hw/qib/qib_mad.c (u32)n); n 608 drivers/infiniband/hw/qib/qib_mad.c unsigned i, n = qib_get_npkeys(dd); n 612 drivers/infiniband/hw/qib/qib_mad.c for (i = 0; i < n; i++) n 1046 drivers/infiniband/hw/qib/qib_mad.c unsigned i, n = qib_get_npkeys(dd); n 1048 drivers/infiniband/hw/qib/qib_mad.c for (i = 0; i < n; i++) n 292 drivers/infiniband/hw/qib/qib_mad.h #define COUNTER_MASK(q, n) (q << ((9 - n) * 3)) n 52 drivers/infiniband/hw/qib/qib_qp.c unsigned n, u16 qpt_mask) n 56 drivers/infiniband/hw/qib/qib_qp.c if (((off & qpt_mask) >> 1) >= n) n 139 drivers/infiniband/hw/qib/qib_qp.c unsigned n; n 142 drivers/infiniband/hw/qib/qib_qp.c n = 1 << (ret + 2 * (port - 1)); n 144 drivers/infiniband/hw/qib/qib_qp.c if (qpt->flags & n) n 147 drivers/infiniband/hw/qib/qib_qp.c qpt->flags |= n; n 218 drivers/infiniband/hw/qib/qib_qp.c unsigned n, qp_inuse = 0; n 220 drivers/infiniband/hw/qib/qib_qp.c for (n = 0; n < dd->num_pports; n++) { n 221 drivers/infiniband/hw/qib/qib_qp.c struct qib_ibport *ibp = &dd->pport[n].ibport_data; n 433 drivers/infiniband/hw/qib/qib_qp.c iter->n, n 737 drivers/infiniband/hw/qib/qib_rc.c u32 n = qp->s_acked; n 738 drivers/infiniband/hw/qib/qib_rc.c struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n); n 741 drivers/infiniband/hw/qib/qib_rc.c qp->s_cur = n; n 757 drivers/infiniband/hw/qib/qib_rc.c if (++n == qp->s_size) n 758 drivers/infiniband/hw/qib/qib_rc.c n = 0; n 759 drivers/infiniband/hw/qib/qib_rc.c if (n == qp->s_tail) n 761 drivers/infiniband/hw/qib/qib_rc.c wqe = rvt_get_swqe_ptr(qp, n); n 765 drivers/infiniband/hw/qib/qib_rc.c qp->s_cur = n; n 859 drivers/infiniband/hw/qib/qib_rc.c u32 n = qp->s_last; n 863 drivers/infiniband/hw/qib/qib_rc.c wqe = rvt_get_swqe_ptr(qp, n); n 871 drivers/infiniband/hw/qib/qib_rc.c if (++n == qp->s_size) n 872 drivers/infiniband/hw/qib/qib_rc.c n = 0; n 873 drivers/infiniband/hw/qib/qib_rc.c if (n == qp->s_tail) n 1687 drivers/infiniband/hw/qib/qib_rc.c static inline void qib_update_ack_queue(struct rvt_qp *qp, unsigned n) n 1691 drivers/infiniband/hw/qib/qib_rc.c next = n + 1; n 85 drivers/infiniband/hw/qib/qib_tx.c unsigned n = 0; n 106 drivers/infiniband/hw/qib/qib_tx.c n++; n 363 drivers/infiniband/hw/qib/qib_tx.c void qib_sendbuf_done(struct qib_devdata *dd, unsigned n) n 368 drivers/infiniband/hw/qib/qib_tx.c __clear_bit(n, dd->pio_writing); n 369 drivers/infiniband/hw/qib/qib_tx.c if (__test_and_clear_bit(n, dd->pio_need_disarm)) n 370 drivers/infiniband/hw/qib/qib_tx.c dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n)); n 186 drivers/infiniband/hw/qib/qib_ud.c if (++sge->n >= RVT_SEGSZ) { n 189 drivers/infiniband/hw/qib/qib_ud.c sge->n = 0; n 192 drivers/infiniband/hw/qib/qib_ud.c sge->mr->map[sge->m]->segs[sge->n].vaddr; n 194 drivers/infiniband/hw/qib/qib_ud.c sge->mr->map[sge->m]->segs[sge->n].length; n 907 drivers/infiniband/hw/qib/qib_user_sdma.c int tidsmsize, n; n 910 drivers/infiniband/hw/qib/qib_user_sdma.c n = npages*((2*PAGE_SIZE/frag_size)+1); n 911 drivers/infiniband/hw/qib/qib_user_sdma.c pktsize = struct_size(pkt, addr, n); n 933 drivers/infiniband/hw/qib/qib_user_sdma.c pkt->addrlimit = n + ARRAY_SIZE(pkt->addr); n 162 
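The qib_iba7322.c setup_txselect entries above (source lines 6141-6148) validate a numeric option by checking simple_strtoul()'s end pointer: if no digits were consumed, or the value is out of range, the input is rejected. A user-space sketch of the same check, with strtoul() and a hypothetical MAX_INDEX bound, is:

#include <stdio.h>
#include <stdlib.h>

#define MAX_INDEX 16	/* hypothetical bound, in place of the driver's table size */

static int parse_index(const char *str, unsigned long *out)
{
	char *endp;
	unsigned long val = strtoul(str, &endp, 0);

	if (endp == str || val >= MAX_INDEX)
		return -1;	/* not a number, or out of range */
	*out = val;
	return 0;
}

int main(void)
{
	unsigned long v;

	printf("\"12\"  -> %d\n", parse_index("12", &v));
	printf("\"abc\" -> %d\n", parse_index("abc", &v));
	return 0;
}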
drivers/infiniband/hw/qib/qib_verbs.c if (++sge.n >= RVT_SEGSZ) { n 165 drivers/infiniband/hw/qib/qib_verbs.c sge.n = 0; n 168 drivers/infiniband/hw/qib/qib_verbs.c sge.mr->map[sge.m]->segs[sge.n].vaddr; n 170 drivers/infiniband/hw/qib/qib_verbs.c sge.mr->map[sge.m]->segs[sge.n].length; n 195 drivers/infiniband/hw/qib/qib_verbs.c if (++sge->n >= RVT_SEGSZ) { n 198 drivers/infiniband/hw/qib/qib_verbs.c sge->n = 0; n 201 drivers/infiniband/hw/qib/qib_verbs.c sge->mr->map[sge->m]->segs[sge->n].vaddr; n 203 drivers/infiniband/hw/qib/qib_verbs.c sge->mr->map[sge->m]->segs[sge->n].length; n 404 drivers/infiniband/hw/qib/qib_verbs.c static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off) n 406 drivers/infiniband/hw/qib/qib_verbs.c data <<= ((sizeof(u32) - n) * BITS_PER_BYTE); n 407 drivers/infiniband/hw/qib/qib_verbs.c data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE); n 421 drivers/infiniband/hw/qib/qib_verbs.c static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off) n 423 drivers/infiniband/hw/qib/qib_verbs.c data >>= ((sizeof(u32) - n) * BITS_PER_BYTE); n 424 drivers/infiniband/hw/qib/qib_verbs.c data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE); n 674 drivers/infiniband/hw/qib/qib_verbs.c unsigned i, n; n 676 drivers/infiniband/hw/qib/qib_verbs.c n = 0; n 685 drivers/infiniband/hw/qib/qib_verbs.c if (n == ARRAY_SIZE(qps)) n 692 drivers/infiniband/hw/qib/qib_verbs.c qps[n++] = qp; n 697 drivers/infiniband/hw/qib/qib_verbs.c for (i = 0; i < n; i++) { n 1151 drivers/infiniband/hw/qib/qib_verbs.c unsigned i, n; n 1155 drivers/infiniband/hw/qib/qib_verbs.c n = 0; n 1165 drivers/infiniband/hw/qib/qib_verbs.c if (n == ARRAY_SIZE(qps)) n 1171 drivers/infiniband/hw/qib/qib_verbs.c qps[n++] = qp; n 1177 drivers/infiniband/hw/qib/qib_verbs.c for (i = 0; i < n; i++) { n 73 drivers/infiniband/hw/usnic/usnic_debugfs.c int n; n 86 drivers/infiniband/hw/usnic/usnic_debugfs.c n = scnprintf(ptr, left, n 90 drivers/infiniband/hw/usnic/usnic_debugfs.c UPDATE_PTR_LEFT(n, ptr, left); n 92 drivers/infiniband/hw/usnic/usnic_debugfs.c n = scnprintf(ptr, left, "Port_Num:%hu\n", n 94 drivers/infiniband/hw/usnic/usnic_debugfs.c UPDATE_PTR_LEFT(n, ptr, left); n 96 drivers/infiniband/hw/usnic/usnic_debugfs.c n = usnic_transport_sock_to_str(ptr, left, n 98 drivers/infiniband/hw/usnic/usnic_debugfs.c UPDATE_PTR_LEFT(n, ptr, left); n 99 drivers/infiniband/hw/usnic/usnic_debugfs.c n = scnprintf(ptr, left, "\n"); n 100 drivers/infiniband/hw/usnic/usnic_debugfs.c UPDATE_PTR_LEFT(n, ptr, left); n 74 drivers/infiniband/hw/usnic/usnic_ib_sysfs.c unsigned n; n 93 drivers/infiniband/hw/usnic/usnic_ib_sysfs.c n = scnprintf(ptr, left, n 102 drivers/infiniband/hw/usnic/usnic_ib_sysfs.c UPDATE_PTR_LEFT(n, ptr, left); n 109 drivers/infiniband/hw/usnic/usnic_ib_sysfs.c n = scnprintf(ptr, left, " %d %s%s", n 114 drivers/infiniband/hw/usnic/usnic_ib_sysfs.c UPDATE_PTR_LEFT(n, ptr, left); n 116 drivers/infiniband/hw/usnic/usnic_ib_sysfs.c n = scnprintf(ptr, left, "\n"); n 117 drivers/infiniband/hw/usnic/usnic_ib_sysfs.c UPDATE_PTR_LEFT(n, ptr, left); n 119 drivers/infiniband/hw/usnic/usnic_ib_sysfs.c n = scnprintf(ptr, left, "%s: no VFs\n", n 121 drivers/infiniband/hw/usnic/usnic_ib_sysfs.c UPDATE_PTR_LEFT(n, ptr, left); n 225 drivers/infiniband/hw/usnic/usnic_ib_sysfs.c int i, j, n; n 234 drivers/infiniband/hw/usnic/usnic_ib_sysfs.c n = scnprintf(ptr, left, n 240 drivers/infiniband/hw/usnic/usnic_ib_sysfs.c UPDATE_PTR_LEFT(n, ptr, left); n 246 drivers/infiniband/hw/usnic/usnic_ib_sysfs.c n = scnprintf(ptr, left, "%s[%d] ", n 
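The qib/hfi1/rdmavt entries above step through scatter-gather memory with a two-level cursor: map index m and segment index n, where n wraps to zero and m advances once RVT_SEGSZ segments of a map are consumed. A simplified sketch of that walk, with stand-in types instead of struct rvt_sge, is:

#include <stdio.h>

/*
 * In the drivers, each step then reloads vaddr/length from
 * mr->map[m]->segs[n]; that part is omitted here and the names
 * are simplified stand-ins.
 */
#define SEGSZ 4		/* stand-in for RVT_SEGSZ */
#define NMAPS 3

struct cursor {
	int m;		/* which map */
	int n;		/* which segment inside the map */
};

/* Advance to the next segment; returns 0 once every map is consumed. */
static int next_segment(struct cursor *c)
{
	if (++c->n >= SEGSZ) {
		if (++c->m >= NMAPS)
			return 0;
		c->n = 0;
	}
	return 1;
}

int main(void)
{
	struct cursor c = { 0, 0 };
	int count = 1;			/* the cursor starts on (0, 0) */

	while (next_segment(&c))
		count++;
	printf("visited %d segments\n", count);	/* prints 12 */
	return 0;
}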
249 drivers/infiniband/hw/usnic/usnic_ib_sysfs.c UPDATE_PTR_LEFT(n, ptr, left); n 253 drivers/infiniband/hw/usnic/usnic_ib_sysfs.c n = scnprintf(ptr, left, "\n"); n 254 drivers/infiniband/hw/usnic/usnic_ib_sysfs.c UPDATE_PTR_LEFT(n, ptr, left); n 65 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h #define PVRDMA_MASK(n) ((n << 1) - 1) n 581 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c static inline void *get_sq_wqe(struct pvrdma_qp *qp, unsigned int n) n 584 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c qp->sq.offset + n * qp->sq.wqe_size); n 587 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c static inline void *get_rq_wqe(struct pvrdma_qp *qp, unsigned int n) n 590 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c qp->rq.offset + n * qp->rq.wqe_size); n 382 drivers/infiniband/sw/rdmavt/cq.c u32 head, tail, n; n 439 drivers/infiniband/sw/rdmavt/cq.c n = cq->ibcq.cqe + 1 + head - tail; n 441 drivers/infiniband/sw/rdmavt/cq.c n = head - tail; n 442 drivers/infiniband/sw/rdmavt/cq.c if (unlikely((u32)cqe < n)) { n 446 drivers/infiniband/sw/rdmavt/cq.c for (n = 0; tail != head; n++) { n 448 drivers/infiniband/sw/rdmavt/cq.c u_wc->uqueue[n] = old_u_wc->uqueue[tail]; n 450 drivers/infiniband/sw/rdmavt/cq.c k_wc->kqueue[n] = old_k_wc->kqueue[tail]; n 458 drivers/infiniband/sw/rdmavt/cq.c RDMA_WRITE_UAPI_ATOMIC(u_wc->head, n); n 462 drivers/infiniband/sw/rdmavt/cq.c k_wc->head = n; n 151 drivers/infiniband/sw/rdmavt/mcast.c struct rb_node *n; n 156 drivers/infiniband/sw/rdmavt/mcast.c n = ibp->mcast_tree.rb_node; n 157 drivers/infiniband/sw/rdmavt/mcast.c while (n) { n 161 drivers/infiniband/sw/rdmavt/mcast.c mcast = rb_entry(n, struct rvt_mcast, rb_node); n 166 drivers/infiniband/sw/rdmavt/mcast.c n = n->rb_left; n 168 drivers/infiniband/sw/rdmavt/mcast.c n = n->rb_right; n 196 drivers/infiniband/sw/rdmavt/mcast.c struct rb_node **n = &ibp->mcast_tree.rb_node; n 202 drivers/infiniband/sw/rdmavt/mcast.c while (*n) { n 206 drivers/infiniband/sw/rdmavt/mcast.c pn = *n; n 213 drivers/infiniband/sw/rdmavt/mcast.c n = &pn->rb_left; n 217 drivers/infiniband/sw/rdmavt/mcast.c n = &pn->rb_right; n 261 drivers/infiniband/sw/rdmavt/mcast.c rb_link_node(&mcast->rb_node, pn, n); n 350 drivers/infiniband/sw/rdmavt/mcast.c struct rb_node *n; n 360 drivers/infiniband/sw/rdmavt/mcast.c n = ibp->mcast_tree.rb_node; n 362 drivers/infiniband/sw/rdmavt/mcast.c if (!n) { n 367 drivers/infiniband/sw/rdmavt/mcast.c mcast = rb_entry(n, struct rvt_mcast, rb_node); n 371 drivers/infiniband/sw/rdmavt/mcast.c n = n->rb_left; n 373 drivers/infiniband/sw/rdmavt/mcast.c n = n->rb_right; n 182 drivers/infiniband/sw/rdmavt/mr.c u32 n; n 206 drivers/infiniband/sw/rdmavt/mr.c n = r; n 211 drivers/infiniband/sw/rdmavt/mr.c if (r == n) n 387 drivers/infiniband/sw/rdmavt/mr.c int n, m; n 397 drivers/infiniband/sw/rdmavt/mr.c n = ib_umem_num_pages(umem); n 399 drivers/infiniband/sw/rdmavt/mr.c mr = __rvt_alloc_mr(n, pd); n 414 drivers/infiniband/sw/rdmavt/mr.c n = 0; n 423 drivers/infiniband/sw/rdmavt/mr.c mr->mr.map[m]->segs[n].vaddr = vaddr; n 424 drivers/infiniband/sw/rdmavt/mr.c mr->mr.map[m]->segs[n].length = PAGE_SIZE; n 425 drivers/infiniband/sw/rdmavt/mr.c trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr, PAGE_SIZE); n 426 drivers/infiniband/sw/rdmavt/mr.c if (++n == RVT_SEGSZ) { n 428 drivers/infiniband/sw/rdmavt/mr.c n = 0; n 606 drivers/infiniband/sw/rdmavt/mr.c int m, n; n 612 drivers/infiniband/sw/rdmavt/mr.c n = mapped_segs % RVT_SEGSZ; n 613 drivers/infiniband/sw/rdmavt/mr.c mr->mr.map[m]->segs[n].vaddr = (void *)addr; n 614 
drivers/infiniband/sw/rdmavt/mr.c mr->mr.map[m]->segs[n].length = ps; n 616 drivers/infiniband/sw/rdmavt/mr.c trace_rvt_mr_page_seg(&mr->mr, m, n, (void *)addr, ps); n 790 drivers/infiniband/sw/rdmavt/mr.c int m, n; n 809 drivers/infiniband/sw/rdmavt/mr.c n = 0; n 811 drivers/infiniband/sw/rdmavt/mr.c fmr->mr.map[m]->segs[n].vaddr = (void *)page_list[i]; n 812 drivers/infiniband/sw/rdmavt/mr.c fmr->mr.map[m]->segs[n].length = ps; n 813 drivers/infiniband/sw/rdmavt/mr.c trace_rvt_mr_fmr_seg(&fmr->mr, m, n, (void *)page_list[i], ps); n 814 drivers/infiniband/sw/rdmavt/mr.c if (++n == RVT_SEGSZ) { n 816 drivers/infiniband/sw/rdmavt/mr.c n = 0; n 919 drivers/infiniband/sw/rdmavt/mr.c unsigned n, m; n 945 drivers/infiniband/sw/rdmavt/mr.c isge->n = 0; n 981 drivers/infiniband/sw/rdmavt/mr.c n = entries_spanned_by_off % RVT_SEGSZ; n 984 drivers/infiniband/sw/rdmavt/mr.c n = 0; n 985 drivers/infiniband/sw/rdmavt/mr.c while (off >= mr->map[m]->segs[n].length) { n 986 drivers/infiniband/sw/rdmavt/mr.c off -= mr->map[m]->segs[n].length; n 987 drivers/infiniband/sw/rdmavt/mr.c n++; n 988 drivers/infiniband/sw/rdmavt/mr.c if (n >= RVT_SEGSZ) { n 990 drivers/infiniband/sw/rdmavt/mr.c n = 0; n 995 drivers/infiniband/sw/rdmavt/mr.c isge->vaddr = mr->map[m]->segs[n].vaddr + off; n 996 drivers/infiniband/sw/rdmavt/mr.c isge->length = mr->map[m]->segs[n].length - off; n 999 drivers/infiniband/sw/rdmavt/mr.c isge->n = n; n 1030 drivers/infiniband/sw/rdmavt/mr.c unsigned n, m; n 1055 drivers/infiniband/sw/rdmavt/mr.c sge->n = 0; n 1088 drivers/infiniband/sw/rdmavt/mr.c n = entries_spanned_by_off % RVT_SEGSZ; n 1091 drivers/infiniband/sw/rdmavt/mr.c n = 0; n 1092 drivers/infiniband/sw/rdmavt/mr.c while (off >= mr->map[m]->segs[n].length) { n 1093 drivers/infiniband/sw/rdmavt/mr.c off -= mr->map[m]->segs[n].length; n 1094 drivers/infiniband/sw/rdmavt/mr.c n++; n 1095 drivers/infiniband/sw/rdmavt/mr.c if (n >= RVT_SEGSZ) { n 1097 drivers/infiniband/sw/rdmavt/mr.c n = 0; n 1102 drivers/infiniband/sw/rdmavt/mr.c sge->vaddr = mr->map[m]->segs[n].vaddr + off; n 1103 drivers/infiniband/sw/rdmavt/mr.c sge->length = mr->map[m]->segs[n].length - off; n 1106 drivers/infiniband/sw/rdmavt/mr.c sge->n = n; n 134 drivers/infiniband/sw/rdmavt/qp.c static void cacheless_memcpy(void *dst, void *src, size_t n) n 142 drivers/infiniband/sw/rdmavt/qp.c __copy_user_nocache(dst, (void __user *)src, n, 0); n 542 drivers/infiniband/sw/rdmavt/qp.c unsigned n; n 545 drivers/infiniband/sw/rdmavt/qp.c n = 1 << (ret + 2 * (port_num - 1)); n 547 drivers/infiniband/sw/rdmavt/qp.c if (qpt->flags & n) n 550 drivers/infiniband/sw/rdmavt/qp.c qpt->flags |= n; n 620 drivers/infiniband/sw/rdmavt/qp.c unsigned n; n 643 drivers/infiniband/sw/rdmavt/qp.c for (n = 0; qp->s_ack_queue && n < rvt_max_atomic(rdi); n++) { n 644 drivers/infiniband/sw/rdmavt/qp.c struct rvt_ack_entry *e = &qp->s_ack_queue[n]; n 771 drivers/infiniband/sw/rdmavt/qp.c u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits); n 788 drivers/infiniband/sw/rdmavt/qp.c qpp = &rdi->qp_dev->qp_table[n]; n 797 drivers/infiniband/sw/rdmavt/qp.c trace_rvt_qpremove(qp, n); n 1427 drivers/infiniband/sw/rdmavt/qp.c u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits); n 1429 drivers/infiniband/sw/rdmavt/qp.c qp->next = rdi->qp_dev->qp_table[n]; n 1430 drivers/infiniband/sw/rdmavt/qp.c rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp); n 1431 drivers/infiniband/sw/rdmavt/qp.c trace_rvt_qpinsert(qp, n); n 2746 drivers/infiniband/sw/rdmavt/qp.c int n = iter->n; n 2766 
drivers/infiniband/sw/rdmavt/qp.c for (; n < rdi->qp_dev->qp_table_size + iter->specials; n++) { n 2770 drivers/infiniband/sw/rdmavt/qp.c if (n < iter->specials) { n 2774 drivers/infiniband/sw/rdmavt/qp.c pidx = n % rdi->ibdev.phys_port_cnt; n 2776 drivers/infiniband/sw/rdmavt/qp.c qp = rcu_dereference(rvp->qp[n & 1]); n 2780 drivers/infiniband/sw/rdmavt/qp.c (n - iter->specials)]); n 2786 drivers/infiniband/sw/rdmavt/qp.c iter->n = n; n 179 drivers/infiniband/sw/rdmavt/srq.c u32 sz, size, n, head, tail; n 227 drivers/infiniband/sw/rdmavt/srq.c n = head; n 228 drivers/infiniband/sw/rdmavt/srq.c if (n < tail) n 229 drivers/infiniband/sw/rdmavt/srq.c n += srq->rq.size - tail; n 231 drivers/infiniband/sw/rdmavt/srq.c n -= tail; n 232 drivers/infiniband/sw/rdmavt/srq.c if (size <= n) { n 236 drivers/infiniband/sw/rdmavt/srq.c n = 0; n 247 drivers/infiniband/sw/rdmavt/srq.c n++; n 255 drivers/infiniband/sw/rdmavt/srq.c RDMA_WRITE_UAPI_ATOMIC(tmp_rq.wq->head, n); n 258 drivers/infiniband/sw/rdmavt/srq.c tmp_rq.kwq->head = n; n 63 drivers/infiniband/sw/rdmavt/trace_mr.h TP_PROTO(struct rvt_mregion *mr, u16 m, u16 n, void *v, size_t len), n 64 drivers/infiniband/sw/rdmavt/trace_mr.h TP_ARGS(mr, m, n, v, len), n 76 drivers/infiniband/sw/rdmavt/trace_mr.h __field(u16, n) n 86 drivers/infiniband/sw/rdmavt/trace_mr.h __entry->n = n; n 101 drivers/infiniband/sw/rdmavt/trace_mr.h __entry->n, n 109 drivers/infiniband/sw/rdmavt/trace_mr.h TP_PROTO(struct rvt_mregion *mr, u16 m, u16 n, void *v, size_t len), n 110 drivers/infiniband/sw/rdmavt/trace_mr.h TP_ARGS(mr, m, n, v, len)); n 114 drivers/infiniband/sw/rdmavt/trace_mr.h TP_PROTO(struct rvt_mregion *mr, u16 m, u16 n, void *v, size_t len), n 115 drivers/infiniband/sw/rdmavt/trace_mr.h TP_ARGS(mr, m, n, v, len)); n 119 drivers/infiniband/sw/rdmavt/trace_mr.h TP_PROTO(struct rvt_mregion *mr, u16 m, u16 n, void *v, size_t len), n 120 drivers/infiniband/sw/rdmavt/trace_mr.h TP_ARGS(mr, m, n, v, len)); n 139 drivers/infiniband/sw/rdmavt/trace_mr.h __field(u16, n) n 153 drivers/infiniband/sw/rdmavt/trace_mr.h __entry->n = sge->m; n 169 drivers/infiniband/sw/rdmavt/trace_mr.h __entry->n, n 306 drivers/infiniband/sw/rxe/rxe_mr.c int m, n; n 326 drivers/infiniband/sw/rxe/rxe_mr.c lookup_iova(mem, iova, &m, &n, &offset); n 328 drivers/infiniband/sw/rxe/rxe_mr.c if (offset + length > mem->map[m]->buf[n].size) { n 334 drivers/infiniband/sw/rxe/rxe_mr.c addr = (void *)(uintptr_t)mem->map[m]->buf[n].addr + offset; n 125 drivers/infiniband/sw/rxe/rxe_qp.c static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n) n 129 drivers/infiniband/sw/rxe/rxe_qp.c qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL); n 1030 drivers/infiniband/sw/rxe/rxe_verbs.c int n; n 1034 drivers/infiniband/sw/rxe/rxe_verbs.c n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page); n 1043 drivers/infiniband/sw/rxe/rxe_verbs.c return n; n 907 drivers/infiniband/ulp/ipoib/ipoib_cm.c struct ipoib_cm_rx *rx, *n; n 914 drivers/infiniband/ulp/ipoib/ipoib_cm.c list_for_each_entry_safe(rx, n, &list, list) { n 48 drivers/infiniband/ulp/ipoib/ipoib_fs.c int i, n; n 50 drivers/infiniband/ulp/ipoib/ipoib_fs.c for (n = 0, i = 0; i < 8; ++i) { n 51 drivers/infiniband/ulp/ipoib/ipoib_fs.c n += sprintf(buf + n, "%x", n 54 drivers/infiniband/ulp/ipoib/ipoib_fs.c buf[n++] = ':'; n 61 drivers/infiniband/ulp/ipoib/ipoib_fs.c loff_t n = *pos; n 67 drivers/infiniband/ulp/ipoib/ipoib_fs.c while (n--) { n 160 drivers/infiniband/ulp/ipoib/ipoib_fs.c loff_t n = *pos; n 166 
drivers/infiniband/ulp/ipoib/ipoib_fs.c while (n--) { n 436 drivers/infiniband/ulp/ipoib/ipoib_ib.c int n, i; n 439 drivers/infiniband/ulp/ipoib/ipoib_ib.c n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc); n 440 drivers/infiniband/ulp/ipoib/ipoib_ib.c for (i = 0; i < n; ++i) { n 447 drivers/infiniband/ulp/ipoib/ipoib_ib.c return n == MAX_SEND_CQE; n 457 drivers/infiniband/ulp/ipoib/ipoib_ib.c int n, i; n 466 drivers/infiniband/ulp/ipoib/ipoib_ib.c n = ib_poll_cq(priv->recv_cq, t, priv->ibwc); n 468 drivers/infiniband/ulp/ipoib/ipoib_ib.c for (i = 0; i < n; i++) { n 482 drivers/infiniband/ulp/ipoib/ipoib_ib.c if (n != t) n 503 drivers/infiniband/ulp/ipoib/ipoib_ib.c int n, i; n 507 drivers/infiniband/ulp/ipoib/ipoib_ib.c n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc); n 509 drivers/infiniband/ulp/ipoib/ipoib_ib.c for (i = 0; i < n; i++) { n 517 drivers/infiniband/ulp/ipoib/ipoib_ib.c if (n < budget) { n 524 drivers/infiniband/ulp/ipoib/ipoib_ib.c return n < 0 ? 0 : n; n 975 drivers/infiniband/ulp/ipoib/ipoib_ib.c int i, n; n 985 drivers/infiniband/ulp/ipoib/ipoib_ib.c n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc); n 986 drivers/infiniband/ulp/ipoib/ipoib_ib.c for (i = 0; i < n; ++i) { n 1004 drivers/infiniband/ulp/ipoib/ipoib_ib.c } while (n == IPOIB_NUM_WC); n 558 drivers/infiniband/ulp/ipoib/ipoib_main.c struct rb_node *n = priv->path_tree.rb_node; n 562 drivers/infiniband/ulp/ipoib/ipoib_main.c while (n) { n 563 drivers/infiniband/ulp/ipoib/ipoib_main.c path = rb_entry(n, struct ipoib_path, rb_node); n 569 drivers/infiniband/ulp/ipoib/ipoib_main.c n = n->rb_left; n 571 drivers/infiniband/ulp/ipoib/ipoib_main.c n = n->rb_right; n 582 drivers/infiniband/ulp/ipoib/ipoib_main.c struct rb_node **n = &priv->path_tree.rb_node; n 587 drivers/infiniband/ulp/ipoib/ipoib_main.c while (*n) { n 588 drivers/infiniband/ulp/ipoib/ipoib_main.c pn = *n; n 594 drivers/infiniband/ulp/ipoib/ipoib_main.c n = &pn->rb_left; n 596 drivers/infiniband/ulp/ipoib/ipoib_main.c n = &pn->rb_right; n 601 drivers/infiniband/ulp/ipoib/ipoib_main.c rb_link_node(&path->rb_node, pn, n); n 651 drivers/infiniband/ulp/ipoib/ipoib_main.c struct rb_node *n; n 657 drivers/infiniband/ulp/ipoib/ipoib_main.c n = rb_first(&priv->path_tree); n 659 drivers/infiniband/ulp/ipoib/ipoib_main.c while (n) { n 660 drivers/infiniband/ulp/ipoib/ipoib_main.c path = rb_entry(n, struct ipoib_path, rb_node); n 669 drivers/infiniband/ulp/ipoib/ipoib_main.c n = rb_next(n); n 1480 drivers/infiniband/ulp/ipoib/ipoib_main.c struct ipoib_neigh *n; n 1490 drivers/infiniband/ulp/ipoib/ipoib_main.c for (n = rcu_dereference_protected(*np, n 1492 drivers/infiniband/ulp/ipoib/ipoib_main.c n != NULL; n 1493 drivers/infiniband/ulp/ipoib/ipoib_main.c n = rcu_dereference_protected(*np, n 1495 drivers/infiniband/ulp/ipoib/ipoib_main.c if (n == neigh) { n 1505 drivers/infiniband/ulp/ipoib/ipoib_main.c np = &n->hnext; n 162 drivers/infiniband/ulp/ipoib/ipoib_multicast.c struct rb_node *n = priv->multicast_tree.rb_node; n 164 drivers/infiniband/ulp/ipoib/ipoib_multicast.c while (n) { n 168 drivers/infiniband/ulp/ipoib/ipoib_multicast.c mcast = rb_entry(n, struct ipoib_mcast, rb_node); n 173 drivers/infiniband/ulp/ipoib/ipoib_multicast.c n = n->rb_left; n 175 drivers/infiniband/ulp/ipoib/ipoib_multicast.c n = n->rb_right; n 186 drivers/infiniband/ulp/ipoib/ipoib_multicast.c struct rb_node **n = &priv->multicast_tree.rb_node, *pn = NULL; n 188 drivers/infiniband/ulp/ipoib/ipoib_multicast.c while (*n) { n 192 
drivers/infiniband/ulp/ipoib/ipoib_multicast.c pn = *n; n 198 drivers/infiniband/ulp/ipoib/ipoib_multicast.c n = &pn->rb_left; n 200 drivers/infiniband/ulp/ipoib/ipoib_multicast.c n = &pn->rb_right; n 205 drivers/infiniband/ulp/ipoib/ipoib_multicast.c rb_link_node(&mcast->rb_node, pn, n); n 1019 drivers/infiniband/ulp/ipoib/ipoib_multicast.c struct rb_node *n; n 1025 drivers/infiniband/ulp/ipoib/ipoib_multicast.c n = rb_first(&priv->multicast_tree); n 1027 drivers/infiniband/ulp/ipoib/ipoib_multicast.c while (n) { n 1028 drivers/infiniband/ulp/ipoib/ipoib_multicast.c mcast = rb_entry(n, struct ipoib_mcast, rb_node); n 1043 drivers/infiniband/ulp/ipoib/ipoib_multicast.c n = rb_next(n); n 1071 drivers/infiniband/ulp/iser/iscsi_iser.c struct iser_conn *iser_conn, *n; n 1084 drivers/infiniband/ulp/iser/iscsi_iser.c list_for_each_entry_safe(iser_conn, n, &ig.connlist, n 447 drivers/infiniband/ulp/iser/iser_memory.c int n; n 454 drivers/infiniband/ulp/iser/iser_memory.c n = ib_map_mr_sg(mr, mem->sg, mem->dma_nents, NULL, SIZE_4K); n 455 drivers/infiniband/ulp/iser/iser_memory.c if (unlikely(n != mem->dma_nents)) { n 457 drivers/infiniband/ulp/iser/iser_memory.c n, mem->dma_nents); n 458 drivers/infiniband/ulp/iser/iser_memory.c return n < 0 ? n : -EINVAL; n 2508 drivers/infiniband/ulp/isert/ib_isert.c struct isert_conn *isert_conn, *n; n 2523 drivers/infiniband/ulp/isert/ib_isert.c list_for_each_entry_safe(isert_conn, n, n 2534 drivers/infiniband/ulp/isert/ib_isert.c list_for_each_entry_safe(isert_conn, n, n 514 drivers/infiniband/ulp/srp/ib_srp.c int n) n 520 drivers/infiniband/ulp/srp/ib_srp.c for (i = 0; i < n; i++) n 1530 drivers/infiniband/ulp/srp/ib_srp.c int n, err; n 1559 drivers/infiniband/ulp/srp/ib_srp.c n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p, n 1561 drivers/infiniband/ulp/srp/ib_srp.c if (unlikely(n < 0)) { n 1565 drivers/infiniband/ulp/srp/ib_srp.c sg_offset_p ? 
*sg_offset_p : -1, n); n 1566 drivers/infiniband/ulp/srp/ib_srp.c return n; n 1596 drivers/infiniband/ulp/srp/ib_srp.c return n; n 1681 drivers/infiniband/ulp/srp/ib_srp.c int i, n; n 1683 drivers/infiniband/ulp/srp/ib_srp.c n = srp_map_finish_fr(state, req, ch, count, &sg_offset); n 1684 drivers/infiniband/ulp/srp/ib_srp.c if (unlikely(n < 0)) n 1685 drivers/infiniband/ulp/srp/ib_srp.c return n; n 1687 drivers/infiniband/ulp/srp/ib_srp.c count -= n; n 1688 drivers/infiniband/ulp/srp/ib_srp.c for (i = 0; i < n; i++) n 2025 drivers/infiniband/ulp/srpt/ib_srpt.c struct srpt_nexus *nexus = NULL, *tmp_nexus = NULL, *n; n 2029 drivers/infiniband/ulp/srpt/ib_srpt.c list_for_each_entry(n, &sport->nexus_list, entry) { n 2030 drivers/infiniband/ulp/srpt/ib_srpt.c if (memcmp(n->i_port_id, i_port_id, 16) == 0 && n 2031 drivers/infiniband/ulp/srpt/ib_srpt.c memcmp(n->t_port_id, t_port_id, 16) == 0) { n 2032 drivers/infiniband/ulp/srpt/ib_srpt.c nexus = n; n 195 drivers/input/input-poller.c struct attribute *attr, int n) n 259 drivers/input/joystick/db9.c static int db9_saturn_report(unsigned char id, unsigned char data[60], struct input_dev *devs[], int n, int max_pads) n 265 drivers/input/joystick/db9.c for (j = 0; j < tmp && n < max_pads; j += 10, n++) { n 266 drivers/input/joystick/db9.c dev = devs[n]; n 322 drivers/input/joystick/db9.c return n; n 328 drivers/input/joystick/db9.c int type, n, max_pads; n 334 drivers/input/joystick/db9.c n = 1; n 338 drivers/input/joystick/db9.c n = 1; n 342 drivers/input/joystick/db9.c n = 2; n 348 drivers/input/joystick/db9.c for (tmp = 0, i = 0; i < n; i++) { n 142 drivers/input/joystick/gf2k.c #define GB(p,n,s) gf2k_get_bits(data, p, n, s) n 30 drivers/input/joystick/iforce/iforce-packets.c int n = LO(cmd); n 45 drivers/input/joystick/iforce/iforce-packets.c if (CIRC_SPACE(head, tail, XMIT_SIZE) < n+2) { n 53 drivers/input/joystick/iforce/iforce-packets.c XMIT_INC(iforce->xmit.head, n+2); n 64 drivers/input/joystick/iforce/iforce-packets.c if (n < c) c=n; n 69 drivers/input/joystick/iforce/iforce-packets.c if (n != c) { n 72 drivers/input/joystick/iforce/iforce-packets.c n - c); n 74 drivers/input/joystick/iforce/iforce-packets.c XMIT_INC(head, n); n 27 drivers/input/joystick/iforce/iforce-usb.c int n, c; n 40 drivers/input/joystick/iforce/iforce-usb.c n = iforce->xmit.buf[iforce->xmit.tail]; n 43 drivers/input/joystick/iforce/iforce-usb.c iforce_usb->out->transfer_buffer_length = n + 1; n 48 drivers/input/joystick/iforce/iforce-usb.c if (n < c) c=n; n 53 drivers/input/joystick/iforce/iforce-usb.c if (n != c) { n 56 drivers/input/joystick/iforce/iforce-usb.c n-c); n 58 drivers/input/joystick/iforce/iforce-usb.c XMIT_INC(iforce->xmit.tail, n); n 60 drivers/input/joystick/iforce/iforce-usb.c if ( (n=usb_submit_urb(iforce_usb->out, GFP_ATOMIC)) ) { n 63 drivers/input/joystick/iforce/iforce-usb.c "usb_submit_urb failed %d\n", n); n 61 drivers/input/joystick/iforce/iforce.h #define XMIT_INC(var, n) (var)+=n; (var)&= XMIT_SIZE -1 n 1016 drivers/input/keyboard/applespi.c int i, n; n 1023 drivers/input/keyboard/applespi.c n = 0; n 1029 drivers/input/keyboard/applespi.c applespi->pos[n].x = le16_to_int(f->abs_x); n 1030 drivers/input/keyboard/applespi.c applespi->pos[n].y = tp_info->y_min + tp_info->y_max - n 1032 drivers/input/keyboard/applespi.c n++; n 1038 drivers/input/keyboard/applespi.c input_mt_assign_slots(input, applespi->slots, applespi->pos, n, 0); n 1040 drivers/input/keyboard/applespi.c for (i = 0; i < n; i++) n 102 drivers/input/keyboard/lm8323.c #define 
PWM_RAMP(s, t, n, u) ((!!(s) << 14) | ((t) & 0x3f) << 8 | \ n 103 drivers/input/keyboard/lm8323.c ((n) & 0x7f) | ((u) ? 0 : 0x80)) n 40 drivers/input/keyboard/lpc32xx-keys.c #define LPC32XX_KSCAN_DEB_NUM_DEB_PASS(n) ((n) & 0xFF) n 49 drivers/input/keyboard/lpc32xx-keys.c #define LPC32XX_KSCAN_SCTRL_SCAN_DELAY(n) ((n) & 0xFF) n 54 drivers/input/keyboard/lpc32xx-keys.c #define LPC32XX_KSCAN_MSEL_SELECT(n) ((n) & 0xF) n 47 drivers/input/keyboard/pxa27x_keypad.c #define KPC_MKRN(n) ((((n) - 1) & 0x7) << 26) /* matrix key row number */ n 48 drivers/input/keyboard/pxa27x_keypad.c #define KPC_MKCN(n) ((((n) - 1) & 0x7) << 23) /* matrix key column number */ n 49 drivers/input/keyboard/pxa27x_keypad.c #define KPC_DKN(n) ((((n) - 1) & 0x7) << 6) /* direct key number */ n 56 drivers/input/keyboard/pxa27x_keypad.c #define KPC_MS(n) (0x1 << (13 + (n))) /* Matrix scan line 'n' */ n 70 drivers/input/keyboard/pxa27x_keypad.c #define KPDK_DK(n) ((n) & 0xff) n 77 drivers/input/keyboard/pxa27x_keypad.c #define KPREC_RECOUNT0(n) ((n) & 0xff) n 78 drivers/input/keyboard/pxa27x_keypad.c #define KPREC_RECOUNT1(n) (((n) >> 16) & 0xff) n 84 drivers/input/keyboard/pxa27x_keypad.c #define KPAS_MUKP(n) (((n) >> 26) & 0x1f) n 85 drivers/input/keyboard/pxa27x_keypad.c #define KPAS_RP(n) (((n) >> 4) & 0xf) n 86 drivers/input/keyboard/pxa27x_keypad.c #define KPAS_CP(n) ((n) & 0xf) n 253 drivers/input/keyboard/pxa27x_keypad.c unsigned int n = MAX_MATRIX_KEY_NUM + (i << 1); n 257 drivers/input/keyboard/pxa27x_keypad.c keypad->keycodes[n] = keycode; n 261 drivers/input/keyboard/pxa27x_keypad.c keypad->keycodes[n + 1] = keycode; n 92 drivers/input/keyboard/sh_keysc.c int i, k, n; n 106 drivers/input/keyboard/sh_keysc.c n = keyin_nr * i; n 116 drivers/input/keyboard/sh_keysc.c __set_bit(n + k, keys); n 1229 drivers/input/misc/ims-pcu.c struct attribute *attr, int n) n 61 drivers/input/misc/pmic8xxx-pwrkey.c #define PM8058_REGULATOR_BANK_SEL(n) ((n) << PM8058_REGULATOR_BANK_SHIFT) n 66 drivers/input/misc/yealink.c #define _PIC(t, h, hm, n) \ n 68 drivers/input/misc/yealink.c .u = { .p = { .name = (n), .a = (h), .m = (hm) } } } n 510 drivers/input/mouse/alps.c static void alps_report_mt_data(struct psmouse *psmouse, int n) n 517 drivers/input/mouse/alps.c input_mt_assign_slots(dev, slot, f->mt, n, 0); n 518 drivers/input/mouse/alps.c for (i = 0; i < n; i++) n 610 drivers/input/mouse/bcm5974.c int raw_n, i, n = 0; n 621 drivers/input/mouse/bcm5974.c dev->pos[n].x = raw2int(f->abs_x); n 622 drivers/input/mouse/bcm5974.c dev->pos[n].y = c->y.min + c->y.max - raw2int(f->abs_y); n 623 drivers/input/mouse/bcm5974.c dev->index[n++] = f; n 626 drivers/input/mouse/bcm5974.c input_mt_assign_slots(input, dev->slots, dev->pos, n, 0); n 628 drivers/input/mouse/bcm5974.c for (i = 0; i < n; i++) n 29 drivers/input/mouse/cypress_ps2.c static void cypress_set_packet_size(struct psmouse *psmouse, unsigned int n) n 32 drivers/input/mouse/cypress_ps2.c cytp->pkt_size = n; n 493 drivers/input/mouse/cypress_ps2.c int n = report_data->contact_cnt; n 497 drivers/input/mouse/cypress_ps2.c if (n > CYTP_MAX_MT_SLOTS) n 498 drivers/input/mouse/cypress_ps2.c n = CYTP_MAX_MT_SLOTS; n 499 drivers/input/mouse/cypress_ps2.c for (i = 0; i < n; i++) n 522 drivers/input/mouse/cypress_ps2.c int n; n 526 drivers/input/mouse/cypress_ps2.c n = report_data.contact_cnt; n 527 drivers/input/mouse/cypress_ps2.c if (n > CYTP_MAX_MT_SLOTS) n 528 drivers/input/mouse/cypress_ps2.c n = CYTP_MAX_MT_SLOTS; n 530 drivers/input/mouse/cypress_ps2.c for (i = 0; i < n; i++) { n 536 
drivers/input/mouse/cypress_ps2.c input_mt_assign_slots(input, slots, pos, n, 0); n 538 drivers/input/mouse/cypress_ps2.c for (i = 0; i < n; i++) { n 29 drivers/input/mouse/pxa930_trkball.c #define TBCR_Y_FLT(n) (((n) & 0xf) << 6) n 30 drivers/input/mouse/pxa930_trkball.c #define TBCR_X_FLT(n) (((n) & 0xf) << 2) n 32 drivers/input/mouse/pxa930_trkball.c #define TBCNTR_YM(n) (((n) >> 24) & 0xff) n 33 drivers/input/mouse/pxa930_trkball.c #define TBCNTR_YP(n) (((n) >> 16) & 0xff) n 34 drivers/input/mouse/pxa930_trkball.c #define TBCNTR_XM(n) (((n) >> 8) & 0xff) n 35 drivers/input/mouse/pxa930_trkball.c #define TBCNTR_XP(n) ((n) & 0xff) n 213 drivers/input/mouse/trackpoint.c struct attribute *attr, int n) n 984 drivers/input/serio/i8042.c int n = 0; n 992 drivers/input/serio/i8042.c if (n >= 10) { n 997 drivers/input/serio/i8042.c if (n != 0) n 1000 drivers/input/serio/i8042.c if (i8042_command(&ctr[n++ % 2], I8042_CMD_CTL_RCTR)) { n 1005 drivers/input/serio/i8042.c } while (n < 2 || ctr[0] != ctr[1]); n 123 drivers/input/touchscreen/88pm860x-ts.c int data, n, ret; n 133 drivers/input/touchscreen/88pm860x-ts.c if (!of_property_read_u32(np, "marvell,88pm860x-gpadc-prebias", &n)) n 134 drivers/input/touchscreen/88pm860x-ts.c data |= (n << 1) & PM8607_GPADC_PREBIAS_MASK; n 135 drivers/input/touchscreen/88pm860x-ts.c if (!of_property_read_u32(np, "marvell,88pm860x-gpadc-slot-cycle", &n)) n 136 drivers/input/touchscreen/88pm860x-ts.c data |= (n << 3) & PM8607_GPADC_SLOT_CYCLE_MASK; n 137 drivers/input/touchscreen/88pm860x-ts.c if (!of_property_read_u32(np, "marvell,88pm860x-gpadc-off-scale", &n)) n 138 drivers/input/touchscreen/88pm860x-ts.c data |= (n << 5) & PM8607_GPADC_OFF_SCALE_MASK; n 139 drivers/input/touchscreen/88pm860x-ts.c if (!of_property_read_u32(np, "marvell,88pm860x-gpadc-sw-cal", &n)) n 140 drivers/input/touchscreen/88pm860x-ts.c data |= (n << 7) & PM8607_GPADC_SW_CAL_MASK; n 154 drivers/input/touchscreen/88pm860x-ts.c if (!of_property_read_u32(np, "marvell,88pm860x-pen-prebias", &n)) n 155 drivers/input/touchscreen/88pm860x-ts.c data |= n & PM8607_PD_PREBIAS_MASK; n 156 drivers/input/touchscreen/88pm860x-ts.c if (!of_property_read_u32(np, "marvell,88pm860x-pen-prechg", &n)) n 157 drivers/input/touchscreen/88pm860x-ts.c data |= n & PM8607_PD_PRECHG_MASK; n 598 drivers/input/touchscreen/ad7877.c struct attribute *attr, int n) n 45 drivers/input/touchscreen/elants_i2c.c #define ELAN_TS_RESOLUTION(n, m) (((n) - 1) * (m)) n 190 drivers/input/touchscreen/htcpen.c static int htcpen_isa_suspend(struct device *dev, unsigned int n, n 198 drivers/input/touchscreen/htcpen.c static int htcpen_isa_resume(struct device *dev, unsigned int n) n 101 drivers/input/touchscreen/pixcir_i2c_ts.c int n, i, slot; n 105 drivers/input/touchscreen/pixcir_i2c_ts.c n = report->num_touches; n 106 drivers/input/touchscreen/pixcir_i2c_ts.c if (n > PIXCIR_MAX_SLOTS) n 107 drivers/input/touchscreen/pixcir_i2c_ts.c n = PIXCIR_MAX_SLOTS; n 110 drivers/input/touchscreen/pixcir_i2c_ts.c input_mt_assign_slots(ts->input, slots, report->pos, n, 0); n 112 drivers/input/touchscreen/pixcir_i2c_ts.c for (i = 0; i < n; i++) { n 339 drivers/input/touchscreen/tsc200x-core.c struct attribute *attr, int n) n 55 drivers/interconnect/core.c static void icc_summary_show_one(struct seq_file *s, struct icc_node *n) n 57 drivers/interconnect/core.c if (!n) n 61 drivers/interconnect/core.c n->name, n->avg_bw, n->peak_bw); n 74 drivers/interconnect/core.c struct icc_node *n; n 76 drivers/interconnect/core.c list_for_each_entry(n, 
&provider->nodes, node_list) { n 79 drivers/interconnect/core.c icc_summary_show_one(s, n); n 80 drivers/interconnect/core.c hlist_for_each_entry(r, &n->req_list, req_node) { n 131 drivers/interconnect/core.c struct icc_node *n, *node = NULL; n 146 drivers/interconnect/core.c list_for_each_entry_safe(node, n, &traverse_list, search_list) { n 184 drivers/interconnect/core.c list_for_each_entry_reverse(n, &visited_list, search_list) n 185 drivers/interconnect/core.c n->is_traversed = false; n 344 drivers/interconnect/qcom/qcs404.c struct icc_node *n; n 356 drivers/interconnect/qcom/qcs404.c list_for_each_entry(n, &provider->nodes, node_list) n 357 drivers/interconnect/qcom/qcs404.c qcom_icc_aggregate(n, 0, n->avg_bw, n->peak_bw, n 511 drivers/interconnect/qcom/qcs404.c struct icc_node *n, *tmp; n 513 drivers/interconnect/qcom/qcs404.c list_for_each_entry_safe(n, tmp, &provider->nodes, node_list) { n 514 drivers/interconnect/qcom/qcs404.c icc_node_del(n); n 515 drivers/interconnect/qcom/qcs404.c icc_node_destroy(n->id); n 559 drivers/interconnect/qcom/sdm845.c int n[SDM845_MAX_VCD]) n 565 drivers/interconnect/qcom/sdm845.c memset(n, 0, sizeof(int) * SDM845_MAX_VCD); n 578 drivers/interconnect/qcom/sdm845.c n[batch]++; n 585 drivers/interconnect/qcom/sdm845.c if (n[batch] >= MAX_RPMH_PAYLOAD) { n 587 drivers/interconnect/qcom/sdm845.c n[batch] -= cur_vcd_size; n 588 drivers/interconnect/qcom/sdm845.c n[batch + 1] = cur_vcd_size; n 871 drivers/interconnect/qcom/sdm845.c struct icc_node *n, *tmp; n 873 drivers/interconnect/qcom/sdm845.c list_for_each_entry_safe(n, tmp, &provider->nodes, node_list) { n 874 drivers/interconnect/qcom/sdm845.c icc_node_del(n); n 875 drivers/interconnect/qcom/sdm845.c icc_node_destroy(n->id); n 2558 drivers/iommu/amd_iommu.c int p, n; n 2562 drivers/iommu/amd_iommu.c n = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE); n 2563 drivers/iommu/amd_iommu.c if (p + n > boundary_size) n 2565 drivers/iommu/amd_iommu.c npages += n; n 712 drivers/iommu/arm-smmu-v3.c static bool queue_has_space(struct arm_smmu_ll_queue *q, u32 n) n 724 drivers/iommu/arm-smmu-v3.c return space >= n; n 775 drivers/iommu/arm-smmu-v3.c static u32 queue_inc_prod_n(struct arm_smmu_ll_queue *q, int n) n 777 drivers/iommu/arm-smmu-v3.c u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + n; n 1275 drivers/iommu/arm-smmu-v3.c u32 prod, int n) n 1283 drivers/iommu/arm-smmu-v3.c for (i = 0; i < n; ++i) { n 1308 drivers/iommu/arm-smmu-v3.c u64 *cmds, int n, bool sync) n 1326 drivers/iommu/arm-smmu-v3.c while (!queue_has_space(&llq, n + sync)) { n 1334 drivers/iommu/arm-smmu-v3.c head.prod = queue_inc_prod_n(&llq, n + sync) | n 1351 drivers/iommu/arm-smmu-v3.c arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n); n 1353 drivers/iommu/arm-smmu-v3.c prod = queue_inc_prod_n(&llq, n); n 1403 drivers/iommu/arm-smmu-v3.c llq.prod = queue_inc_prod_n(&llq, n); n 96 drivers/iommu/arm-smmu.h #define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2)) n 101 drivers/iommu/arm-smmu.h #define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2)) n 119 drivers/iommu/arm-smmu.h #define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2)) n 134 drivers/iommu/arm-smmu.h #define ARM_SMMU_GR1_CBFRSYNRA(n) (0x400 + ((n) << 2)) n 136 drivers/iommu/arm-smmu.h #define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2)) n 340 drivers/iommu/arm-smmu.h static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n) n 342 drivers/iommu/arm-smmu.h return smmu->base + (n << smmu->pgshift); n 379 drivers/iommu/arm-smmu.h #define ARM_SMMU_CB(s, n) ((s)->numpage + 
(n)) n 391 drivers/iommu/arm-smmu.h #define arm_smmu_cb_read(s, n, o) \ n 392 drivers/iommu/arm-smmu.h arm_smmu_readl((s), ARM_SMMU_CB((s), (n)), (o)) n 393 drivers/iommu/arm-smmu.h #define arm_smmu_cb_write(s, n, o, v) \ n 394 drivers/iommu/arm-smmu.h arm_smmu_writel((s), ARM_SMMU_CB((s), (n)), (o), (v)) n 395 drivers/iommu/arm-smmu.h #define arm_smmu_cb_readq(s, n, o) \ n 396 drivers/iommu/arm-smmu.h arm_smmu_readq((s), ARM_SMMU_CB((s), (n)), (o)) n 397 drivers/iommu/arm-smmu.h #define arm_smmu_cb_writeq(s, n, o, v) \ n 398 drivers/iommu/arm-smmu.h arm_smmu_writeq((s), ARM_SMMU_CB((s), (n)), (o), (v)) n 135 drivers/iommu/exynos-iommu.c #define CFG_QOS(n) ((n & 0xF) << 7) n 409 drivers/iommu/exynos-iommu.c unsigned int i, n, itype; n 420 drivers/iommu/exynos-iommu.c n = ARRAY_SIZE(sysmmu_faults); n 425 drivers/iommu/exynos-iommu.c n = ARRAY_SIZE(sysmmu_v5_faults); n 433 drivers/iommu/exynos-iommu.c for (i = 0; i < n; i++, finfo++) n 437 drivers/iommu/exynos-iommu.c BUG_ON(i == n); n 136 drivers/iommu/io-pgtable-arm-v7s.c #define ARM_V7S_PRRR_TR(n, type) (((type) & 0x3) << ((n) * 2)) n 141 drivers/iommu/io-pgtable-arm-v7s.c #define ARM_V7S_PRRR_NOS(n) BIT((n) + 24) n 143 drivers/iommu/io-pgtable-arm-v7s.c #define ARM_V7S_NMRR_IR(n, attr) (((attr) & 0x3) << ((n) * 2)) n 144 drivers/iommu/io-pgtable-arm-v7s.c #define ARM_V7S_NMRR_OR(n, attr) (((attr) & 0x3) << ((n) * 2 + 16)) n 154 drivers/iommu/io-pgtable-arm.c #define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3) n 188 drivers/iommu/ipmmu-vmsa.c #define IMMAIR_ATTR_SHIFT(n) ((n) << 3) n 199 drivers/iommu/ipmmu-vmsa.c #define IMPMBA(n) (0x0280 + ((n) * 4)) n 200 drivers/iommu/ipmmu-vmsa.c #define IMPMBD(n) (0x02c0 + ((n) * 4)) n 202 drivers/iommu/ipmmu-vmsa.c #define IMUCTR(n) ((n) < 32 ? IMUCTR0(n) : IMUCTR32(n)) n 203 drivers/iommu/ipmmu-vmsa.c #define IMUCTR0(n) (0x0300 + ((n) * 16)) n 204 drivers/iommu/ipmmu-vmsa.c #define IMUCTR32(n) (0x0600 + (((n) - 32) * 16)) n 208 drivers/iommu/ipmmu-vmsa.c #define IMUCTR_TTSEL_MMU(n) ((n) << 4) n 214 drivers/iommu/ipmmu-vmsa.c #define IMUASID(n) ((n) < 32 ? 
IMUASID0(n) : IMUASID32(n)) n 215 drivers/iommu/ipmmu-vmsa.c #define IMUASID0(n) (0x0308 + ((n) * 16)) n 216 drivers/iommu/ipmmu-vmsa.c #define IMUASID32(n) (0x0608 + (((n) - 32) * 16)) n 20 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_GLOBAL_REG_N(b, n, r, v) SET_GLOBAL_REG(b, ((r) + (n << 2)), (v)) n 21 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_GLOBAL_REG_N(b, n, r) GET_GLOBAL_REG(b, ((r) + (n << 2))) n 183 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_RWVMID(b, n, v) SET_GLOBAL_FIELD(b, (n<<2)|(CBACR_N), RWVMID, v) n 184 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_RWE(b, n, v) SET_GLOBAL_FIELD(b, (n<<2)|(CBACR_N), RWE, v) n 185 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_RWGE(b, n, v) SET_GLOBAL_FIELD(b, (n<<2)|(CBACR_N), RWGE, v) n 186 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_CBVMID(b, n, v) SET_GLOBAL_FIELD(b, (n<<2)|(CBACR_N), CBVMID, v) n 187 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_IRPTNDX(b, n, v) SET_GLOBAL_FIELD(b, (n<<2)|(CBACR_N), IRPTNDX, v) n 191 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_VMID(b, n, v) SET_GLOBAL_FIELD(b, (n<<2)|(M2VCBR_N), VMID, v) n 192 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_CBNDX(b, n, v) SET_GLOBAL_FIELD(b, (n<<2)|(M2VCBR_N), CBNDX, v) n 193 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_BYPASSD(b, n, v) SET_GLOBAL_FIELD(b, (n<<2)|(M2VCBR_N), BYPASSD, v) n 194 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_BPRCOSH(b, n, v) SET_GLOBAL_FIELD(b, (n<<2)|(M2VCBR_N), BPRCOSH, v) n 195 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_BPRCISH(b, n, v) SET_GLOBAL_FIELD(b, (n<<2)|(M2VCBR_N), BPRCISH, v) n 196 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_BPRCNSH(b, n, v) SET_GLOBAL_FIELD(b, (n<<2)|(M2VCBR_N), BPRCNSH, v) n 197 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_BPSHCFG(b, n, v) SET_GLOBAL_FIELD(b, (n<<2)|(M2VCBR_N), BPSHCFG, v) n 198 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_NSCFG(b, n, v) SET_GLOBAL_FIELD(b, (n<<2)|(M2VCBR_N), NSCFG, v) n 199 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_BPMTCFG(b, n, v) SET_GLOBAL_FIELD(b, (n<<2)|(M2VCBR_N), BPMTCFG, v) n 200 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_BPMEMTYPE(b, n, v) \ n 201 drivers/iommu/msm_iommu_hw-8xxx.h SET_GLOBAL_FIELD(b, (n<<2)|(M2VCBR_N), BPMEMTYPE, v) n 301 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_RWVMID(b, n) GET_GLOBAL_FIELD(b, (n<<2)|(CBACR_N), RWVMID) n 302 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_RWE(b, n) GET_GLOBAL_FIELD(b, (n<<2)|(CBACR_N), RWE) n 303 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_RWGE(b, n) GET_GLOBAL_FIELD(b, (n<<2)|(CBACR_N), RWGE) n 304 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_CBVMID(b, n) GET_GLOBAL_FIELD(b, (n<<2)|(CBACR_N), CBVMID) n 305 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_IRPTNDX(b, n) GET_GLOBAL_FIELD(b, (n<<2)|(CBACR_N), IRPTNDX) n 309 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_VMID(b, n) GET_GLOBAL_FIELD(b, (n<<2)|(M2VCBR_N), VMID) n 310 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_CBNDX(b, n) GET_GLOBAL_FIELD(b, (n<<2)|(M2VCBR_N), CBNDX) n 311 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_BYPASSD(b, n) GET_GLOBAL_FIELD(b, (n<<2)|(M2VCBR_N), BYPASSD) n 312 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_BPRCOSH(b, n) GET_GLOBAL_FIELD(b, (n<<2)|(M2VCBR_N), BPRCOSH) n 313 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_BPRCISH(b, n) GET_GLOBAL_FIELD(b, (n<<2)|(M2VCBR_N), BPRCISH) n 314 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_BPRCNSH(b, n) GET_GLOBAL_FIELD(b, (n<<2)|(M2VCBR_N), BPRCNSH) n 315 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_BPSHCFG(b, n) GET_GLOBAL_FIELD(b, (n<<2)|(M2VCBR_N), 
BPSHCFG) n 316 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_NSCFG(b, n) GET_GLOBAL_FIELD(b, (n<<2)|(M2VCBR_N), NSCFG) n 317 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_BPMTCFG(b, n) GET_GLOBAL_FIELD(b, (n<<2)|(M2VCBR_N), BPMTCFG) n 318 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_BPMEMTYPE(b, n) GET_GLOBAL_FIELD(b, (n<<2)|(M2VCBR_N), BPMEMTYPE) n 700 drivers/iommu/msm_iommu_hw-8xxx.h #define NMRR_ICP(nmrr, n) (((nmrr) & (3 << ((n) * 2))) >> ((n) * 2)) n 701 drivers/iommu/msm_iommu_hw-8xxx.h #define NMRR_OCP(nmrr, n) (((nmrr) & (3 << ((n) * 2 + 16))) >> \ n 702 drivers/iommu/msm_iommu_hw-8xxx.h ((n) * 2 + 16)) n 746 drivers/iommu/msm_iommu_hw-8xxx.h #define PRRR_NOS(prrr, n) ((prrr) & (1 << ((n) + 24)) ? 1 : 0) n 747 drivers/iommu/msm_iommu_hw-8xxx.h #define PRRR_MT(prrr, n) ((((prrr) & (3 << ((n) * 2))) >> ((n) * 2))) n 298 drivers/iommu/omap-iommu.c struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n) n 304 drivers/iommu/omap-iommu.c l.vict = n; n 16 drivers/iommu/omap-iommu.h #define for_each_iotlb_cr(obj, n, __i, cr) \ n 18 drivers/iommu/omap-iommu.h (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true); \ n 235 drivers/iommu/omap-iommu.h struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n); n 77 drivers/irqchip/alphascale_asm9260-icoll.h #define ASM9260_HW_ICOLL_INTERRUPTn(n) (0x0060 + ((n) >> 2) * 0x10) n 86 drivers/irqchip/alphascale_asm9260-icoll.h #define ASM9260_BM_ICOLL_INTERRUPTn_SHIFT(n) (((n) & 0x3) << 3) n 87 drivers/irqchip/alphascale_asm9260-icoll.h #define ASM9260_BM_ICOLL_INTERRUPTn_ENABLE(n) (1 << (2 + \ n 88 drivers/irqchip/alphascale_asm9260-icoll.h ASM9260_BM_ICOLL_INTERRUPTn_SHIFT(n))) n 99 drivers/irqchip/alphascale_asm9260-icoll.h #define ASM9260_HW_ICOLL_CLEARn(n) (((n >> 5) * 0x10) \ n 101 drivers/irqchip/alphascale_asm9260-icoll.h #define ASM9260_BM_CLEAR_BIT(n) BIT(n & 0x1f) n 39 drivers/irqchip/irq-atmel-aic.c #define AT91_AIC_SMR(n) ((n) * 4) n 41 drivers/irqchip/irq-atmel-aic.c #define AT91_AIC_SVR(n) (0x80 + ((n) * 4)) n 48 drivers/irqchip/irq-bcm2835.c #define MAKE_HWIRQ(b, n) ((b << 5) | (n)) n 32 drivers/irqchip/irq-davinci-cp-intc.c #define DAVINCI_CP_INTC_SYS_STAT_CLR(n) (0x0280 + (n << 2)) n 33 drivers/irqchip/irq-davinci-cp-intc.c #define DAVINCI_CP_INTC_SYS_ENABLE_CLR(n) (0x0380 + (n << 2)) n 34 drivers/irqchip/irq-davinci-cp-intc.c #define DAVINCI_CP_INTC_CHAN_MAP(n) (0x0400 + (n << 2)) n 35 drivers/irqchip/irq-davinci-cp-intc.c #define DAVINCI_CP_INTC_SYS_POLARITY(n) (0x0d00 + (n << 2)) n 36 drivers/irqchip/irq-davinci-cp-intc.c #define DAVINCI_CP_INTC_SYS_TYPE(n) (0x0d80 + (n << 2)) n 37 drivers/irqchip/irq-davinci-cp-intc.c #define DAVINCI_CP_INTC_HOST_ENABLE(n) (0x1500 + (n << 2)) n 33 drivers/irqchip/irq-dw-apb-ictl.c int n; n 37 drivers/irqchip/irq-dw-apb-ictl.c for (n = 0; n < d->revmap_size; n += 32) { n 38 drivers/irqchip/irq-dw-apb-ictl.c struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, n); n 267 drivers/irqchip/irq-gic-v3-mbi.c int ret, n; n 274 drivers/irqchip/irq-gic-v3-mbi.c n = of_property_count_elems_of_size(np, "mbi-ranges", sizeof(u32)); n 275 drivers/irqchip/irq-gic-v3-mbi.c if (n <= 0 || n % 2) n 278 drivers/irqchip/irq-gic-v3-mbi.c mbi_range_nr = n / 2; n 283 drivers/irqchip/irq-gic-v3-mbi.c for (n = 0; n < mbi_range_nr; n++) { n 284 drivers/irqchip/irq-gic-v3-mbi.c ret = of_property_read_u32_index(np, "mbi-ranges", n * 2, n 285 drivers/irqchip/irq-gic-v3-mbi.c &mbi_ranges[n].spi_start); n 288 drivers/irqchip/irq-gic-v3-mbi.c ret = of_property_read_u32_index(np, "mbi-ranges", n * 2 + 1, n 289 
drivers/irqchip/irq-gic-v3-mbi.c &mbi_ranges[n].nr_spis); n 293 drivers/irqchip/irq-gic-v3-mbi.c mbi_ranges[n].bm = kcalloc(BITS_TO_LONGS(mbi_ranges[n].nr_spis), n 295 drivers/irqchip/irq-gic-v3-mbi.c if (!mbi_ranges[n].bm) { n 299 drivers/irqchip/irq-gic-v3-mbi.c pr_info("MBI range [%d:%d]\n", mbi_ranges[n].spi_start, n 300 drivers/irqchip/irq-gic-v3-mbi.c mbi_ranges[n].spi_start + mbi_ranges[n].nr_spis - 1); n 331 drivers/irqchip/irq-gic-v3-mbi.c for (n = 0; n < mbi_range_nr; n++) n 332 drivers/irqchip/irq-gic-v3-mbi.c kfree(mbi_ranges[n].bm); n 1632 drivers/irqchip/irq-gic-v3.c int n; n 1641 drivers/irqchip/irq-gic-v3.c n = of_property_count_elems_of_size(child_part, "affinity", n 1643 drivers/irqchip/irq-gic-v3.c WARN_ON(n <= 0); n 1645 drivers/irqchip/irq-gic-v3.c for (i = 0; i < n; i++) { n 19 drivers/irqchip/irq-imx-irqsteer.c #define CHANMASK(n, t) (CTRL_STRIDE_OFF(t, 0) + 0x4 * (n) + 0x4) n 20 drivers/irqchip/irq-imx-irqsteer.c #define CHANSET(n, t) (CTRL_STRIDE_OFF(t, 1) + 0x4 * (n) + 0x4) n 21 drivers/irqchip/irq-imx-irqsteer.c #define CHANSTATUS(n, t) (CTRL_STRIDE_OFF(t, 2) + 0x4 * (n) + 0x4) n 153 drivers/irqchip/irq-mmp.c unsigned long mask, status, n; n 174 drivers/irqchip/irq-mmp.c for_each_set_bit(n, &status, BITS_PER_LONG) { n 175 drivers/irqchip/irq-mmp.c generic_handle_irq(icu_data[i].virq_base + n); n 37 drivers/irqchip/irq-mxs.c #define HW_ICOLL_INTERRUPTn(n) ((n) * 0x10) n 36 drivers/irqchip/irq-orion.c int n, base = 0; n 38 drivers/irqchip/irq-orion.c for (n = 0; n < dgc->num_chips; n++, base += ORION_IRQS_PER_CHIP) { n 56 drivers/irqchip/irq-orion.c int n, ret, base, num_chips = 0; n 76 drivers/irqchip/irq-orion.c for (n = 0, base = 0; n < num_chips; n++, base += ORION_IRQS_PER_CHIP) { n 80 drivers/irqchip/irq-orion.c of_address_to_resource(np, n, &r); n 84 drivers/irqchip/irq-orion.c np, n); n 88 drivers/irqchip/irq-orion.c panic("%pOFn: unable to map resource %d", np, n); n 86 drivers/irqchip/irq-renesas-h8s.c int n; n 93 drivers/irqchip/irq-renesas-h8s.c for (n = 0; n <= 'k' - 'a'; n++) n 94 drivers/irqchip/irq-renesas-h8s.c writew(0x0000, IPRA + (n * 2)); n 25 drivers/irqchip/irq-renesas-irqc.c #define IRQC_INT_CPU_BASE(n) (0x000 + ((n) * 0x10)) n 36 drivers/irqchip/irq-renesas-irqc.c #define IRQC_CONFIG(n) (0x180 + ((n) * 0x04)) n 31 drivers/irqchip/irq-renesas-rza1.c #define ICR1_IRQS(n, sense) ((sense) << ((n) * 2)) /* IRQ Sense Select */ n 36 drivers/irqchip/irq-renesas-rza1.c #define ICR1_IRQS_MASK(n) ICR1_IRQS((n), 3) n 298 drivers/irqchip/irq-s3c24xx.c unsigned int n, offset, irq; n 316 drivers/irqchip/irq-s3c24xx.c n = __ffs(src); n 317 drivers/irqchip/irq-s3c24xx.c src &= ~(1 << n); n 318 drivers/irqchip/irq-s3c24xx.c irq = irq_find_mapping(sub_intc->domain, offset + n); n 1166 drivers/irqchip/irq-s3c24xx.c static int s3c24xx_irq_xlate_of(struct irq_domain *d, struct device_node *n, n 237 drivers/irqchip/irq-stm32-exti.c int n, i, irq_base = 0; n 245 drivers/irqchip/irq-stm32-exti.c for_each_set_bit(n, &pending, IRQS_PER_BANK) { n 246 drivers/irqchip/irq-stm32-exti.c virq = irq_find_mapping(domain, irq_base + n); n 36 drivers/irqchip/irq-vf610-mscm-ir.c #define MSCM_IRSPRC(n) (0x80 + 2 * (n)) n 237 drivers/irqchip/qcom-pdc.c int ret, n; n 239 drivers/irqchip/qcom-pdc.c n = of_property_count_elems_of_size(np, "qcom,pdc-ranges", sizeof(u32)); n 240 drivers/irqchip/qcom-pdc.c if (n <= 0 || n % 3) n 243 drivers/irqchip/qcom-pdc.c pdc_region_cnt = n / 3; n 250 drivers/irqchip/qcom-pdc.c for (n = 0; n < pdc_region_cnt; n++) { n 252 drivers/irqchip/qcom-pdc.c 
n * 3 + 0, n 253 drivers/irqchip/qcom-pdc.c &pdc_region[n].pin_base); n 257 drivers/irqchip/qcom-pdc.c n * 3 + 1, n 258 drivers/irqchip/qcom-pdc.c &pdc_region[n].parent_base); n 262 drivers/irqchip/qcom-pdc.c n * 3 + 2, n 263 drivers/irqchip/qcom-pdc.c &pdc_region[n].cnt); n 144 drivers/isdn/capi/capi.c struct ackqueue_entry *n; n 146 drivers/isdn/capi/capi.c n = kmalloc(sizeof(*n), GFP_ATOMIC); n 147 drivers/isdn/capi/capi.c if (unlikely(!n)) { n 151 drivers/isdn/capi/capi.c n->datahandle = datahandle; n 152 drivers/isdn/capi/capi.c INIT_LIST_HEAD(&n->list); n 154 drivers/isdn/capi/capi.c list_add_tail(&n->list, &mp->ackqueue); n 128 drivers/isdn/capi/capilib.c struct list_head *l, *n; n 131 drivers/isdn/capi/capilib.c list_for_each_safe(l, n, head) { n 145 drivers/isdn/capi/capilib.c struct list_head *l, *n; n 148 drivers/isdn/capi/capilib.c list_for_each_safe(l, n, head) { n 572 drivers/isdn/capi/capiutil.c size_t n, r; n 578 drivers/isdn/capi/capiutil.c n = vsnprintf(cdb->p, r, fmt, f); n 580 drivers/isdn/capi/capiutil.c if (n >= r) { n 585 drivers/isdn/capi/capiutil.c while ((ns - cdb->pos) <= n) n 600 drivers/isdn/capi/capiutil.c n = vsnprintf(cdb->p, r, fmt, f); n 603 drivers/isdn/capi/capiutil.c cdb->p += n; n 604 drivers/isdn/capi/capiutil.c cdb->pos += n; n 528 drivers/isdn/hardware/mISDN/netjet.c u32 m, v, n = 0; n 568 drivers/isdn/hardware/mISDN/netjet.c n = p[0]; n 570 drivers/isdn/hardware/mISDN/netjet.c n <<= 8; n 576 drivers/isdn/hardware/mISDN/netjet.c v |= n; n 585 drivers/isdn/hardware/mISDN/netjet.c n = p[i]; n 586 drivers/isdn/hardware/mISDN/netjet.c v |= (bc->bch.nr & 1) ? n : n << 8; n 360 drivers/isdn/mISDN/dsp_blowfish.c #define EROUND(a, b, n) do { b ^= P[n]; a ^= bf_F(b); } while (0) n 361 drivers/isdn/mISDN/dsp_blowfish.c #define DROUND(a, b, n) do { a ^= bf_F(b); b ^= P[n]; } while (0) n 124 drivers/isdn/mISDN/dsp_dtmf.c int k, n, i; n 190 drivers/isdn/mISDN/dsp_dtmf.c for (n = 0; n < DSP_DTMF_NPOINTS; n++) { n 124 drivers/isdn/mISDN/dsp_pipeline.c struct dsp_element_entry *entry, *n; n 129 drivers/isdn/mISDN/dsp_pipeline.c list_for_each_entry_safe(entry, n, &dsp_elements, list) n 159 drivers/isdn/mISDN/dsp_pipeline.c struct dsp_element_entry *entry, *n; n 165 drivers/isdn/mISDN/dsp_pipeline.c list_for_each_entry_safe(entry, n, &dsp_elements, list) { n 193 drivers/isdn/mISDN/dsp_pipeline.c struct dsp_pipeline_entry *entry, *n; n 195 drivers/isdn/mISDN/dsp_pipeline.c list_for_each_entry_safe(entry, n, &pipeline->list, list) { n 223 drivers/isdn/mISDN/dsp_pipeline.c struct dsp_element_entry *entry, *n; n 244 drivers/isdn/mISDN/dsp_pipeline.c list_for_each_entry_safe(entry, n, &dsp_elements, list) n 609 drivers/leds/leds-lm3533.c struct attribute *attr, int n) n 767 drivers/leds/leds-lp5523.c int i, n, ret; n 770 drivers/leds/leds-lp5523.c n = min_t(int, len, LP5523_MAX_LEDS); n 774 drivers/leds/leds-lp5523.c for (i = 0; i < n; i++) { n 159 drivers/leds/leds-pca955x.c static int pca955x_write_psc(struct i2c_client *client, int n, u8 val) n 165 drivers/leds/leds-pca955x.c pca95xx_num_input_regs(pca955x->chipdef->bits) + 2*n, n 169 drivers/leds/leds-pca955x.c __func__, n, val, ret); n 180 drivers/leds/leds-pca955x.c static int pca955x_write_pwm(struct i2c_client *client, int n, u8 val) n 186 drivers/leds/leds-pca955x.c pca95xx_num_input_regs(pca955x->chipdef->bits) + 1 + 2*n, n 190 drivers/leds/leds-pca955x.c __func__, n, val, ret); n 198 drivers/leds/leds-pca955x.c static int pca955x_write_ls(struct i2c_client *client, int n, u8 val) n 204 
drivers/leds/leds-pca955x.c pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n, n 208 drivers/leds/leds-pca955x.c __func__, n, val, ret); n 216 drivers/leds/leds-pca955x.c static int pca955x_read_ls(struct i2c_client *client, int n, u8 *val) n 222 drivers/leds/leds-pca955x.c pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n); n 225 drivers/leds/leds-pca955x.c __func__, n, ret); n 291 drivers/leds/leds-pca955x.c static int pca955x_read_input(struct i2c_client *client, int n, u8 *val) n 293 drivers/leds/leds-pca955x.c int ret = i2c_smbus_read_byte_data(client, n); n 297 drivers/leds/leds-pca955x.c __func__, n, ret); n 279 drivers/leds/leds-tca6507.c int n = tca->reg_file[bit] & ~mask; n 281 drivers/leds/leds-tca6507.c n |= mask; n 282 drivers/leds/leds-tca6507.c if (tca->reg_file[bit] != n) { n 283 drivers/leds/leds-tca6507.c tca->reg_file[bit] = n; n 296 drivers/leds/leds-tca6507.c int n; n 301 drivers/leds/leds-tca6507.c n = tca->reg_file[reg] & ~mask; n 302 drivers/leds/leds-tca6507.c n |= new; n 303 drivers/leds/leds-tca6507.c if (tca->reg_file[reg] != n) { n 304 drivers/leds/leds-tca6507.c tca->reg_file[reg] = n; n 31 drivers/leds/trigger/ledtrig-backlight.c struct bl_trig_notifier *n = container_of(p, n 33 drivers/leds/trigger/ledtrig-backlight.c struct led_classdev *led = n->led; n 45 drivers/leds/trigger/ledtrig-backlight.c if (new_status == n->old_status) n 48 drivers/leds/trigger/ledtrig-backlight.c if ((n->old_status == UNBLANK) ^ n->invert) { n 49 drivers/leds/trigger/ledtrig-backlight.c n->brightness = led->brightness; n 52 drivers/leds/trigger/ledtrig-backlight.c led_set_brightness_nosleep(led, n->brightness); n 55 drivers/leds/trigger/ledtrig-backlight.c n->old_status = new_status; n 63 drivers/leds/trigger/ledtrig-backlight.c struct bl_trig_notifier *n = led_trigger_get_drvdata(dev); n 65 drivers/leds/trigger/ledtrig-backlight.c return sprintf(buf, "%u\n", n->invert); n 72 drivers/leds/trigger/ledtrig-backlight.c struct bl_trig_notifier *n = led_trigger_get_drvdata(dev); n 83 drivers/leds/trigger/ledtrig-backlight.c n->invert = invert; n 86 drivers/leds/trigger/ledtrig-backlight.c if ((n->old_status == BLANK) ^ n->invert) n 89 drivers/leds/trigger/ledtrig-backlight.c led_set_brightness_nosleep(led, n->brightness); n 105 drivers/leds/trigger/ledtrig-backlight.c struct bl_trig_notifier *n; n 107 drivers/leds/trigger/ledtrig-backlight.c n = kzalloc(sizeof(struct bl_trig_notifier), GFP_KERNEL); n 108 drivers/leds/trigger/ledtrig-backlight.c if (!n) n 110 drivers/leds/trigger/ledtrig-backlight.c led_set_trigger_data(led, n); n 112 drivers/leds/trigger/ledtrig-backlight.c n->led = led; n 113 drivers/leds/trigger/ledtrig-backlight.c n->brightness = led->brightness; n 114 drivers/leds/trigger/ledtrig-backlight.c n->old_status = UNBLANK; n 115 drivers/leds/trigger/ledtrig-backlight.c n->notifier.notifier_call = fb_notifier_callback; n 117 drivers/leds/trigger/ledtrig-backlight.c ret = fb_register_client(&n->notifier); n 126 drivers/leds/trigger/ledtrig-backlight.c struct bl_trig_notifier *n = led_get_trigger_data(led); n 128 drivers/leds/trigger/ledtrig-backlight.c fb_unregister_client(&n->notifier); n 129 drivers/leds/trigger/ledtrig-backlight.c kfree(n); n 57 drivers/leds/trigger/ledtrig-gpio.c struct device_attribute *attr, const char *buf, size_t n) n 71 drivers/leds/trigger/ledtrig-gpio.c return n; n 85 drivers/leds/trigger/ledtrig-gpio.c struct device_attribute *attr, const char *buf, size_t n) n 104 drivers/leds/trigger/ledtrig-gpio.c return n; n 118 
drivers/leds/trigger/ledtrig-gpio.c struct device_attribute *attr, const char *buf, size_t n) n 132 drivers/leds/trigger/ledtrig-gpio.c return n; n 138 drivers/leds/trigger/ledtrig-gpio.c return n; n 154 drivers/leds/trigger/ledtrig-gpio.c return ret ? ret : n; n 117 drivers/lightnvm/pblk-write.c int n = 0; n 130 drivers/lightnvm/pblk-write.c if (n < rqd_ppas && lba_list[paddr] != addr_empty) n 140 drivers/lightnvm/pblk-write.c n++; n 203 drivers/macintosh/macio-adb.c int i, n, err; n 250 drivers/macintosh/macio-adb.c n = in_8(&adb->dcount.r) & HMB; n 251 drivers/macintosh/macio-adb.c for (i = 0; i < n; ++i) n 253 drivers/macintosh/macio-adb.c ibuf_len = n; n 352 drivers/macintosh/smu.c static inline int bcd2hex (int n) n 354 drivers/macintosh/smu.c return (((n & 0xf0) >> 4) * 10) + (n & 0xf); n 358 drivers/macintosh/smu.c static inline int hex2bcd (int n) n 360 drivers/macintosh/smu.c return ((n / 10) << 4) + (n % 10); n 350 drivers/macintosh/therm_adt746x.c static ssize_t store_##name(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) \ n 360 drivers/macintosh/therm_adt746x.c return n; \ n 364 drivers/macintosh/therm_adt746x.c static ssize_t store_##name(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) \ n 372 drivers/macintosh/therm_adt746x.c return n; \ n 235 drivers/macintosh/via-pmu.c void pmu_blink(int n); n 2580 drivers/macintosh/via-pmu.c void pmu_blink(int n) n 2586 drivers/macintosh/via-pmu.c for (; n > 0; --n) { n 34 drivers/mailbox/pl320-ipc.c #define MBOX_MASK(n) (1 << (n)) n 38 drivers/mailbox/pl320-ipc.c #define CHAN_MASK(n) (1 << (n)) n 246 drivers/md/bcache/alloc.c size_t n; n 248 drivers/md/bcache/alloc.c get_random_bytes(&n, sizeof(n)); n 250 drivers/md/bcache/alloc.c n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket); n 251 drivers/md/bcache/alloc.c n += ca->sb.first_bucket; n 253 drivers/md/bcache/alloc.c b = ca->buckets + n; n 491 drivers/md/bcache/alloc.c struct bkey *k, int n, bool wait) n 500 drivers/md/bcache/alloc.c BUG_ON(!n || n > c->caches_loaded || n > MAX_CACHES_PER_SET); n 506 drivers/md/bcache/alloc.c for (i = 0; i < n; i++) { n 528 drivers/md/bcache/alloc.c struct bkey *k, int n, bool wait) n 533 drivers/md/bcache/alloc.c ret = __bch_bucket_alloc_set(c, reserve, k, n, wait); n 902 drivers/md/bcache/bcache.h #define kobj_attribute_write(n, fn) \ n 903 drivers/md/bcache/bcache.h static struct kobj_attribute ksysfs_##n = __ATTR(n, 0200, NULL, fn) n 905 drivers/md/bcache/bcache.h #define kobj_attribute_rw(n, show, store) \ n 906 drivers/md/bcache/bcache.h static struct kobj_attribute ksysfs_##n = \ n 907 drivers/md/bcache/bcache.h __ATTR(n, 0600, show, store) n 972 drivers/md/bcache/bcache.h struct bkey *k, int n, bool wait); n 974 drivers/md/bcache/bcache.h struct bkey *k, int n, bool wait); n 967 drivers/md/bcache/bset.c unsigned int inorder, j, n = 1; n 970 drivers/md/bcache/bset.c unsigned int p = n << 4; n 975 drivers/md/bcache/bset.c j = n; n 980 drivers/md/bcache/bset.c n = j * 2; n 982 drivers/md/bcache/bset.c n = j * 2 + 1; n 985 drivers/md/bcache/bset.c n = j * 2; n 987 drivers/md/bcache/bset.c n = j * 2 + 1; n 989 drivers/md/bcache/bset.c } while (n < t->size); n 997 drivers/md/bcache/bset.c if (n & 1) { n 1181 drivers/md/bcache/btree.c struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent); n 1183 drivers/md/bcache/btree.c if (!IS_ERR_OR_NULL(n)) { n 1184 drivers/md/bcache/btree.c mutex_lock(&n->write_lock); n 1185 drivers/md/bcache/btree.c bch_btree_sort_into(&b->keys, 
&n->keys, &b->c->sort); n 1186 drivers/md/bcache/btree.c bkey_copy_key(&n->key, &b->key); n 1187 drivers/md/bcache/btree.c mutex_unlock(&n->write_lock); n 1190 drivers/md/bcache/btree.c return n; n 1540 drivers/md/bcache/btree.c struct btree *n; n 1545 drivers/md/bcache/btree.c n = btree_node_alloc_replacement(replace, NULL); n 1549 drivers/md/bcache/btree.c btree_node_free(n); n 1550 drivers/md/bcache/btree.c rw_unlock(true, n); n 1554 drivers/md/bcache/btree.c bch_btree_node_write_sync(n); n 1557 drivers/md/bcache/btree.c bch_keylist_add(&keys, &n->key); n 1566 drivers/md/bcache/btree.c rw_unlock(true, n); n 1703 drivers/md/bcache/btree.c struct btree *n = NULL; n 1709 drivers/md/bcache/btree.c n = btree_node_alloc_replacement(b, NULL); n 1711 drivers/md/bcache/btree.c if (!IS_ERR_OR_NULL(n)) { n 1712 drivers/md/bcache/btree.c bch_btree_node_write_sync(n); n 1714 drivers/md/bcache/btree.c bch_btree_set_root(n); n 1716 drivers/md/bcache/btree.c rw_unlock(true, n); n 1782 drivers/md/bcache/btree.c struct keybuf_key *w, *n; n 1790 drivers/md/bcache/btree.c rbtree_postorder_for_each_entry_safe(w, n, n 136 drivers/md/bcache/extents.c size_t n = PTR_BUCKET_NR(b->c, k, j); n 138 drivers/md/bcache/extents.c pr_err(" bucket %zu", n); n 139 drivers/md/bcache/extents.c if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets) n 365 drivers/md/bcache/journal.c uint64_t start = i->j.last_seq, end = i->j.seq, n = start; n 371 drivers/md/bcache/journal.c if (n != i->j.seq) { n 372 drivers/md/bcache/journal.c if (n == start && is_discard_enabled(s)) n 374 drivers/md/bcache/journal.c n, i->j.seq - 1, start, end); n 377 drivers/md/bcache/journal.c n, i->j.seq - 1, start, end); n 402 drivers/md/bcache/journal.c n = i->j.seq + 1; n 644 drivers/md/bcache/journal.c unsigned int iter, n = 0; n 685 drivers/md/bcache/journal.c k->ptr[n++] = MAKE_PTR(0, n 691 drivers/md/bcache/journal.c if (n) { n 693 drivers/md/bcache/journal.c SET_KEY_PTRS(k, n); n 167 drivers/md/bcache/request.c struct bkey *n = bkey_next(src); n 173 drivers/md/bcache/request.c src = n; n 202 drivers/md/bcache/request.c struct bio *bio = op->bio, *n; n 239 drivers/md/bcache/request.c n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split); n 241 drivers/md/bcache/request.c n->bi_end_io = bch_data_insert_endio; n 242 drivers/md/bcache/request.c n->bi_private = cl; n 254 drivers/md/bcache/request.c bio_csum(n, k); n 259 drivers/md/bcache/request.c bio_set_op_attrs(n, REQ_OP_WRITE, 0); n 260 drivers/md/bcache/request.c bch_submit_bbio(n, op->c, k, 0); n 261 drivers/md/bcache/request.c } while (n != bio); n 527 drivers/md/bcache/request.c struct bio *n, *bio = &s->bio.bio; n 561 drivers/md/bcache/request.c n = bio_next_split(bio, min_t(uint64_t, INT_MAX, n 565 drivers/md/bcache/request.c bio_key = &container_of(n, struct bbio, bio)->key; n 568 drivers/md/bcache/request.c bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key); n 569 drivers/md/bcache/request.c bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key); n 571 drivers/md/bcache/request.c n->bi_end_io = bch_cache_read_endio; n 572 drivers/md/bcache/request.c n->bi_private = &s->cl; n 585 drivers/md/bcache/request.c __bch_submit_bbio(n, b->c); n 586 drivers/md/bcache/request.c return n == bio ? 
MAP_DONE : MAP_CONTINUE; n 816 drivers/md/bcache/super.c size_t n; n 830 drivers/md/bcache/super.c n = d->nr_stripes * sizeof(atomic_t); n 831 drivers/md/bcache/super.c d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL); n 835 drivers/md/bcache/super.c n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long); n 836 drivers/md/bcache/super.c d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL); n 2535 drivers/md/bcache/super.c static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x) n 1018 drivers/md/bcache/sysfs.c size_t n = ca->sb.nbuckets, i; n 1042 drivers/md/bcache/sysfs.c for (i = ca->sb.first_bucket; i < n; i++) n 1046 drivers/md/bcache/sysfs.c sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL); n 1048 drivers/md/bcache/sysfs.c while (n && n 1049 drivers/md/bcache/sysfs.c !cached[n - 1]) n 1050 drivers/md/bcache/sysfs.c --n; n 1052 drivers/md/bcache/sysfs.c while (cached < p + n && n 1054 drivers/md/bcache/sysfs.c cached++, n--; n 1056 drivers/md/bcache/sysfs.c for (i = 0; i < n; i++) n 1059 drivers/md/bcache/sysfs.c if (n) n 1060 drivers/md/bcache/sysfs.c do_div(sum, n); n 1063 drivers/md/bcache/sysfs.c q[i] = INITIAL_PRIO - cached[n * (i + 1) / n 1080 drivers/md/bcache/sysfs.c n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1)); n 47 drivers/md/bcache/sysfs.h #define write_attribute(n) __sysfs_attribute(n, 0200) n 48 drivers/md/bcache/sysfs.h #define read_attribute(n) __sysfs_attribute(n, 0444) n 49 drivers/md/bcache/sysfs.h #define rw_attribute(n) __sysfs_attribute(n, 0644) n 124 drivers/md/bcache/util.c bool bch_is_zero(const char *p, size_t n) n 128 drivers/md/bcache/util.c for (i = 0; i < n; i++) n 364 drivers/md/bcache/util.h bool bch_is_zero(const char *p, size_t n); n 455 drivers/md/bcache/util.h #define __DIV_SAFE(n, d, zero) \ n 457 drivers/md/bcache/util.h typeof(n) _n = (n); \ n 462 drivers/md/bcache/util.h #define DIV_SAFE(n, d) __DIV_SAFE(n, d, 0) n 473 drivers/md/bcache/util.h struct rb_node **n = &(root)->rb_node, *parent = NULL; \ n 477 drivers/md/bcache/util.h while (*n) { \ n 478 drivers/md/bcache/util.h parent = *n; \ n 479 drivers/md/bcache/util.h this = container_of(*n, typeof(*(new)), member); \ n 483 drivers/md/bcache/util.h n = res < 0 \ n 484 drivers/md/bcache/util.h ? &(*n)->rb_left \ n 485 drivers/md/bcache/util.h : &(*n)->rb_right; \ n 488 drivers/md/bcache/util.h rb_link_node(&(new)->member, parent, n); \ n 497 drivers/md/bcache/util.h struct rb_node *n = (root)->rb_node; \ n 501 drivers/md/bcache/util.h while (n) { \ n 502 drivers/md/bcache/util.h this = container_of(n, typeof(search), member); \ n 508 drivers/md/bcache/util.h n = res < 0 \ n 509 drivers/md/bcache/util.h ? n->rb_left \ n 510 drivers/md/bcache/util.h : n->rb_right; \ n 517 drivers/md/bcache/util.h struct rb_node *n = (root)->rb_node; \ n 521 drivers/md/bcache/util.h while (n) { \ n 522 drivers/md/bcache/util.h this = container_of(n, typeof(search), member); \ n 526 drivers/md/bcache/util.h n = n->rb_left; \ n 528 drivers/md/bcache/util.h n = n->rb_right; \ n 250 drivers/md/dm-bufio.c struct rb_node *n = c->buffer_tree.rb_node; n 253 drivers/md/dm-bufio.c while (n) { n 254 drivers/md/dm-bufio.c b = container_of(n, struct dm_buffer, node); n 259 drivers/md/dm-bufio.c n = (b->block < block) ? 
n->rb_left : n->rb_right; n 1426 drivers/md/dm-bufio.c void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n) n 1428 drivers/md/dm-bufio.c c->minimum_buffers = n; n 26 drivers/md/dm-cache-policy-smq.c static unsigned safe_div(unsigned n, unsigned d) n 28 drivers/md/dm-cache-policy-smq.c return d ? n / d : 0u; n 31 drivers/md/dm-cache-policy-smq.c static unsigned safe_mod(unsigned n, unsigned d) n 33 drivers/md/dm-cache-policy-smq.c return d ? n % d : 0u; n 732 drivers/md/dm-cache-target.c static dm_block_t block_div(dm_block_t b, uint32_t n) n 734 drivers/md/dm-cache-target.c do_div(b, n); n 3308 drivers/md/dm-cache-target.c uint64_t n = from_cblock(cache->cache_size); n 3310 drivers/md/dm-cache-target.c if (b >= n) { n 3312 drivers/md/dm-cache-target.c cache_device_name(cache), b, n); n 3316 drivers/md/dm-cache-target.c if (e > n) { n 3318 drivers/md/dm-cache-target.c cache_device_name(cache), e, n); n 1681 drivers/md/dm-clone-target.c static int validate_nr_regions(unsigned long n, char **error) n 1687 drivers/md/dm-clone-target.c if (n > (1UL << 31)) { n 70 drivers/md/dm-delay.c struct bio *n; n 73 drivers/md/dm-delay.c n = bio->bi_next; n 76 drivers/md/dm-delay.c bio = n; n 809 drivers/md/dm-era-target.c static bool valid_nr_blocks(dm_block_t n) n 815 drivers/md/dm-era-target.c return n < (1ull << 31); n 108 drivers/md/dm-init.c const unsigned int n = dev->dmi.target_count - 1; n 128 drivers/md/dm-init.c dev->table[n] = sp; n 143 drivers/md/dm-init.c dev->target_args_array[n] = kstrndup(field[3], DM_MAX_STR_SIZE, n 145 drivers/md/dm-init.c if (!dev->target_args_array[n]) n 407 drivers/md/dm-integrity.c #define sector_to_block(ic, n) \ n 409 drivers/md/dm-integrity.c BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1)); \ n 410 drivers/md/dm-integrity.c (n) >>= (ic)->sb->log2_sectors_per_block; \ n 675 drivers/md/dm-integrity.c static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n) n 680 drivers/md/dm-integrity.c access_journal_check(ic, section, n, true, "access_journal_entry"); n 682 drivers/md/dm-integrity.c rel_sector = n % JOURNAL_BLOCK_SECTORS; n 683 drivers/md/dm-integrity.c offset = n / JOURNAL_BLOCK_SECTORS; n 689 drivers/md/dm-integrity.c static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n) n 691 drivers/md/dm-integrity.c n <<= ic->sb->log2_sectors_per_block; n 693 drivers/md/dm-integrity.c n += JOURNAL_BLOCK_SECTORS; n 695 drivers/md/dm-integrity.c access_journal_check(ic, section, n, false, "access_journal_data"); n 697 drivers/md/dm-integrity.c return access_journal(ic, section, n); n 1100 drivers/md/dm-integrity.c struct rb_node **n = &ic->in_progress.rb_node; n 1115 drivers/md/dm-integrity.c while (*n) { n 1116 drivers/md/dm-integrity.c struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node); n 1118 drivers/md/dm-integrity.c parent = *n; n 1120 drivers/md/dm-integrity.c n = &range->node.rb_left; n 1122 drivers/md/dm-integrity.c n = &range->node.rb_right; n 1128 drivers/md/dm-integrity.c rb_link_node(&new_range->node, parent, n); n 1223 drivers/md/dm-integrity.c struct rb_node *n = ic->journal_tree_root.rb_node; n 1226 drivers/md/dm-integrity.c while (n) { n 1227 drivers/md/dm-integrity.c struct journal_node *j = container_of(n, struct journal_node, node); n 1233 drivers/md/dm-integrity.c n = j->node.rb_left; n 1235 drivers/md/dm-integrity.c n = j->node.rb_right; n 2071 drivers/md/dm-integrity.c unsigned i, j, n; n 2093 
drivers/md/dm-integrity.c for (n = 0; n < commit_sections; n++) { n 2157 drivers/md/dm-integrity.c unsigned i, j, n; n 2168 drivers/md/dm-integrity.c for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) { n 2563 drivers/md/dm-integrity.c unsigned i, j, n; n 2568 drivers/md/dm-integrity.c for (n = 0; n < n_sections; n++) { n 2569 drivers/md/dm-integrity.c i = start_section + n; n 2770 drivers/md/dm-integrity.c static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, void *x) n 2772 drivers/md/dm-integrity.c struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier); n 2969 drivers/md/dm-integrity.c #define EMIT_ALG(a, n) \ n 2972 drivers/md/dm-integrity.c DMEMIT(" %s:%s", n, ic->a.alg_string); \ n 597 drivers/md/dm-raid.c unsigned int n = 1, f = 1, r = 0; n 609 drivers/md/dm-raid.c n = copies; n 626 drivers/md/dm-raid.c return r | (f << RAID10_FAR_COPIES_SHIFT) | n; n 676 drivers/md/dm-snap.c struct hlist_bl_node *pos, *n; n 683 drivers/md/dm-snap.c hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list) n 1566 drivers/md/dm-snap.c struct bio *n; n 1569 drivers/md/dm-snap.c n = bio->bi_next; n 1572 drivers/md/dm-snap.c bio = n; n 1583 drivers/md/dm-snap.c struct bio *n; n 1587 drivers/md/dm-snap.c n = bio->bi_next; n 1592 drivers/md/dm-snap.c bio = n; n 1601 drivers/md/dm-snap.c struct bio *n; n 1604 drivers/md/dm-snap.c n = bio->bi_next; n 1607 drivers/md/dm-snap.c bio = n; n 2581 drivers/md/dm-snap.c sector_t n; n 2590 drivers/md/dm-snap.c for (n = 0; n < size; n += merging_snap->ti->max_io_len) n 2591 drivers/md/dm-snap.c if (__origin_write(&o->snapshots, sector + n, NULL) == n 910 drivers/md/dm-stats.c unsigned n; n 924 drivers/md/dm-stats.c n = 0; n 936 drivers/md/dm-stats.c (*histogram_boundaries)[n] = hi; n 940 drivers/md/dm-stats.c n++; n 74 drivers/md/dm-table.c static unsigned int int_log(unsigned int n, unsigned int base) n 78 drivers/md/dm-table.c while (n > 1) { n 79 drivers/md/dm-table.c n = dm_div_up(n, base); n 89 drivers/md/dm-table.c static inline unsigned int get_child(unsigned int n, unsigned int k) n 91 drivers/md/dm-table.c return (n * CHILDREN_PER_NODE) + k; n 98 drivers/md/dm-table.c unsigned int l, unsigned int n) n 100 drivers/md/dm-table.c return t->index[l] + (n * KEYS_PER_NODE); n 107 drivers/md/dm-table.c static sector_t high(struct dm_table *t, unsigned int l, unsigned int n) n 110 drivers/md/dm-table.c n = get_child(n, CHILDREN_PER_NODE - 1); n 112 drivers/md/dm-table.c if (n >= t->counts[l]) n 115 drivers/md/dm-table.c return get_node(t, l, n)[KEYS_PER_NODE - 1]; n 124 drivers/md/dm-table.c unsigned int n, k; n 127 drivers/md/dm-table.c for (n = 0U; n < t->counts[l]; n++) { n 128 drivers/md/dm-table.c node = get_node(t, l, n); n 131 drivers/md/dm-table.c node[k] = high(t, l + 1, get_child(n, k)); n 1365 drivers/md/dm-table.c unsigned int l, n = 0, k = 0; n 1372 drivers/md/dm-table.c n = get_child(n, k); n 1373 drivers/md/dm-table.c node = get_node(t, l, n); n 1380 drivers/md/dm-table.c return &t->targets[(KEYS_PER_NODE * n) + k]; n 2846 drivers/md/dm-thin.c static bool is_factor(sector_t block_size, uint32_t n) n 2848 drivers/md/dm-thin.c return !sector_div(block_size, n); n 130 drivers/md/dm-verity-fec.c unsigned n, i, offset; n 141 drivers/md/dm-verity-fec.c fec_for_each_buffer_rs_block(fio, n, i) { n 142 drivers/md/dm-verity-fec.c block = fec_buffer_rs_block(v, fio, n, i); n 212 drivers/md/dm-verity-fec.c unsigned n, k; n 286 drivers/md/dm-verity-fec.c fec_for_each_buffer_rs_block(fio, n, j) 
{ n 287 drivers/md/dm-verity-fec.c k = fec_buffer_rs_index(n, j) + block_offset; n 292 drivers/md/dm-verity-fec.c rs_block = fec_buffer_rs_block(v, fio, n, j); n 308 drivers/md/dm-verity-fec.c unsigned n; n 313 drivers/md/dm-verity-fec.c fec_for_each_prealloc_buffer(n) { n 314 drivers/md/dm-verity-fec.c if (fio->bufs[n]) n 317 drivers/md/dm-verity-fec.c fio->bufs[n] = mempool_alloc(&v->fec->prealloc_pool, GFP_NOWAIT); n 318 drivers/md/dm-verity-fec.c if (unlikely(!fio->bufs[n])) { n 325 drivers/md/dm-verity-fec.c fec_for_each_extra_buffer(fio, n) { n 326 drivers/md/dm-verity-fec.c if (fio->bufs[n]) n 329 drivers/md/dm-verity-fec.c fio->bufs[n] = mempool_alloc(&v->fec->extra_pool, GFP_NOWAIT); n 331 drivers/md/dm-verity-fec.c if (unlikely(!fio->bufs[n])) n 334 drivers/md/dm-verity-fec.c fio->nbufs = n; n 348 drivers/md/dm-verity-fec.c unsigned n; n 350 drivers/md/dm-verity-fec.c fec_for_each_buffer(fio, n) n 351 drivers/md/dm-verity-fec.c memset(fio->bufs[n], 0, v->fec->rsn << DM_VERITY_FEC_BUF_RS_BITS); n 488 drivers/md/dm-verity-fec.c unsigned n; n 497 drivers/md/dm-verity-fec.c fec_for_each_prealloc_buffer(n) n 498 drivers/md/dm-verity-fec.c mempool_free(fio->bufs[n], &f->prealloc_pool); n 500 drivers/md/dm-verity-fec.c fec_for_each_extra_buffer(fio, n) n 501 drivers/md/dm-verity-fec.c mempool_free(fio->bufs[n], &f->extra_pool); n 1934 drivers/md/dm-zoned-metadata.c unsigned int n = 0; n 1943 drivers/md/dm-zoned-metadata.c n += BITS_PER_LONG; n 1950 drivers/md/dm-zoned-metadata.c n++; n 1954 drivers/md/dm-zoned-metadata.c return n; n 2042 drivers/md/dm-zoned-metadata.c unsigned int n = 0; n 2063 drivers/md/dm-zoned-metadata.c n += count; n 2071 drivers/md/dm-zoned-metadata.c if (likely(zone->weight + n <= zone_nr_blocks)) n 2072 drivers/md/dm-zoned-metadata.c zone->weight += n; n 2076 drivers/md/dm-zoned-metadata.c zone_nr_blocks - n); n 2091 drivers/md/dm-zoned-metadata.c int n = 0; n 2100 drivers/md/dm-zoned-metadata.c n += BITS_PER_LONG; n 2107 drivers/md/dm-zoned-metadata.c n++; n 2111 drivers/md/dm-zoned-metadata.c return n; n 2122 drivers/md/dm-zoned-metadata.c unsigned int n = 0; n 2143 drivers/md/dm-zoned-metadata.c n += count; n 2151 drivers/md/dm-zoned-metadata.c if (zone->weight >= n) n 2152 drivers/md/dm-zoned-metadata.c zone->weight -= n; n 2155 drivers/md/dm-zoned-metadata.c dmz_id(zmd, zone), zone->weight, n); n 2199 drivers/md/dm-zoned-metadata.c int n = 0; n 2219 drivers/md/dm-zoned-metadata.c n += set_bit - bit; n 2227 drivers/md/dm-zoned-metadata.c return n; n 2279 drivers/md/dm-zoned-metadata.c int n = 0; n 2286 drivers/md/dm-zoned-metadata.c n += BITS_PER_LONG; n 2293 drivers/md/dm-zoned-metadata.c n++; n 2297 drivers/md/dm-zoned-metadata.c return n; n 2310 drivers/md/dm-zoned-metadata.c int n = 0; n 2316 drivers/md/dm-zoned-metadata.c n = 0; n 2324 drivers/md/dm-zoned-metadata.c n += dmz_count_bits(bitmap, bit, nr_bits); n 2332 drivers/md/dm-zoned-metadata.c zone->weight = n; n 127 drivers/md/md-faulty.c int n = conf->nfaults; n 154 drivers/md/md-faulty.c n = i; n 156 drivers/md/md-faulty.c if (n >= MaxFault) n 158 drivers/md/md-faulty.c conf->faults[n] = start; n 159 drivers/md/md-faulty.c conf->modes[n] = mode; n 160 drivers/md/md-faulty.c if (conf->nfaults == n) n 161 drivers/md/md-faulty.c conf->nfaults = n+1; n 224 drivers/md/md-faulty.c int n; n 226 drivers/md/md-faulty.c if ((n=atomic_read(&conf->counters[WriteTransient])) != 0) n 228 drivers/md/md-faulty.c n, conf->period[WriteTransient]); n 230 drivers/md/md-faulty.c if 
((n=atomic_read(&conf->counters[ReadTransient])) != 0) n 232 drivers/md/md-faulty.c n, conf->period[ReadTransient]); n 234 drivers/md/md-faulty.c if ((n=atomic_read(&conf->counters[WritePersistent])) != 0) n 236 drivers/md/md-faulty.c n, conf->period[WritePersistent]); n 238 drivers/md/md-faulty.c if ((n=atomic_read(&conf->counters[ReadPersistent])) != 0) n 240 drivers/md/md-faulty.c n, conf->period[ReadPersistent]); n 243 drivers/md/md-faulty.c if ((n=atomic_read(&conf->counters[ReadFixable])) != 0) n 245 drivers/md/md-faulty.c n, conf->period[ReadFixable]); n 247 drivers/md/md-faulty.c if ((n=atomic_read(&conf->counters[WriteAll])) != 0) n 3018 drivers/md/md.c unsigned int n; n 3021 drivers/md/md.c rv = kstrtouint(buf, 10, &n); n 3024 drivers/md/md.c atomic_set(&rdev->corrected_errors, n); n 4006 drivers/md/md.c unsigned int n; n 4009 drivers/md/md.c err = kstrtouint(buf, 10, &n); n 4022 drivers/md/md.c mddev->new_layout = n; n 4028 drivers/md/md.c mddev->new_layout = n; n 4030 drivers/md/md.c mddev->layout = n; n 4055 drivers/md/md.c unsigned int n; n 4058 drivers/md/md.c err = kstrtouint(buf, 10, &n); n 4066 drivers/md/md.c err = update_raid_disks(mddev, n); n 4073 drivers/md/md.c if (olddisks < n && n 4076 drivers/md/md.c if (olddisks > n && n 4081 drivers/md/md.c mddev->delta_disks = n - olddisks; n 4082 drivers/md/md.c mddev->raid_disks = n; n 4085 drivers/md/md.c mddev->raid_disks = n; n 4107 drivers/md/md.c unsigned long n; n 4110 drivers/md/md.c err = kstrtoul(buf, 10, &n); n 4123 drivers/md/md.c mddev->new_chunk_sectors = n >> 9; n 4129 drivers/md/md.c mddev->new_chunk_sectors = n >> 9; n 4131 drivers/md/md.c mddev->chunk_sectors = n >> 9; n 4150 drivers/md/md.c unsigned long long n; n 4154 drivers/md/md.c n = MaxSector; n 4156 drivers/md/md.c err = kstrtoull(buf, 10, &n); n 4159 drivers/md/md.c if (n != (sector_t)n) n 4170 drivers/md/md.c mddev->recovery_cp = n; n 4230 drivers/md/md.c int n; n 4231 drivers/md/md.c for (n=0; list[n]; n++) n 4232 drivers/md/md.c if (cmd_match(word, list[n])) n 4234 drivers/md/md.c return n; n 4404 drivers/md/md.c unsigned int n; n 4407 drivers/md/md.c rv = kstrtouint(buf, 10, &n); n 4410 drivers/md/md.c atomic_set(&mddev->max_corr_read_errors, n); n 4838 drivers/md/md.c long n; n 4840 drivers/md/md.c if (kstrtol(buf, 10, &n)) n 4843 drivers/md/md.c if (n != 0 && n != 1) n 4846 drivers/md/md.c mddev->parallel_resync = n; n 51 drivers/md/persistent-data/dm-btree-internal.h void inc_children(struct dm_transaction_manager *tm, struct btree_node *n, n 108 drivers/md/persistent-data/dm-btree-internal.h static inline __le64 *key_ptr(struct btree_node *n, uint32_t index) n 110 drivers/md/persistent-data/dm-btree-internal.h return n->keys + index; n 113 drivers/md/persistent-data/dm-btree-internal.h static inline void *value_base(struct btree_node *n) n 115 drivers/md/persistent-data/dm-btree-internal.h return &n->keys[le32_to_cpu(n->header.max_entries)]; n 118 drivers/md/persistent-data/dm-btree-internal.h static inline void *value_ptr(struct btree_node *n, uint32_t index) n 120 drivers/md/persistent-data/dm-btree-internal.h uint32_t value_size = le32_to_cpu(n->header.value_size); n 121 drivers/md/persistent-data/dm-btree-internal.h return value_base(n) + (value_size * index); n 127 drivers/md/persistent-data/dm-btree-internal.h static inline uint64_t value64(struct btree_node *n, uint32_t index) n 129 drivers/md/persistent-data/dm-btree-internal.h __le64 *values_le = value_base(n); n 137 drivers/md/persistent-data/dm-btree-internal.h int 
lower_bound(struct btree_node *n, uint64_t key); n 56 drivers/md/persistent-data/dm-btree-remove.c static void node_shift(struct btree_node *n, int shift) n 58 drivers/md/persistent-data/dm-btree-remove.c uint32_t nr_entries = le32_to_cpu(n->header.nr_entries); n 59 drivers/md/persistent-data/dm-btree-remove.c uint32_t value_size = le32_to_cpu(n->header.value_size); n 64 drivers/md/persistent-data/dm-btree-remove.c BUG_ON((void *) key_ptr(n, shift) >= value_ptr(n, shift)); n 65 drivers/md/persistent-data/dm-btree-remove.c memmove(key_ptr(n, 0), n 66 drivers/md/persistent-data/dm-btree-remove.c key_ptr(n, shift), n 68 drivers/md/persistent-data/dm-btree-remove.c memmove(value_ptr(n, 0), n 69 drivers/md/persistent-data/dm-btree-remove.c value_ptr(n, shift), n 72 drivers/md/persistent-data/dm-btree-remove.c BUG_ON(nr_entries + shift > le32_to_cpu(n->header.max_entries)); n 73 drivers/md/persistent-data/dm-btree-remove.c memmove(key_ptr(n, shift), n 74 drivers/md/persistent-data/dm-btree-remove.c key_ptr(n, 0), n 76 drivers/md/persistent-data/dm-btree-remove.c memmove(value_ptr(n, shift), n 77 drivers/md/persistent-data/dm-btree-remove.c value_ptr(n, 0), n 111 drivers/md/persistent-data/dm-btree-remove.c static void delete_at(struct btree_node *n, unsigned index) n 113 drivers/md/persistent-data/dm-btree-remove.c unsigned nr_entries = le32_to_cpu(n->header.nr_entries); n 115 drivers/md/persistent-data/dm-btree-remove.c uint32_t value_size = le32_to_cpu(n->header.value_size); n 119 drivers/md/persistent-data/dm-btree-remove.c memmove(key_ptr(n, index), n 120 drivers/md/persistent-data/dm-btree-remove.c key_ptr(n, index + 1), n 123 drivers/md/persistent-data/dm-btree-remove.c memmove(value_ptr(n, index), n 124 drivers/md/persistent-data/dm-btree-remove.c value_ptr(n, index + 1), n 128 drivers/md/persistent-data/dm-btree-remove.c n->header.nr_entries = cpu_to_le32(nr_entries - 1); n 131 drivers/md/persistent-data/dm-btree-remove.c static unsigned merge_threshold(struct btree_node *n) n 133 drivers/md/persistent-data/dm-btree-remove.c return le32_to_cpu(n->header.max_entries) / 3; n 139 drivers/md/persistent-data/dm-btree-remove.c struct btree_node *n; n 157 drivers/md/persistent-data/dm-btree-remove.c result->n = dm_block_data(result->block); n 160 drivers/md/persistent-data/dm-btree-remove.c inc_children(info->tm, result->n, vt); n 202 drivers/md/persistent-data/dm-btree-remove.c struct btree_node *left = l->n; n 203 drivers/md/persistent-data/dm-btree-remove.c struct btree_node *right = r->n; n 350 drivers/md/persistent-data/dm-btree-remove.c struct btree_node *left = l->n; n 351 drivers/md/persistent-data/dm-btree-remove.c struct btree_node *center = c->n; n 352 drivers/md/persistent-data/dm-btree-remove.c struct btree_node *right = r->n; n 412 drivers/md/persistent-data/dm-btree-remove.c struct btree_node *n; n 414 drivers/md/persistent-data/dm-btree-remove.c n = dm_block_data(shadow_current(s)); n 416 drivers/md/persistent-data/dm-btree-remove.c if (le32_to_cpu(n->header.nr_entries) == 1) { n 418 drivers/md/persistent-data/dm-btree-remove.c dm_block_t b = value64(n, 0); n 424 drivers/md/persistent-data/dm-btree-remove.c memcpy(n, dm_block_data(child), n 432 drivers/md/persistent-data/dm-btree-remove.c i = lower_bound(n, key); n 437 drivers/md/persistent-data/dm-btree-remove.c has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1); n 451 drivers/md/persistent-data/dm-btree-remove.c static int do_leaf(struct btree_node *n, uint64_t key, unsigned *index) n 453 
drivers/md/persistent-data/dm-btree-remove.c int i = lower_bound(n, key); n 456 drivers/md/persistent-data/dm-btree-remove.c (i >= le32_to_cpu(n->header.nr_entries)) || n 457 drivers/md/persistent-data/dm-btree-remove.c (le64_to_cpu(n->keys[i]) != key)) n 474 drivers/md/persistent-data/dm-btree-remove.c struct btree_node *n; n 492 drivers/md/persistent-data/dm-btree-remove.c n = dm_block_data(shadow_current(s)); n 494 drivers/md/persistent-data/dm-btree-remove.c if (le32_to_cpu(n->header.flags) & LEAF_NODE) n 495 drivers/md/persistent-data/dm-btree-remove.c return do_leaf(n, key, index); n 501 drivers/md/persistent-data/dm-btree-remove.c n = dm_block_data(shadow_current(s)); n 502 drivers/md/persistent-data/dm-btree-remove.c if (le32_to_cpu(n->header.flags) & LEAF_NODE) n 503 drivers/md/persistent-data/dm-btree-remove.c return do_leaf(n, key, index); n 505 drivers/md/persistent-data/dm-btree-remove.c i = lower_bound(n, key); n 512 drivers/md/persistent-data/dm-btree-remove.c root = value64(n, i); n 524 drivers/md/persistent-data/dm-btree-remove.c struct btree_node *n; n 537 drivers/md/persistent-data/dm-btree-remove.c n = dm_block_data(shadow_current(&spine)); n 539 drivers/md/persistent-data/dm-btree-remove.c root = value64(n, index); n 543 drivers/md/persistent-data/dm-btree-remove.c BUG_ON(index < 0 || index >= le32_to_cpu(n->header.nr_entries)); n 547 drivers/md/persistent-data/dm-btree-remove.c value_ptr(n, index)); n 549 drivers/md/persistent-data/dm-btree-remove.c delete_at(n, index); n 566 drivers/md/persistent-data/dm-btree-remove.c struct btree_node *n; n 584 drivers/md/persistent-data/dm-btree-remove.c n = dm_block_data(shadow_current(s)); n 586 drivers/md/persistent-data/dm-btree-remove.c if (le32_to_cpu(n->header.flags) & LEAF_NODE) { n 587 drivers/md/persistent-data/dm-btree-remove.c *index = lower_bound(n, key); n 595 drivers/md/persistent-data/dm-btree-remove.c n = dm_block_data(shadow_current(s)); n 596 drivers/md/persistent-data/dm-btree-remove.c if (le32_to_cpu(n->header.flags) & LEAF_NODE) { n 597 drivers/md/persistent-data/dm-btree-remove.c *index = lower_bound(n, key); n 601 drivers/md/persistent-data/dm-btree-remove.c i = lower_bound(n, key); n 608 drivers/md/persistent-data/dm-btree-remove.c root = value64(n, i); n 621 drivers/md/persistent-data/dm-btree-remove.c struct btree_node *n; n 633 drivers/md/persistent-data/dm-btree-remove.c n = dm_block_data(shadow_current(&spine)); n 634 drivers/md/persistent-data/dm-btree-remove.c root = value64(n, index); n 642 drivers/md/persistent-data/dm-btree-remove.c n = dm_block_data(shadow_current(&spine)); n 647 drivers/md/persistent-data/dm-btree-remove.c if (index >= le32_to_cpu(n->header.nr_entries)) { n 652 drivers/md/persistent-data/dm-btree-remove.c k = le64_to_cpu(n->keys[index]); n 656 drivers/md/persistent-data/dm-btree-remove.c value_ptr(n, index)); n 658 drivers/md/persistent-data/dm-btree-remove.c delete_at(n, index); n 26 drivers/md/persistent-data/dm-btree-spine.c struct btree_node *n = dm_block_data(b); n 27 drivers/md/persistent-data/dm-btree-spine.c struct node_header *h = &n->header; n 41 drivers/md/persistent-data/dm-btree-spine.c struct btree_node *n = dm_block_data(b); n 42 drivers/md/persistent-data/dm-btree-spine.c struct node_header *h = &n->header; n 41 drivers/md/persistent-data/dm-btree.c static int bsearch(struct btree_node *n, uint64_t key, int want_hi) n 43 drivers/md/persistent-data/dm-btree.c int lo = -1, hi = le32_to_cpu(n->header.nr_entries); n 47 drivers/md/persistent-data/dm-btree.c uint64_t 
mid_key = le64_to_cpu(n->keys[mid]); n 61 drivers/md/persistent-data/dm-btree.c int lower_bound(struct btree_node *n, uint64_t key) n 63 drivers/md/persistent-data/dm-btree.c return bsearch(n, key, 0); n 66 drivers/md/persistent-data/dm-btree.c static int upper_bound(struct btree_node *n, uint64_t key) n 68 drivers/md/persistent-data/dm-btree.c return bsearch(n, key, 1); n 71 drivers/md/persistent-data/dm-btree.c void inc_children(struct dm_transaction_manager *tm, struct btree_node *n, n 75 drivers/md/persistent-data/dm-btree.c uint32_t nr_entries = le32_to_cpu(n->header.nr_entries); n 77 drivers/md/persistent-data/dm-btree.c if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) n 79 drivers/md/persistent-data/dm-btree.c dm_tm_inc(tm, value64(n, i)); n 82 drivers/md/persistent-data/dm-btree.c vt->inc(vt->context, value_ptr(n, i)); n 116 drivers/md/persistent-data/dm-btree.c uint32_t total, n; n 121 drivers/md/persistent-data/dm-btree.c n = total / 3; /* rounds down */ n 123 drivers/md/persistent-data/dm-btree.c return 3 * n; n 130 drivers/md/persistent-data/dm-btree.c struct btree_node *n; n 141 drivers/md/persistent-data/dm-btree.c n = dm_block_data(b); n 142 drivers/md/persistent-data/dm-btree.c memset(n, 0, block_size); n 143 drivers/md/persistent-data/dm-btree.c n->header.flags = cpu_to_le32(LEAF_NODE); n 144 drivers/md/persistent-data/dm-btree.c n->header.nr_entries = cpu_to_le32(0); n 145 drivers/md/persistent-data/dm-btree.c n->header.max_entries = cpu_to_le32(max_entries); n 146 drivers/md/persistent-data/dm-btree.c n->header.value_size = cpu_to_le32(info->value_type.size); n 164 drivers/md/persistent-data/dm-btree.c struct btree_node *n; n 200 drivers/md/persistent-data/dm-btree.c dm_bm_prefetch(bm, value64(f->n, i)); n 239 drivers/md/persistent-data/dm-btree.c f->n = dm_block_data(f->b); n 241 drivers/md/persistent-data/dm-btree.c f->nr_children = le32_to_cpu(f->n->header.nr_entries); n 244 drivers/md/persistent-data/dm-btree.c flags = le32_to_cpu(f->n->header.flags); n 305 drivers/md/persistent-data/dm-btree.c flags = le32_to_cpu(f->n->header.flags); n 307 drivers/md/persistent-data/dm-btree.c b = value64(f->n, f->current_child); n 314 drivers/md/persistent-data/dm-btree.c b = value64(f->n, f->current_child); n 326 drivers/md/persistent-data/dm-btree.c value_ptr(f->n, i)); n 425 drivers/md/persistent-data/dm-btree.c struct btree_node *n; n 431 drivers/md/persistent-data/dm-btree.c n = dm_block_data(node); n 432 drivers/md/persistent-data/dm-btree.c flags = le32_to_cpu(n->header.flags); n 433 drivers/md/persistent-data/dm-btree.c nr_entries = le32_to_cpu(n->header.nr_entries); n 436 drivers/md/persistent-data/dm-btree.c i = lower_bound(n, key); n 449 drivers/md/persistent-data/dm-btree.c r = dm_btree_lookup_next_single(info, value64(n, i), key, rkey, value_le); n 452 drivers/md/persistent-data/dm-btree.c r = dm_btree_lookup_next_single(info, value64(n, i), key, rkey, value_le); n 456 drivers/md/persistent-data/dm-btree.c i = upper_bound(n, key); n 462 drivers/md/persistent-data/dm-btree.c *rkey = le64_to_cpu(n->keys[i]); n 463 drivers/md/persistent-data/dm-btree.c memcpy(value_le, value_ptr(n, i), info->value_type.size); n 771 drivers/md/persistent-data/dm-btree.c struct btree_node *n; n 782 drivers/md/persistent-data/dm-btree.c n = dm_block_data(shadow_current(&spine)); n 784 drivers/md/persistent-data/dm-btree.c if (need_insert(n, keys, level, index)) { n 795 drivers/md/persistent-data/dm-btree.c r = insert_at(sizeof(uint64_t), n, index, n 802 drivers/md/persistent-data/dm-btree.c 
block = value64(n, index); n 810 drivers/md/persistent-data/dm-btree.c n = dm_block_data(shadow_current(&spine)); n 812 drivers/md/persistent-data/dm-btree.c if (need_insert(n, keys, level, index)) { n 816 drivers/md/persistent-data/dm-btree.c r = insert_at(info->value_type.size, n, index, n 828 drivers/md/persistent-data/dm-btree.c value_ptr(n, index), n 831 drivers/md/persistent-data/dm-btree.c value_ptr(n, index)); n 833 drivers/md/persistent-data/dm-btree.c memcpy_disk(value_ptr(n, index), n 956 drivers/md/persistent-data/dm-btree.c struct btree_node *n; n 963 drivers/md/persistent-data/dm-btree.c n = dm_block_data(node); n 965 drivers/md/persistent-data/dm-btree.c nr = le32_to_cpu(n->header.nr_entries); n 967 drivers/md/persistent-data/dm-btree.c if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) { n 968 drivers/md/persistent-data/dm-btree.c r = walk_node(info, value64(n, i), fn, context); n 972 drivers/md/persistent-data/dm-btree.c keys = le64_to_cpu(*key_ptr(n, i)); n 973 drivers/md/persistent-data/dm-btree.c r = fn(context, &keys, value_ptr(n, i)); n 999 drivers/md/persistent-data/dm-btree.c struct cursor_node *n = c->nodes + c->depth - 1; n 1000 drivers/md/persistent-data/dm-btree.c struct btree_node *bn = dm_block_data(n->b); n 1014 drivers/md/persistent-data/dm-btree.c struct cursor_node *n = c->nodes + c->depth - 1; n 1015 drivers/md/persistent-data/dm-btree.c struct btree_node *bn = dm_block_data(n->b); n 1023 drivers/md/persistent-data/dm-btree.c struct cursor_node *n = c->nodes + c->depth; n 1030 drivers/md/persistent-data/dm-btree.c r = bn_read_lock(c->info, b, &n->b); n 1034 drivers/md/persistent-data/dm-btree.c n->index = 0; n 1051 drivers/md/persistent-data/dm-btree.c struct cursor_node *n; n 1058 drivers/md/persistent-data/dm-btree.c n = c->nodes + c->depth - 1; n 1059 drivers/md/persistent-data/dm-btree.c bn = dm_block_data(n->b); n 1061 drivers/md/persistent-data/dm-btree.c n->index++; n 1062 drivers/md/persistent-data/dm-btree.c if (n->index < le32_to_cpu(bn->header.nr_entries)) n 1074 drivers/md/persistent-data/dm-btree.c struct cursor_node *n; n 1079 drivers/md/persistent-data/dm-btree.c n = c->nodes + c->depth - 1; n 1080 drivers/md/persistent-data/dm-btree.c bn = dm_block_data(n->b); n 1085 drivers/md/persistent-data/dm-btree.c memcpy(&value_le, value_ptr(bn, n->index), sizeof(value_le)); n 1151 drivers/md/persistent-data/dm-btree.c struct cursor_node *n = c->nodes + c->depth - 1; n 1152 drivers/md/persistent-data/dm-btree.c struct btree_node *bn = dm_block_data(n->b); n 1157 drivers/md/persistent-data/dm-btree.c *key = le64_to_cpu(*key_ptr(bn, n->index)); n 1158 drivers/md/persistent-data/dm-btree.c memcpy(value_le, value_ptr(bn, n->index), c->info->value_type.size); n 563 drivers/md/raid10.c int n,f; n 590 drivers/md/raid10.c for (n = 0; n < geo->near_copies; n++) { n 1602 drivers/md/raid10.c int n = conf->copies; n 1605 drivers/md/raid10.c while (n--) { n 409 drivers/media/cec/cec-adap.c struct cec_data *data, *n; n 424 drivers/media/cec/cec-adap.c list_for_each_entry_safe(data, n, &adap->wait_queue, list) { n 39 drivers/media/cec/cec-notifier.c struct cec_notifier *n; n 42 drivers/media/cec/cec-notifier.c list_for_each_entry(n, &cec_notifiers, head) { n 43 drivers/media/cec/cec-notifier.c if (n->hdmi_dev == hdmi_dev && n 45 drivers/media/cec/cec-notifier.c (n->conn_name && !strcmp(n->conn_name, conn_name)))) { n 46 drivers/media/cec/cec-notifier.c kref_get(&n->kref); n 48 drivers/media/cec/cec-notifier.c return n; n 51 drivers/media/cec/cec-notifier.c n = 
kzalloc(sizeof(*n), GFP_KERNEL); n 52 drivers/media/cec/cec-notifier.c if (!n) n 54 drivers/media/cec/cec-notifier.c n->hdmi_dev = hdmi_dev; n 56 drivers/media/cec/cec-notifier.c n->conn_name = kstrdup(conn_name, GFP_KERNEL); n 57 drivers/media/cec/cec-notifier.c if (!n->conn_name) { n 58 drivers/media/cec/cec-notifier.c kfree(n); n 59 drivers/media/cec/cec-notifier.c n = NULL; n 63 drivers/media/cec/cec-notifier.c n->phys_addr = CEC_PHYS_ADDR_INVALID; n 65 drivers/media/cec/cec-notifier.c mutex_init(&n->lock); n 66 drivers/media/cec/cec-notifier.c kref_init(&n->kref); n 67 drivers/media/cec/cec-notifier.c list_add_tail(&n->head, &cec_notifiers); n 70 drivers/media/cec/cec-notifier.c return n; n 76 drivers/media/cec/cec-notifier.c struct cec_notifier *n = n 79 drivers/media/cec/cec-notifier.c list_del(&n->head); n 80 drivers/media/cec/cec-notifier.c kfree(n->conn_name); n 81 drivers/media/cec/cec-notifier.c kfree(n); n 84 drivers/media/cec/cec-notifier.c void cec_notifier_put(struct cec_notifier *n) n 87 drivers/media/cec/cec-notifier.c kref_put(&n->kref, cec_notifier_release); n 96 drivers/media/cec/cec-notifier.c struct cec_notifier *n = cec_notifier_get_conn(hdmi_dev, conn_name); n 98 drivers/media/cec/cec-notifier.c if (!n) n 99 drivers/media/cec/cec-notifier.c return n; n 101 drivers/media/cec/cec-notifier.c mutex_lock(&n->lock); n 102 drivers/media/cec/cec-notifier.c n->phys_addr = CEC_PHYS_ADDR_INVALID; n 104 drivers/media/cec/cec-notifier.c n->conn_info = *conn_info; n 106 drivers/media/cec/cec-notifier.c memset(&n->conn_info, 0, sizeof(n->conn_info)); n 107 drivers/media/cec/cec-notifier.c if (n->cec_adap) { n 108 drivers/media/cec/cec-notifier.c cec_phys_addr_invalidate(n->cec_adap); n 109 drivers/media/cec/cec-notifier.c cec_s_conn_info(n->cec_adap, conn_info); n 111 drivers/media/cec/cec-notifier.c mutex_unlock(&n->lock); n 112 drivers/media/cec/cec-notifier.c return n; n 116 drivers/media/cec/cec-notifier.c void cec_notifier_conn_unregister(struct cec_notifier *n) n 118 drivers/media/cec/cec-notifier.c if (!n) n 121 drivers/media/cec/cec-notifier.c mutex_lock(&n->lock); n 122 drivers/media/cec/cec-notifier.c memset(&n->conn_info, 0, sizeof(n->conn_info)); n 123 drivers/media/cec/cec-notifier.c n->phys_addr = CEC_PHYS_ADDR_INVALID; n 124 drivers/media/cec/cec-notifier.c if (n->cec_adap) { n 125 drivers/media/cec/cec-notifier.c cec_phys_addr_invalidate(n->cec_adap); n 126 drivers/media/cec/cec-notifier.c cec_s_conn_info(n->cec_adap, NULL); n 128 drivers/media/cec/cec-notifier.c mutex_unlock(&n->lock); n 129 drivers/media/cec/cec-notifier.c cec_notifier_put(n); n 137 drivers/media/cec/cec-notifier.c struct cec_notifier *n; n 142 drivers/media/cec/cec-notifier.c n = cec_notifier_get_conn(hdmi_dev, conn_name); n 143 drivers/media/cec/cec-notifier.c if (!n) n 144 drivers/media/cec/cec-notifier.c return n; n 146 drivers/media/cec/cec-notifier.c mutex_lock(&n->lock); n 147 drivers/media/cec/cec-notifier.c n->cec_adap = adap; n 148 drivers/media/cec/cec-notifier.c adap->conn_info = n->conn_info; n 149 drivers/media/cec/cec-notifier.c adap->notifier = n; n 150 drivers/media/cec/cec-notifier.c cec_s_phys_addr(adap, n->phys_addr, false); n 151 drivers/media/cec/cec-notifier.c mutex_unlock(&n->lock); n 152 drivers/media/cec/cec-notifier.c return n; n 156 drivers/media/cec/cec-notifier.c void cec_notifier_cec_adap_unregister(struct cec_notifier *n) n 158 drivers/media/cec/cec-notifier.c if (!n) n 161 drivers/media/cec/cec-notifier.c mutex_lock(&n->lock); n 162 
drivers/media/cec/cec-notifier.c n->cec_adap->notifier = NULL; n 163 drivers/media/cec/cec-notifier.c n->cec_adap = NULL; n 164 drivers/media/cec/cec-notifier.c n->callback = NULL; n 165 drivers/media/cec/cec-notifier.c mutex_unlock(&n->lock); n 166 drivers/media/cec/cec-notifier.c cec_notifier_put(n); n 170 drivers/media/cec/cec-notifier.c void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa) n 172 drivers/media/cec/cec-notifier.c if (n == NULL) n 175 drivers/media/cec/cec-notifier.c mutex_lock(&n->lock); n 176 drivers/media/cec/cec-notifier.c n->phys_addr = pa; n 177 drivers/media/cec/cec-notifier.c if (n->callback) n 178 drivers/media/cec/cec-notifier.c n->callback(n->cec_adap, n->phys_addr); n 179 drivers/media/cec/cec-notifier.c else if (n->cec_adap) n 180 drivers/media/cec/cec-notifier.c cec_s_phys_addr(n->cec_adap, n->phys_addr, false); n 181 drivers/media/cec/cec-notifier.c mutex_unlock(&n->lock); n 185 drivers/media/cec/cec-notifier.c void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n, n 190 drivers/media/cec/cec-notifier.c if (n == NULL) n 196 drivers/media/cec/cec-notifier.c cec_notifier_set_phys_addr(n, pa); n 200 drivers/media/cec/cec-notifier.c void cec_notifier_register(struct cec_notifier *n, n 204 drivers/media/cec/cec-notifier.c kref_get(&n->kref); n 205 drivers/media/cec/cec-notifier.c mutex_lock(&n->lock); n 206 drivers/media/cec/cec-notifier.c n->cec_adap = adap; n 207 drivers/media/cec/cec-notifier.c n->callback = callback; n 208 drivers/media/cec/cec-notifier.c n->callback(adap, n->phys_addr); n 209 drivers/media/cec/cec-notifier.c mutex_unlock(&n->lock); n 213 drivers/media/cec/cec-notifier.c void cec_notifier_unregister(struct cec_notifier *n) n 216 drivers/media/cec/cec-notifier.c if (!n->callback) n 219 drivers/media/cec/cec-notifier.c mutex_lock(&n->lock); n 220 drivers/media/cec/cec-notifier.c n->callback = NULL; n 221 drivers/media/cec/cec-notifier.c n->cec_adap->notifier = NULL; n 222 drivers/media/cec/cec-notifier.c n->cec_adap = NULL; n 223 drivers/media/cec/cec-notifier.c mutex_unlock(&n->lock); n 224 drivers/media/cec/cec-notifier.c cec_notifier_put(n); n 23 drivers/media/common/btcx-risc.h struct v4l2_clip *clips, unsigned int n); n 25 drivers/media/common/btcx-risc.h unsigned int n, int mask); n 37 drivers/media/common/siano/smsdvb-debugfs.c int n = 0; n 48 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 50 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 52 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 54 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 56 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 58 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 60 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 62 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 64 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 66 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 68 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 70 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 72 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 74 drivers/media/common/siano/smsdvb-debugfs.c n += 
snprintf(&buf[n], PAGE_SIZE - n, n 76 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 78 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 80 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 82 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 84 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 86 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 88 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 90 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 92 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 94 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 96 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 99 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 101 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 103 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 105 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 107 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 109 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 111 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 113 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 115 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 117 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 119 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 121 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 123 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 125 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 127 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 129 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 132 drivers/media/common/siano/smsdvb-debugfs.c debug_data->stats_count = n; n 140 drivers/media/common/siano/smsdvb-debugfs.c int i, n = 0; n 151 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 153 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 156 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 158 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 160 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 162 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 164 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 166 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 168 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 170 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 172 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 174 
drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 176 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 178 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 180 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 182 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 184 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 186 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 194 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, "\nLayer %d\n", i); n 195 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, "\tcode_rate = %d\t", n 197 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, "constellation = %d\n", n 199 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, "\tber = %-5d\t", n 201 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, "\tber_error_count = %-5d\t", n 203 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, "ber_bit_count = %-5d\n", n 205 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, "\tpre_ber = %-5d\t", n 207 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, "\tts_per = %-5d\n", n 209 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, "\terror_ts_packets = %-5d\t", n 211 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, "total_ts_packets = %-5d\t", n 213 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, "ti_ldepth_i = %d\n", n 215 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 218 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, "tmcc_errors = %d\n", n 222 drivers/media/common/siano/smsdvb-debugfs.c debug_data->stats_count = n; n 230 drivers/media/common/siano/smsdvb-debugfs.c int i, n = 0; n 241 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 243 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 246 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 248 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 250 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 252 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 254 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 256 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 258 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 260 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 262 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 264 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 266 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 268 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 270 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 272 drivers/media/common/siano/smsdvb-debugfs.c 
n += snprintf(&buf[n], PAGE_SIZE - n, n 274 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 276 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, "segment_number = %d\t", n 278 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, "tune_bw = %d\n", n 286 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, "\nLayer %d\n", i); n 287 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, "\tcode_rate = %d\t", n 289 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, "constellation = %d\n", n 291 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, "\tber = %-5d\t", n 293 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, "\tber_error_count = %-5d\t", n 295 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, "ber_bit_count = %-5d\n", n 297 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, "\tpre_ber = %-5d\t", n 299 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, "\tts_per = %-5d\n", n 301 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, "\terror_ts_packets = %-5d\t", n 303 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, "total_ts_packets = %-5d\t", n 305 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, "ti_ldepth_i = %d\n", n 307 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, n 310 drivers/media/common/siano/smsdvb-debugfs.c n += snprintf(&buf[n], PAGE_SIZE - n, "tmcc_errors = %d\n", n 315 drivers/media/common/siano/smsdvb-debugfs.c debug_data->stats_count = n; n 203 drivers/media/dvb-core/dvb_demux.c int n = sec->tsfeedp - sec->secbufp; n 210 drivers/media/dvb-core/dvb_demux.c if (sec->secbuf[0] != 0xff || sec->secbuf[n - 1] != 0xff) { n 214 drivers/media/dvb-core/dvb_demux.c n, sec->tsfeedp); n 215 drivers/media/dvb-core/dvb_demux.c dprintk_sect_loss("pad data: %*ph\n", n, sec->secbuf); n 246 drivers/media/dvb-core/dvb_demux.c u16 limit, seclen, n; n 275 drivers/media/dvb-core/dvb_demux.c for (n = 0; sec->secbufp + 2 < limit; n++) { n 1187 drivers/media/dvb-core/dvb_demux.c struct list_head *pos, *n, *head = &dvbdemux->frontend_list; n 1189 drivers/media/dvb-core/dvb_demux.c list_for_each_safe(pos, n, head) { n 1061 drivers/media/dvb-core/dvb_frontend.c #define _DTV_CMD(n, s, b) \ n 1062 drivers/media/dvb-core/dvb_frontend.c [n] = { \ n 1063 drivers/media/dvb-core/dvb_frontend.c .name = #n, \ n 1064 drivers/media/dvb-core/dvb_frontend.c .cmd = n, \ n 363 drivers/media/dvb-frontends/bcm3510.c static int bcm3510_tuner_cmd(struct bcm3510_state* st,u8 bc, u16 n, u8 a) n 397 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[4].data = n >> 3; n 401 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[5].data = ((n & 0x7) << 5) | (a >> 2); n 455 drivers/media/dvb-frontends/bcm3510.c u16 n; n 485 drivers/media/dvb-frontends/bcm3510.c n = Tfvco1 >> 6; n 488 drivers/media/dvb-frontends/bcm3510.c deb_info(" BC1_2_3_4: %x, N: %x A: %x\n", bc, n, a); n 489 drivers/media/dvb-frontends/bcm3510.c if (n >= 16 && n <= 2047) n 490 drivers/media/dvb-frontends/bcm3510.c return bcm3510_tuner_cmd(st,bc,n,a); n 288 drivers/media/dvb-frontends/cx24113.c static void cx24113_calc_pll_nf(struct cx24113_state *state, u16 *n, s32 *f) n 358 
drivers/media/dvb-frontends/cx24113.c *n = (u16) N; n 363 drivers/media/dvb-frontends/cx24113.c static void cx24113_set_nfr(struct cx24113_state *state, u16 n, s32 f, u8 r) n 366 drivers/media/dvb-frontends/cx24113.c cx24113_writereg(state, 0x19, (n >> 1) & 0xff); n 368 drivers/media/dvb-frontends/cx24113.c reg = ((n & 0x1) << 7) | ((f >> 11) & 0x7f); n 382 drivers/media/dvb-frontends/cx24113.c u16 n = 6; n 395 drivers/media/dvb-frontends/cx24113.c cx24113_calc_pll_nf(state, &n, &f); n 396 drivers/media/dvb-frontends/cx24113.c cx24113_set_nfr(state, n, f, state->refdiv); n 62 drivers/media/dvb-frontends/cxd2099.c static int read_block(struct cxd *ci, u8 adr, u8 *data, u16 n) n 71 drivers/media/dvb-frontends/cxd2099.c while (n) { n 72 drivers/media/dvb-frontends/cxd2099.c int len = n; n 80 drivers/media/dvb-frontends/cxd2099.c n -= len; n 91 drivers/media/dvb-frontends/cxd2099.c static int read_pccard(struct cxd *ci, u16 address, u8 *data, u8 n) n 98 drivers/media/dvb-frontends/cxd2099.c status = regmap_raw_read(ci->regmap, 3, data, n); n 102 drivers/media/dvb-frontends/cxd2099.c static int write_pccard(struct cxd *ci, u16 address, u8 *data, u8 n) n 111 drivers/media/dvb-frontends/cxd2099.c memcpy(buf, data, n); n 112 drivers/media/dvb-frontends/cxd2099.c status = regmap_raw_write(ci->regmap, 3, buf, n); n 164 drivers/media/dvb-frontends/cxd2099.c static int write_block(struct cxd *ci, u8 adr, u8 *data, u16 n) n 175 drivers/media/dvb-frontends/cxd2099.c while (n) { n 176 drivers/media/dvb-frontends/cxd2099.c int len = n; n 184 drivers/media/dvb-frontends/cxd2099.c n -= len; n 637 drivers/media/dvb-frontends/dib0070.c u16 l, r, *n; n 659 drivers/media/dvb-frontends/dib0070.c n = (u16 *) dib0070_p1f_defaults; n 660 drivers/media/dvb-frontends/dib0070.c l = pgm_read_word(n++); n 662 drivers/media/dvb-frontends/dib0070.c r = pgm_read_word(n++); n 664 drivers/media/dvb-frontends/dib0070.c dib0070_write_reg(state, (u8)r, pgm_read_word(n++)); n 667 drivers/media/dvb-frontends/dib0070.c l = pgm_read_word(n++); n 1479 drivers/media/dvb-frontends/dib0090.c static void dib0090_set_default_config(struct dib0090_state *state, const u16 * n) n 1483 drivers/media/dvb-frontends/dib0090.c l = pgm_read_word(n++); n 1485 drivers/media/dvb-frontends/dib0090.c r = pgm_read_word(n++); n 1487 drivers/media/dvb-frontends/dib0090.c dib0090_write_reg(state, r, pgm_read_word(n++)); n 1490 drivers/media/dvb-frontends/dib0090.c l = pgm_read_word(n++); n 1503 drivers/media/dvb-frontends/dib0090.c u8 c, h, n; n 1518 drivers/media/dvb-frontends/dib0090.c n = 165 - ((cal * 10)>>6) ; n 1519 drivers/media/dvb-frontends/dib0090.c e2 = e4 = (3<<12) | (34<<6) | (n); n 1527 drivers/media/dvb-frontends/dib0090.c n = (e2 >> 12) & 0xf; n 1536 drivers/media/dvb-frontends/dib0090.c if ((n >= POLY_MAX) || (n <= POLY_MIN)) n 1537 drivers/media/dvb-frontends/dib0090.c n = 3; n 1540 drivers/media/dvb-frontends/dib0090.c e2 = (n << 11) | ((h >> 2)<<6) | c; n 134 drivers/media/dvb-frontends/dib7000m.c u16 l = 0, r, *n; n 135 drivers/media/dvb-frontends/dib7000m.c n = buf; n 136 drivers/media/dvb-frontends/dib7000m.c l = *n++; n 138 drivers/media/dvb-frontends/dib7000m.c r = *n++; n 144 drivers/media/dvb-frontends/dib7000m.c dib7000m_write_word(state, r, *n++); n 147 drivers/media/dvb-frontends/dib7000m.c l = *n++; n 158 drivers/media/dvb-frontends/dib7000p.c u16 l = 0, r, *n; n 159 drivers/media/dvb-frontends/dib7000p.c n = buf; n 160 drivers/media/dvb-frontends/dib7000p.c l = *n++; n 162 drivers/media/dvb-frontends/dib7000p.c r = *n++; n 
165 drivers/media/dvb-frontends/dib7000p.c dib7000p_write_word(state, r, *n++); n 168 drivers/media/dvb-frontends/dib7000p.c l = *n++; n 1104 drivers/media/dvb-frontends/dib8000.c const u16 *n; n 1105 drivers/media/dvb-frontends/dib8000.c n = dib8000_defaults; n 1106 drivers/media/dvb-frontends/dib8000.c l = *n++; n 1108 drivers/media/dvb-frontends/dib8000.c r = *n++; n 1110 drivers/media/dvb-frontends/dib8000.c dib8000_write_word(state, r, *n++); n 1113 drivers/media/dvb-frontends/dib8000.c l = *n++; n 3837 drivers/media/dvb-frontends/dib8000.c u32 n, s, exp; n 3844 drivers/media/dvb-frontends/dib8000.c n = (val >> 6) & 0xff; n 3848 drivers/media/dvb-frontends/dib8000.c n <<= exp+16; n 3860 drivers/media/dvb-frontends/dib8000.c if (n > 0) { n 3861 drivers/media/dvb-frontends/dib8000.c u32 t = (s/n) << 16; n 3862 drivers/media/dvb-frontends/dib8000.c return t + ((s << 16) - n*t) / n; n 2284 drivers/media/dvb-frontends/dib9000.c u32 n, s, exp; n 2299 drivers/media/dvb-frontends/dib9000.c n = (val >> 4) & 0xff; n 2305 drivers/media/dvb-frontends/dib9000.c n <<= exp + 16; n 2313 drivers/media/dvb-frontends/dib9000.c if (n > 0) { n 2314 drivers/media/dvb-frontends/dib9000.c u32 t = (s / n) << 16; n 2315 drivers/media/dvb-frontends/dib9000.c return t + ((s << 16) - n * t) / n; n 6044 drivers/media/dvb-frontends/drxk_hard.c int status = 0, n = 0; n 6243 drivers/media/dvb-frontends/drxk_hard.c n = 0; n 6245 drivers/media/dvb-frontends/drxk_hard.c state->frontend.ops.delsys[n++] = SYS_DVBC_ANNEX_A; n 6246 drivers/media/dvb-frontends/drxk_hard.c state->frontend.ops.delsys[n++] = SYS_DVBC_ANNEX_C; n 6251 drivers/media/dvb-frontends/drxk_hard.c state->frontend.ops.delsys[n++] = SYS_DVBT; n 172 drivers/media/dvb-frontends/lgs8gl5.c int n; n 193 drivers/media/dvb-frontends/lgs8gl5.c for (n = 0; n < 10; n++) { n 195 drivers/media/dvb-frontends/lgs8gl5.c dprintk("Wait for carrier[%d] 0x%02X\n", n, val); n 204 drivers/media/dvb-frontends/lgs8gl5.c for (n = 0; n < 20; n++) { n 206 drivers/media/dvb-frontends/lgs8gl5.c dprintk("Wait for lock[%d] 0x%02X\n", n, val); n 1037 drivers/media/dvb-frontends/mb86a16.c int n; n 1054 drivers/media/dvb-frontends/mb86a16.c for (n = 0; ((n < 3) && (ret == -1)); n++) { n 149 drivers/media/dvb-frontends/mxl5xx_defs.h #define GET_BYTE(x, n) (((x) >> (8*(n))) & 0xFF) n 1152 drivers/media/dvb-frontends/si2165.c int n; n 1252 drivers/media/dvb-frontends/si2165.c n = 0; n 1254 drivers/media/dvb-frontends/si2165.c state->fe.ops.delsys[n++] = SYS_DVBT; n 1259 drivers/media/dvb-frontends/si2165.c state->fe.ops.delsys[n++] = SYS_DVBC_ANNEX_A; n 287 drivers/media/dvb-frontends/sp887x.c static void divide (int n, int d, int *quotient_i, int *quotient_f) n 291 drivers/media/dvb-frontends/sp887x.c r = (n % d) << 8; n 15 drivers/media/dvb-frontends/stb0899_algo.c static inline u32 stb0899_do_div(u64 n, u32 d) n 19 drivers/media/dvb-frontends/stb0899_algo.c do_div(n, d); n 20 drivers/media/dvb-frontends/stb0899_algo.c return n; n 68 drivers/media/dvb-frontends/stb6000.c unsigned int n, m; n 112 drivers/media/dvb-frontends/stb6000.c n = freq_mhz / 8; /* vco=lo*4 */ n 115 drivers/media/dvb-frontends/stb6000.c n = freq_mhz / 16; /* vco=lo*2 */ n 118 drivers/media/dvb-frontends/stb6000.c buf[2] = n >> 1; n 119 drivers/media/dvb-frontends/stb6000.c buf[3] = (unsigned char)(((n & 1) << 7) | n 120 drivers/media/dvb-frontends/stb6000.c (m * freq_mhz - n * 16) | 0x60); n 455 drivers/media/dvb-frontends/stv0367.c u32 m, n, p; n 460 drivers/media/dvb-frontends/stv0367.c n = 
(u32)stv0367_readbits(state, F367TER_PLL_NDIV); n 461 drivers/media/dvb-frontends/stv0367.c if (n == 0) n 462 drivers/media/dvb-frontends/stv0367.c n = n + 1; n 472 drivers/media/dvb-frontends/stv0367.c mclk_Hz = ((ExtClk_Hz / 2) * n) / (m * (1 << p)); n 475 drivers/media/dvb-frontends/stv0367.c n, m, p, mclk_Hz, ExtClk_Hz); n 1340 drivers/media/dvb-frontends/stv0910.c u32 n, d; n 1342 drivers/media/dvb-frontends/stv0910.c get_bit_error_rate(state, &n, &d); n 1345 drivers/media/dvb-frontends/stv0910.c p->pre_bit_error.stat[0].uvalue = n; n 311 drivers/media/firewire/firedtv-avc.c int i, n, pos = 1; n 313 drivers/media/firewire/firedtv-avc.c for (i = 0, n = 0; i < 16; i++) { n 321 drivers/media/firewire/firedtv-avc.c n++; n 324 drivers/media/firewire/firedtv-avc.c operand[0] = n; n 36 drivers/media/firewire/firedtv-dvb.c int i, n; n 38 drivers/media/firewire/firedtv-dvb.c for (i = 0, n = 0; i < 16; i++) n 40 drivers/media/firewire/firedtv-dvb.c pid[n++] = fdtv->channel_pid[i]; n 41 drivers/media/firewire/firedtv-dvb.c *pidc = n; n 35 drivers/media/i2c/adv748x/adv748x-core.c #define ADV748X_REGMAP_CONF(n) \ n 37 drivers/media/i2c/adv748x/adv748x-core.c .name = n, \ n 90 drivers/media/i2c/adv748x/adv748x.h #define notifier_to_csi2(n) container_of(n, struct adv748x_csi2, notifier) n 146 drivers/media/i2c/aptina-pll.c pll->n = div * mf_low / p1; n 149 drivers/media/i2c/aptina-pll.c dev_dbg(dev, "PLL: N %u M %u P1 %u\n", pll->n, pll->m, pll->p1); n 15 drivers/media/i2c/aptina-pll.h unsigned int n; n 174 drivers/media/i2c/cx25840/cx25840-core.c static int cx23885_s_io_pin_config(struct v4l2_subdev *sd, size_t n, n 186 drivers/media/i2c/cx25840/cx25840-core.c for (i = 0; i < n; i++) { n 385 drivers/media/i2c/cx25840/cx25840-core.c static int cx25840_s_io_pin_config(struct v4l2_subdev *sd, size_t n, n 400 drivers/media/i2c/cx25840/cx25840-core.c for (i = 0; i < n; i++) { n 524 drivers/media/i2c/cx25840/cx25840-core.c static int common_s_io_pin_config(struct v4l2_subdev *sd, size_t n, n 530 drivers/media/i2c/cx25840/cx25840-core.c return cx23885_s_io_pin_config(sd, n, pincfg); n 532 drivers/media/i2c/cx25840/cx25840-core.c return cx25840_s_io_pin_config(sd, n, pincfg); n 228 drivers/media/i2c/cx25840/cx25840-ir.c u64 n; n 235 drivers/media/i2c/cx25840/cx25840-ir.c n = (((u64) count << 2) | 0x3) * (divider + 1) * 1000; /* millicycles */ n 236 drivers/media/i2c/cx25840/cx25840-ir.c rem = do_div(n, CX25840_IR_REFCLK_FREQ / 1000000); /* / MHz => ns */ n 238 drivers/media/i2c/cx25840/cx25840-ir.c n++; n 239 drivers/media/i2c/cx25840/cx25840-ir.c return n; n 246 drivers/media/i2c/cx25840/cx25840-ir.c u64 n; n 254 drivers/media/i2c/cx25840/cx25840-ir.c n = ((u64) ns) * CX25840_IR_REFCLK_FREQ / 1000000; /* millicycles */ n 256 drivers/media/i2c/cx25840/cx25840-ir.c rem = do_div(n, d); n 258 drivers/media/i2c/cx25840/cx25840-ir.c n++; n 260 drivers/media/i2c/cx25840/cx25840-ir.c if (n > FIFO_RXTX) n 261 drivers/media/i2c/cx25840/cx25840-ir.c n = FIFO_RXTX; n 262 drivers/media/i2c/cx25840/cx25840-ir.c else if (n == 0) n 263 drivers/media/i2c/cx25840/cx25840-ir.c n = 1; n 264 drivers/media/i2c/cx25840/cx25840-ir.c return (u16) n; n 270 drivers/media/i2c/cx25840/cx25840-ir.c u64 n; n 277 drivers/media/i2c/cx25840/cx25840-ir.c n = (((u64) count << 2) | 0x3) * (divider + 1); /* cycles */ n 278 drivers/media/i2c/cx25840/cx25840-ir.c rem = do_div(n, CX25840_IR_REFCLK_FREQ / 1000000); /* / MHz => us */ n 280 drivers/media/i2c/cx25840/cx25840-ir.c n++; n 281 drivers/media/i2c/cx25840/cx25840-ir.c return (unsigned 
int) n; n 461 drivers/media/i2c/cx25840/cx25840-ir.c u32 n; n 462 drivers/media/i2c/cx25840/cx25840-ir.c n = DIV_ROUND_CLOSEST(duty_cycle * 100, 625); /* 16ths of 100% */ n 463 drivers/media/i2c/cx25840/cx25840-ir.c if (n != 0) n 464 drivers/media/i2c/cx25840/cx25840-ir.c n--; n 465 drivers/media/i2c/cx25840/cx25840-ir.c if (n > 15) n 466 drivers/media/i2c/cx25840/cx25840-ir.c n = 15; n 467 drivers/media/i2c/cx25840/cx25840-ir.c cx25840_write4(c, CX25840_IR_CDUTY_REG, n); n 468 drivers/media/i2c/cx25840/cx25840-ir.c return DIV_ROUND_CLOSEST((n + 1) * 100, 16); n 654 drivers/media/i2c/cx25840/cx25840-ir.c unsigned int i, n; n 664 drivers/media/i2c/cx25840/cx25840-ir.c n = count / sizeof(union cx25840_ir_fifo_rec) n 666 drivers/media/i2c/cx25840/cx25840-ir.c if (n == 0) { n 671 drivers/media/i2c/cx25840/cx25840-ir.c n = kfifo_out_locked(&ir_state->rx_kfifo, buf, n, n 674 drivers/media/i2c/cx25840/cx25840-ir.c n /= sizeof(union cx25840_ir_fifo_rec); n 675 drivers/media/i2c/cx25840/cx25840-ir.c *num = n * sizeof(union cx25840_ir_fifo_rec); n 677 drivers/media/i2c/cx25840/cx25840-ir.c for (p = (union cx25840_ir_fifo_rec *) buf, i = 0; i < n; p++, i++) { n 861 drivers/media/i2c/cx25840/cx25840-ir.c unsigned int n; n 866 drivers/media/i2c/cx25840/cx25840-ir.c n = CX25840_IR_TX_KFIFO_SIZE - kfifo_len(ir_state->tx_kfifo); n 867 drivers/media/i2c/cx25840/cx25840-ir.c n = min(n, (unsigned int) count); n 868 drivers/media/i2c/cx25840/cx25840-ir.c n /= sizeof(u32); n 873 drivers/media/i2c/cx25840/cx25840-ir.c for (i = 0; i < n; ) { n 874 drivers/media/i2c/cx25840/cx25840-ir.c for (j = 0; j < FIFO_TX_DEPTH / 2 && i < n; j++) { n 887 drivers/media/i2c/cx25840/cx25840-ir.c *num = n * sizeof(u32); n 76 drivers/media/i2c/ml86v7667.c #define ADC2_CLAMP_VOLTAGE(n) ((n & 7) << 1) n 63 drivers/media/i2c/mt9m001.c int n) n 66 drivers/media/i2c/mt9m001.c for (i = 0; i < n; i++) n 284 drivers/media/i2c/mt9m032.c ((pll.n - 1) & MT9M032_PLL_CONFIG1_PREDIV_MASK)); n 79 drivers/media/i2c/mt9p031.c #define MT9P031_PIXEL_CLOCK_SHIFT(n) ((n) << 8) n 80 drivers/media/i2c/mt9p031.c #define MT9P031_PIXEL_CLOCK_DIVIDE(n) ((n) << 0) n 278 drivers/media/i2c/mt9p031.c (mt9p031->pll.m << 8) | (mt9p031->pll.n - 1)); n 277 drivers/media/i2c/mt9t112.c int m, n, p1, p2, p3, p4, p5, p6, p7; n 283 drivers/media/i2c/mt9t112.c mt9t112_reg_read(n, client, 0x0012); n 284 drivers/media/i2c/mt9t112.c p1 = n & 0x000f; n 285 drivers/media/i2c/mt9t112.c n = n >> 4; n 286 drivers/media/i2c/mt9t112.c p2 = n & 0x000f; n 287 drivers/media/i2c/mt9t112.c n = n >> 4; n 288 drivers/media/i2c/mt9t112.c p3 = n & 0x000f; n 290 drivers/media/i2c/mt9t112.c mt9t112_reg_read(n, client, 0x002a); n 291 drivers/media/i2c/mt9t112.c p4 = n & 0x000f; n 292 drivers/media/i2c/mt9t112.c n = n >> 4; n 293 drivers/media/i2c/mt9t112.c p5 = n & 0x000f; n 294 drivers/media/i2c/mt9t112.c n = n >> 4; n 295 drivers/media/i2c/mt9t112.c p6 = n & 0x000f; n 297 drivers/media/i2c/mt9t112.c mt9t112_reg_read(n, client, 0x002c); n 298 drivers/media/i2c/mt9t112.c p7 = n & 0x000f; n 300 drivers/media/i2c/mt9t112.c mt9t112_reg_read(n, client, 0x0010); n 301 drivers/media/i2c/mt9t112.c m = n & 0x00ff; n 302 drivers/media/i2c/mt9t112.c n = (n >> 8) & 0x003f; n 307 drivers/media/i2c/mt9t112.c vco = 2 * m * ext / (n + 1); n 334 drivers/media/i2c/mt9t112.c clk = ext / (n + 1); n 371 drivers/media/i2c/mt9t112.c u8 m, u8 n, u8 p1, u8 p2, u8 p3, u8 p4, n 378 drivers/media/i2c/mt9t112.c val = (n << 8) | (m << 0); n 409 drivers/media/i2c/mt9t112.c priv->info->divider.m, priv->info->divider.n, n 
39 drivers/media/i2c/noon010pc30.c #define VDO_CTL_REG(n) (0x0010 + (n)) n 55 drivers/media/i2c/noon010pc30.c #define VS_CTL_REG(n) (0x00A1 + (n)) n 57 drivers/media/i2c/noon010pc30.c #define ISP_CTL_REG(n) (0x0110 + (n)) n 69 drivers/media/i2c/noon010pc30.c #define CMC_COEF_REG(n) (0x0138 + (n)) n 70 drivers/media/i2c/noon010pc30.c #define CMC_OFS_REG(n) (0x0141 + (n)) n 73 drivers/media/i2c/noon010pc30.c #define GMA_COEF_REG(n) (0x0161 + (n)) n 84 drivers/media/i2c/noon010pc30.c #define AE_CTL_REG(n) (0x0310 + (n)) n 88 drivers/media/i2c/noon010pc30.c #define AE_YTH_REG(n) (0x031D + (n)) n 99 drivers/media/i2c/noon010pc30.c #define AWB_CTL_REG(n) (0x0410 + (n)) n 102 drivers/media/i2c/noon010pc30.c #define BGAIN_PAR_REG(n) (0x044F + (n)) n 576 drivers/media/i2c/ov2640.c #define OV2640_SIZE(n, w, h, r) \ n 577 drivers/media/i2c/ov2640.c {.name = n, .width = w , .height = h, .regs = r } n 1006 drivers/media/i2c/ov7251.c unsigned int i, n = 0; n 1021 drivers/media/i2c/ov7251.c n = i; n 1026 drivers/media/i2c/ov7251.c return &ov7251_mode_info_data[n]; n 276 drivers/media/i2c/ov772x.c #define CLKRC_DIV(n) ((n) - 1) n 121 drivers/media/i2c/rj54n1cb0c.c int n) n 124 drivers/media/i2c/rj54n1cb0c.c for (i = 0; i < n; i++) n 476 drivers/media/i2c/rj54n1cb0c.c const struct rj54n1_reg_val *rv, const int n) n 480 drivers/media/i2c/rj54n1cb0c.c for (i = 0; i < n; i++) { n 66 drivers/media/i2c/s5k4ecgx.c #define PREG(n, x) ((n) * 0x30 + (x)) n 67 drivers/media/i2c/s5k4ecgx.c #define REG_P_OUT_WIDTH(n) PREG(n, 0x700002a6) n 68 drivers/media/i2c/s5k4ecgx.c #define REG_P_OUT_HEIGHT(n) PREG(n, 0x700002a8) n 69 drivers/media/i2c/s5k4ecgx.c #define REG_P_FMT(n) PREG(n, 0x700002aa) n 70 drivers/media/i2c/s5k4ecgx.c #define REG_P_PVI_MASK(n) PREG(n, 0x700002b4) n 71 drivers/media/i2c/s5k4ecgx.c #define REG_P_FR_TIME_TYPE(n) PREG(n, 0x700002be) n 75 drivers/media/i2c/s5k4ecgx.c #define REG_P_FR_TIME_Q_TYPE(n) PREG(n, 0x700002c0) n 81 drivers/media/i2c/s5k4ecgx.c #define REG_P_MAX_FR_TIME(n) PREG(n, 0x700002c2) n 82 drivers/media/i2c/s5k4ecgx.c #define REG_P_MIN_FR_TIME(n) PREG(n, 0x700002c4) n 84 drivers/media/i2c/s5k4ecgx.c #define REG_P_PREV_MIRROR(n) PREG(n, 0x700002d0) n 85 drivers/media/i2c/s5k4ecgx.c #define REG_P_CAP_MIRROR(n) PREG(n, 0x700002d2) n 97 drivers/media/i2c/s5k4ecgx.c #define REG_USER_SHARPNESS(n) (0x70000a28 + (n) * 0xb6) n 101 drivers/media/i2c/s5k5baf.c #define REG_I_OPCLK_4KHZ(n) ((n) * 6 + 0x01cc) n 102 drivers/media/i2c/s5k5baf.c #define REG_I_MIN_OUTRATE_4KHZ(n) ((n) * 6 + 0x01ce) n 103 drivers/media/i2c/s5k5baf.c #define REG_I_MAX_OUTRATE_4KHZ(n) ((n) * 6 + 0x01d0) n 143 drivers/media/i2c/s5k5baf.c #define PREG(n, x) ((n) * 0x26 + x) n 144 drivers/media/i2c/s5k5baf.c #define REG_P_OUT_WIDTH(n) PREG(n, 0x0242) n 145 drivers/media/i2c/s5k5baf.c #define REG_P_OUT_HEIGHT(n) PREG(n, 0x0244) n 146 drivers/media/i2c/s5k5baf.c #define REG_P_FMT(n) PREG(n, 0x0246) n 147 drivers/media/i2c/s5k5baf.c #define REG_P_MAX_OUT_RATE(n) PREG(n, 0x0248) n 148 drivers/media/i2c/s5k5baf.c #define REG_P_MIN_OUT_RATE(n) PREG(n, 0x024a) n 149 drivers/media/i2c/s5k5baf.c #define REG_P_PVI_MASK(n) PREG(n, 0x024c) n 151 drivers/media/i2c/s5k5baf.c #define REG_P_CLK_INDEX(n) PREG(n, 0x024e) n 154 drivers/media/i2c/s5k5baf.c #define REG_P_FR_RATE_TYPE(n) PREG(n, 0x0250) n 158 drivers/media/i2c/s5k5baf.c #define REG_P_FR_RATE_Q_TYPE(n) PREG(n, 0x0252) n 163 drivers/media/i2c/s5k5baf.c #define REG_P_MAX_FR_TIME(n) PREG(n, 0x0254) n 164 drivers/media/i2c/s5k5baf.c #define REG_P_MIN_FR_TIME(n) PREG(n, 0x0256) n 168 
drivers/media/i2c/s5k5baf.c #define REG_P_SATURATION(n) PREG(n, 0x0258) n 169 drivers/media/i2c/s5k5baf.c #define REG_P_SHARP_BLUR(n) PREG(n, 0x025a) n 170 drivers/media/i2c/s5k5baf.c #define REG_P_GLAMOUR(n) PREG(n, 0x025c) n 171 drivers/media/i2c/s5k5baf.c #define REG_P_COLORTEMP(n) PREG(n, 0x025e) n 172 drivers/media/i2c/s5k5baf.c #define REG_P_GAMMA_INDEX(n) PREG(n, 0x0260) n 173 drivers/media/i2c/s5k5baf.c #define REG_P_PREV_MIRROR(n) PREG(n, 0x0262) n 174 drivers/media/i2c/s5k5baf.c #define REG_P_CAP_MIRROR(n) PREG(n, 0x0264) n 175 drivers/media/i2c/s5k5baf.c #define REG_P_CAP_ROTATION(n) PREG(n, 0x0266) n 220 drivers/media/i2c/s5k5baf.c #define REG_ARR_CCM(n) (0x2800 + 36 * (n)) n 494 drivers/media/i2c/s5k5baf.c int n = min_t(int, count, ARRAY_SIZE(buf) - 1); n 497 drivers/media/i2c/s5k5baf.c for (i = 1; i <= n; ++i) n 508 drivers/media/i2c/s5k5baf.c count -= n; n 794 drivers/media/i2c/s5k5baf.c int n; n 800 drivers/media/i2c/s5k5baf.c for (n = 5; n > 0; --n) { n 849 drivers/media/i2c/s5k5baf.c const struct v4l2_rect *n, n 852 drivers/media/i2c/s5k5baf.c r->left = v->left * n->width / d->width; n 853 drivers/media/i2c/s5k5baf.c r->top = v->top * n->height / d->height; n 854 drivers/media/i2c/s5k5baf.c r->width = v->width * n->width / d->width; n 855 drivers/media/i2c/s5k5baf.c r->height = v->height * n->height / d->height; n 72 drivers/media/i2c/s5k6aa.c #define REG_I_OPCLK_4KHZ(n) ((n) * 6 + 0x01cc) n 73 drivers/media/i2c/s5k6aa.c #define REG_I_MIN_OUTRATE_4KHZ(n) ((n) * 6 + 0x01ce) n 74 drivers/media/i2c/s5k6aa.c #define REG_I_MAX_OUTRATE_4KHZ(n) ((n) * 6 + 0x01d0) n 102 drivers/media/i2c/s5k6aa.c #define PREG(n, x) ((n) * 0x26 + x) n 103 drivers/media/i2c/s5k6aa.c #define REG_P_OUT_WIDTH(n) PREG(n, 0x0242) n 104 drivers/media/i2c/s5k6aa.c #define REG_P_OUT_HEIGHT(n) PREG(n, 0x0244) n 105 drivers/media/i2c/s5k6aa.c #define REG_P_FMT(n) PREG(n, 0x0246) n 106 drivers/media/i2c/s5k6aa.c #define REG_P_MAX_OUT_RATE(n) PREG(n, 0x0248) n 107 drivers/media/i2c/s5k6aa.c #define REG_P_MIN_OUT_RATE(n) PREG(n, 0x024a) n 108 drivers/media/i2c/s5k6aa.c #define REG_P_PVI_MASK(n) PREG(n, 0x024c) n 109 drivers/media/i2c/s5k6aa.c #define REG_P_CLK_INDEX(n) PREG(n, 0x024e) n 110 drivers/media/i2c/s5k6aa.c #define REG_P_FR_RATE_TYPE(n) PREG(n, 0x0250) n 114 drivers/media/i2c/s5k6aa.c #define REG_P_FR_RATE_Q_TYPE(n) PREG(n, 0x0252) n 118 drivers/media/i2c/s5k6aa.c #define REG_P_MAX_FR_TIME(n) PREG(n, 0x0254) n 119 drivers/media/i2c/s5k6aa.c #define REG_P_MIN_FR_TIME(n) PREG(n, 0x0256) n 126 drivers/media/i2c/s5k6aa.c #define REG_P_COLORTEMP(n) PREG(n, 0x025e) n 127 drivers/media/i2c/s5k6aa.c #define REG_P_PREV_MIRROR(n) PREG(n, 0x0262) n 647 drivers/media/i2c/smiapp/smiapp-core.c unsigned int n) n 654 drivers/media/i2c/smiapp/smiapp-core.c for (i = 0; i < n; i++) { n 750 drivers/media/i2c/smiapp/smiapp-core.c unsigned int type, n; n 776 drivers/media/i2c/smiapp/smiapp-core.c n = SMIAPP_DATA_FORMAT_MODEL_TYPE_NORMAL_N; n 779 drivers/media/i2c/smiapp/smiapp-core.c n = SMIAPP_DATA_FORMAT_MODEL_TYPE_EXTENDED_N; n 788 drivers/media/i2c/smiapp/smiapp-core.c for (i = 0; i < n; i++) { n 37 drivers/media/i2c/smiapp/smiapp-reg-defs.h #define SMIAPP_REG_U16_FRAME_FORMAT_DESCRIPTOR_2(n) SMIAPP_REG_MK_U16(0x0042 + ((n) << 1)) /* 0 <= n <= 14 */ n 38 drivers/media/i2c/smiapp/smiapp-reg-defs.h #define SMIAPP_REG_U32_FRAME_FORMAT_DESCRIPTOR_4(n) SMIAPP_REG_MK_U32(0x0060 + ((n) << 2)) /* 0 <= n <= 7 */ n 50 drivers/media/i2c/smiapp/smiapp-reg-defs.h #define SMIAPP_REG_U16_DATA_FORMAT_DESCRIPTOR(n) 
SMIAPP_REG_MK_U16(0x00c2 + ((n) << 1)) n 459 drivers/media/i2c/smiapp/smiapp-reg-defs.h #define SMIAPP_REG_U8_BINNING_TYPE_n(n) SMIAPP_REG_MK_U8(0x1713 + (n)) /* 1 <= n <= 237 */ n 58 drivers/media/i2c/sr030pc30.c #define ISP_CTL_REG(n) (0x1010 + (n)) n 92 drivers/media/i2c/sr030pc30.c #define CMC_COEF_REG(n) (0x1530 + (n)) n 94 drivers/media/i2c/sr030pc30.c #define CMC_OFS_REG(n) (0x1540 + (n)) n 98 drivers/media/i2c/sr030pc30.c #define GMA_COEF_REG(n) (0x1630 + (n)) n 103 drivers/media/i2c/sr030pc30.c #define AE_FINE_CTL_REG(n) (0x2028 + (n)) n 113 drivers/media/i2c/tc358743.c static void i2c_rd(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n) n 129 drivers/media/i2c/tc358743.c .len = n, n 141 drivers/media/i2c/tc358743.c static void i2c_wr(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n) n 149 drivers/media/i2c/tc358743.c if ((2 + n) > I2C_MAX_XFER_SIZE) { n 150 drivers/media/i2c/tc358743.c n = I2C_MAX_XFER_SIZE - 2; n 152 drivers/media/i2c/tc358743.c reg, 2 + n); n 157 drivers/media/i2c/tc358743.c msg.len = 2 + n; n 163 drivers/media/i2c/tc358743.c for (i = 0; i < n; i++) n 176 drivers/media/i2c/tc358743.c switch (n) { n 191 drivers/media/i2c/tc358743.c n, reg); n 195 drivers/media/i2c/tc358743.c static noinline u32 i2c_rdreg(struct v4l2_subdev *sd, u16 reg, u32 n) n 199 drivers/media/i2c/tc358743.c i2c_rd(sd, reg, (u8 __force *)&val, n); n 204 drivers/media/i2c/tc358743.c static noinline void i2c_wrreg(struct v4l2_subdev *sd, u16 reg, u32 val, u32 n) n 208 drivers/media/i2c/tc358743.c i2c_wr(sd, reg, (u8 __force *)&raw, n); n 451 drivers/media/i2c/tc358743_regs.h #define SET_CSQ_CNT_LEVEL(n) (n & MASK_CSQ_CNT) n 517 drivers/media/i2c/tc358743_regs.h #define SET_AUTO_P3_RESET_FRAMES(n) (n & MASK_AUTO_P3_RESET) n 190 drivers/media/i2c/video-i2c.c unsigned int n, idx; n 192 drivers/media/i2c/video-i2c.c for (n = 0; n < data->chip->num_frame_intervals - 1; n++) { n 194 drivers/media/i2c/video-i2c.c data->chip->frame_intervals[n])) n 198 drivers/media/i2c/video-i2c.c idx = data->chip->num_frame_intervals - n - 1; n 85 drivers/media/pci/bt8xx/btcx-risc.c struct v4l2_clip *clips, unsigned int n) n 89 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.left = 0; n 90 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.top = 0; n 91 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.width = -win->left; n 92 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.height = win->height; n 93 drivers/media/pci/bt8xx/btcx-risc.c n++; n 97 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.left = swidth - win->left; n 98 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.top = 0; n 99 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.width = win->width - clips[n].c.left; n 100 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.height = win->height; n 101 drivers/media/pci/bt8xx/btcx-risc.c n++; n 105 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.left = 0; n 106 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.top = 0; n 107 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.width = win->width; n 108 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.height = -win->top; n 109 drivers/media/pci/bt8xx/btcx-risc.c n++; n 113 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.left = 0; n 114 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.top = sheight - win->top; n 115 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.width = win->width; n 116 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.height = win->height - clips[n].c.top; n 117 drivers/media/pci/bt8xx/btcx-risc.c n++; n 119 drivers/media/pci/bt8xx/btcx-risc.c return n; n 123 
drivers/media/pci/bt8xx/btcx-risc.c btcx_align(struct v4l2_rect *win, struct v4l2_clip *clips, unsigned int n, int mask) n 140 drivers/media/pci/bt8xx/btcx-risc.c for (i = 0; i < n; i++) { n 157 drivers/media/pci/bt8xx/btcx-risc.c int i,j,n; n 162 drivers/media/pci/bt8xx/btcx-risc.c for (n = 0, j = 0; j <= i; j++) { n 165 drivers/media/pci/bt8xx/btcx-risc.c n++; n 168 drivers/media/pci/bt8xx/btcx-risc.c if (0 == n) n 21 drivers/media/pci/bt8xx/btcx-risc.h struct v4l2_clip *clips, unsigned int n); n 23 drivers/media/pci/bt8xx/btcx-risc.h unsigned int n, int mask); n 3866 drivers/media/pci/bt8xx/bttv-cards.c u32 n; n 3877 drivers/media/pci/bt8xx/bttv-cards.c for (n = 0; n < microlen; n++) { n 3878 drivers/media/pci/bt8xx/bttv-cards.c bits = micro[n]; n 2127 drivers/media/pci/bt8xx/bttv-driver.c int n,size,retval = 0; n 2141 drivers/media/pci/bt8xx/bttv-driver.c n = win->clipcount; n 2142 drivers/media/pci/bt8xx/bttv-driver.c size = sizeof(*clips)*(n+4); n 2146 drivers/media/pci/bt8xx/bttv-driver.c if (n > 0) { n 2147 drivers/media/pci/bt8xx/bttv-driver.c if (copy_from_user(clips,win->clips,sizeof(struct v4l2_clip)*n)) { n 2155 drivers/media/pci/bt8xx/bttv-driver.c n = btcx_screen_clips(btv->fbuf.fmt.width, btv->fbuf.fmt.height, n 2156 drivers/media/pci/bt8xx/bttv-driver.c &win->w, clips, n); n 2157 drivers/media/pci/bt8xx/bttv-driver.c btcx_sort_clips(clips,n); n 2163 drivers/media/pci/bt8xx/bttv-driver.c btcx_align(&win->w, clips, n, 3); n 2166 drivers/media/pci/bt8xx/bttv-driver.c btcx_align(&win->w, clips, n, 1); n 2177 drivers/media/pci/bt8xx/bttv-driver.c fh->ov.nclips = n; n 3368 drivers/media/pci/bt8xx/bttv-driver.c unsigned int i,j,n; n 3372 drivers/media/pci/bt8xx/bttv-driver.c for (i = 0; i < (risc->size >> 2); i += n) { n 3376 drivers/media/pci/bt8xx/bttv-driver.c n = bttv_risc_decode(le32_to_cpu(risc->cpu[i])); n 3377 drivers/media/pci/bt8xx/bttv-driver.c for (j = 1; j < n; j++) n 190 drivers/media/pci/bt8xx/dvb-bt8xx.c int i, a, n, pump; n 217 drivers/media/pci/bt8xx/dvb-bt8xx.c n=((i<=2?2:1)*freq*10L)/(XTAL/100); n 218 drivers/media/pci/bt8xx/dvb-bt8xx.c a=n%32; n/=32; if(a==0) n--; n 222 drivers/media/pci/bt8xx/dvb-bt8xx.c ((n&0x1ff)<<(5+11))| n 226 drivers/media/pci/bt8xx/dvb-bt8xx.c dprintk("cx24108 debug: pump=%d, n=%d, a=%d\n", pump, n, a); n 106 drivers/media/pci/cobalt/cobalt-driver.h #define COBALT_SYS_CTRL_VIDEO_RX_RESETN_BIT(n) (4 + 4 * (n)) n 107 drivers/media/pci/cobalt/cobalt-driver.h #define COBALT_SYS_CTRL_NRESET_TO_HDMI_BIT(n) (5 + 4 * (n)) n 108 drivers/media/pci/cobalt/cobalt-driver.h #define COBALT_SYS_CTRL_HPD_TO_CONNECTOR_BIT(n) (6 + 4 * (n)) n 109 drivers/media/pci/cobalt/cobalt-driver.h #define COBALT_SYS_CTRL_AUDIO_IPP_RESETN_BIT(n) (7 + 4 * (n)) n 92 drivers/media/pci/cx18/cx18-mailbox.c static char *u32arr2hex(u32 data[], int n, char *buf) n 97 drivers/media/pci/cx18/cx18-mailbox.c for (i = 0, p = buf; i < n; i++, p += 11) { n 546 drivers/media/pci/cx23885/cx23885-core.c unsigned int i, j, n; n 560 drivers/media/pci/cx23885/cx23885-core.c for (i = 0; i < (64 >> 2); i += n) { n 566 drivers/media/pci/cx23885/cx23885-core.c n = cx23885_risc_decode(risc); n 567 drivers/media/pci/cx23885/cx23885-core.c for (j = 1; j < n; j++) { n 592 drivers/media/pci/cx23885/cx23885-core.c unsigned int i, j, n; n 596 drivers/media/pci/cx23885/cx23885-core.c for (i = 0; i < (risc->size >> 2); i += n) { n 598 drivers/media/pci/cx23885/cx23885-core.c n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i])); n 599 drivers/media/pci/cx23885/cx23885-core.c for (j = 1; j < n; j++) n 
723 drivers/media/pci/cx23885/cx23885-video.c unsigned int n; n 726 drivers/media/pci/cx23885/cx23885-video.c n = i->index; n 727 drivers/media/pci/cx23885/cx23885-video.c if (n >= MAX_CX23885_INPUT) n 730 drivers/media/pci/cx23885/cx23885-video.c if (0 == INPUT(n)->type) n 733 drivers/media/pci/cx23885/cx23885-video.c i->index = n; n 735 drivers/media/pci/cx23885/cx23885-video.c strscpy(i->name, iname[INPUT(n)->type], sizeof(i->name)); n 737 drivers/media/pci/cx23885/cx23885-video.c if ((CX23885_VMUX_TELEVISION == INPUT(n)->type) || n 738 drivers/media/pci/cx23885/cx23885-video.c (CX23885_VMUX_CABLE == INPUT(n)->type)) { n 746 drivers/media/pci/cx23885/cx23885-video.c if (dev->input == n) { n 823 drivers/media/pci/cx23885/cx23885-video.c unsigned int n; n 826 drivers/media/pci/cx23885/cx23885-video.c n = i->index; n 827 drivers/media/pci/cx23885/cx23885-video.c if (n >= 3) n 831 drivers/media/pci/cx23885/cx23885-video.c i->index = n; n 832 drivers/media/pci/cx23885/cx23885-video.c strscpy(i->name, iname[n], sizeof(i->name)); n 267 drivers/media/pci/cx23885/cx23888-ir.c u64 n; n 274 drivers/media/pci/cx23885/cx23888-ir.c n = (((u64) count << 2) | 0x3) * (divider + 1) * 1000; /* millicycles */ n 275 drivers/media/pci/cx23885/cx23888-ir.c rem = do_div(n, CX23888_IR_REFCLK_FREQ / 1000000); /* / MHz => ns */ n 277 drivers/media/pci/cx23885/cx23888-ir.c n++; n 278 drivers/media/pci/cx23885/cx23888-ir.c return n; n 283 drivers/media/pci/cx23885/cx23888-ir.c u64 n; n 290 drivers/media/pci/cx23885/cx23888-ir.c n = (((u64) count << 2) | 0x3) * (divider + 1); /* cycles */ n 291 drivers/media/pci/cx23885/cx23888-ir.c rem = do_div(n, CX23888_IR_REFCLK_FREQ / 1000000); /* / MHz => us */ n 293 drivers/media/pci/cx23885/cx23888-ir.c n++; n 294 drivers/media/pci/cx23885/cx23888-ir.c return (unsigned int) n; n 481 drivers/media/pci/cx23885/cx23888-ir.c u32 n; n 482 drivers/media/pci/cx23885/cx23888-ir.c n = DIV_ROUND_CLOSEST(duty_cycle * 100, 625); /* 16ths of 100% */ n 483 drivers/media/pci/cx23885/cx23888-ir.c if (n != 0) n 484 drivers/media/pci/cx23885/cx23888-ir.c n--; n 485 drivers/media/pci/cx23885/cx23888-ir.c if (n > 15) n 486 drivers/media/pci/cx23885/cx23888-ir.c n = 15; n 487 drivers/media/pci/cx23885/cx23888-ir.c cx23888_ir_write4(dev, CX23888_IR_CDUTY_REG, n); n 488 drivers/media/pci/cx23885/cx23888-ir.c return DIV_ROUND_CLOSEST((n + 1) * 100, 16); n 656 drivers/media/pci/cx23885/cx23888-ir.c unsigned int i, n; n 660 drivers/media/pci/cx23885/cx23888-ir.c n = count / sizeof(union cx23888_ir_fifo_rec) n 662 drivers/media/pci/cx23885/cx23888-ir.c if (n == 0) { n 667 drivers/media/pci/cx23885/cx23888-ir.c n = kfifo_out_locked(&state->rx_kfifo, buf, n, &state->rx_kfifo_lock); n 669 drivers/media/pci/cx23885/cx23888-ir.c n /= sizeof(union cx23888_ir_fifo_rec); n 670 drivers/media/pci/cx23885/cx23888-ir.c *num = n * sizeof(union cx23888_ir_fifo_rec); n 672 drivers/media/pci/cx23885/cx23888-ir.c for (p = (union cx23888_ir_fifo_rec *) buf, i = 0; i < n; p++, i++) { n 578 drivers/media/pci/cx25821/cx25821-core.c unsigned int i, j, n; n 592 drivers/media/pci/cx25821/cx25821-core.c for (i = 0; i < (64 >> 2); i += n) { n 598 drivers/media/pci/cx25821/cx25821-core.c n = cx25821_risc_decode(risc); n 599 drivers/media/pci/cx25821/cx25821-core.c for (j = 1; j < n; j++) { n 641 drivers/media/pci/cx25821/cx25821-core.c unsigned int i, j, n; n 658 drivers/media/pci/cx25821/cx25821-core.c for (i = 0; i < (64 >> 2); i += n) { n 664 drivers/media/pci/cx25821/cx25821-core.c n = cx25821_risc_decode(risc); n 666 
drivers/media/pci/cx25821/cx25821-core.c for (j = 1; j < n; j++) { n 695 drivers/media/pci/cx25821/cx25821-core.c n = cx_read(risc + i * 4); n 696 drivers/media/pci/cx25821/cx25821-core.c pr_cont("0x%x ", n); n 436 drivers/media/pci/cx88/cx88-core.c unsigned int i, j, n; n 442 drivers/media/pci/cx88/cx88-core.c for (n = 1, i = 0; i < 4; i++) { n 445 drivers/media/pci/cx88/cx88-core.c if (--n) n 446 drivers/media/pci/cx88/cx88-core.c pr_cont("0x%08x [ arg #%d ]\n", risc, n); n 448 drivers/media/pci/cx88/cx88-core.c n = cx88_risc_decode(risc); n 450 drivers/media/pci/cx88/cx88-core.c for (i = 0; i < 16; i += n) { n 453 drivers/media/pci/cx88/cx88-core.c n = cx88_risc_decode(risc); n 454 drivers/media/pci/cx88/cx88-core.c for (j = 1; j < n; j++) { n 858 drivers/media/pci/cx88/cx88-video.c unsigned int n = i->index; n 860 drivers/media/pci/cx88/cx88-video.c if (n >= 4) n 862 drivers/media/pci/cx88/cx88-video.c if (!INPUT(n).type) n 865 drivers/media/pci/cx88/cx88-video.c strscpy(i->name, iname[INPUT(n).type], sizeof(i->name)); n 866 drivers/media/pci/cx88/cx88-video.c if ((INPUT(n).type == CX88_VMUX_TELEVISION) || n 867 drivers/media/pci/cx88/cx88-video.c (INPUT(n).type == CX88_VMUX_CABLE)) n 32 drivers/media/pci/intel/ipu3/ipu3-cio2.h #define CIO2_REG_PIPE_BASE(n) ((n) * 0x0400) /* n = 0..3 */ n 178 drivers/media/pci/intel/ipu3/ipu3-cio2.h #define CIO2_PBM_FOPN_ABORT(n) (0x1 << 8 * (n)) n 179 drivers/media/pci/intel/ipu3/ipu3-cio2.h #define CIO2_PBM_FOPN_FORCE_ABORT(n) (0x2 << 8 * (n)) n 180 drivers/media/pci/intel/ipu3/ipu3-cio2.h #define CIO2_PBM_FOPN_FRAMEOPEN(n) (0x8 << 8 * (n)) n 210 drivers/media/pci/intel/ipu3/ipu3-cio2.h #define CIO2_REG_CDMABA(n) (0x1500 + 0x10 * (n)) /* n = 0..19 */ n 211 drivers/media/pci/intel/ipu3/ipu3-cio2.h #define CIO2_REG_CDMARI(n) (0x1504 + 0x10 * (n)) n 214 drivers/media/pci/intel/ipu3/ipu3-cio2.h #define CIO2_REG_CDMAC0(n) (0x1508 + 0x10 * (n)) n 224 drivers/media/pci/intel/ipu3/ipu3-cio2.h #define CIO2_REG_CDMAC1(n) (0x150c + 0x10 * (n)) n 228 drivers/media/pci/intel/ipu3/ipu3-cio2.h #define CIO2_REG_PXM_PXF_FMT_CFG0(n) (0x1700 + 0x30 * (n)) n 249 drivers/media/pci/intel/ipu3/ipu3-cio2.h #define CIO2_INT_EXT_IE_ECC_RE(n) (0x01 << (8 * (n))) n 250 drivers/media/pci/intel/ipu3/ipu3-cio2.h #define CIO2_INT_EXT_IE_DPHY_NR(n) (0x02 << (8 * (n))) n 251 drivers/media/pci/intel/ipu3/ipu3-cio2.h #define CIO2_INT_EXT_IE_ECC_NR(n) (0x04 << (8 * (n))) n 252 drivers/media/pci/intel/ipu3/ipu3-cio2.h #define CIO2_INT_EXT_IE_CRCERR(n) (0x08 << (8 * (n))) n 253 drivers/media/pci/intel/ipu3/ipu3-cio2.h #define CIO2_INT_EXT_IE_INTERFRAMEDATA(n) (0x10 << (8 * (n))) n 254 drivers/media/pci/intel/ipu3/ipu3-cio2.h #define CIO2_INT_EXT_IE_PKT2SHORT(n) (0x20 << (8 * (n))) n 255 drivers/media/pci/intel/ipu3/ipu3-cio2.h #define CIO2_INT_EXT_IE_PKT2LONG(n) (0x40 << (8 * (n))) n 256 drivers/media/pci/intel/ipu3/ipu3-cio2.h #define CIO2_INT_EXT_IE_IRQ(n) (0x80 << (8 * (n))) n 257 drivers/media/pci/intel/ipu3/ipu3-cio2.h #define CIO2_REG_PXM_FRF_CFG(n) (0x1720 + 0x30 * (n)) n 270 drivers/media/pci/intel/ipu3/ipu3-cio2.h #define CIO2_REG_PXM_SID2BID0(n) (0x1724 + 0x30 * (n)) n 450 drivers/media/pci/meye/meye.c int n = 10; n 451 drivers/media/pci/meye/meye.c while (--n && mchip_read(reg) != v) n 453 drivers/media/pci/meye/meye.c return n; n 72 drivers/media/pci/meye/meye.h #define MCHIP_MM_FIR(n) (0x0c+(n)*4) /* Frame info 0-3 */ n 995 drivers/media/pci/ngene/ngene-core.c u32 n = pRingBuffer->NumBuffers; n 1001 drivers/media/pci/ngene/ngene-core.c for (i = 0; i < n; i++) { n 34 
drivers/media/pci/pluto2/pluto2.c #define REG_PIDn(n) ((n) << 2) /* PID n pattern registers */ n 358 drivers/media/pci/saa7134/saa7134-core.c struct list_head *pos, *n; n 363 drivers/media/pci/saa7134/saa7134-core.c list_for_each_safe(pos, n, &q->queue) { n 30 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_RS_BA1(n) ((0x200 >> 2) + 4*n) n 31 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_RS_BA2(n) ((0x204 >> 2) + 4*n) n 32 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_RS_PITCH(n) ((0x208 >> 2) + 4*n) n 33 drivers/media/pci/saa7134/saa7134-reg.h #define SAA7134_RS_CONTROL(n) ((0x20c >> 2) + 4*n) n 1417 drivers/media/pci/saa7134/saa7134-video.c unsigned int n; n 1419 drivers/media/pci/saa7134/saa7134-video.c n = i->index; n 1420 drivers/media/pci/saa7134/saa7134-video.c if (n >= SAA7134_INPUT_MAX) n 1424 drivers/media/pci/saa7134/saa7134-video.c i->index = n; n 1425 drivers/media/pci/saa7134/saa7134-video.c strscpy(i->name, saa7134_input_name[card_in(dev, n).type], n 1427 drivers/media/pci/saa7134/saa7134-video.c switch (card_in(dev, n).type) { n 1436 drivers/media/pci/saa7134/saa7134-video.c if (n == dev->ctl_input) { n 1684 drivers/media/pci/saa7134/saa7134-video.c int n; n 1689 drivers/media/pci/saa7134/saa7134-video.c for (n = 0; n < SAA7134_INPUT_MAX; n++) { n 1690 drivers/media/pci/saa7134/saa7134-video.c if (card_in(dev, n).type == SAA7134_INPUT_TV || n 1691 drivers/media/pci/saa7134/saa7134-video.c card_in(dev, n).type == SAA7134_INPUT_TV_MONO) n 1694 drivers/media/pci/saa7134/saa7134-video.c if (n == SAA7134_INPUT_MAX) n 1696 drivers/media/pci/saa7134/saa7134-video.c if (card_in(dev, n).type != SAA7134_NO_INPUT) { n 421 drivers/media/pci/saa7134/saa7134.h #define card_in(dev,n) (saa7134_boards[dev->board].inputs[n]) n 223 drivers/media/pci/saa7164/saa7164-buffer.c struct list_head *c, *n; n 250 drivers/media/pci/saa7164/saa7164-buffer.c list_for_each_safe(c, n, &port->dmaqueue.list) { n 252 drivers/media/pci/saa7164/saa7164-core.c struct list_head *c, *n; n 257 drivers/media/pci/saa7164/saa7164-core.c list_for_each_safe(c, n, &port->dmaqueue.list) { n 575 drivers/media/pci/saa7164/saa7164-core.c struct list_head *c, *n; n 591 drivers/media/pci/saa7164/saa7164-core.c list_for_each_safe(c, n, &port->dmaqueue.list) { n 477 drivers/media/pci/saa7164/saa7164-dvb.c struct list_head *c, *n; n 487 drivers/media/pci/saa7164/saa7164-dvb.c list_for_each_safe(c, n, &port->dmaqueue.list) { n 61 drivers/media/pci/saa7164/saa7164-encoder.c struct list_head *c, *n, *p, *q, *l, *v; n 70 drivers/media/pci/saa7164/saa7164-encoder.c list_for_each_safe(c, n, &port->dmaqueue.list) { n 246 drivers/media/pci/saa7164/saa7164-encoder.c int n; n 258 drivers/media/pci/saa7164/saa7164-encoder.c for (n = 0; n < ARRAY_SIZE(saa7164_tvnorms); n++) n 259 drivers/media/pci/saa7164/saa7164-encoder.c i->std |= saa7164_tvnorms[n].id; n 592 drivers/media/pci/saa7164/saa7164-encoder.c struct list_head *c, *n; n 608 drivers/media/pci/saa7164/saa7164-encoder.c list_for_each_safe(c, n, &port->dmaqueue.list) { n 614 drivers/media/pci/saa7164/saa7164-encoder.c list_for_each_safe(c, n, &port->list_buf_used.list) { n 30 drivers/media/pci/saa7164/saa7164-vbi.c struct list_head *c, *n, *p, *q, *l, *v; n 39 drivers/media/pci/saa7164/saa7164-vbi.c list_for_each_safe(c, n, &port->dmaqueue.list) { n 276 drivers/media/pci/saa7164/saa7164-vbi.c struct list_head *c, *n; n 292 drivers/media/pci/saa7164/saa7164-vbi.c list_for_each_safe(c, n, &port->dmaqueue.list) { n 298 drivers/media/pci/saa7164/saa7164-vbi.c 
list_for_each_safe(c, n, &port->list_buf_used.list) { n 167 drivers/media/pci/solo6x10/solo6x10-disp.c const int n = 64, size = n * sizeof(*buf); n 174 drivers/media/pci/solo6x10/solo6x10-disp.c for (i = 0; i < n; i++) n 34 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_DMA_CTRL_REFRESH_CYCLE(n) ((n)<<8) n 36 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_DMA_CTRL_SDRAM_SIZE(n) ((n)<<6) n 41 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_DMA_CTRL_LATENCY(n) ((n)<<0) n 49 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VCLK_SELECT(n) ((n)<<20) n 50 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VCLK_VIN1415_DELAY(n) ((n)<<14) n 51 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VCLK_VIN1213_DELAY(n) ((n)<<12) n 52 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VCLK_VIN1011_DELAY(n) ((n)<<10) n 53 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VCLK_VIN0809_DELAY(n) ((n)<<8) n 54 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VCLK_VIN0607_DELAY(n) ((n)<<6) n 55 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VCLK_VIN0405_DELAY(n) ((n)<<4) n 56 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VCLK_VIN0203_DELAY(n) ((n)<<2) n 57 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VCLK_VIN0001_DELAY(n) ((n)<<0) n 61 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_IRQ_P2M(n) BIT((n) + 17) n 73 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_IRQ_UART(n) BIT((n) + 4) n 105 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_P2M_CONFIG(n) (0x0080 + ((n)*0x20)) n 106 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_P2M_DMA_INTERVAL(n) ((n)<<6)/* N*32 clocks */ n 115 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_P2M_DES_ADR(n) (0x0084 + ((n)*0x20)) n 117 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_P2M_DESC_ID(n) (0x0088 + ((n)*0x20)) n 118 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_P2M_UPDATE_ID(n) ((n)<<0) n 120 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_P2M_STATUS(n) (0x008C + ((n)*0x20)) n 124 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_P2M_CONTROL(n) (0x0090 + ((n)*0x20)) n 125 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_P2M_PCI_INC(n) ((n)<<20) n 126 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_P2M_REPEAT(n) ((n)<<10) n 128 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_P2M_BURST_SIZE(n) ((n)<<7) n 136 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_P2M_ALPHA_MODE(n) ((n)<<4) n 142 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_P2M_EXT_CFG(n) (0x0094 + ((n)*0x20)) n 143 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_P2M_EXT_INC(n) ((n)<<20) n 144 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_P2M_COPY_SIZE(n) ((n)<<0) n 146 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_P2M_TAR_ADR(n) (0x0098 + ((n)*0x20)) n 148 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_P2M_EXT_ADR(n) (0x009C + ((n)*0x20)) n 158 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VI_FD_SEL_MASK(n) ((n)<<16) n 159 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VI_PROG_MASK(n) ((n)<<0) n 167 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_FI_INV_DISP_LIVE(n) ((n)<<8) n 168 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_FI_INV_DISP_OUT(n) ((n)<<7) n 169 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_DISP_SYNC_FI(n) ((n)<<6) n 170 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_PIP_PAGE_ADD(n) ((n)<<3) n 171 
drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_NORMAL_PAGE_ADD(n) ((n)<<0) n 177 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VI_H_START(n) ((n)<<21) n 178 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VI_V_START(n) ((n)<<11) n 179 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VI_V_STOP(n) ((n)<<0) n 195 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VI_PB_HSTART(n) ((n)<<12) n 196 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VI_PB_HSTOP(n) ((n)<<0) n 198 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VI_PB_VSTART(n) ((n)<<12) n 199 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VI_PB_VSTOP(n) ((n)<<0) n 210 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VI_WIN_CHANNEL(n) ((n)<<28) n 212 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VI_WIN_PIP(n) ((n)<<27) n 213 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VI_WIN_SCALE(n) ((n)<<24) n 229 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VI_MOTION_FRAME_COUNT(n) ((n)<<24) n 230 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VI_MOTION_SAMPLE_LENGTH(n) ((n)<<16) n 233 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VI_MOTION_SAMPLE_COUNT(n) ((n)<<0) n 236 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VI_MOTION_CNT(n) ((n)<<0) n 265 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VO_H_BLANK(n) ((n)<<22) n 266 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VO_H_START(n) ((n)<<11) n 267 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VO_H_STOP(n) ((n)<<0) n 270 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VO_V_BLANK(n) ((n)<<22) n 271 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VO_V_START(n) ((n)<<11) n 272 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VO_V_STOP(n) ((n)<<0) n 278 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VO_H_LEN(n) ((n)<<11) n 279 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VO_V_LEN(n) ((n)<<0) n 283 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VO_DISP_ERASE_COUNT(n) ((n&0xf)<<24) n 286 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VO_DISP_BASE(n) (((n)>>16) & 0xffff) n 308 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VO_DEINTERLACE_THRESHOLD(n) ((n)<<8) n 309 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VO_DEINTERLACE_EDGE_VALUE(n) ((n)<<0) n 316 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VO_BORDER_X(n) (0x0340+((n)*4)) n 317 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VO_BORDER_Y(n) (0x0354+((n)*4)) n 327 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VO_RECTANGLE_CTRL(n) (0x0368+((n)*12)) n 328 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VO_RECTANGLE_START(n) (0x036c+((n)*12)) n 329 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VO_RECTANGLE_STOP(n) (0x0370+((n)*12)) n 341 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VO_OSG_ALPHA_RATE(n) ((n)<<22) n 342 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VO_OSG_ALPHA_BG_RATE(n) ((n)<<16) n 354 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_CAP_MAX_PAGE(n) ((n)<<16) n 355 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_CAP_BASE_ADDR(n) ((n)<<0) n 357 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_CAP_PROG_BANDWIDTH(n) ((n)<<8) n 358 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_CAP_MAX_BANDWIDTH(n) ((n)<<0) n 365 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_DIM_V_MB_NUM_FRAME(n) ((n)<<16) n 366 
drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_DIM_V_MB_NUM_FIELD(n) ((n)<<8) n 367 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_DIM_H_MB_NUM(n) ((n)<<0) n 380 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VE_INTR_CTRL(n) ((n)<<24) n 381 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VE_BLOCK_SIZE(n) ((n)<<16) n 382 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VE_BLOCK_BASE(n) ((n)<<0) n 385 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VE_BYTE_ALIGN(n) ((n)<<24) n 387 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VE_MOTION_MODE(n) ((n)<<16) n 388 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VE_MOTION_BASE(n) ((n)<<0) n 389 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VE_MPEG_SIZE_H(n) ((n)<<28) /* 6110 Only */ n 390 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VE_JPEG_SIZE_H(n) ((n)<<20) /* 6110 Only */ n 401 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_COMP_ATTR_FCODE(n) ((n)<<27) n 402 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_COMP_TIME_INC(n) ((n)<<25) n 403 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_COMP_TIME_WIDTH(n) ((n)<<21) n 404 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_DCT_INTERVAL(n) ((n)<<16) n 407 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VE_STATE(n) (0x0640+((n)*4)) n 424 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VE_OSD_H_OFFSET(n) ((n & 0x7f)<<7) n 425 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VE_OSD_V_OFFSET(n) (n & 0x7f) n 436 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VE_MPEG4_QUE(n) (0x0A00+((n)*8)) n 437 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VE_JPEG_QUE(n) (0x0A04+((n)*8)) n 452 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VD_CFG_TIME_WIDTH(n) ((n)<<8) n 453 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VD_CFG_DCT_INTERVAL(n) ((n)<<0) n 458 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VD_DEINTERLACE_THRESHOLD(n) ((n)<<8) n 459 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VD_DEINTERLACE_EDGE_VALUE(n) ((n)<<0) n 465 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VD_MAX_ITEM(n) ((n)<<0) n 476 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VD_IDX_CHANNEL(n) ((n)<<24) n 477 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VD_IDX_SIZE(n) ((n)<<0) n 480 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VD_IDX_SRC_SCALE(n) ((n)<<28) n 481 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VD_IDX_WINDOW(n) ((n)<<24) n 483 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VD_IDX_H_BLOCK(n) ((n)<<8) n 484 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VD_IDX_V_BLOCK(n) ((n)<<0) n 488 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VD_IDX_REF_BASE(n) (((n)>>16)&0xffff) n 491 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VD_IDX_DISP_SCALE(n) ((n)<<28) n 495 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VD_IDX_OFFSET_X(n) ((n)<<12) n 496 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VD_IDX_OFFSET_Y(n) ((n)<<0) n 499 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VD_IDX_DEC_WR_PAGE(n) ((n)<<8) n 500 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VD_IDX_DISP_RD_PAGE(n) ((n)<<0) n 502 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_VD_WR_PAGE(n) (0x03F0 + ((n) * 4)) n 517 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_IIC_PRESCALE(n) ((n)<<0) n 525 drivers/media/pci/solo6x10/solo6x10-regs.h #define 
SOLO_IIC_CH_SET(n) ((n)<<5) n 538 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_UART_CONTROL(n) (0x0BA0 + ((n)*0x20)) n 539 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_UART_CLK_DIV(n) ((n)<<24) n 573 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_UART_STATUS(n) (0x0BA4 + ((n)*0x20)) n 586 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_UART_TX_DATA(n) (0x0BA8 + ((n)*0x20)) n 588 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_UART_RX_DATA(n) (0x0BAC + ((n)*0x20)) n 602 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_AUDIO_I2S_MULTI(n) ((n)<<24) n 604 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_AUDIO_DEC_9TO0_VOL(n) ((n)<<20) n 606 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_AUDIO_DEC_19TO10_VOL(n) ((n)<<16) n 607 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_AUDIO_MODE(n) ((n)<<0) n 611 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_AUDIO_BITRATE(n) ((n)<<16) n 612 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_AUDIO_CLK_DIV(n) ((n)<<0) n 614 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_AUDIO_FDMA_INTERVAL(n) ((n)<<19) n 615 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_AUDIO_INTR_ORDER(n) ((n)<<16) n 616 drivers/media/pci/solo6x10/solo6x10-regs.h #define SOLO_AUDIO_FDMA_BASE(n) ((n)<<0) n 505 drivers/media/pci/solo6x10/solo6x10-tw28.c int start, int n) n 507 drivers/media/pci/solo6x10/solo6x10-tw28.c for (; start < n; start++, vals++) { n 19 drivers/media/pci/solo6x10/solo6x10-tw28.h #define TW_CHIP_OFFSET_ADDR(n) (TW_BASE_ADDR + (n)) n 23 drivers/media/pci/solo6x10/solo6x10-tw28.h #define TW_HUE_ADDR(n) (0x07 | ((n) << 4)) n 24 drivers/media/pci/solo6x10/solo6x10-tw28.h #define TW_SATURATION_ADDR(n) (0x08 | ((n) << 4)) n 25 drivers/media/pci/solo6x10/solo6x10-tw28.h #define TW_CONTRAST_ADDR(n) (0x09 | ((n) << 4)) n 26 drivers/media/pci/solo6x10/solo6x10-tw28.h #define TW_BRIGHTNESS_ADDR(n) (0x0a | ((n) << 4)) n 28 drivers/media/pci/solo6x10/solo6x10-tw28.h #define TW_AUDIO_INPUT_GAIN_ADDR(n) (0x60 + ((n > 1) ? 1 : 0)) n 32 drivers/media/pci/solo6x10/solo6x10-tw28.h #define TW286x_HUE_ADDR(n) (0x06 | ((n) << 4)) n 33 drivers/media/pci/solo6x10/solo6x10-tw28.h #define TW286x_SATURATIONU_ADDR(n) (0x04 | ((n) << 4)) n 34 drivers/media/pci/solo6x10/solo6x10-tw28.h #define TW286x_SATURATIONV_ADDR(n) (0x05 | ((n) << 4)) n 35 drivers/media/pci/solo6x10/solo6x10-tw28.h #define TW286x_CONTRAST_ADDR(n) (0x02 | ((n) << 4)) n 36 drivers/media/pci/solo6x10/solo6x10-tw28.h #define TW286x_BRIGHTNESS_ADDR(n) (0x01 | ((n) << 4)) n 37 drivers/media/pci/solo6x10/solo6x10-tw28.h #define TW286x_SHARPNESS(n) (0x03 | ((n) << 4)) n 39 drivers/media/pci/solo6x10/solo6x10-tw28.h #define TW286x_AUDIO_INPUT_GAIN_ADDR(n) (0xD0 + ((n > 1) ? 
1 : 0)) n 1032 drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c static inline int calc_interval(u8 fps, u32 n, u32 d) n 1034 drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c if (!n || !d) n 1037 drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c return n; n 1038 drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c n *= fps; n 1039 drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c return min(15U, n / d + (n % d >= (fps >> 1))); n 472 drivers/media/pci/ttpci/av7110_av.c unsigned long todo = count, n; n 489 drivers/media/pci/ttpci/av7110_av.c n = todo; n 490 drivers/media/pci/ttpci/av7110_av.c if (n > IPACKS * 2) n 491 drivers/media/pci/ttpci/av7110_av.c n = IPACKS * 2; n 492 drivers/media/pci/ttpci/av7110_av.c if (copy_from_user(av7110->kbuf[type], buf, n)) n 494 drivers/media/pci/ttpci/av7110_av.c av7110_ipack_instant_repack(av7110->kbuf[type], n, n 496 drivers/media/pci/ttpci/av7110_av.c todo -= n; n 497 drivers/media/pci/ttpci/av7110_av.c buf += n; n 505 drivers/media/pci/ttpci/av7110_av.c unsigned long todo = count, n; n 522 drivers/media/pci/ttpci/av7110_av.c n = todo; n 523 drivers/media/pci/ttpci/av7110_av.c if (n > IPACKS * 2) n 524 drivers/media/pci/ttpci/av7110_av.c n = IPACKS * 2; n 525 drivers/media/pci/ttpci/av7110_av.c av7110_ipack_instant_repack(buf, n, &av7110->ipack[type]); n 526 drivers/media/pci/ttpci/av7110_av.c todo -= n; n 527 drivers/media/pci/ttpci/av7110_av.c buf += n; n 535 drivers/media/pci/ttpci/av7110_av.c unsigned long todo = count, n; n 551 drivers/media/pci/ttpci/av7110_av.c n = todo; n 552 drivers/media/pci/ttpci/av7110_av.c if (n > IPACKS * 2) n 553 drivers/media/pci/ttpci/av7110_av.c n = IPACKS * 2; n 554 drivers/media/pci/ttpci/av7110_av.c if (copy_from_user(av7110->kbuf[type], buf, n)) n 556 drivers/media/pci/ttpci/av7110_av.c av7110_ipack_instant_repack(av7110->kbuf[type], n, n 558 drivers/media/pci/ttpci/av7110_av.c todo -= n; n 559 drivers/media/pci/ttpci/av7110_av.c buf += n; n 1024 drivers/media/pci/ttpci/av7110_av.c unsigned i, n; n 1067 drivers/media/pci/ttpci/av7110_av.c n = MIN_IFRAME / len + 1; n 1072 drivers/media/pci/ttpci/av7110_av.c for (i = 0; i < n; i++) n 901 drivers/media/pci/tw5864/tw5864-reg.h #define TW5864_INTR_GPIO(n) (1 << (4 + n)) n 664 drivers/media/pci/tw68/tw68-video.c unsigned int n; n 666 drivers/media/pci/tw68/tw68-video.c n = i->index; n 667 drivers/media/pci/tw68/tw68-video.c if (n >= TW68_INPUT_MAX) n 669 drivers/media/pci/tw68/tw68-video.c i->index = n; n 671 drivers/media/pci/tw68/tw68-video.c snprintf(i->name, sizeof(i->name), "Composite %d", n); n 674 drivers/media/pci/tw68/tw68-video.c if (n == dev->input) { n 69 drivers/media/platform/atmel/atmel-isc-regs.h #define ISC_CLK(n) BIT(n) n 73 drivers/media/platform/atmel/atmel-isc-regs.h #define ISC_CLKCFG_DIV_SHIFT(n) ((n)*16) n 74 drivers/media/platform/atmel/atmel-isc-regs.h #define ISC_CLKCFG_DIV_MASK(n) GENMASK(((n)*16 + 7), (n)*16) n 75 drivers/media/platform/atmel/atmel-isc-regs.h #define ISC_CLKCFG_SEL_SHIFT(n) ((n)*16 + 8) n 76 drivers/media/platform/atmel/atmel-isc-regs.h #define ISC_CLKCFG_SEL_MASK(n) GENMASK(((n)*17 + 8), ((n)*16 + 8)) n 134 drivers/media/platform/atmel/atmel-isi.c #define notifier_to_isi(n) container_of(n, struct atmel_isi, notifier) n 33 drivers/media/platform/cadence/cdns-csi2rx.c #define CSI2RX_STREAM_BASE(n) (((n) + 1) * 0x100) n 35 drivers/media/platform/cadence/cdns-csi2rx.c #define CSI2RX_STREAM_CTRL_REG(n) (CSI2RX_STREAM_BASE(n) + 0x000) n 38 drivers/media/platform/cadence/cdns-csi2rx.c #define CSI2RX_STREAM_DATA_CFG_REG(n) (CSI2RX_STREAM_BASE(n) + 
0x008) n 40 drivers/media/platform/cadence/cdns-csi2rx.c #define CSI2RX_STREAM_DATA_CFG_VC_SELECT(n) BIT((n) + 16) n 42 drivers/media/platform/cadence/cdns-csi2rx.c #define CSI2RX_STREAM_CFG_REG(n) (CSI2RX_STREAM_BASE(n) + 0x00c) n 34 drivers/media/platform/cadence/cdns-csi2tx.c #define CSI2TX_DPHY_CFG_LANE_RESET(n) BIT((n) + 12) n 40 drivers/media/platform/cadence/cdns-csi2tx.c #define CSI2TX_DPHY_CFG_LANE_ENABLE(n) BIT(n) n 43 drivers/media/platform/cadence/cdns-csi2tx.c #define CSI2TX_DPHY_CLK_WAKEUP_ULPS_CYCLES(n) ((n) & 0xffff) n 45 drivers/media/platform/cadence/cdns-csi2tx.c #define CSI2TX_DT_CFG_REG(n) (0x80 + (n) * 8) n 46 drivers/media/platform/cadence/cdns-csi2tx.c #define CSI2TX_DT_CFG_DT(n) (((n) & 0x3f) << 2) n 48 drivers/media/platform/cadence/cdns-csi2tx.c #define CSI2TX_DT_FORMAT_REG(n) (0x84 + (n) * 8) n 49 drivers/media/platform/cadence/cdns-csi2tx.c #define CSI2TX_DT_FORMAT_BYTES_PER_LINE(n) (((n) & 0xffff) << 16) n 50 drivers/media/platform/cadence/cdns-csi2tx.c #define CSI2TX_DT_FORMAT_MAX_LINE_NUM(n) ((n) & 0xffff) n 52 drivers/media/platform/cadence/cdns-csi2tx.c #define CSI2TX_STREAM_IF_CFG_REG(n) (0x100 + (n) * 4) n 53 drivers/media/platform/cadence/cdns-csi2tx.c #define CSI2TX_STREAM_IF_CFG_FILL_LEVEL(n) ((n) & 0x1f) n 64 drivers/media/platform/cadence/cdns-csi2tx.c #define CSI2TX_V2_DPHY_CFG_LANE_ENABLE(n) BIT(n) n 186 drivers/media/platform/coda/coda-bit.c u32 n; n 196 drivers/media/platform/coda/coda-bit.c n = kfifo_in(&ctx->bitstream_fifo, buf, size); n 199 drivers/media/platform/coda/coda-bit.c return (n < size) ? -ENOSPC : 0; n 224 drivers/media/platform/coda/coda-bit.c u32 n = kfifo_in(&ctx->bitstream_fifo, buf, size); n 226 drivers/media/platform/coda/coda-bit.c return (n < size) ? -ENOSPC : 0; n 142 drivers/media/platform/exynos-gsc/gsc-regs.h #define GSC_IN_BASE_ADDR_Y(n) (0x50 + (n) * 0x4) n 147 drivers/media/platform/exynos-gsc/gsc-regs.h #define GSC_IN_BASE_ADDR_CB(n) (0x80 + (n) * 0x4) n 152 drivers/media/platform/exynos-gsc/gsc-regs.h #define GSC_IN_BASE_ADDR_CR(n) (0xb0 + (n) * 0x4) n 157 drivers/media/platform/exynos-gsc/gsc-regs.h #define GSC_OUT_BASE_ADDR_Y(n) (0x110 + (n) * 0x4) n 162 drivers/media/platform/exynos-gsc/gsc-regs.h #define GSC_OUT_BASE_ADDR_CB(n) (0x160 + (n) * 0x4) n 167 drivers/media/platform/exynos-gsc/gsc-regs.h #define GSC_OUT_BASE_ADDR_CR(n) (0x1b0 + (n) * 0x4) n 72 drivers/media/platform/exynos4-is/fimc-reg.h #define FIMC_REG_CIOYSA(n) (0x18 + (n) * 4) n 73 drivers/media/platform/exynos4-is/fimc-reg.h #define FIMC_REG_CIOCBSA(n) (0x28 + (n) * 4) n 74 drivers/media/platform/exynos4-is/fimc-reg.h #define FIMC_REG_CIOCRSA(n) (0x38 + (n) * 4) n 197 drivers/media/platform/exynos4-is/fimc-reg.h #define FIMC_REG_CIIYSA(n) (0xd4 + (n) * 0x70) n 198 drivers/media/platform/exynos4-is/fimc-reg.h #define FIMC_REG_CIICBSA(n) (0xd8 + (n) * 0x70) n 199 drivers/media/platform/exynos4-is/fimc-reg.h #define FIMC_REG_CIICRSA(n) (0xdc + (n) * 0x70) n 168 drivers/media/platform/exynos4-is/media-dev.h static inline struct fimc_md *notifier_to_fimc_md(struct v4l2_async_notifier *n) n 170 drivers/media/platform/exynos4-is/media-dev.h return container_of(n, struct fimc_md, subdev_notifier); n 263 drivers/media/platform/marvell-ccic/mcam-core.h #define CSI2_C0_ACT_LANE(n) ((n-1) << 1) n 97 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c static int mtk_jpeg_enum_fmt(struct mtk_jpeg_fmt *mtk_jpeg_formats, int n, n 102 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c for (i = 0; i < n; ++i) { n 110 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c if (i 
>= n) n 370 drivers/media/platform/omap3isp/ispccdc.c struct ispccdc_lsc_config_req *req, *n; n 374 drivers/media/platform/omap3isp/ispccdc.c list_for_each_entry_safe(req, n, queue, list) { n 699 drivers/media/platform/omap3isp/ispcsi2.c unsigned int n = ctx->ctxnum; n 702 drivers/media/platform/omap3isp/ispcsi2.c status = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_IRQSTATUS(n)); n 703 drivers/media/platform/omap3isp/ispcsi2.c isp_reg_writel(isp, status, csi2->regs1, ISPCSI2_CTX_IRQSTATUS(n)); n 723 drivers/media/platform/omap3isp/ispcsi2.c csi2_ctx_enable(isp, csi2, n, 1); n 1213 drivers/media/platform/omap3isp/ispreg.h #define ISPCSI2_IRQSTATUS_CONTEXT(n) BIT(n) n 1256 drivers/media/platform/omap3isp/ispreg.h #define ISPCSI2_PHY_CFG_DATA_POL_SHIFT(n) (3 + ((n) * 4)) n 1257 drivers/media/platform/omap3isp/ispreg.h #define ISPCSI2_PHY_CFG_DATA_POL_MASK(n) \ n 1258 drivers/media/platform/omap3isp/ispreg.h (0x1 << ISPCSI2_PHY_CFG_DATA_POL_SHIFT(n)) n 1259 drivers/media/platform/omap3isp/ispreg.h #define ISPCSI2_PHY_CFG_DATA_POL_PN(n) \ n 1260 drivers/media/platform/omap3isp/ispreg.h (0x0 << ISPCSI2_PHY_CFG_DATA_POL_SHIFT(n)) n 1261 drivers/media/platform/omap3isp/ispreg.h #define ISPCSI2_PHY_CFG_DATA_POL_NP(n) \ n 1262 drivers/media/platform/omap3isp/ispreg.h (0x1 << ISPCSI2_PHY_CFG_DATA_POL_SHIFT(n)) n 1264 drivers/media/platform/omap3isp/ispreg.h #define ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n) ((n) * 4) n 1265 drivers/media/platform/omap3isp/ispreg.h #define ISPCSI2_PHY_CFG_DATA_POSITION_MASK(n) \ n 1266 drivers/media/platform/omap3isp/ispreg.h (0x7 << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n)) n 1267 drivers/media/platform/omap3isp/ispreg.h #define ISPCSI2_PHY_CFG_DATA_POSITION_NC(n) \ n 1268 drivers/media/platform/omap3isp/ispreg.h (0x0 << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n)) n 1269 drivers/media/platform/omap3isp/ispreg.h #define ISPCSI2_PHY_CFG_DATA_POSITION_1(n) \ n 1270 drivers/media/platform/omap3isp/ispreg.h (0x1 << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n)) n 1271 drivers/media/platform/omap3isp/ispreg.h #define ISPCSI2_PHY_CFG_DATA_POSITION_2(n) \ n 1272 drivers/media/platform/omap3isp/ispreg.h (0x2 << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n)) n 1273 drivers/media/platform/omap3isp/ispreg.h #define ISPCSI2_PHY_CFG_DATA_POSITION_3(n) \ n 1274 drivers/media/platform/omap3isp/ispreg.h (0x3 << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n)) n 1275 drivers/media/platform/omap3isp/ispreg.h #define ISPCSI2_PHY_CFG_DATA_POSITION_4(n) \ n 1276 drivers/media/platform/omap3isp/ispreg.h (0x4 << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n)) n 1277 drivers/media/platform/omap3isp/ispreg.h #define ISPCSI2_PHY_CFG_DATA_POSITION_5(n) \ n 1278 drivers/media/platform/omap3isp/ispreg.h (0x5 << ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(n)) n 1363 drivers/media/platform/omap3isp/ispreg.h #define ISPCSI2_TIMING_FORCE_RX_MODE_IO(n) (1 << ((16 * ((n) - 1)) + 15)) n 1364 drivers/media/platform/omap3isp/ispreg.h #define ISPCSI2_TIMING_STOP_STATE_X16_IO(n) (1 << ((16 * ((n) - 1)) + 14)) n 1365 drivers/media/platform/omap3isp/ispreg.h #define ISPCSI2_TIMING_STOP_STATE_X4_IO(n) (1 << ((16 * ((n) - 1)) + 13)) n 1366 drivers/media/platform/omap3isp/ispreg.h #define ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_SHIFT(n) (16 * ((n) - 1)) n 1367 drivers/media/platform/omap3isp/ispreg.h #define ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_MASK(n) \ n 1368 drivers/media/platform/omap3isp/ispreg.h (0x1fff << ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_SHIFT(n)) n 1370 drivers/media/platform/omap3isp/ispreg.h #define ISPCSI2_CTX_CTRL1(n) ((0x070) + 0x20 * (n)) n 
1381 drivers/media/platform/omap3isp/ispreg.h #define ISPCSI2_CTX_CTRL2(n) ((0x074) + 0x20 * (n)) n 1396 drivers/media/platform/omap3isp/ispreg.h #define ISPCSI2_CTX_DAT_OFST(n) ((0x078) + 0x20 * (n)) n 1401 drivers/media/platform/omap3isp/ispreg.h #define ISPCSI2_CTX_DAT_PING_ADDR(n) ((0x07c) + 0x20 * (n)) n 1402 drivers/media/platform/omap3isp/ispreg.h #define ISPCSI2_CTX_DAT_PONG_ADDR(n) ((0x080) + 0x20 * (n)) n 1403 drivers/media/platform/omap3isp/ispreg.h #define ISPCSI2_CTX_IRQENABLE(n) ((0x084) + 0x20 * (n)) n 1413 drivers/media/platform/omap3isp/ispreg.h #define ISPCSI2_CTX_IRQSTATUS(n) ((0x088) + 0x20 * (n)) n 1423 drivers/media/platform/omap3isp/ispreg.h #define ISPCSI2_CTX_CTRL3(n) ((0x08c) + 0x20 * (n)) n 1429 drivers/media/platform/omap3isp/ispreg.h #define ISPCSI2_CTX_TRANSCODEH(n) (0x000 + 0x8 * (n)) n 1436 drivers/media/platform/omap3isp/ispreg.h #define ISPCSI2_CTX_TRANSCODEV(n) (0x004 + 0x8 * (n)) n 591 drivers/media/platform/pxa_camera.c int n) n 595 drivers/media/platform/pxa_camera.c for (i = 0; i < n; i++) n 33 drivers/media/platform/qcom/camss/camss-csid.c #define CAMSS_CSID_CID_LUT_VC_n(v, n) \ n 34 drivers/media/platform/qcom/camss/camss-csid.c (((v) == CAMSS_8x16 ? 0x010 : 0x014) + 0x4 * (n)) n 35 drivers/media/platform/qcom/camss/camss-csid.c #define CAMSS_CSID_CID_n_CFG(v, n) \ n 36 drivers/media/platform/qcom/camss/camss-csid.c (((v) == CAMSS_8x16 ? 0x020 : 0x024) + 0x4 * (n)) n 55 drivers/media/platform/qcom/camss/camss-csid.c #define CAMSS_CSID_TG_DT_n_CGG_0(v, n) \ n 56 drivers/media/platform/qcom/camss/camss-csid.c (((v) == CAMSS_8x16 ? 0x0ac : 0x0b4) + 0xc * (n)) n 57 drivers/media/platform/qcom/camss/camss-csid.c #define CAMSS_CSID_TG_DT_n_CGG_1(v, n) \ n 58 drivers/media/platform/qcom/camss/camss-csid.c (((v) == CAMSS_8x16 ? 0x0b0 : 0x0b8) + 0xc * (n)) n 59 drivers/media/platform/qcom/camss/camss-csid.c #define CAMSS_CSID_TG_DT_n_CGG_2(v, n) \ n 60 drivers/media/platform/qcom/camss/camss-csid.c (((v) == CAMSS_8x16 ? 
0x0b4 : 0x0bc) + 0xc * (n)) n 17 drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c #define CAMSS_CSI_PHY_LNn_CFG2(n) (0x004 + 0x40 * (n)) n 18 drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c #define CAMSS_CSI_PHY_LNn_CFG3(n) (0x008 + 0x40 * (n)) n 23 drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c #define CAMSS_CSI_PHY_INTERRUPT_STATUSn(n) (0x18c + 0x4 * (n)) n 24 drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c #define CAMSS_CSI_PHY_INTERRUPT_MASKn(n) (0x1ac + 0x4 * (n)) n 25 drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c #define CAMSS_CSI_PHY_INTERRUPT_CLEARn(n) (0x1cc + 0x4 * (n)) n 17 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c #define CSIPHY_3PH_LNn_CFG1(n) (0x000 + 0x100 * (n)) n 19 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c #define CSIPHY_3PH_LNn_CFG2(n) (0x004 + 0x100 * (n)) n 21 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c #define CSIPHY_3PH_LNn_CFG3(n) (0x008 + 0x100 * (n)) n 22 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c #define CSIPHY_3PH_LNn_CFG4(n) (0x00c + 0x100 * (n)) n 24 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c #define CSIPHY_3PH_LNn_CFG5(n) (0x010 + 0x100 * (n)) n 27 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c #define CSIPHY_3PH_LNn_TEST_IMP(n) (0x01c + 0x100 * (n)) n 29 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c #define CSIPHY_3PH_LNn_MISC1(n) (0x028 + 0x100 * (n)) n 31 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c #define CSIPHY_3PH_LNn_CFG6(n) (0x02c + 0x100 * (n)) n 33 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c #define CSIPHY_3PH_LNn_CFG7(n) (0x030 + 0x100 * (n)) n 35 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c #define CSIPHY_3PH_LNn_CFG8(n) (0x034 + 0x100 * (n)) n 38 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c #define CSIPHY_3PH_LNn_CFG9(n) (0x038 + 0x100 * (n)) n 40 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c #define CSIPHY_3PH_LNn_CSI_LANE_CTRL15(n) (0x03c + 0x100 * (n)) n 43 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c #define CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(n) (0x800 + 0x4 * (n)) n 46 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c #define CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(n) (0x8b0 + 0x4 * (n)) n 76 drivers/media/platform/qcom/camss/camss-ispif.c #define ISPIF_VFE_m_PIX_INTF_n_CID_MASK(m, n) \ n 77 drivers/media/platform/qcom/camss/camss-ispif.c (0x254 + 0x200 * (m) + 0x4 * (n)) n 78 drivers/media/platform/qcom/camss/camss-ispif.c #define ISPIF_VFE_m_RDI_INTF_n_CID_MASK(m, n) \ n 79 drivers/media/platform/qcom/camss/camss-ispif.c (0x264 + 0x200 * (m) + 0x4 * (n)) n 81 drivers/media/platform/qcom/camss/camss-ispif.c #define ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0(m, n) \ n 82 drivers/media/platform/qcom/camss/camss-ispif.c (0x270 + 0x200 * (m) + 0x4 * (n)) n 83 drivers/media/platform/qcom/camss/camss-ispif.c #define ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_1(m, n) \ n 84 drivers/media/platform/qcom/camss/camss-ispif.c (0x27c + 0x200 * (m) + 0x4 * (n)) n 87 drivers/media/platform/qcom/camss/camss-ispif.c #define ISPIF_VFE_m_PIX_INTF_n_STATUS(m, n) \ n 88 drivers/media/platform/qcom/camss/camss-ispif.c (0x2c0 + 0x200 * (m) + 0x4 * (n)) n 89 drivers/media/platform/qcom/camss/camss-ispif.c #define ISPIF_VFE_m_RDI_INTF_n_STATUS(m, n) \ n 90 drivers/media/platform/qcom/camss/camss-ispif.c (0x2d0 + 0x200 * (m) + 0x4 * (n)) n 48 drivers/media/platform/qcom/camss/camss-vfe-4-1.c #define VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n) BIT((n) + 5) n 49 
drivers/media/platform/qcom/camss/camss-vfe-4-1.c #define VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(n) \ n 50 drivers/media/platform/qcom/camss/camss-vfe-4-1.c ((n) == VFE_LINE_PIX ? BIT(4) : VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n)) n 51 drivers/media/platform/qcom/camss/camss-vfe-4-1.c #define VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8) n 52 drivers/media/platform/qcom/camss/camss-vfe-4-1.c #define VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25) n 58 drivers/media/platform/qcom/camss/camss-vfe-4-1.c #define VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(n) BIT((n) + 9) n 59 drivers/media/platform/qcom/camss/camss-vfe-4-1.c #define VFE_0_IRQ_MASK_1_RDIn_SOF(n) BIT((n) + 29) n 66 drivers/media/platform/qcom/camss/camss-vfe-4-1.c #define VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n) BIT((n) + 5) n 67 drivers/media/platform/qcom/camss/camss-vfe-4-1.c #define VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(n) \ n 68 drivers/media/platform/qcom/camss/camss-vfe-4-1.c ((n) == VFE_LINE_PIX ? BIT(4) : VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n)) n 69 drivers/media/platform/qcom/camss/camss-vfe-4-1.c #define VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8) n 70 drivers/media/platform/qcom/camss/camss-vfe-4-1.c #define VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25) n 75 drivers/media/platform/qcom/camss/camss-vfe-4-1.c #define VFE_0_IRQ_STATUS_1_RDIn_SOF(n) BIT((n) + 29) n 94 drivers/media/platform/qcom/camss/camss-vfe-4-1.c #define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(n) (0x06c + 0x24 * (n)) n 97 drivers/media/platform/qcom/camss/camss-vfe-4-1.c #define VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(n) (0x070 + 0x24 * (n)) n 98 drivers/media/platform/qcom/camss/camss-vfe-4-1.c #define VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(n) (0x074 + 0x24 * (n)) n 99 drivers/media/platform/qcom/camss/camss-vfe-4-1.c #define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(n) (0x078 + 0x24 * (n)) n 103 drivers/media/platform/qcom/camss/camss-vfe-4-1.c #define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(n) (0x07c + 0x24 * (n)) n 105 drivers/media/platform/qcom/camss/camss-vfe-4-1.c #define VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(n) (0x080 + 0x24 * (n)) n 106 drivers/media/platform/qcom/camss/camss-vfe-4-1.c #define VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(n) (0x084 + 0x24 * (n)) n 107 drivers/media/platform/qcom/camss/camss-vfe-4-1.c #define VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(n) \ n 108 drivers/media/platform/qcom/camss/camss-vfe-4-1.c (0x088 + 0x24 * (n)) n 109 drivers/media/platform/qcom/camss/camss-vfe-4-1.c #define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(n) \ n 110 drivers/media/platform/qcom/camss/camss-vfe-4-1.c (0x08c + 0x24 * (n)) n 154 drivers/media/platform/qcom/camss/camss-vfe-4-1.c #define VFE_0_REG_UPDATE_RDIn(n) BIT(1 + (n)) n 155 drivers/media/platform/qcom/camss/camss-vfe-4-1.c #define VFE_0_REG_UPDATE_line_n(n) \ n 156 drivers/media/platform/qcom/camss/camss-vfe-4-1.c ((n) == VFE_LINE_PIX ? 1 : VFE_0_REG_UPDATE_RDIn(n)) n 53 drivers/media/platform/qcom/camss/camss-vfe-4-7.c #define VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n) BIT((n) + 5) n 54 drivers/media/platform/qcom/camss/camss-vfe-4-7.c #define VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(n) \ n 55 drivers/media/platform/qcom/camss/camss-vfe-4-7.c ((n) == VFE_LINE_PIX ? 
BIT(4) : VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n)) n 56 drivers/media/platform/qcom/camss/camss-vfe-4-7.c #define VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8) n 57 drivers/media/platform/qcom/camss/camss-vfe-4-7.c #define VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25) n 63 drivers/media/platform/qcom/camss/camss-vfe-4-7.c #define VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(n) BIT((n) + 9) n 64 drivers/media/platform/qcom/camss/camss-vfe-4-7.c #define VFE_0_IRQ_MASK_1_RDIn_SOF(n) BIT((n) + 29) n 71 drivers/media/platform/qcom/camss/camss-vfe-4-7.c #define VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n) BIT((n) + 5) n 72 drivers/media/platform/qcom/camss/camss-vfe-4-7.c #define VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(n) \ n 73 drivers/media/platform/qcom/camss/camss-vfe-4-7.c ((n) == VFE_LINE_PIX ? BIT(4) : VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n)) n 74 drivers/media/platform/qcom/camss/camss-vfe-4-7.c #define VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8) n 75 drivers/media/platform/qcom/camss/camss-vfe-4-7.c #define VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25) n 80 drivers/media/platform/qcom/camss/camss-vfe-4-7.c #define VFE_0_IRQ_STATUS_1_RDIn_SOF(n) BIT((n) + 29) n 102 drivers/media/platform/qcom/camss/camss-vfe-4-7.c #define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(n) (0x0a0 + 0x2c * (n)) n 104 drivers/media/platform/qcom/camss/camss-vfe-4-7.c #define VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(n) (0x0a4 + 0x2c * (n)) n 105 drivers/media/platform/qcom/camss/camss-vfe-4-7.c #define VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(n) (0x0ac + 0x2c * (n)) n 106 drivers/media/platform/qcom/camss/camss-vfe-4-7.c #define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(n) (0x0b4 + 0x2c * (n)) n 110 drivers/media/platform/qcom/camss/camss-vfe-4-7.c #define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(n) (0x0b8 + 0x2c * (n)) n 112 drivers/media/platform/qcom/camss/camss-vfe-4-7.c #define VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(n) (0x0bc + 0x2c * (n)) n 113 drivers/media/platform/qcom/camss/camss-vfe-4-7.c #define VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(n) (0x0c0 + 0x2c * (n)) n 114 drivers/media/platform/qcom/camss/camss-vfe-4-7.c #define VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(n) \ n 115 drivers/media/platform/qcom/camss/camss-vfe-4-7.c (0x0c4 + 0x2c * (n)) n 116 drivers/media/platform/qcom/camss/camss-vfe-4-7.c #define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(n) \ n 117 drivers/media/platform/qcom/camss/camss-vfe-4-7.c (0x0c8 + 0x2c * (n)) n 181 drivers/media/platform/qcom/camss/camss-vfe-4-7.c #define VFE_0_REG_UPDATE_RDIn(n) BIT(1 + (n)) n 182 drivers/media/platform/qcom/camss/camss-vfe-4-7.c #define VFE_0_REG_UPDATE_line_n(n) \ n 183 drivers/media/platform/qcom/camss/camss-vfe-4-7.c ((n) == VFE_LINE_PIX ? 
1 : VFE_0_REG_UPDATE_RDIn(n)) n 107 drivers/media/platform/qcom/venus/helpers.c struct intbuf *buf, *n; n 109 drivers/media/platform/qcom/venus/helpers.c list_for_each_entry_safe(buf, n, &inst->dpbbufs, list) { n 245 drivers/media/platform/qcom/venus/helpers.c struct intbuf *buf, *n; n 248 drivers/media/platform/qcom/venus/helpers.c list_for_each_entry_safe(buf, n, &inst->internalbufs, list) { n 320 drivers/media/platform/qcom/venus/helpers.c struct intbuf *buf, *n; n 323 drivers/media/platform/qcom/venus/helpers.c list_for_each_entry_safe(buf, n, &inst->internalbufs, list) { n 581 drivers/media/platform/qcom/venus/helpers.c struct venus_buffer *buf, *n; n 588 drivers/media/platform/qcom/venus/helpers.c list_for_each_entry_safe(buf, n, &inst->registeredbufs, reg_list) { n 958 drivers/media/platform/qcom/venus/helpers.c struct venus_buffer *buf, *n; n 969 drivers/media/platform/qcom/venus/helpers.c list_for_each_entry_safe(buf, n, &inst->delayed_process, ref_list) { n 1161 drivers/media/platform/qcom/venus/helpers.c struct v4l2_m2m_buffer *buf, *n; n 1164 drivers/media/platform/qcom/venus/helpers.c v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buf, n) { n 1179 drivers/media/platform/qcom/venus/helpers.c struct v4l2_m2m_buffer *buf, *n; n 1182 drivers/media/platform/qcom/venus/helpers.c v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) { n 1233 drivers/media/platform/qcom/venus/helpers.c struct v4l2_m2m_buffer *buf, *n; n 1238 drivers/media/platform/qcom/venus/helpers.c v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buf, n) { n 1244 drivers/media/platform/qcom/venus/helpers.c v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) { n 61 drivers/media/platform/rcar-vin/rcar-csi2.c #define VCDT_SEL_VC(n) (((n) & 0x3) << 8) n 63 drivers/media/platform/rcar-vin/rcar-csi2.c #define VCDT_SEL_DT(n) (((n) & 0x3f) << 0) n 70 drivers/media/platform/rcar-vin/rcar-csi2.c #define FLD_FLD_NUM(n) (((n) & 0xff) << 16) n 71 drivers/media/platform/rcar-vin/rcar-csi2.c #define FLD_DET_SEL(n) (((n) & 0x3) << 4) n 117 drivers/media/platform/rcar-vin/rcar-csi2.c #define LSWAP_L3SEL(n) (((n) & 0x3) << 6) n 118 drivers/media/platform/rcar-vin/rcar-csi2.c #define LSWAP_L2SEL(n) (((n) & 0x3) << 4) n 119 drivers/media/platform/rcar-vin/rcar-csi2.c #define LSWAP_L1SEL(n) (((n) & 0x3) << 2) n 120 drivers/media/platform/rcar-vin/rcar-csi2.c #define LSWAP_L0SEL(n) (((n) & 0x3) << 0) n 125 drivers/media/platform/rcar-vin/rcar-csi2.c #define PHTW_TESTDIN_DATA(n) (((n & 0xff)) << 16) n 127 drivers/media/platform/rcar-vin/rcar-csi2.c #define PHTW_TESTDIN_CODE(n) ((n & 0xff)) n 201 drivers/media/platform/rcar-vin/rcar-csi2.c #define PHYPLL_HSFREQRANGE(n) ((n) << 16) n 309 drivers/media/platform/rcar-vin/rcar-csi2.c #define CSI0CLKFREQRANGE(n) ((n & 0x3f) << 16) n 381 drivers/media/platform/rcar-vin/rcar-csi2.c static inline struct rcar_csi2 *notifier_to_csi2(struct v4l2_async_notifier *n) n 383 drivers/media/platform/rcar-vin/rcar-csi2.c return container_of(n, struct rcar_csi2, notifier); n 114 drivers/media/platform/rcar-vin/rcar-dma.c #define VNDMR_A8BIT(n) (((n) & 0xff) << 24) n 127 drivers/media/platform/rcar-vin/rcar-dma.c #define VNDMR2_VLV(n) ((n & 0xf) << 12) n 132 drivers/media/platform/rcar-vin/rcar-dma.c #define VNCSI_IFMD_CSI_CHSEL(n) (((n) & 0xf) << 0) n 108 drivers/media/platform/rcar_drif.c #define RCAR_DRIF_MDR_GRPCNT(n) (((n) - 1) << 30) n 109 drivers/media/platform/rcar_drif.c #define RCAR_DRIF_MDR_BITLEN(n) (((n) - 1) << 24) n 110 drivers/media/platform/rcar_drif.c #define RCAR_DRIF_MDR_WDCNT(n) (((n) - 1) << 16) n 185 
drivers/media/platform/rcar_jpu.c #define JCQTBL(n) (0x10000 + (n) * 0x40) /* quantization tables regs */ n 186 drivers/media/platform/rcar_jpu.c #define JCHTBD(n) (0x10100 + (n) * 0x100) /* Huffman table DC regs */ n 187 drivers/media/platform/rcar_jpu.c #define JCHTBA(n) (0x10120 + (n) * 0x100) /* Huffman table AC regs */ n 71 drivers/media/platform/s3c-camif/camif-regs.h #define S3C_CAMIF_REG_CIYSA(id, n) (0x18 + (id) * 0x54 + (n) * 4) n 73 drivers/media/platform/s3c-camif/camif-regs.h #define S3C_CAMIF_REG_CICBSA(id, n) (0x28 + (id) * 0x54 + (n) * 4) n 75 drivers/media/platform/s3c-camif/camif-regs.h #define S3C_CAMIF_REG_CICRSA(id, n) (0x38 + (id) * 0x54 + (n) * 4) n 25 drivers/media/platform/s5p-g2d/g2d-hw.c u32 n; n 30 drivers/media/platform/s5p-g2d/g2d-hw.c n = f->o_height & 0xFFF; n 31 drivers/media/platform/s5p-g2d/g2d-hw.c n <<= 16; n 32 drivers/media/platform/s5p-g2d/g2d-hw.c n |= f->o_width & 0xFFF; n 33 drivers/media/platform/s5p-g2d/g2d-hw.c w(n, SRC_LEFT_TOP_REG); n 35 drivers/media/platform/s5p-g2d/g2d-hw.c n = f->bottom & 0xFFF; n 36 drivers/media/platform/s5p-g2d/g2d-hw.c n <<= 16; n 37 drivers/media/platform/s5p-g2d/g2d-hw.c n |= f->right & 0xFFF; n 38 drivers/media/platform/s5p-g2d/g2d-hw.c w(n, SRC_RIGHT_BOTTOM_REG); n 50 drivers/media/platform/s5p-g2d/g2d-hw.c u32 n; n 55 drivers/media/platform/s5p-g2d/g2d-hw.c n = f->o_height & 0xFFF; n 56 drivers/media/platform/s5p-g2d/g2d-hw.c n <<= 16; n 57 drivers/media/platform/s5p-g2d/g2d-hw.c n |= f->o_width & 0xFFF; n 58 drivers/media/platform/s5p-g2d/g2d-hw.c w(n, DST_LEFT_TOP_REG); n 60 drivers/media/platform/s5p-g2d/g2d-hw.c n = f->bottom & 0xFFF; n 61 drivers/media/platform/s5p-g2d/g2d-hw.c n <<= 16; n 62 drivers/media/platform/s5p-g2d/g2d-hw.c n |= f->right & 0xFFF; n 63 drivers/media/platform/s5p-g2d/g2d-hw.c w(n, DST_RIGHT_BOTTOM_REG); n 807 drivers/media/platform/s5p-jpeg/jpeg-core.c int c, i, n, j; n 809 drivers/media/platform/s5p-jpeg/jpeg-core.c for (j = 0; j < ctx->out_q.dht.n; ++j) { n 824 drivers/media/platform/s5p-jpeg/jpeg-core.c n = 0; n 836 drivers/media/platform/s5p-jpeg/jpeg-core.c n += c; n 839 drivers/media/platform/s5p-jpeg/jpeg-core.c for (i = 0; i < n; ++i) { n 899 drivers/media/platform/s5p-jpeg/jpeg-core.c for (j = 0; j < ctx->out_q.dqt.n; ++j) { n 1227 drivers/media/platform/s5p-jpeg/jpeg-core.c result->dht.n = n_dht; n 1232 drivers/media/platform/s5p-jpeg/jpeg-core.c result->dqt.n = n_dqt; n 1266 drivers/media/platform/s5p-jpeg/jpeg-core.c struct s5p_jpeg_fmt *sjpeg_formats, int n, n 1272 drivers/media/platform/s5p-jpeg/jpeg-core.c for (i = 0; i < n; ++i) { n 1286 drivers/media/platform/s5p-jpeg/jpeg-core.c if (i >= n) n 180 drivers/media/platform/s5p-jpeg/jpeg-core.h u32 n; n 182 drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c void exynos3250_jpeg_qtbl(void __iomem *regs, unsigned int t, unsigned int n) n 188 drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c reg |= (n << EXYNOS3250_QT_NUM_SHIFT(t)) & n 28 drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.h void exynos3250_jpeg_qtbl(void __iomem *regs, unsigned int t, unsigned int n); n 255 drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c void exynos4_jpeg_set_dec_components(void __iomem *base, int n) n 261 drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c reg |= EXYNOS4_NF(n); n 32 drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.h void exynos4_jpeg_set_dec_components(void __iomem *base, int n); n 101 drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c void s5p_jpeg_qtbl(void __iomem *regs, unsigned int t, unsigned int n) n 107 
drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c reg |= (n << S5P_QT_NUMt_SHIFT(t)) & S5P_QT_NUMt_MASK(t); n 33 drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h void s5p_jpeg_qtbl(void __iomem *regs, unsigned int t, unsigned int n); n 99 drivers/media/platform/s5p-jpeg/jpeg-regs.h #define S5P_JPG_COEF(n) (0x5c + (((n) - 1) << 2)) n 155 drivers/media/platform/s5p-jpeg/jpeg-regs.h #define S5P_JPG_QTBL_CONTENT(n) (0x400 + (n) * 0x100) n 158 drivers/media/platform/s5p-jpeg/jpeg-regs.h #define S5P_JPG_HDCTBL(n) (0x800 + (n) * 0x400) n 161 drivers/media/platform/s5p-jpeg/jpeg-regs.h #define S5P_JPG_HDCTBLG(n) (0x840 + (n) * 0x400) n 164 drivers/media/platform/s5p-jpeg/jpeg-regs.h #define S5P_JPG_HACTBL(n) (0x880 + (n) * 0x400) n 167 drivers/media/platform/s5p-jpeg/jpeg-regs.h #define S5P_JPG_HACTBLG(n) (0x8c0 + (n) * 0x400) n 322 drivers/media/platform/s5p-jpeg/jpeg-regs.h #define EXYNOS4_Q_TBL_COMP(c, n) ((n) << (((c) - 1) << 1)) n 339 drivers/media/platform/s5p-jpeg/jpeg-regs.h #define EXYNOS4_HUFF_TBL_COMP(c, n) ((n) << ((((c) - 1) << 1) + 6)) n 374 drivers/media/platform/s5p-jpeg/jpeg-regs.h #define EXYNOS4_QTBL_CONTENT(n) (0x100 + (n) * 0x40) n 522 drivers/media/platform/s5p-jpeg/jpeg-regs.h #define EXYNOS3250_JPG_COEF(n) (0x128 + (((n) - 1) << 2)) n 460 drivers/media/platform/sh_veu.c int i, n, dflt; n 467 drivers/media/platform/sh_veu.c n = ARRAY_SIZE(sh_veu_fmt_out); n 473 drivers/media/platform/sh_veu.c n = ARRAY_SIZE(sh_veu_fmt_in); n 478 drivers/media/platform/sh_veu.c for (i = 0; i < n; i++) n 202 drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.c int n; n 205 drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.c for (n = 0; n < fei->tsin_count; n++) { n 207 drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.c tsin = fei->channel_data[n]; n 234 drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.c int n, res; n 240 drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.c for (n = 0; n < fei->tsin_count; n++) { n 241 drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.c tsin = fei->channel_data[n]; n 243 drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.c res = c8sectpfe_frontend_attach(&frontend, *c8sectpfe, tsin, n); n 85 drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c int pos, num_packets, n, size; n 117 drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c for (n = 0; n < num_packets; n++) { n 270 drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h #define DMA_ERROR_RECORD(n) ((n*4) + DMA_ERRREC_BASE + 0x4) n 279 drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h #define PIDF_BASE(n) ((n*4) + PIDF_OFFSET) n 178 drivers/media/platform/stm32/stm32-dcmi.c static inline struct stm32_dcmi *notifier_to_dcmi(struct v4l2_async_notifier *n) n 180 drivers/media/platform/stm32/stm32-dcmi.c return container_of(n, struct stm32_dcmi, notifier); n 341 drivers/media/platform/ti-vpe/cal.c static inline struct cal_ctx *notifier_to_ctx(struct v4l2_async_notifier *n) n 343 drivers/media/platform/ti-vpe/cal.c return container_of(n, struct cal_ctx, notifier); n 41 drivers/media/platform/vivid/vivid-vbi-gen.c unsigned n = ((bit + 1) * sampling_rate) / rate; n 43 drivers/media/platform/vivid/vivid-vbi-gen.c while (i < n) n 61 drivers/media/platform/vivid/vivid-vbi-gen.c unsigned n = ((bit + 1) * sampling_rate) / rate; n 64 drivers/media/platform/vivid/vivid-vbi-gen.c while (i < n) n 102 drivers/media/platform/vivid/vivid-vbi-gen.c unsigned n = ((bit + 1) * sampling_rate) / rate; n 104 drivers/media/platform/vivid/vivid-vbi-gen.c while (i < n) n 21 
drivers/media/platform/vsp1/vsp1_brx.h #define BRX_PAD_SINK(n) (n) n 46 drivers/media/platform/vsp1/vsp1_hgt.c unsigned int n; n 59 drivers/media/platform/vsp1/vsp1_hgt.c for (n = 0; n < 32; ++n) n 60 drivers/media/platform/vsp1/vsp1_hgt.c *data++ = vsp1_hgt_read(hgt, VI6_HGT_HISTO(m, n)); n 17 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_CMD(n) (0x0000 + (n) * 4) n 28 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_SRESET_SRTS(n) BIT(n) n 31 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_STATUS_FLD_STD(n) BIT((n) + 28) n 32 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_STATUS_SYS_ACT(n) BIT((n) + 8) n 34 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_WPF_IRQ_ENB(n) (0x0048 + (n) * 12) n 38 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_WPF_IRQ_STA(n) (0x004c + (n) * 12) n 42 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_DISP_IRQ_ENB(n) (0x0078 + (n) * 60) n 45 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_DISP_IRQ_ENB_LNEE(n) BIT(n) n 47 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_DISP_IRQ_STA(n) (0x007c + (n) * 60) n 50 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_DISP_IRQ_STA_LNE(n) BIT(n) n 52 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_WPF_LINE_COUNT(n) (0x0084 + (n) * 4) n 69 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_DL_HDR_ADDR(n) (0x0104 + (n) * 4) n 76 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_DL_EXT_CTRL(n) (0x011c + (n) * 36) n 246 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_WPF_SRCRPF_RPF_ACT_DIS(n) (0 << ((n) * 2)) n 247 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_WPF_SRCRPF_RPF_ACT_SUB(n) (1 << ((n) * 2)) n 248 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_WPF_SRCRPF_RPF_ACT_MST(n) (2 << ((n) * 2)) n 249 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_WPF_SRCRPF_RPF_ACT_MASK(n) (3 << ((n) * 2)) n 310 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_WPF_WRBCK_CTRL(n) (0x1034 + (n) * 0x100) n 336 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_UIF_DISCOM_DOCMMDR_INTHRH(n) ((n) << 16) n 339 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_UIF_DISCOM_DOCMPMR_CMPDFF(n) ((n) << 17) n 340 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_UIF_DISCOM_DOCMPMR_CMPDFA(n) ((n) << 8) n 342 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_UIF_DISCOM_DOCMPMR_SEL(n) ((n) << 0) n 355 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_DPR_RPF_ROUTE(n) (0x2000 + (n) * 4) n 357 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_DPR_WPF_FPORCH(n) (0x2014 + (n) * 4) n 361 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_DPR_UDS_ROUTE(n) (0x2028 + (n) * 4) n 383 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_DPR_UIF_ROUTE(n) (0x2074 + (n) * 4) n 385 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_DPR_NODE_RPF(n) (n) n 386 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_DPR_NODE_UIF(n) (12 + (n)) n 388 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_DPR_NODE_UDS(n) (17 + (n)) n 390 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_DPR_NODE_BRU_IN(n) (((n) <= 3) ? 
23 + (n) : 49) n 395 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_DPR_NODE_BRS_IN(n) (38 + (n)) n 397 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_DPR_NODE_WPF(n) (56 + (n)) n 567 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_BRU_INCTRL_DnON (1 << (16 + (n))) n 568 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_BRU_INCTRL_DITHn_OFF (0 << ((n) * 4)) n 569 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_BRU_INCTRL_DITHn_18BPP (1 << ((n) * 4)) n 570 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_BRU_INCTRL_DITHn_16BPP (2 << ((n) * 4)) n 571 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_BRU_INCTRL_DITHn_15BPP (3 << ((n) * 4)) n 572 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_BRU_INCTRL_DITHn_12BPP (4 << ((n) * 4)) n 573 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_BRU_INCTRL_DITHn_8BPP (5 << ((n) * 4)) n 574 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_BRU_INCTRL_DITHn_MASK (7 << ((n) * 4)) n 575 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_BRU_INCTRL_DITHn_SHIFT ((n) * 4) n 599 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_BRU_CTRL(n) (0x0010 + (n) * 8 + ((n) <= 3 ? 0 : 4)) n 601 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_BRU_CTRL_DSTSEL_BRUIN(n) (((n) <= 3 ? (n) : (n)+1) << 20) n 604 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_BRU_CTRL_SRCSEL_BRUIN(n) (((n) <= 3 ? (n) : (n)+1) << 16) n 612 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_BRU_BLD(n) (0x0014 + (n) * 8 + ((n) <= 3 ? 0 : 4)) n 646 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_BRU_ROP_DSTSEL_BRUIN(n) (((n) <= 3 ? (n) : (n)+1) << 20) n 673 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_HGO_LBn_H(n) (0x3010 + (n) * 8) n 674 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_HGO_LBn_V(n) (0x3014 + (n) * 8) n 675 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_HGO_R_HISTO(n) (0x3030 + (n) * 4) n 679 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_HGO_G_HISTO(n) (0x3140 + (n) * 4) n 683 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_HGO_B_HISTO(n) (0x3250 + (n) * 4) n 705 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_HGT_HUE_AREA(n) (0x340c + (n) * 4) n 709 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_HGT_LBn_H(n) (0x3438 + (n) * 8) n 710 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_HGT_LBn_V(n) (0x342c + (n) * 8) n 711 drivers/media/platform/vsp1/vsp1_regs.h #define VI6_HGT_HISTO(m, n) (0x3450 + (m) * 128 + (n) * 4) n 42 drivers/media/platform/xilinx/xilinx-vtc.c #define XVTC_STATUS_FSYNC(n) ((n) << 16) n 57 drivers/media/platform/xilinx/xilinx-vtc.c #define XVTC_IRQ_ENABLE_FSYNC(n) ((n) << 16) n 136 drivers/media/platform/xilinx/xilinx-vtc.c #define XVTC_FRAME_SYNC_CONFIG(n) (0x0100 + 4 * (n)) n 73 drivers/media/radio/radio-trust.c static void write_i2c(struct trust *tr, int n, ...) 
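Many of the header entries listed above (for example VI6_DPR_RPF_ROUTE(n), VI6_HGT_HISTO(m, n), VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(n)) follow the same convention: a macro takes an instance index n and expands to base + stride * (n), so a single definition addresses a whole bank of identical register blocks. The short standalone sketch below only illustrates that pattern; the DEMO_CTX_* names and the 0x074/0x20 offsets are hypothetical and are not taken from any of the drivers listed here.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical register bank: several contexts, each 0x20 bytes apart,
 * with a CTRL register at offset 0x074 inside each context. */
#define DEMO_CTX_BASE        0x074
#define DEMO_CTX_STRIDE      0x20
#define DEMO_CTX_CTRL(n)     (DEMO_CTX_BASE + DEMO_CTX_STRIDE * (n))

int main(void)
{
	/* Print the byte offset of the CTRL register for each context.
	 * Parenthesising (n) in the macro keeps expressions such as
	 * DEMO_CTX_CTRL(i + 1) expanding correctly. */
	for (int n = 0; n < 4; n++)
		printf("context %d: CTRL register at offset 0x%03x\n",
		       n, (unsigned int)DEMO_CTX_CTRL(n));
	return 0;
}

The same indexed-offset idea is what lets the drivers above program ping/pong address pairs, per-lane PHY registers, or per-master bus write configs with one helper loop instead of one macro per instance.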
n 78 drivers/media/radio/radio-trust.c va_start(args, n); n 88 drivers/media/radio/radio-trust.c for (; n; n--) { n 907 drivers/media/radio/si4713/si4713.c u8 p = 0, a = 0, n = 0; n 909 drivers/media/radio/si4713/si4713.c rval = si4713_tx_tune_status(sdev, 0x00, &f, &p, &a, &n); n 919 drivers/media/radio/si4713/si4713.c sdev->tune_rnl = n; n 1356 drivers/media/radio/si4713/si4713.c u8 p, a, n; n 1358 drivers/media/radio/si4713/si4713.c rval = si4713_tx_tune_status(sdev, 0x00, &freq, &p, &a, &n); n 956 drivers/media/rc/ene_ir.c static int ene_transmit(struct rc_dev *rdev, unsigned *buf, unsigned n) n 962 drivers/media/rc/ene_ir.c dev->tx_len = n; n 988 drivers/media/rc/ene_ir.c return n; n 179 drivers/media/rc/ir-rcmm-decoder.c unsigned int n, u32 data) n 188 drivers/media/rc/ir-rcmm-decoder.c for (i = n - 2; i >= 0; i -= 2) { n 76 drivers/media/rc/ir-xmp-decoder.c u32 *n; n 86 drivers/media/rc/ir-xmp-decoder.c n = data->durations; n 92 drivers/media/rc/ir-xmp-decoder.c divider = (n[3] - XMP_NIBBLE_PREFIX) / 15 - 2000; n 102 drivers/media/rc/ir-xmp-decoder.c n[i] = (n[i] - XMP_NIBBLE_PREFIX) / divider; n 103 drivers/media/rc/ir-xmp-decoder.c sum1 = (15 + n[0] + n[1] + n[2] + n[3] + n 104 drivers/media/rc/ir-xmp-decoder.c n[4] + n[5] + n[6] + n[7]) % 16; n 105 drivers/media/rc/ir-xmp-decoder.c sum2 = (15 + n[8] + n[9] + n[10] + n[11] + n 106 drivers/media/rc/ir-xmp-decoder.c n[12] + n[13] + n[14] + n[15]) % 16; n 115 drivers/media/rc/ir-xmp-decoder.c subaddr = n[0] << 4 | n[2]; n 116 drivers/media/rc/ir-xmp-decoder.c subaddr2 = n[8] << 4 | n[11]; n 117 drivers/media/rc/ir-xmp-decoder.c oem = n[4] << 4 | n[5]; n 118 drivers/media/rc/ir-xmp-decoder.c addr = n[6] << 4 | n[7]; n 119 drivers/media/rc/ir-xmp-decoder.c toggle = n[10]; n 120 drivers/media/rc/ir-xmp-decoder.c obc1 = n[12] << 4 | n[13]; n 121 drivers/media/rc/ir-xmp-decoder.c obc2 = n[14] << 4 | n[15]; n 371 drivers/media/rc/ite-cir.c static int ite_tx_ir(struct rc_dev *rcdev, unsigned *txbuf, unsigned n) n 378 drivers/media/rc/ite-cir.c int ret = n; n 410 drivers/media/rc/ite-cir.c while (n > 0 && dev->in_use) { n 414 drivers/media/rc/ite-cir.c n--; n 227 drivers/media/rc/lirc_dev.c size_t n, loff_t *ppos) n 257 drivers/media/rc/lirc_dev.c if (n != sizeof(scan)) { n 314 drivers/media/rc/lirc_dev.c if (n < sizeof(unsigned int) || n % sizeof(unsigned int)) { n 319 drivers/media/rc/lirc_dev.c count = n / sizeof(unsigned int); n 325 drivers/media/rc/lirc_dev.c txbuf = memdup_user(buf, n); n 363 drivers/media/rc/lirc_dev.c return n; n 210 drivers/media/rc/rc-core-priv.h unsigned int n, u64 data); n 262 drivers/media/rc/rc-core-priv.h unsigned int n, u64 data); n 282 drivers/media/rc/rc-core-priv.h unsigned int n, u64 data); n 320 drivers/media/rc/rc-ir-raw.c unsigned int n, u64 data) n 326 drivers/media/rc/rc-ir-raw.c i = BIT_ULL(n - 1); n 344 drivers/media/rc/rc-ir-raw.c while (n && i > 0) { n 402 drivers/media/rc/rc-ir-raw.c unsigned int n, u64 data) n 416 drivers/media/rc/rc-ir-raw.c for (i = n - 1; i >= 0; --i) { n 425 drivers/media/rc/rc-ir-raw.c for (i = 0; i < n; ++i, data >>= 1) { n 461 drivers/media/rc/rc-ir-raw.c unsigned int n, u64 data) n 473 drivers/media/rc/rc-ir-raw.c for (i = n - 1; i >= 0; --i) { n 484 drivers/media/rc/rc-ir-raw.c for (i = 0; i < n; ++i, data >>= 1) { n 38 drivers/media/rc/tango-ir.c #define NEC_CAP(n) ((n) << 24) n 39 drivers/media/rc/tango-ir.c #define GPIO_SEL(n) ((n) << 16) n 215 drivers/media/tuners/it913x.c u8 u8tmp, n, l_band, lna_band; n 227 drivers/media/tuners/it913x.c n = 0; n 230 
drivers/media/tuners/it913x.c n = 1; n 233 drivers/media/tuners/it913x.c n = 2; n 236 drivers/media/tuners/it913x.c n = 3; n 239 drivers/media/tuners/it913x.c n = 4; n 242 drivers/media/tuners/it913x.c n = 5; n 245 drivers/media/tuners/it913x.c n = 6; n 248 drivers/media/tuners/it913x.c n = 7; n 251 drivers/media/tuners/it913x.c n = 0; n 280 drivers/media/tuners/it913x.c pre_lo_freq += (u32) n << 13; n 354 drivers/media/tuners/mt2063.c #define ceil(n, d) (((n) < 0) ? (-((-(n))/(d))) : (n)/(d) + ((n)%(d) != 0)) n 355 drivers/media/tuners/mt2063.c #define floor(n, d) (((n) < 0) ? (-((-(n))/(d))) - ((n)%(d) != 0) : (n)/(d)) n 684 drivers/media/tuners/mt2063.c u32 n, n0; n 716 drivers/media/tuners/mt2063.c for (n = n0; n <= pAS_Info->maxH1; ++n) { n 717 drivers/media/tuners/mt2063.c md = (n * ((f_LO1 + hgds) / gd_Scale) - n 724 drivers/media/tuners/mt2063.c ma = (n * ((f_LO1 + hgds) / gd_Scale) + n 731 drivers/media/tuners/mt2063.c mc = (n * ((f_LO1 + hgcs) / gc_Scale) - n 734 drivers/media/tuners/mt2063.c f_nsLO1 = (s32) (n * (f_LO1 / gc_Scale)); n 738 drivers/media/tuners/mt2063.c n * (f_LO1 % gc_Scale) - mc * (f_LO2 % gc_Scale); n 740 drivers/media/tuners/mt2063.c *fp = ((f_Spur - (s32) c) / (mc - n)) + 1; n 741 drivers/media/tuners/mt2063.c *fm = (((s32) d - f_Spur) / (mc - n)) + 1; n 746 drivers/media/tuners/mt2063.c me = (n * ((f_LO1 + hgfs) / gf_Scale) + n 748 drivers/media/tuners/mt2063.c mf = (n * ((f_LO1 + hgfs) / gf_Scale) - n 751 drivers/media/tuners/mt2063.c f_nsLO1 = n * (f_LO1 / gf_Scale); n 755 drivers/media/tuners/mt2063.c n * (f_LO1 % gf_Scale) - me * (f_LO2 % gf_Scale); n 757 drivers/media/tuners/mt2063.c *fp = ((f_Spur + (s32) f) / (me - n)) + 1; n 758 drivers/media/tuners/mt2063.c *fm = (((s32) f - f_Spur) / (me - n)) + 1; n 762 drivers/media/tuners/mt2063.c mb = (n * ((f_LO1 + hgcs) / gc_Scale) + n 765 drivers/media/tuners/mt2063.c f_nsLO1 = n * (f_LO1 / gc_Scale); n 769 drivers/media/tuners/mt2063.c n * (f_LO1 % gc_Scale) - ma * (f_LO2 % gc_Scale); n 771 drivers/media/tuners/mt2063.c *fp = (((s32) d + f_Spur) / (ma - n)) + 1; n 772 drivers/media/tuners/mt2063.c *fm = (-(f_Spur + (s32) c) / (ma - n)) + 1; n 1892 drivers/media/tuners/r820t.c u8 n_ring, n; n 1901 drivers/media/tuners/r820t.c for (n = 0; n < 16; n++) { n 1902 drivers/media/tuners/r820t.c if ((16 + n) * 8 * ring_ref >= 3100000) { n 1903 drivers/media/tuners/r820t.c n_ring = n; n 307 drivers/media/tuners/tuner-xc2028.c int n, n_array; n 343 drivers/media/tuners/tuner-xc2028.c n = -1; n 349 drivers/media/tuners/tuner-xc2028.c n++; n 350 drivers/media/tuners/tuner-xc2028.c if (n >= n_array) { n 384 drivers/media/tuners/tuner-xc2028.c priv->firm[n].ptr = kmemdup(p, size, GFP_KERNEL); n 385 drivers/media/tuners/tuner-xc2028.c if (priv->firm[n].ptr == NULL) { n 397 drivers/media/tuners/tuner-xc2028.c priv->firm[n].type = type; n 398 drivers/media/tuners/tuner-xc2028.c priv->firm[n].id = id; n 399 drivers/media/tuners/tuner-xc2028.c priv->firm[n].size = size; n 400 drivers/media/tuners/tuner-xc2028.c priv->firm[n].int_freq = int_freq; n 405 drivers/media/tuners/tuner-xc2028.c if (n + 1 != priv->firm_size) { n 711 drivers/media/tuners/xc4000.c int n, n_array; n 776 drivers/media/tuners/xc4000.c n = -1; n 782 drivers/media/tuners/xc4000.c n++; n 783 drivers/media/tuners/xc4000.c if (n >= n_array) { n 815 drivers/media/tuners/xc4000.c priv->firm[n].ptr = kmemdup(p, size, GFP_KERNEL); n 816 drivers/media/tuners/xc4000.c if (priv->firm[n].ptr == NULL) { n 829 drivers/media/tuners/xc4000.c priv->firm[n].type = type; n 830 
drivers/media/tuners/xc4000.c priv->firm[n].id = id; n 831 drivers/media/tuners/xc4000.c priv->firm[n].size = size; n 832 drivers/media/tuners/xc4000.c priv->firm[n].int_freq = int_freq; n 837 drivers/media/tuners/xc4000.c if (n + 1 != priv->firm_size) { n 226 drivers/media/usb/cpia2/cpia2_usb.c int n = urb->iso_frame_desc[i].actual_length; n 260 drivers/media/usb/cpia2/cpia2_usb.c i, n, st); n 266 drivers/media/usb/cpia2/cpia2_usb.c if(n<=2) n 270 drivers/media/usb/cpia2/cpia2_usb.c for(j=0; j<n-2; ++j) n 275 drivers/media/usb/cpia2/cpia2_usb.c i, n, (int)checksum, (int)iso_checksum); n 281 drivers/media/usb/cpia2/cpia2_usb.c n -= 2; n 296 drivers/media/usb/cpia2/cpia2_usb.c if (cam->frame_size < cam->workbuff->length + n) { n 298 drivers/media/usb/cpia2/cpia2_usb.c cam->workbuff->length, n); n 326 drivers/media/usb/cpia2/cpia2_usb.c cdata+data_offset, n-data_offset); n 327 drivers/media/usb/cpia2/cpia2_usb.c cam->workbuff->length += n-data_offset; n 330 drivers/media/usb/cpia2/cpia2_usb.c cdata, n); n 331 drivers/media/usb/cpia2/cpia2_usb.c cam->workbuff->length += n; n 1147 drivers/media/usb/cx231xx/cx231xx-video.c unsigned int n; n 1150 drivers/media/usb/cx231xx/cx231xx-video.c n = i->index; n 1151 drivers/media/usb/cx231xx/cx231xx-video.c if (n >= MAX_CX231XX_INPUT) n 1153 drivers/media/usb/cx231xx/cx231xx-video.c if (0 == INPUT(n)->type) n 1156 drivers/media/usb/cx231xx/cx231xx-video.c i->index = n; n 1159 drivers/media/usb/cx231xx/cx231xx-video.c strscpy(i->name, iname[INPUT(n)->type], sizeof(i->name)); n 1161 drivers/media/usb/cx231xx/cx231xx-video.c if ((CX231XX_VMUX_TELEVISION == INPUT(n)->type) || n 1162 drivers/media/usb/cx231xx/cx231xx-video.c (CX231XX_VMUX_CABLE == INPUT(n)->type)) n 1168 drivers/media/usb/cx231xx/cx231xx-video.c if (n == dev->video_input) { n 1257 drivers/media/usb/dvb-usb/cxusb.c int n; n 1275 drivers/media/usb/dvb-usb/cxusb.c for (n = 0; n < 5; n++) { n 3942 drivers/media/usb/dvb-usb/dib0700_devices.c #define DIB0700_NUM_FRONTENDS(n) \ n 3943 drivers/media/usb/dvb-usb/dib0700_devices.c .num_frontends = n, \ n 29 drivers/media/usb/dvb-usb/dvb-usb-init.c int ret, n, o; n 31 drivers/media/usb/dvb-usb/dvb-usb-init.c for (n = 0; n < d->props.num_adapters; n++) { n 32 drivers/media/usb/dvb-usb/dvb-usb-init.c adap = &d->adapter[n]; n 34 drivers/media/usb/dvb-usb/dvb-usb-init.c adap->id = n; n 36 drivers/media/usb/dvb-usb/dvb-usb-init.c memcpy(&adap->props, &d->props.adapter[n], sizeof(struct dvb_usb_adapter_properties)); n 68 drivers/media/usb/dvb-usb/dvb-usb-init.c err("no memory for priv for adapter %d fe %d.", n, o); n 77 drivers/media/usb/dvb-usb/dvb-usb-init.c err("no memory for priv for adapter %d.", n); n 110 drivers/media/usb/dvb-usb/dvb-usb-init.c int n; n 112 drivers/media/usb/dvb-usb/dvb-usb-init.c for (n = 0; n < d->num_adapters_initialized; n++) { n 113 drivers/media/usb/dvb-usb/dvb-usb-init.c dvb_usb_adapter_frontend_exit(&d->adapter[n]); n 114 drivers/media/usb/dvb-usb/dvb-usb-init.c dvb_usb_adapter_dvb_exit(&d->adapter[n]); n 115 drivers/media/usb/dvb-usb/dvb-usb-init.c dvb_usb_adapter_stream_exit(&d->adapter[n]); n 116 drivers/media/usb/dvb-usb/dvb-usb-init.c kfree(d->adapter[n].priv); n 1662 drivers/media/usb/em28xx/em28xx-video.c unsigned int n; n 1665 drivers/media/usb/em28xx/em28xx-video.c n = i->index; n 1666 drivers/media/usb/em28xx/em28xx-video.c if (n >= MAX_EM28XX_INPUT) n 1668 drivers/media/usb/em28xx/em28xx-video.c if (!INPUT(n)->type) n 1673 drivers/media/usb/em28xx/em28xx-video.c strscpy(i->name, iname[INPUT(n)->type], 
sizeof(i->name)); n 1675 drivers/media/usb/em28xx/em28xx-video.c if (INPUT(n)->type == EM28XX_VMUX_TELEVISION) n 486 drivers/media/usb/go7007/go7007-v4l2.c unsigned int n, d; n 491 drivers/media/usb/go7007/go7007-v4l2.c n = go->sensor_framerate * n 494 drivers/media/usb/go7007/go7007-v4l2.c if (n != 0 && d != 0 && n > d) n 495 drivers/media/usb/go7007/go7007-v4l2.c go->fps_scale = (n + d/2) / d; n 75 drivers/media/usb/gspca/benq.c int i, n; n 83 drivers/media/usb/gspca/benq.c for (n = 0; n < 4; n++) { n 87 drivers/media/usb/gspca/benq.c gspca_dev->urb[n] = urb; n 101 drivers/media/usb/gspca/benq.c n & 1 ? 0x82 : 0x83); n 212 drivers/media/usb/gspca/gl860/gl860-mi1320.c s32 n; /* reserved for FETCH functions */ n 217 drivers/media/usb/gspca/gl860/gl860-mi1320.c n = fetch_validx(gspca_dev, tbl_common, ARRAY_SIZE(tbl_common)); n 224 drivers/media/usb/gspca/gl860/gl860-mi1320.c ARRAY_SIZE(tbl_common), n); n 230 drivers/media/usb/gspca/gl860/gl860-mi1320.c ARRAY_SIZE(tbl_common), n); n 233 drivers/media/usb/gspca/gl860/gl860-mi1320.c ARRAY_SIZE(tbl_common), n); n 258 drivers/media/usb/gspca/gl860/gl860-ov2640.c s32 n; /* reserved for FETCH functions */ n 262 drivers/media/usb/gspca/gl860/gl860-ov2640.c n = fetch_validx(gspca_dev, tbl_sensor_settings_common1, n 267 drivers/media/usb/gspca/gl860/gl860-ov2640.c ARRAY_SIZE(tbl_sensor_settings_common1), n); n 271 drivers/media/usb/gspca/gl860/gl860-ov2640.c n = fetch_validx(gspca_dev, tbl_640, ARRAY_SIZE(tbl_640)); n 276 drivers/media/usb/gspca/gl860/gl860-ov2640.c n = fetch_validx(gspca_dev, tbl_800, ARRAY_SIZE(tbl_800)); n 282 drivers/media/usb/gspca/gl860/gl860-ov2640.c n = fetch_validx(gspca_dev, tbl_big1, ARRAY_SIZE(tbl_big1)); n 285 drivers/media/usb/gspca/gl860/gl860-ov2640.c n = fetch_validx(gspca_dev, tbl_big2, n 293 drivers/media/usb/gspca/gl860/gl860-ov2640.c n = fetch_validx(gspca_dev, tbl_big3, ARRAY_SIZE(tbl_big3)); n 309 drivers/media/usb/gspca/gl860/gl860-ov2640.c n = fetch_validx(gspca_dev, tbl_sensor_settings_common2, n 202 drivers/media/usb/gspca/gl860/gl860-ov9655.c s32 n; /* reserved for FETCH functions */ n 218 drivers/media/usb/gspca/gl860/gl860-ov9655.c n = fetch_validx(gspca_dev, tbl_init_post_alt, n 223 drivers/media/usb/gspca/gl860/gl860-ov9655.c ARRAY_SIZE(tbl_init_post_alt), n); n 226 drivers/media/usb/gspca/gl860/gl860-ov9655.c ARRAY_SIZE(tbl_init_post_alt), n); n 229 drivers/media/usb/gspca/gl860/gl860-ov9655.c ARRAY_SIZE(tbl_init_post_alt), n); n 232 drivers/media/usb/gspca/gl860/gl860-ov9655.c ARRAY_SIZE(tbl_init_post_alt), n); n 235 drivers/media/usb/gspca/gl860/gl860-ov9655.c ARRAY_SIZE(tbl_init_post_alt), n); n 239 drivers/media/usb/gspca/gl860/gl860-ov9655.c ARRAY_SIZE(tbl_init_post_alt), n); n 242 drivers/media/usb/gspca/gl860/gl860-ov9655.c ARRAY_SIZE(tbl_init_post_alt), n); n 245 drivers/media/usb/gspca/gl860/gl860-ov9655.c ARRAY_SIZE(tbl_init_post_alt), n); n 248 drivers/media/usb/gspca/gl860/gl860-ov9655.c ARRAY_SIZE(tbl_init_post_alt), n); n 251 drivers/media/usb/gspca/gl860/gl860-ov9655.c ARRAY_SIZE(tbl_init_post_alt), n); n 255 drivers/media/usb/gspca/gl860/gl860-ov9655.c ARRAY_SIZE(tbl_init_post_alt), n); n 258 drivers/media/usb/gspca/gl860/gl860-ov9655.c ARRAY_SIZE(tbl_init_post_alt), n); n 582 drivers/media/usb/gspca/gl860/gl860.c int n; n 584 drivers/media/usb/gspca/gl860/gl860.c for (n = 0; n < len; n++) { n 585 drivers/media/usb/gspca/gl860/gl860.c if (tbl[n].idx != 0xffff) n 586 drivers/media/usb/gspca/gl860/gl860.c ctrl_out(gspca_dev, 0x40, 1, tbl[n].val, n 587 
drivers/media/usb/gspca/gl860/gl860.c tbl[n].idx, 0, NULL); n 588 drivers/media/usb/gspca/gl860/gl860.c else if (tbl[n].val == 0xffff) n 591 drivers/media/usb/gspca/gl860/gl860.c msleep(tbl[n].val); n 593 drivers/media/usb/gspca/gl860/gl860.c return n; n 597 drivers/media/usb/gspca/gl860/gl860.c int len, int n) n 599 drivers/media/usb/gspca/gl860/gl860.c while (++n < len) { n 600 drivers/media/usb/gspca/gl860/gl860.c if (tbl[n].idx != 0xffff) n 601 drivers/media/usb/gspca/gl860/gl860.c ctrl_out(gspca_dev, 0x40, 1, tbl[n].val, tbl[n].idx, n 603 drivers/media/usb/gspca/gl860/gl860.c else if (tbl[n].val == 0xffff) n 606 drivers/media/usb/gspca/gl860/gl860.c msleep(tbl[n].val); n 608 drivers/media/usb/gspca/gl860/gl860.c return n; n 613 drivers/media/usb/gspca/gl860/gl860.c int n; n 615 drivers/media/usb/gspca/gl860/gl860.c for (n = 0; n < len; n++) { n 616 drivers/media/usb/gspca/gl860/gl860.c if (memcmp(tbl[n].data, "\xff\xff\xff", 3) != 0) n 617 drivers/media/usb/gspca/gl860/gl860.c ctrl_out(gspca_dev, 0x40, 3, 0x7a00, tbl[n].idx, n 618 drivers/media/usb/gspca/gl860/gl860.c 3, tbl[n].data); n 620 drivers/media/usb/gspca/gl860/gl860.c msleep(tbl[n].idx); n 81 drivers/media/usb/gspca/gl860/gl860.h int len, int n); n 663 drivers/media/usb/gspca/gspca.c int n, nurbs, i, psize, npkt, bsize; n 695 drivers/media/usb/gspca/gspca.c for (n = 0; n < nurbs; n++) { n 699 drivers/media/usb/gspca/gspca.c gspca_dev->urb[n] = urb; n 760 drivers/media/usb/gspca/gspca.c int n, ret, xfer, alt, alt_idx; n 853 drivers/media/usb/gspca/gspca.c for (n = 0; n < MAX_NURBS; n++) { n 854 drivers/media/usb/gspca/gspca.c urb = gspca_dev->urb[n]; n 172 drivers/media/usb/gspca/konica.c int i, n, packet_size; n 188 drivers/media/usb/gspca/konica.c n = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; n 189 drivers/media/usb/gspca/konica.c reg_w(gspca_dev, n, 0x08); n 201 drivers/media/usb/gspca/konica.c for (n = 0; n < 4; n++) { n 202 drivers/media/usb/gspca/konica.c i = n & 1 ? 0 : 1; n 208 drivers/media/usb/gspca/konica.c gspca_dev->urb[n] = urb; n 222 drivers/media/usb/gspca/konica.c n & 1 ? 
0x81 : 0x82); n 1018 drivers/media/usb/gspca/mr97310a.c int n; n 1021 drivers/media/usb/gspca/mr97310a.c n = sof - data; n 1022 drivers/media/usb/gspca/mr97310a.c if (n > sizeof pac_sof_marker) n 1023 drivers/media/usb/gspca/mr97310a.c n -= sizeof pac_sof_marker; n 1025 drivers/media/usb/gspca/mr97310a.c n = 0; n 1027 drivers/media/usb/gspca/mr97310a.c data, n); n 2149 drivers/media/usb/gspca/ov519.c static void ov518_reg_w32(struct sd *sd, u16 index, u32 value, int n) n 2166 drivers/media/usb/gspca/ov519.c sd->gspca_dev.usb_buf, n, 500); n 2591 drivers/media/usb/gspca/ov519.c int n) n 2593 drivers/media/usb/gspca/ov519.c while (--n >= 0) { n 2601 drivers/media/usb/gspca/ov519.c int n) n 2603 drivers/media/usb/gspca/ov519.c while (--n >= 0) { n 364 drivers/media/usb/gspca/pac207.c int n; n 367 drivers/media/usb/gspca/pac207.c n = sof - data; n 368 drivers/media/usb/gspca/pac207.c if (n > sizeof pac_sof_marker) n 369 drivers/media/usb/gspca/pac207.c n -= sizeof pac_sof_marker; n 371 drivers/media/usb/gspca/pac207.c n = 0; n 373 drivers/media/usb/gspca/pac207.c data, n); n 782 drivers/media/usb/gspca/pac7302.c int n, lum_offset, footer_length; n 794 drivers/media/usb/gspca/pac7302.c n = (sof - data) - (footer_length + sizeof pac_sof_marker); n 795 drivers/media/usb/gspca/pac7302.c if (n < 0) { n 796 drivers/media/usb/gspca/pac7302.c gspca_dev->image_len += n; n 797 drivers/media/usb/gspca/pac7302.c n = 0; n 799 drivers/media/usb/gspca/pac7302.c gspca_frame_add(gspca_dev, INTER_PACKET, data, n); n 808 drivers/media/usb/gspca/pac7302.c n = sof - data; n 809 drivers/media/usb/gspca/pac7302.c len -= n; n 814 drivers/media/usb/gspca/pac7302.c n >= lum_offset) n 566 drivers/media/usb/gspca/pac7311.c int n, lum_offset, footer_length; n 578 drivers/media/usb/gspca/pac7311.c n = (sof - data) - (footer_length + sizeof pac_sof_marker); n 579 drivers/media/usb/gspca/pac7311.c if (n < 0) { n 580 drivers/media/usb/gspca/pac7311.c gspca_dev->image_len += n; n 581 drivers/media/usb/gspca/pac7311.c n = 0; n 583 drivers/media/usb/gspca/pac7311.c gspca_frame_add(gspca_dev, INTER_PACKET, data, n); n 591 drivers/media/usb/gspca/pac7311.c n = sof - data; n 592 drivers/media/usb/gspca/pac7311.c len -= n; n 597 drivers/media/usb/gspca/pac7311.c n >= lum_offset) n 213 drivers/media/usb/gspca/se401.c int i, j, n; n 244 drivers/media/usb/gspca/se401.c n = cd[4] | (cd[5] << 8); n 245 drivers/media/usb/gspca/se401.c if (n > MAX_MODES) { n 250 drivers/media/usb/gspca/se401.c for (i = 0; i < n ; i++) { n 255 drivers/media/usb/gspca/se401.c for (i = 0; i < n ; i++) { n 263 drivers/media/usb/gspca/se401.c for (j = 0; j < n; j++) { n 272 drivers/media/usb/gspca/se401.c for (j = 0; j < n; j++) { n 299 drivers/media/usb/gspca/se401.c cam->nmodes = n; n 228 drivers/media/usb/gspca/sn9c2028.c struct init_command *cam_commands, int n) n 232 drivers/media/usb/gspca/sn9c2028.c for (i = 0; i < n; i++) { n 898 drivers/media/usb/gspca/sn9c2028.c int n; n 901 drivers/media/usb/gspca/sn9c2028.c n = sof - data; n 902 drivers/media/usb/gspca/sn9c2028.c if (n > sizeof sn9c2028_sof_marker) n 903 drivers/media/usb/gspca/sn9c2028.c n -= sizeof sn9c2028_sof_marker; n 905 drivers/media/usb/gspca/sn9c2028.c n = 0; n 906 drivers/media/usb/gspca/sn9c2028.c gspca_frame_add(gspca_dev, LAST_PACKET, data, n); n 3003 drivers/media/usb/gspca/vc032x.c int i, n; n 3019 drivers/media/usb/gspca/vc032x.c n = ARRAY_SIZE(vc0321_probe_data); n 3022 drivers/media/usb/gspca/vc032x.c n = ARRAY_SIZE(vc0323_probe_data); n 3024 drivers/media/usb/gspca/vc032x.c for (i 
= 0; i < n; i++) { n 2905 drivers/media/usb/gspca/xirlink_cit.c int n; n 2908 drivers/media/usb/gspca/xirlink_cit.c n = sof - data; n 2909 drivers/media/usb/gspca/xirlink_cit.c if (n > sd->sof_len) n 2910 drivers/media/usb/gspca/xirlink_cit.c n -= sd->sof_len; n 2912 drivers/media/usb/gspca/xirlink_cit.c n = 0; n 2914 drivers/media/usb/gspca/xirlink_cit.c data, n); n 764 drivers/media/usb/hdpvr/hdpvr-video.c unsigned int n; n 766 drivers/media/usb/hdpvr/hdpvr-video.c n = i->index; n 767 drivers/media/usb/hdpvr/hdpvr-video.c if (n >= HDPVR_VIDEO_INPUTS) n 772 drivers/media/usb/hdpvr/hdpvr-video.c strscpy(i->name, iname[n], sizeof(i->name)); n 776 drivers/media/usb/hdpvr/hdpvr-video.c i->capabilities = n ? V4L2_IN_CAP_STD : V4L2_IN_CAP_DV_TIMINGS; n 777 drivers/media/usb/hdpvr/hdpvr-video.c i->std = n ? V4L2_STD_ALL : 0; n 835 drivers/media/usb/hdpvr/hdpvr-video.c unsigned int n; n 837 drivers/media/usb/hdpvr/hdpvr-video.c n = audio->index; n 838 drivers/media/usb/hdpvr/hdpvr-video.c if (n >= HDPVR_AUDIO_INPUTS) n 843 drivers/media/usb/hdpvr/hdpvr-video.c strscpy(audio->name, audio_iname[n], sizeof(audio->name)); n 26 drivers/media/usb/pwc/pwc-uncompress.c int n, line, col; n 61 drivers/media/usb/pwc/pwc-uncompress.c n = pdev->width * pdev->height; n 63 drivers/media/usb/pwc/pwc-uncompress.c dstu = (u16 *)(image + n); n 64 drivers/media/usb/pwc/pwc-uncompress.c dstv = (u16 *)(image + n + n / 4); n 1070 drivers/media/usb/tm6000/tm6000-video.c unsigned int n; n 1072 drivers/media/usb/tm6000/tm6000-video.c n = i->index; n 1073 drivers/media/usb/tm6000/tm6000-video.c if (n >= 3) n 1076 drivers/media/usb/tm6000/tm6000-video.c if (!dev->vinput[n].type) n 1079 drivers/media/usb/tm6000/tm6000-video.c i->index = n; n 1081 drivers/media/usb/tm6000/tm6000-video.c if (dev->vinput[n].type == TM6000_INPUT_TV) n 1086 drivers/media/usb/tm6000/tm6000-video.c strscpy(i->name, iname[dev->vinput[n].type], sizeof(i->name)); n 280 drivers/media/usb/uvc/uvc_driver.c unsigned int i, n; n 294 drivers/media/usb/uvc/uvc_driver.c for (n = 0; n < n_terms && y != 0; ++n) { n 295 drivers/media/usb/uvc/uvc_driver.c an[n] = x / y; n 296 drivers/media/usb/uvc/uvc_driver.c if (an[n] >= threshold) { n 297 drivers/media/usb/uvc/uvc_driver.c if (n < 2) n 298 drivers/media/usb/uvc/uvc_driver.c n++; n 302 drivers/media/usb/uvc/uvc_driver.c r = x - an[n] * y; n 311 drivers/media/usb/uvc/uvc_driver.c for (i = n; i > 0; --i) { n 452 drivers/media/usb/uvc/uvc_driver.c unsigned int i, n; n 461 drivers/media/usb/uvc/uvc_driver.c n = buffer[2] == UVC_VS_FORMAT_UNCOMPRESSED ? 27 : 28; n 462 drivers/media/usb/uvc/uvc_driver.c if (buflen < n) { n 508 drivers/media/usb/uvc/uvc_driver.c n = info->bpp[0] * div; n 510 drivers/media/usb/uvc/uvc_driver.c n += info->bpp[i]; n 512 drivers/media/usb/uvc/uvc_driver.c format->bpp = DIV_ROUND_UP(8 * n, div); n 609 drivers/media/usb/uvc/uvc_driver.c n = buflen > 25 ? buffer[25] : 0; n 611 drivers/media/usb/uvc/uvc_driver.c n = buflen > 21 ? buffer[21] : 0; n 613 drivers/media/usb/uvc/uvc_driver.c n = n ? n : 3; n 615 drivers/media/usb/uvc/uvc_driver.c if (buflen < 26 + 4*n) { n 660 drivers/media/usb/uvc/uvc_driver.c for (i = 0; i < n; ++i) { n 668 drivers/media/usb/uvc/uvc_driver.c n -= frame->bFrameIntervalType ? 1 : 2; n 670 drivers/media/usb/uvc/uvc_driver.c min(frame->dwFrameInterval[n], n 725 drivers/media/usb/uvc/uvc_driver.c unsigned int size, i, n, p; n 804 drivers/media/usb/uvc/uvc_driver.c n = buflen >= size ? 
buffer[size-1] : 0; n 806 drivers/media/usb/uvc/uvc_driver.c if (buflen < size + p*n) { n 824 drivers/media/usb/uvc/uvc_driver.c streaming->header.bControlSize = n; n 826 drivers/media/usb/uvc/uvc_driver.c streaming->header.bmaControls = kmemdup(&buffer[size], p * n, n 1002 drivers/media/usb/uvc/uvc_driver.c unsigned int n, p; n 1037 drivers/media/usb/uvc/uvc_driver.c n = buflen >= 25 + p ? buffer[22+p] : 0; n 1039 drivers/media/usb/uvc/uvc_driver.c if (buflen < 25 + p + 2*n) { n 1047 drivers/media/usb/uvc/uvc_driver.c p + 1, 2*n); n 1057 drivers/media/usb/uvc/uvc_driver.c + n; n 1058 drivers/media/usb/uvc/uvc_driver.c memcpy(unit->extension.bmControls, &buffer[23+p], 2*n); n 1060 drivers/media/usb/uvc/uvc_driver.c if (buffer[24+p+2*n] != 0) n 1061 drivers/media/usb/uvc/uvc_driver.c usb_string(udev, buffer[24+p+2*n], unit->name, n 1081 drivers/media/usb/uvc/uvc_driver.c unsigned int i, n, p, len; n 1086 drivers/media/usb/uvc/uvc_driver.c n = buflen >= 12 ? buffer[11] : 0; n 1088 drivers/media/usb/uvc/uvc_driver.c if (buflen < 12 + n) { n 1099 drivers/media/usb/uvc/uvc_driver.c for (i = 0; i < n; ++i) { n 1141 drivers/media/usb/uvc/uvc_driver.c n = 0; n 1146 drivers/media/usb/uvc/uvc_driver.c n = buflen >= 15 ? buffer[14] : 0; n 1150 drivers/media/usb/uvc/uvc_driver.c n = buflen >= 9 ? buffer[8] : 0; n 1151 drivers/media/usb/uvc/uvc_driver.c p = buflen >= 10 + n ? buffer[9+n] : 0; n 1155 drivers/media/usb/uvc/uvc_driver.c if (buflen < len + n + p) { n 1163 drivers/media/usb/uvc/uvc_driver.c 1, n + p); n 1168 drivers/media/usb/uvc/uvc_driver.c term->camera.bControlSize = n; n 1176 drivers/media/usb/uvc/uvc_driver.c memcpy(term->camera.bmControls, &buffer[15], n); n 1179 drivers/media/usb/uvc/uvc_driver.c term->media.bControlSize = n; n 1183 drivers/media/usb/uvc/uvc_driver.c + sizeof(*term) + n; n 1184 drivers/media/usb/uvc/uvc_driver.c memcpy(term->media.bmControls, &buffer[9], n); n 1185 drivers/media/usb/uvc/uvc_driver.c memcpy(term->media.bmTransportModes, &buffer[10+n], p); n 1263 drivers/media/usb/uvc/uvc_driver.c n = buflen >= 8 ? buffer[7] : 0; n 1266 drivers/media/usb/uvc/uvc_driver.c if (buflen < p + n) { n 1273 drivers/media/usb/uvc/uvc_driver.c unit = uvc_alloc_entity(buffer[2], buffer[3], 2, n); n 1282 drivers/media/usb/uvc/uvc_driver.c memcpy(unit->processing.bmControls, &buffer[8], n); n 1284 drivers/media/usb/uvc/uvc_driver.c unit->processing.bmVideoStandards = buffer[9+n]; n 1286 drivers/media/usb/uvc/uvc_driver.c if (buffer[8+n] != 0) n 1287 drivers/media/usb/uvc/uvc_driver.c usb_string(udev, buffer[8+n], unit->name, n 1297 drivers/media/usb/uvc/uvc_driver.c n = buflen >= 24 + p ? 
buffer[22+p] : 0; n 1299 drivers/media/usb/uvc/uvc_driver.c if (buflen < 24 + p + n) { n 1306 drivers/media/usb/uvc/uvc_driver.c unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, n); n 1315 drivers/media/usb/uvc/uvc_driver.c memcpy(unit->extension.bmControls, &buffer[23+p], n); n 1317 drivers/media/usb/uvc/uvc_driver.c if (buffer[23+p+n] != 0) n 1318 drivers/media/usb/uvc/uvc_driver.c usb_string(udev, buffer[23+p+n], unit->name, n 1891 drivers/media/usb/uvc/uvc_driver.c struct list_head *p, *n; n 1903 drivers/media/usb/uvc/uvc_driver.c list_for_each_safe(p, n, &dev->chains) { n 1909 drivers/media/usb/uvc/uvc_driver.c list_for_each_safe(p, n, &dev->entities) { n 1918 drivers/media/usb/uvc/uvc_driver.c list_for_each_safe(p, n, &dev->streams) { n 25 drivers/media/v4l2-core/v4l2-async.c static int v4l2_async_notifier_call_bound(struct v4l2_async_notifier *n, n 29 drivers/media/v4l2-core/v4l2-async.c if (!n->ops || !n->ops->bound) n 32 drivers/media/v4l2-core/v4l2-async.c return n->ops->bound(n, subdev, asd); n 35 drivers/media/v4l2-core/v4l2-async.c static void v4l2_async_notifier_call_unbind(struct v4l2_async_notifier *n, n 39 drivers/media/v4l2-core/v4l2-async.c if (!n->ops || !n->ops->unbind) n 42 drivers/media/v4l2-core/v4l2-async.c n->ops->unbind(n, subdev, asd); n 45 drivers/media/v4l2-core/v4l2-async.c static int v4l2_async_notifier_call_complete(struct v4l2_async_notifier *n) n 47 drivers/media/v4l2-core/v4l2-async.c if (!n->ops || !n->ops->complete) n 50 drivers/media/v4l2-core/v4l2-async.c return n->ops->complete(n); n 155 drivers/media/v4l2-core/v4l2-async.c struct v4l2_async_notifier *n; n 157 drivers/media/v4l2-core/v4l2-async.c list_for_each_entry(n, ¬ifier_list, list) n 158 drivers/media/v4l2-core/v4l2-async.c if (n->sd == sd) n 159 drivers/media/v4l2-core/v4l2-async.c return n; n 914 drivers/media/v4l2-core/v4l2-compat-ioctl32.c u32 n; n 941 drivers/media/v4l2-core/v4l2-compat-ioctl32.c for (n = 0; n < count; n++) { n 972 drivers/media/v4l2-core/v4l2-compat-ioctl32.c u32 n; n 1001 drivers/media/v4l2-core/v4l2-compat-ioctl32.c for (n = 0; n < count; n++) { n 359 drivers/media/v4l2-core/v4l2-dv-timings.c unsigned long n, d; n 370 drivers/media/v4l2-core/v4l2-dv-timings.c ratio.numerator, ratio.denominator, &n, &d); n 371 drivers/media/v4l2-core/v4l2-dv-timings.c ratio.numerator = n; n 389 drivers/media/v4l2-core/v4l2-dv-timings.c unsigned long n, d; n 408 drivers/media/v4l2-core/v4l2-dv-timings.c rational_best_approximation(fps, 100, fps, 100, &n, &d); n 411 drivers/media/v4l2-core/v4l2-dv-timings.c fps_fract.denominator = n; n 3043 drivers/media/v4l2-core/v4l2-ioctl.c unsigned int n = ioc_size; n 3056 drivers/media/v4l2-core/v4l2-ioctl.c n = (flags & INFO_FL_CLEAR_MASK) >> 16; n 3060 drivers/media/v4l2-core/v4l2-ioctl.c if (copy_from_user(parg, (void __user *)arg, n)) n 3064 drivers/media/v4l2-core/v4l2-ioctl.c if (n < ioc_size) n 3065 drivers/media/v4l2-core/v4l2-ioctl.c memset((u8 *)parg + n, 0, ioc_size - n); n 22 drivers/memory/jz4780-nemc.c #define NEMC_SMCRn(n) (0x14 + (((n) - 1) * 4)) n 40 drivers/memory/jz4780-nemc.c #define NEMC_NFCSR_NFEn(n) BIT(((n) - 1) << 1) n 41 drivers/memory/jz4780-nemc.c #define NEMC_NFCSR_NFCEn(n) BIT((((n) - 1) << 1) + 1) n 42 drivers/memory/jz4780-nemc.c #define NEMC_NFCSR_TNFEn(n) BIT(16 + (n) - 1) n 27 drivers/memory/pl172.c #define MPMC_STATIC_CFG(n) (0x200 + 0x20 * n) n 37 drivers/memory/pl172.c #define MPMC_STATIC_WAIT_WEN(n) (0x204 + 0x20 * n) n 39 drivers/memory/pl172.c #define MPMC_STATIC_WAIT_OEN(n) (0x208 + 0x20 * n) n 41 
drivers/memory/pl172.c #define MPMC_STATIC_WAIT_RD(n) (0x20c + 0x20 * n) n 43 drivers/memory/pl172.c #define MPMC_STATIC_WAIT_PAGE(n) (0x210 + 0x20 * n) n 45 drivers/memory/pl172.c #define MPMC_STATIC_WAIT_WR(n) (0x214 + 0x20 * n) n 47 drivers/memory/pl172.c #define MPMC_STATIC_WAIT_TURN(n) (0x218 + 0x20 * n) n 274 drivers/memory/tegra/tegra124-emc.c #define EMC_REFCTRL_DEV_SEL(n) (((n > 1) ? 0 : 2) << EMC_REFCTRL_DEV_SEL_SHIFT) n 275 drivers/memory/tegra/tegra124-emc.c #define EMC_DRAM_DEV_SEL(n) ((n > 1) ? DRAM_DEV_SEL_ALL : DRAM_DEV_SEL_0) n 793 drivers/message/fusion/mptctl.c int n = 0; n 907 drivers/message/fusion/mptctl.c n++; n 911 drivers/message/fusion/mptctl.c iocp->name, __FILE__, __LINE__, n, ufwbuf); n 1164 drivers/message/fusion/mptctl.c int n = 0; n 1186 drivers/message/fusion/mptctl.c n++; n 1204 drivers/message/fusion/mptctl.c n++; n 1210 drivers/message/fusion/mptctl.c ioc->name, n)); n 164 drivers/message/fusion/mptdebug.h int ii, n; n 170 drivers/message/fusion/mptdebug.h n = ioc->req_sz/4 - 1; n 171 drivers/message/fusion/mptdebug.h while (mfp[n] == 0) n 172 drivers/message/fusion/mptdebug.h n--; n 173 drivers/message/fusion/mptdebug.h for (ii=0; ii<=n; ii++) { n 184 drivers/message/fusion/mptdebug.h int i, n; n 188 drivers/message/fusion/mptdebug.h n = 10; n 190 drivers/message/fusion/mptdebug.h for (i = 0; i < n; i++) n 198 drivers/message/fusion/mptdebug.h int i, n; n 202 drivers/message/fusion/mptdebug.h n = 24; n 203 drivers/message/fusion/mptdebug.h for (i=0; i<n; i++) { n 214 drivers/message/fusion/mptdebug.h int i, n; n 218 drivers/message/fusion/mptdebug.h n = (le32_to_cpu(mfp[0]) & 0x00FF0000) >> 16; n 220 drivers/message/fusion/mptdebug.h for (i=0; i<n; i++) n 228 drivers/message/fusion/mptdebug.h int i, n; n 232 drivers/message/fusion/mptdebug.h n = 3; n 234 drivers/message/fusion/mptdebug.h for (i=0; i<n; i++) n 242 drivers/message/fusion/mptdebug.h int i, n; n 246 drivers/message/fusion/mptdebug.h n = 13; n 248 drivers/message/fusion/mptdebug.h for (i=0; i<n; i++) { n 259 drivers/message/fusion/mptdebug.h int i, n; n 263 drivers/message/fusion/mptdebug.h n = (le32_to_cpu(mfp[0]) & 0x00FF0000) >> 16; n 264 drivers/message/fusion/mptdebug.h printk(KERN_DEBUG "TM_REPLY MessageLength=%d:\n", n); n 265 drivers/message/fusion/mptdebug.h for (i=0; i<n; i++) { n 1497 drivers/message/fusion/mptfc.c struct mptfc_rport_info *p, *n; n 1512 drivers/message/fusion/mptfc.c list_for_each_entry_safe(p, n, &ioc->fc_rports, list) { n 337 drivers/message/fusion/mptsas.c struct mptsas_target_reset_event *target_reset_list, *n; n 342 drivers/message/fusion/mptsas.c list_for_each_entry_safe(target_reset_list, n, n 5325 drivers/message/fusion/mptsas.c struct mptsas_portinfo *p, *n; n 5342 drivers/message/fusion/mptsas.c list_for_each_entry_safe(p, n, &ioc->sas_topology, list) { n 210 drivers/mfd/asic3.c int n; n 212 drivers/mfd/asic3.c n = (irq - asic->irq_base) >> 4; n 214 drivers/mfd/asic3.c return (n * (ASIC3_GPIO_B_BASE - ASIC3_GPIO_A_BASE)); n 1418 drivers/mfd/db8500-prcmu.c static int request_dsiclk(u8 n, bool enable) n 1423 drivers/mfd/db8500-prcmu.c val &= ~dsiclk[n].divsel_mask; n 1424 drivers/mfd/db8500-prcmu.c val |= ((enable ? dsiclk[n].divsel : PRCM_DSI_PLLOUT_SEL_OFF) << n 1425 drivers/mfd/db8500-prcmu.c dsiclk[n].divsel_shift); n 1430 drivers/mfd/db8500-prcmu.c static int request_dsiescclk(u8 n, bool enable) n 1435 drivers/mfd/db8500-prcmu.c enable ? 
(val |= dsiescclk[n].en) : (val &= ~dsiescclk[n].en); n 1576 drivers/mfd/db8500-prcmu.c static unsigned long dsiclk_rate(u8 n) n 1582 drivers/mfd/db8500-prcmu.c divsel = ((divsel & dsiclk[n].divsel_mask) >> dsiclk[n].divsel_shift); n 1585 drivers/mfd/db8500-prcmu.c divsel = dsiclk[n].divsel; n 1587 drivers/mfd/db8500-prcmu.c dsiclk[n].divsel = divsel; n 1604 drivers/mfd/db8500-prcmu.c static unsigned long dsiescclk_rate(u8 n) n 1609 drivers/mfd/db8500-prcmu.c div = ((div & dsiescclk[n].div_mask) >> (dsiescclk[n].div_shift)); n 1949 drivers/mfd/db8500-prcmu.c static void set_dsiclk_rate(u8 n, unsigned long rate) n 1957 drivers/mfd/db8500-prcmu.c dsiclk[n].divsel = (div == 1) ? PRCM_DSI_PLLOUT_SEL_PHI : n 1962 drivers/mfd/db8500-prcmu.c val &= ~dsiclk[n].divsel_mask; n 1963 drivers/mfd/db8500-prcmu.c val |= (dsiclk[n].divsel << dsiclk[n].divsel_shift); n 1967 drivers/mfd/db8500-prcmu.c static void set_dsiescclk_rate(u8 n, unsigned long rate) n 1974 drivers/mfd/db8500-prcmu.c val &= ~dsiescclk[n].div_mask; n 1975 drivers/mfd/db8500-prcmu.c val |= (min(div, (u32)255) << dsiescclk[n].div_shift); n 2407 drivers/mfd/db8500-prcmu.c static inline void print_unknown_header_warning(u8 n, u8 header) n 2410 drivers/mfd/db8500-prcmu.c header, n); n 2417 drivers/mfd/db8500-prcmu.c unsigned int n; n 2436 drivers/mfd/db8500-prcmu.c for (n = 0; n < NUM_PRCMU_WAKEUPS; n++) { n 2437 drivers/mfd/db8500-prcmu.c if (ev & prcmu_irq_bit[n]) n 2438 drivers/mfd/db8500-prcmu.c generic_handle_irq(irq_find_mapping(db8500_irq_domain, n)); n 2545 drivers/mfd/db8500-prcmu.c u8 n; n 2553 drivers/mfd/db8500-prcmu.c for (n = 0; bits; n++) { n 2554 drivers/mfd/db8500-prcmu.c if (bits & MBOX_BIT(n)) { n 2555 drivers/mfd/db8500-prcmu.c bits -= MBOX_BIT(n); n 2556 drivers/mfd/db8500-prcmu.c if (read_mailbox[n]()) n 359 drivers/mfd/lm3533-core.c struct attribute *attr, int n) n 481 drivers/mfd/menelaus.c int n) n 485 drivers/mfd/menelaus.c for (i = 0; i < n; i++, tbl++) n 85 drivers/mfd/pcf50633-core.c int n, n1, idx = 0; n 96 drivers/mfd/pcf50633-core.c for (n = 0; n < 256; n += sizeof(dump)) { n 98 drivers/mfd/pcf50633-core.c if (n == address_no_read[idx]) { n 102 drivers/mfd/pcf50633-core.c dump[n1] = pcf50633_reg_read(pcf, n + n1); n 115 drivers/mfd/pcf50633-core.c int n; n 117 drivers/mfd/pcf50633-core.c n = sprintf(buf, "%02x%02x%02x%02x%02x\n", n 124 drivers/mfd/pcf50633-core.c return n; n 263 drivers/mfd/rave-sp.c static void *stuff(unsigned char *dest, const unsigned char *src, size_t n) n 265 drivers/mfd/rave-sp.c while (n--) { n 394 drivers/mfd/sm501.c unsigned int m, n, k; n 453 drivers/mfd/sm501.c unsigned int m, n, k; n 461 drivers/mfd/sm501.c for (n = 2; n <= 127; n++) { n 463 drivers/mfd/sm501.c mclk = (24000000UL * m / n) >> k; n 468 drivers/mfd/sm501.c clock->n = n; n 542 drivers/mfd/sm501.c pll_reg = 0x20000 | (to.k << 15) | (to.n << 8) | to.m; n 209 drivers/mfd/stmfx.c int n, ret; n 227 drivers/mfd/stmfx.c for_each_set_bit(n, &bits, STMFX_REG_IRQ_SRC_MAX) n 228 drivers/mfd/stmfx.c handle_nested_irq(irq_find_mapping(stmfx->irq_domain, n)); n 615 drivers/mfd/ucb1x00-core.c struct list_head *l, *n; n 619 drivers/mfd/ucb1x00-core.c list_for_each_safe(l, n, &ucb->devs) { n 652 drivers/mfd/ucb1x00-core.c struct list_head *n, *l; n 656 drivers/mfd/ucb1x00-core.c list_for_each_safe(l, n, &drv->devs) { n 22 drivers/misc/altera-stapl/altera-comp.c static u32 altera_bits_req(u32 n) n 26 drivers/misc/altera-stapl/altera-comp.c if (n == 0) n 30 drivers/misc/altera-stapl/altera-comp.c while ((n & (1 << (SHORT_BITS - 1))) == 
0) { n 31 drivers/misc/altera-stapl/altera-comp.c n <<= 1; n 732 drivers/misc/cardreader/rtsx_pcr.c u8 n, clk_divider, mcu_cnt, div; n 773 drivers/misc/cardreader/rtsx_pcr.c n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N); n 775 drivers/misc/cardreader/rtsx_pcr.c n = (u8)(clk - 2); n 776 drivers/misc/cardreader/rtsx_pcr.c if ((clk <= 2) || (n > MAX_DIV_N_PCR)) n 785 drivers/misc/cardreader/rtsx_pcr.c while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) { n 787 drivers/misc/cardreader/rtsx_pcr.c int dbl_clk = pcr->ops->conv_clk_and_div_n(n, n 789 drivers/misc/cardreader/rtsx_pcr.c n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk, n 792 drivers/misc/cardreader/rtsx_pcr.c n = (n + 2) * 2 - 2; n 796 drivers/misc/cardreader/rtsx_pcr.c pcr_dbg(pcr, "n = %d, div = %d\n", n, div); n 813 drivers/misc/cardreader/rtsx_pcr.c rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n); n 393 drivers/misc/cardreader/rtsx_usb.c u8 n, clk_divider, mcu_cnt, div; n 427 drivers/misc/cardreader/rtsx_usb.c n = card_clock - 2; n 428 drivers/misc/cardreader/rtsx_usb.c if ((card_clock <= 2) || (n > MAX_DIV_N)) n 438 drivers/misc/cardreader/rtsx_usb.c while (n < MIN_DIV_N && div < CLK_DIV_4) { n 439 drivers/misc/cardreader/rtsx_usb.c n = (n + 2) * 2 - 2; n 442 drivers/misc/cardreader/rtsx_usb.c dev_dbg(&ucr->pusb_intf->dev, "n = %d, div = %d\n", n, div); n 457 drivers/misc/cardreader/rtsx_usb.c rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n); n 206 drivers/misc/cb710/core.c int n = 0; n 224 drivers/misc/cb710/core.c ++n; n 226 drivers/misc/cb710/core.c ++n; n 228 drivers/misc/cb710/core.c ++n; n 230 drivers/misc/cb710/core.c chip = devm_kzalloc(&pdev->dev, struct_size(chip, slot, n), n 219 drivers/misc/cxl/guest.c int i, n; n 224 drivers/misc/cxl/guest.c n = bitmap_find_next_zero_area(cur->bitmap, cur->range, n 226 drivers/misc/cxl/guest.c if (n < cur->range) { n 227 drivers/misc/cxl/guest.c bitmap_set(cur->bitmap, n, len); n 228 drivers/misc/cxl/guest.c *irq = cur->offset + n; n 240 drivers/misc/cxl/guest.c int i, n; n 250 drivers/misc/cxl/guest.c n = irq - cur->offset; n 251 drivers/misc/cxl/guest.c bitmap_clear(cur->bitmap, n, len); n 584 drivers/misc/cxl/pci.c #define TBSYNC_CAL(n) (((u64)n & 0x7) << (63-3)) n 585 drivers/misc/cxl/pci.c #define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6)) n 1132 drivers/misc/fastrpc.c struct fastrpc_invoke_ctx *ctx, *n; n 1145 drivers/misc/fastrpc.c list_for_each_entry_safe(ctx, n, &fl->pending, node) { n 855 drivers/misc/genwqe/card_ddcb.c pddcb->n.ats_64 = cpu_to_be64(req->cmd.ats); n 856 drivers/misc/genwqe/card_ddcb.c memcpy(&pddcb->n.asiv[0], /* destination */ n 69 drivers/misc/genwqe/card_ddcb.h } n; n 135 drivers/misc/genwqe/card_ddcb.h #define ICRC_LENGTH(n) ((n) + 8 + 8 + 8) /* used ASIV + hdr fields */ n 136 drivers/misc/genwqe/card_ddcb.h #define VCRC_LENGTH(n) ((n)) /* used ASV */ n 270 drivers/misc/genwqe/card_sysfs.c struct attribute *attr, int n) n 31 drivers/misc/ibmasm/heartbeat.c static int panic_happened(struct notifier_block *n, unsigned long val, void *v) n 910 drivers/misc/ibmvmc.c ssize_t n; n 980 drivers/misc/ibmvmc.c n = copy_to_user((void *)buf, buffer->real_addr_local, nbytes); n 985 drivers/misc/ibmvmc.c if (n) { n 180 drivers/misc/ics932s401.c int m, n, freq; n 183 drivers/misc/ics932s401.c n = data->regs[ICS932S401_REG_CPU_N_CTRL]; n 186 drivers/misc/ics932s401.c n |= ((int)data->regs[ICS932S401_REG_CPU_M_CTRL] & 0x80) << 1; n 187 drivers/misc/ics932s401.c n |= ((int)data->regs[ICS932S401_REG_CPU_M_CTRL] & 0x40) << 3; n 189 
drivers/misc/ics932s401.c freq = BASE_CLOCK * (n + 8) / (m + 2); n 237 drivers/misc/ics932s401.c int m, n, freq; n 240 drivers/misc/ics932s401.c n = data->regs[ICS932S401_REG_SRC_N_CTRL]; n 243 drivers/misc/ics932s401.c n |= ((int)data->regs[ICS932S401_REG_SRC_M_CTRL] & 0x80) << 1; n 244 drivers/misc/ics932s401.c n |= ((int)data->regs[ICS932S401_REG_SRC_M_CTRL] & 0x40) << 3; n 246 drivers/misc/ics932s401.c freq = BASE_CLOCK * (n + 8) / (m + 2); n 285 drivers/misc/ics932s401.c int m, n, freq; n 288 drivers/misc/ics932s401.c n = data->regs[ICS932S401_REG_SRC_N_CTRL]; n 291 drivers/misc/ics932s401.c n |= ((int)data->regs[ICS932S401_REG_SRC_M_CTRL] & 0x80) << 1; n 292 drivers/misc/ics932s401.c n |= ((int)data->regs[ICS932S401_REG_SRC_M_CTRL] & 0x40) << 3; n 294 drivers/misc/ics932s401.c freq = BASE_CLOCK * (n + 8) / (m + 2); n 330 drivers/misc/lkdtm/core.c int i, n, out; n 336 drivers/misc/lkdtm/core.c n = scnprintf(buf, PAGE_SIZE, "Available crash types:\n"); n 338 drivers/misc/lkdtm/core.c n += scnprintf(buf + n, PAGE_SIZE - n, "%s\n", n 341 drivers/misc/lkdtm/core.c buf[n] = '\0'; n 344 drivers/misc/lkdtm/core.c buf, n); n 1068 drivers/misc/mei/bus.c struct mei_cl_device *cldev, *n; n 1078 drivers/misc/mei/bus.c list_for_each_entry_safe(cldev, n, &bus->device_list, bus_list) { n 129 drivers/misc/mei/dma-ring.c u32 offset, u32 n) n 134 drivers/misc/mei/dma-ring.c size_t b_n = n << 2; n 149 drivers/misc/mei/dma-ring.c u32 offset, u32 n) n 154 drivers/misc/mei/dma-ring.c size_t b_n = n << 2; n 247 drivers/misc/mic/vop/vop_main.c static void vop_del_vq(struct virtqueue *vq, int n) n 252 drivers/misc/mic/vop/vop_main.c dma_unmap_single(&vpdev->dev, vdev->used[n], n 253 drivers/misc/mic/vop/vop_main.c vdev->used_size[n], DMA_BIDIRECTIONAL); n 254 drivers/misc/mic/vop/vop_main.c free_pages((unsigned long)vdev->used_virt[n], n 255 drivers/misc/mic/vop/vop_main.c get_order(vdev->used_size[n])); n 257 drivers/misc/mic/vop/vop_main.c vpdev->hw_ops->unmap(vpdev, vdev->vr[n]); n 258 drivers/misc/mic/vop/vop_main.c vdev->vr[n] = NULL; n 264 drivers/misc/mic/vop/vop_main.c struct virtqueue *vq, *n; n 269 drivers/misc/mic/vop/vop_main.c list_for_each_entry_safe(vq, n, &dev->vqs, list) n 608 drivers/misc/pti.c size_t n = 0; n 621 drivers/misc/pti.c if (len - n > USER_COPY_SIZE) n 624 drivers/misc/pti.c size = len - n; n 628 drivers/misc/pti.c return n ? 
n : -EFAULT; n 632 drivers/misc/pti.c n += size; n 635 drivers/misc/pti.c } while (len > n); n 254 drivers/misc/sgi-gru/grufile.c int cbrs, dsrbytes, n; n 283 drivers/misc/sgi-gru/grufile.c n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE; n 284 drivers/misc/sgi-gru/grufile.c cbrs = max(cbrs, n); n 285 drivers/misc/sgi-gru/grufile.c n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES; n 286 drivers/misc/sgi-gru/grufile.c dsrbytes = max(dsrbytes, n); n 140 drivers/misc/sgi-gru/gruhandles.c int asid, int pagesize, int global, int n, n 146 drivers/misc/sgi-gru/gruhandles.c tgh->n = n; n 61 drivers/misc/sgi-gru/gruhandles.h #define GRU_DS_BYTES_TO_AU(n) DIV_ROUND_UP(n, GRU_DSR_AU_BYTES) n 62 drivers/misc/sgi-gru/gruhandles.h #define GRU_CB_COUNT_TO_AU(n) DIV_ROUND_UP(n, GRU_CBR_AU_SIZE) n 212 drivers/misc/sgi-gru/gruhandles.h unsigned int n:10; n 508 drivers/misc/sgi-gru/gruhandles.h unsigned long vaddrmask, int asid, int pagesize, int global, int n, n 154 drivers/misc/sgi-gru/grumain.c static unsigned long reserve_resources(unsigned long *p, int n, int mmax, n 160 drivers/misc/sgi-gru/grumain.c while (n--) { n 745 drivers/misc/sgi-gru/grumain.c #define next_ctxnum(n) ((n) < GRU_NUM_CCH - 2 ? (n) + 1 : 0) n 493 drivers/misc/sgi-gru/grutables.h #define thread_cbr_number(gts, n) ((gts)->ts_cbr_idx[(n) / GRU_CBR_AU_SIZE] \ n 494 drivers/misc/sgi-gru/grutables.h * GRU_CBR_AU_SIZE + (n) % GRU_CBR_AU_SIZE) n 49 drivers/misc/sgi-gru/grutlbpurge.c int n; n 51 drivers/misc/sgi-gru/grutlbpurge.c n = GRU_NUM_TGH - gru->gs_tgh_first_remote; n 52 drivers/misc/sgi-gru/grutlbpurge.c n = gru_random() % n; n 53 drivers/misc/sgi-gru/grutlbpurge.c n += gru->gs_tgh_first_remote; n 54 drivers/misc/sgi-gru/grutlbpurge.c return n; n 66 drivers/misc/sgi-gru/grutlbpurge.c int n; n 70 drivers/misc/sgi-gru/grutlbpurge.c n = get_on_blade_tgh(gru); n 72 drivers/misc/sgi-gru/grutlbpurge.c n = get_off_blade_tgh(gru); n 73 drivers/misc/sgi-gru/grutlbpurge.c tgh = get_tgh_by_index(gru, n); n 297 drivers/misc/sgi-gru/grutlbpurge.c int cpus, shift = 0, n; n 303 drivers/misc/sgi-gru/grutlbpurge.c n = 1 << fls(cpus - 1); n 311 drivers/misc/sgi-gru/grutlbpurge.c shift = max(0, fls(n - 1) - fls(MAX_LOCAL_TGH - 1)); n 23 drivers/misc/vexpress-syscfg.c #define SYS_CFGCTRL_DCC(n) (((n) & 0xf) << 26) n 24 drivers/misc/vexpress-syscfg.c #define SYS_CFGCTRL_FUNC(n) (((n) & 0x3f) << 20) n 25 drivers/misc/vexpress-syscfg.c #define SYS_CFGCTRL_SITE(n) (((n) & 0x3) << 16) n 26 drivers/misc/vexpress-syscfg.c #define SYS_CFGCTRL_POSITION(n) (((n) & 0xf) << 12) n 27 drivers/misc/vexpress-syscfg.c #define SYS_CFGCTRL_DEVICE(n) (((n) & 0xfff) << 0) n 601 drivers/misc/vmw_vmci/vmci_context.c struct vmci_handle_list *notifier, *n; n 633 drivers/misc/vmw_vmci/vmci_context.c list_for_each_entry(n, &context->notifier_list, node) { n 634 drivers/misc/vmw_vmci/vmci_context.c if (vmci_handle_is_equal(n->handle, notifier->handle)) { n 458 drivers/misc/xilinx_sdfec.c static int xsdfec_reg0_write(struct xsdfec_dev *xsdfec, u32 n, u32 k, u32 psize, n 463 drivers/misc/xilinx_sdfec.c if (n < XSDFEC_REG0_N_MIN || n > XSDFEC_REG0_N_MAX || psize == 0 || n 464 drivers/misc/xilinx_sdfec.c (n > XSDFEC_REG0_N_MUL_P * psize) || n <= k || ((n % psize) != 0)) { n 468 drivers/misc/xilinx_sdfec.c n <<= XSDFEC_REG0_N_LSB; n 476 drivers/misc/xilinx_sdfec.c wdata = k | n; n 606 drivers/misc/xilinx_sdfec.c u32 n, i; n 621 drivers/misc/xilinx_sdfec.c n = (len * XSDFEC_REG_WIDTH_JUMP) / PAGE_SIZE; n 623 drivers/misc/xilinx_sdfec.c n += 1; n 625 drivers/misc/xilinx_sdfec.c res = 
get_user_pages_fast((unsigned long)src_ptr, n, 0, page); n 626 drivers/misc/xilinx_sdfec.c if (res < n) { n 632 drivers/misc/xilinx_sdfec.c for (i = 0; i < n; i++) { n 650 drivers/misc/xilinx_sdfec.c int ret, n; n 678 drivers/misc/xilinx_sdfec.c ret = xsdfec_reg0_write(xsdfec, ldpc->n, ldpc->k, ldpc->psize, n 704 drivers/misc/xilinx_sdfec.c n = ldpc->nlayers / 4; n 706 drivers/misc/xilinx_sdfec.c n++; n 708 drivers/misc/xilinx_sdfec.c ret = xsdfec_table_write(xsdfec, ldpc->sc_off, ldpc->sc_table, n, n 2742 drivers/mmc/core/block.c ssize_t n = 0; n 2767 drivers/mmc/core/block.c n += sprintf(buf + n, "%02x", ext_csd[i]); n 2768 drivers/mmc/core/block.c n += sprintf(buf + n, "\n"); n 2770 drivers/mmc/core/block.c if (n != EXT_CSD_STR_LEN) { n 226 drivers/mmc/host/davinci_mmc.c unsigned int n) n 237 drivers/mmc/host/davinci_mmc.c if (n > host->buffer_bytes_left) n 238 drivers/mmc/host/davinci_mmc.c n = host->buffer_bytes_left; n 239 drivers/mmc/host/davinci_mmc.c host->buffer_bytes_left -= n; n 240 drivers/mmc/host/davinci_mmc.c host->bytes_left -= n; n 247 drivers/mmc/host/davinci_mmc.c for (i = 0; i < (n >> 2); i++) { n 251 drivers/mmc/host/davinci_mmc.c if (n & 3) { n 252 drivers/mmc/host/davinci_mmc.c iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3)); n 253 drivers/mmc/host/davinci_mmc.c p = p + (n & 3); n 256 drivers/mmc/host/davinci_mmc.c for (i = 0; i < (n >> 2); i++) { n 260 drivers/mmc/host/davinci_mmc.c if (n & 3) { n 261 drivers/mmc/host/davinci_mmc.c ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3)); n 262 drivers/mmc/host/davinci_mmc.c p = p + (n & 3); n 355 drivers/mmc/host/dw_mmc.h #define SDMMC_TMOUT_DATA(n) _SBF(8, (n)) n 357 drivers/mmc/host/dw_mmc.h #define SDMMC_TMOUT_RESP(n) ((n) & 0xFF) n 364 drivers/mmc/host/dw_mmc.h #define SDMMC_INT_SDIO(n) BIT(16 + (n)) n 400 drivers/mmc/host/dw_mmc.h #define SDMMC_CMD_INDX(n) ((n) & 0x1F) n 184 drivers/mmc/host/mmc_spi.c unsigned n, u8 byte) n 193 drivers/mmc/host/mmc_spi.c status = mmc_spi_readbytes(host, n); n 197 drivers/mmc/host/mmc_spi.c for (i = 0; i < n; i++) { n 666 drivers/mmc/host/omap.c int n, nwords; n 673 drivers/mmc/host/omap.c n = 64; n 674 drivers/mmc/host/omap.c if (n > host->buffer_bytes_left) n 675 drivers/mmc/host/omap.c n = host->buffer_bytes_left; n 678 drivers/mmc/host/omap.c nwords = DIV_ROUND_UP(n, 2); n 680 drivers/mmc/host/omap.c host->buffer_bytes_left -= n; n 681 drivers/mmc/host/omap.c host->total_bytes_left -= n; n 682 drivers/mmc/host/omap.c host->data->bytes_xfered += n; n 70 drivers/mmc/host/rtsx_pci_sdmmc.c int n = min(8, len - i); n 73 drivers/mmc/host/rtsx_pci_sdmmc.c for (j = 0; j < n; j++) n 77 drivers/mmc/host/rtsx_pci_sdmmc.c start + i, n, data); n 476 drivers/mmc/host/s3cmci.c u32 n = fifo & 3; n 480 drivers/mmc/host/s3cmci.c while (n--) { n 1362 drivers/mmc/host/vub300.c int n = 0; n 1364 drivers/mmc/host/vub300.c for (n = 0; n < sdio_funcs; n++) { n 1365 drivers/mmc/host/vub300.c struct sdio_func *sf = card->sdio_func[n]; n 1638 drivers/mtd/chips/cfi_cmdset_0001.c int n; n 1641 drivers/mtd/chips/cfi_cmdset_0001.c n = min_t(int, len, map_bankwidth(map)-gap); n 1643 drivers/mtd/chips/cfi_cmdset_0001.c datum = map_word_load_partial(map, datum, buf, gap, n); n 1650 drivers/mtd/chips/cfi_cmdset_0001.c len -= n; n 1651 drivers/mtd/chips/cfi_cmdset_0001.c ofs += n; n 1652 drivers/mtd/chips/cfi_cmdset_0001.c buf += n; n 1653 drivers/mtd/chips/cfi_cmdset_0001.c (*retlen) += n; n 1792 drivers/mtd/chips/cfi_cmdset_0001.c int n = map_bankwidth(map) - word_gap; n 1793 
drivers/mtd/chips/cfi_cmdset_0001.c if (n > vec->iov_len - vec_seek) n 1794 drivers/mtd/chips/cfi_cmdset_0001.c n = vec->iov_len - vec_seek; n 1795 drivers/mtd/chips/cfi_cmdset_0001.c if (n > len) n 1796 drivers/mtd/chips/cfi_cmdset_0001.c n = len; n 1803 drivers/mtd/chips/cfi_cmdset_0001.c word_gap, n); n 1805 drivers/mtd/chips/cfi_cmdset_0001.c len -= n; n 1806 drivers/mtd/chips/cfi_cmdset_0001.c word_gap += n; n 1813 drivers/mtd/chips/cfi_cmdset_0001.c vec_seek += n; n 2263 drivers/mtd/chips/cfi_cmdset_0001.c int n = min_t(int, size, map_bankwidth(map)-gap); n 2266 drivers/mtd/chips/cfi_cmdset_0001.c datum = map_word_load_partial(map, datum, buf, gap, n); n 2271 drivers/mtd/chips/cfi_cmdset_0001.c offset += n; n 2272 drivers/mtd/chips/cfi_cmdset_0001.c buf += n; n 2273 drivers/mtd/chips/cfi_cmdset_0001.c size -= n; n 1398 drivers/mtd/chips/cfi_cmdset_0002.c int n = min_t(int, len, map_bankwidth(map) - gap); n 1401 drivers/mtd/chips/cfi_cmdset_0002.c if (n != map_bankwidth(map)) { n 1408 drivers/mtd/chips/cfi_cmdset_0002.c datum = map_word_load_partial(map, datum, buf, gap, n); n 1413 drivers/mtd/chips/cfi_cmdset_0002.c adr += n; n 1414 drivers/mtd/chips/cfi_cmdset_0002.c buf += n; n 1415 drivers/mtd/chips/cfi_cmdset_0002.c len -= n; n 1838 drivers/mtd/chips/cfi_cmdset_0002.c int n = 0; n 1861 drivers/mtd/chips/cfi_cmdset_0002.c n = min_t(int, len, map_bankwidth(map)-i); n 1863 drivers/mtd/chips/cfi_cmdset_0002.c tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); n 1870 drivers/mtd/chips/cfi_cmdset_0002.c ofs += n; n 1871 drivers/mtd/chips/cfi_cmdset_0002.c buf += n; n 1872 drivers/mtd/chips/cfi_cmdset_0002.c (*retlen) += n; n 1873 drivers/mtd/chips/cfi_cmdset_0002.c len -= n; n 2329 drivers/mtd/chips/cfi_cmdset_0002.c int n = 0; n 2340 drivers/mtd/chips/cfi_cmdset_0002.c n = min_t(int, len, map_bankwidth(map) - i); n 2342 drivers/mtd/chips/cfi_cmdset_0002.c tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); n 2349 drivers/mtd/chips/cfi_cmdset_0002.c ofs += n; n 2350 drivers/mtd/chips/cfi_cmdset_0002.c buf += n; n 2351 drivers/mtd/chips/cfi_cmdset_0002.c (*retlen) += n; n 2352 drivers/mtd/chips/cfi_cmdset_0002.c len -= n; n 354 drivers/mtd/devices/block2mtd.c size_t n; n 356 drivers/mtd/devices/block2mtd.c n = (size_t) ustrtoul(token, &endp, 0); n 360 drivers/mtd/devices/block2mtd.c *num = n; n 503 drivers/mtd/devices/lart.c int i,n; n 518 drivers/mtd/devices/lart.c i = n = 0; n 521 drivers/mtd/devices/lart.c while (len && i < BUSWIDTH) tmp[i++] = buf[n++], len--; n 526 drivers/mtd/devices/lart.c to += n; n 527 drivers/mtd/devices/lart.c buf += n; n 528 drivers/mtd/devices/lart.c *retlen += n; n 545 drivers/mtd/devices/lart.c i = n = 0; n 547 drivers/mtd/devices/lart.c while (len--) tmp[i++] = buf[n++]; n 552 drivers/mtd/devices/lart.c *retlen += n; n 105 drivers/mtd/devices/spear_smi.c #define FLASH_ID(n, es, id, psize, ssize, size) \ n 107 drivers/mtd/devices/spear_smi.c .name = n, \ n 581 drivers/mtd/ftl.c int n=0; n 584 drivers/mtd/ftl.c n=1; n 589 drivers/mtd/ftl.c n=1; n 594 drivers/mtd/ftl.c n=1; n 599 drivers/mtd/ftl.c n=1; n 605 drivers/mtd/ftl.c if (!n) n 309 drivers/mtd/inftlmount.c static int memcmpb(void *a, int c, int n) n 312 drivers/mtd/inftlmount.c for (i = 0; i < n; i++) { n 412 drivers/mtd/lpddr/lpddr_cmds.c int n = map_bankwidth(map) - word_gap; n 414 drivers/mtd/lpddr/lpddr_cmds.c if (n > vec->iov_len - vec_seek) n 415 drivers/mtd/lpddr/lpddr_cmds.c n = vec->iov_len - vec_seek; n 416 drivers/mtd/lpddr/lpddr_cmds.c if (n > len) n 417 
drivers/mtd/lpddr/lpddr_cmds.c n = len; n 423 drivers/mtd/lpddr/lpddr_cmds.c vec->iov_base + vec_seek, word_gap, n); n 425 drivers/mtd/lpddr/lpddr_cmds.c len -= n; n 426 drivers/mtd/lpddr/lpddr_cmds.c word_gap += n; n 433 drivers/mtd/lpddr/lpddr_cmds.c vec_seek += n; n 431 drivers/mtd/mtdcore.c static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state, n 436 drivers/mtd/mtdcore.c mtd = container_of(n, struct mtd_info, reboot_notifier); n 370 drivers/mtd/mtdswap.c struct mtdswap_oobdata n; n 376 drivers/mtd/mtdswap.c ops.oobbuf = (uint8_t *)&n; n 381 drivers/mtd/mtdswap.c n.magic = cpu_to_le16(MTDSWAP_MAGIC_CLEAN); n 382 drivers/mtd/mtdswap.c n.count = cpu_to_le32(eb->erase_count); n 386 drivers/mtd/mtdswap.c n.magic = cpu_to_le16(MTDSWAP_MAGIC_DIRTY); n 387 drivers/mtd/mtdswap.c ops.ooblen = sizeof(n.magic); n 105 drivers/mtd/nand/raw/atmel/pmecc.c #define ATMEL_PMECC_ECC(sector, n) \ n 106 drivers/mtd/nand/raw/atmel/pmecc.c ((((sector) + 1) * 0x40) + (n)) n 108 drivers/mtd/nand/raw/atmel/pmecc.c #define ATMEL_PMECC_REM(sector, n) \ n 109 drivers/mtd/nand/raw/atmel/pmecc.c ((((sector) + 1) * 0x40) + ((n) * 4) + 0x200) n 115 drivers/mtd/nand/raw/atmel/pmecc.c #define PMERRLOC_ELCFG_NUM_ERRORS(n) ((n) << 16) n 388 drivers/mtd/nand/raw/cafe_nand.c int i, n; n 397 drivers/mtd/nand/raw/cafe_nand.c n = decode_rs16(cafe->rs, NULL, NULL, 1367, syn, 0, pos, 0, n 400 drivers/mtd/nand/raw/cafe_nand.c for (i = 0; i < n; i++) { n 407 drivers/mtd/nand/raw/cafe_nand.c n = -1374; n 411 drivers/mtd/nand/raw/cafe_nand.c n = -2048; n 434 drivers/mtd/nand/raw/cafe_nand.c if (n < 0) { n 441 drivers/mtd/nand/raw/cafe_nand.c dev_dbg(&cafe->pdev->dev, "Corrected %d symbol errors\n", n); n 442 drivers/mtd/nand/raw/cafe_nand.c mtd->ecc_stats.corrected += n; n 443 drivers/mtd/nand/raw/cafe_nand.c max_bitflips = max_t(unsigned int, max_bitflips, n); n 1500 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c int meta, n, page_size; n 1541 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c n = last - first + 1; n 1542 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c page_size = meta + (size + ecc_parity_size) * n; n 1545 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c this->bch_flashlayout0 = BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1) | n 1563 drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c page, offs, len, col, first, n, page_size); n 80 drivers/mtd/nand/raw/lpc32xx_mlc.c #define MLCTIMEREG_TCEA_DELAY(n) (((n) & 0x03) << 24) n 81 drivers/mtd/nand/raw/lpc32xx_mlc.c #define MLCTIMEREG_BUSY_DELAY(n) (((n) & 0x1F) << 19) n 82 drivers/mtd/nand/raw/lpc32xx_mlc.c #define MLCTIMEREG_NAND_TA(n) (((n) & 0x07) << 16) n 83 drivers/mtd/nand/raw/lpc32xx_mlc.c #define MLCTIMEREG_RD_HIGH(n) (((n) & 0x0F) << 12) n 84 drivers/mtd/nand/raw/lpc32xx_mlc.c #define MLCTIMEREG_RD_LOW(n) (((n) & 0x0F) << 8) n 85 drivers/mtd/nand/raw/lpc32xx_mlc.c #define MLCTIMEREG_WR_HIGH(n) (((n) & 0x0F) << 4) n 86 drivers/mtd/nand/raw/lpc32xx_mlc.c #define MLCTIMEREG_WR_LOW(n) (((n) & 0x0F) << 0) n 88 drivers/mtd/nand/raw/lpc32xx_slc.c #define SLCTAC_CLOCKS(c, n, s) (min_t(u32, DIV_ROUND_UP(c, n) - 1, 0xF) << s) n 91 drivers/mtd/nand/raw/lpc32xx_slc.c #define SLCTAC_WDR(n) (((n) & 0xF) << 28) n 93 drivers/mtd/nand/raw/lpc32xx_slc.c #define SLCTAC_WWIDTH(c, n) (SLCTAC_CLOCKS(c, n, 24)) n 95 drivers/mtd/nand/raw/lpc32xx_slc.c #define SLCTAC_WHOLD(c, n) (SLCTAC_CLOCKS(c, n, 20)) n 97 drivers/mtd/nand/raw/lpc32xx_slc.c #define SLCTAC_WSETUP(c, n) (SLCTAC_CLOCKS(c, n, 16)) n 99 drivers/mtd/nand/raw/lpc32xx_slc.c #define SLCTAC_RDR(n) (((n) & 0xF) << 12) n 101 
drivers/mtd/nand/raw/lpc32xx_slc.c #define SLCTAC_RWIDTH(c, n) (SLCTAC_CLOCKS(c, n, 8)) n 103 drivers/mtd/nand/raw/lpc32xx_slc.c #define SLCTAC_RHOLD(c, n) (SLCTAC_CLOCKS(c, n, 4)) n 105 drivers/mtd/nand/raw/lpc32xx_slc.c #define SLCTAC_RSETUP(c, n) (SLCTAC_CLOCKS(c, n, 0)) n 111 drivers/mtd/nand/raw/lpc32xx_slc.c #define SLCECC_TO_LINEPAR(n) (((n) >> 6) & 0x7FFF) n 112 drivers/mtd/nand/raw/lpc32xx_slc.c #define SLCECC_TO_COLPAR(n) ((n) & 0x3F) n 32 drivers/mtd/nand/raw/mpc5121_nfc.c #define NFC_MAIN_AREA(n) ((n) * 0x200) n 37 drivers/mtd/nand/raw/mpc5121_nfc.c #define NFC_SPARE_AREA(n) (0x1000 + ((n) * NFC_SPARE_LEN)) n 930 drivers/mtd/nand/raw/mxc_nand.c int n = mtd->oobsize + mtd->writesize - col; n 932 drivers/mtd/nand/raw/mxc_nand.c n = min(n, len); n 934 drivers/mtd/nand/raw/mxc_nand.c memcpy(host->data_buf + col, buf, n); n 936 drivers/mtd/nand/raw/mxc_nand.c host->buf_start += n; n 949 drivers/mtd/nand/raw/mxc_nand.c int n = mtd->oobsize + mtd->writesize - col; n 951 drivers/mtd/nand/raw/mxc_nand.c n = min(n, len); n 953 drivers/mtd/nand/raw/mxc_nand.c memcpy(buf, host->data_buf + col, n); n 955 drivers/mtd/nand/raw/mxc_nand.c host->buf_start += n; n 951 drivers/mtd/nand/raw/nandsim.c struct list_head *pos, *n; n 952 drivers/mtd/nand/raw/nandsim.c list_for_each_safe(pos, n, &weak_blocks) { n 956 drivers/mtd/nand/raw/nandsim.c list_for_each_safe(pos, n, &weak_pages) { n 960 drivers/mtd/nand/raw/nandsim.c list_for_each_safe(pos, n, &grave_pages) { n 463 drivers/mtd/nand/raw/omap2.c unsigned n; n 471 drivers/mtd/nand/raw/omap2.c n = dma_map_sg(info->dma->device->dev, &sg, 1, dir); n 472 drivers/mtd/nand/raw/omap2.c if (n == 0) { n 478 drivers/mtd/nand/raw/omap2.c tx = dmaengine_prep_slave_sg(info->dma, &sg, n, n 1341 drivers/mtd/nand/raw/qcom_nandc.c struct desc_info *desc, *n; n 1343 drivers/mtd/nand/raw/qcom_nandc.c list_for_each_entry_safe(desc, n, &nandc->desc_list, node) { n 53 drivers/mtd/nand/raw/vf610_nfc.c #define NFC_MAIN_AREA(n) ((n) * 0x1000) n 251 drivers/mtd/nftlmount.c static int memcmpb(void *a, int c, int n) n 254 drivers/mtd/nftlmount.c for (i = 0; i < n; i++) { n 404 drivers/mtd/spi-nor/aspeed-smc.c int n; n 406 drivers/mtd/spi-nor/aspeed-smc.c for (n = 0; n < controller->info->nce; n++) { n 407 drivers/mtd/spi-nor/aspeed-smc.c chip = controller->chips[n]; n 51 drivers/mtd/spi-nor/intel-spi.c #define FDATA(n) (0x10 + ((n) * 4)) n 55 drivers/mtd/spi-nor/intel-spi.c #define FREG(n) (0x54 + ((n) * 4)) n 61 drivers/mtd/spi-nor/intel-spi.c #define PR(n) ((n) * 4) n 102 drivers/mtd/spi-nor/mtk-quadspi.c #define MTK_NOR_PRG_REG(n) (MTK_NOR_PRGDATA0_REG + 4 * (n)) n 103 drivers/mtd/spi-nor/mtk-quadspi.c #define MTK_NOR_SHREG(n) (MTK_NOR_SHREG0_REG + 4 * (n)) n 175 drivers/mtd/tests/nandbiterrs.c #define CBIT(v, n) ((v) & (1 << (n))) n 176 drivers/mtd/tests/nandbiterrs.c #define BCLR(v, n) ((v) = (v) & ~(1 << (n))) n 81 drivers/mtd/tests/readtest.c int i, j, n; n 86 drivers/mtd/tests/readtest.c n = mtd->erasesize; n 87 drivers/mtd/tests/readtest.c for (i = 0; i < n;) { n 91 drivers/mtd/tests/readtest.c for (j = 0; j < 32 && i < n; j++, i++) n 99 drivers/mtd/tests/readtest.c n = mtd->oobsize; n 101 drivers/mtd/tests/readtest.c for (oob = 0; oob < n;) { n 105 drivers/mtd/tests/readtest.c for (j = 0; j < 32 && oob < n; j++, oob++, i++) n 90 drivers/mtd/tests/speedtest.c int i, n = pgcnt / 2, err = 0; n 94 drivers/mtd/tests/speedtest.c for (i = 0; i < n; i++) { n 134 drivers/mtd/tests/speedtest.c int i, n = pgcnt / 2, err = 0; n 138 drivers/mtd/tests/speedtest.c for (i = 0; i < 
n; i++) { n 596 drivers/mtd/ubi/cdev.c int n, err = -EINVAL; n 626 drivers/mtd/ubi/cdev.c n = req->alignment & (ubi->min_io_size - 1); n 627 drivers/mtd/ubi/cdev.c if (req->alignment != 1 && n) n 638 drivers/mtd/ubi/cdev.c n = strnlen(req->name, req->name_len + 1); n 639 drivers/mtd/ubi/cdev.c if (n != req->name_len) n 682 drivers/mtd/ubi/cdev.c int i, n, err; n 702 drivers/mtd/ubi/cdev.c n = strlen(req->ents[i].name); n 703 drivers/mtd/ubi/cdev.c if (n != req->ents[i].name_len) n 709 drivers/mtd/ubi/cdev.c for (n = i + 1; n < req->count; n++) { n 710 drivers/mtd/ubi/cdev.c if (req->ents[i].vol_id == req->ents[n].vol_id) { n 715 drivers/mtd/ubi/cdev.c if (!strcmp(req->ents[i].name, req->ents[n].name)) { n 504 drivers/mtd/ubi/debug.c int err, n; n 513 drivers/mtd/ubi/debug.c n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME, n 515 drivers/mtd/ubi/debug.c if (n == UBI_DFS_DIR_LEN) { n 522 drivers/mtd/ubi/fastmap.c int n = 0; n 525 drivers/mtd/ubi/fastmap.c n++; n 528 drivers/mtd/ubi/fastmap.c n++; n 532 drivers/mtd/ubi/fastmap.c n++; n 534 drivers/mtd/ubi/fastmap.c return n; n 640 drivers/mtd/ubi/vmt.c long long n; n 666 drivers/mtd/ubi/vmt.c n = vol->alignment & (ubi->min_io_size - 1); n 667 drivers/mtd/ubi/vmt.c if (vol->alignment != 1 && n) { n 672 drivers/mtd/ubi/vmt.c n = ubi->leb_size % vol->alignment; n 673 drivers/mtd/ubi/vmt.c if (vol->data_pad != n) { n 674 drivers/mtd/ubi/vmt.c ubi_err(ubi, "bad data_pad, has to be %lld", n); n 694 drivers/mtd/ubi/vmt.c n = ubi->leb_size - vol->data_pad; n 696 drivers/mtd/ubi/vmt.c ubi_err(ubi, "bad usable_leb_size, has to be %lld", n); n 706 drivers/mtd/ubi/vmt.c n = strnlen(vol->name, vol->name_len + 1); n 707 drivers/mtd/ubi/vmt.c if (n != vol->name_len) { n 708 drivers/mtd/ubi/vmt.c ubi_err(ubi, "bad name_len %lld", n); n 712 drivers/mtd/ubi/vmt.c n = (long long)vol->used_ebs * vol->usable_leb_size; n 726 drivers/mtd/ubi/vmt.c if (vol->used_bytes != n) { n 745 drivers/mtd/ubi/vmt.c if (vol->used_bytes < 0 || vol->used_bytes > n || n 746 drivers/mtd/ubi/vmt.c vol->used_bytes < n - vol->usable_leb_size) { n 160 drivers/mtd/ubi/vtbl.c int i, n, reserved_pebs, alignment, data_pad, vol_type, name_len; n 204 drivers/mtd/ubi/vtbl.c n = alignment & (ubi->min_io_size - 1); n 205 drivers/mtd/ubi/vtbl.c if (alignment != 1 && n) { n 210 drivers/mtd/ubi/vtbl.c n = ubi->leb_size % alignment; n 211 drivers/mtd/ubi/vtbl.c if (data_pad != n) { n 212 drivers/mtd/ubi/vtbl.c ubi_err(ubi, "bad data_pad, has to be %d", n); n 252 drivers/mtd/ubi/vtbl.c for (n = i + 1; n < ubi->vtbl_slots; n++) { n 254 drivers/mtd/ubi/vtbl.c int len2 = be16_to_cpu(vtbl[n].name_len); n 257 drivers/mtd/ubi/vtbl.c !strncmp(vtbl[i].name, vtbl[n].name, len1)) { n 259 drivers/mtd/ubi/vtbl.c i, n, vtbl[i].name); n 261 drivers/mtd/ubi/vtbl.c ubi_dump_vtbl_record(&vtbl[n], n); n 317 drivers/net/appletalk/ltpc.c int n; n 319 drivers/net/appletalk/ltpc.c n = qel->cbuflen; n 320 drivers/net/appletalk/ltpc.c if (n>100) n=100; n 321 drivers/net/appletalk/ltpc.c for(i=0;i<n;i++) printk("%02x ",qel->cbuf[i]); n 577 drivers/net/appletalk/ltpc.c int n; n 579 drivers/net/appletalk/ltpc.c n = q->cbuflen; n 580 drivers/net/appletalk/ltpc.c if (n>100) n=100; n 581 drivers/net/appletalk/ltpc.c for(i=0;i<n;i++) n 3655 drivers/net/bonding/bond_main.c static int bond_neigh_init(struct neighbour *n) n 3657 drivers/net/bonding/bond_main.c struct bonding *bond = netdev_priv(n->dev); n 3686 drivers/net/bonding/bond_main.c ret = parms.neigh_setup(n); n 1450 drivers/net/caif/caif_hsi.c struct 
list_head *n; n 1456 drivers/net/caif/caif_hsi.c list_for_each_safe(list_node, n, &cfhsi_list) { n 774 drivers/net/caif/caif_spi.c struct list_head *n; n 777 drivers/net/caif/caif_spi.c list_for_each_safe(list_node, n, &cfspi_list) { n 820 drivers/net/can/c_can/c_can.c int n = c_can_handle_lost_msg_obj(dev, IF_RX, obj, ctrl); n 822 drivers/net/can/c_can/c_can.c pkts += n; n 823 drivers/net/can/c_can/c_can.c quota -= n; n 870 drivers/net/can/c_can/c_can.c u32 pkts = 0, pend = 0, toread, n; n 895 drivers/net/can/c_can/c_can.c n = c_can_read_objects(dev, priv, toread, quota); n 896 drivers/net/can/c_can/c_can.c pkts += n; n 897 drivers/net/can/c_can/c_can.c quota -= n; n 605 drivers/net/can/cc770/cc770.c int n = CC770_MAX_MSG; n 607 drivers/net/can/cc770/cc770.c while (n--) { n 645 drivers/net/can/cc770/cc770.c int n = CC770_MAX_MSG; n 647 drivers/net/can/cc770/cc770.c while (n--) { n 717 drivers/net/can/cc770/cc770.c int o, n = 0; n 726 drivers/net/can/cc770/cc770.c while (n < CC770_MAX_IRQ) { n 731 drivers/net/can/cc770/cc770.c n++; n 758 drivers/net/can/cc770/cc770.c if (n >= CC770_MAX_IRQ) n 759 drivers/net/can/cc770/cc770.c netdev_dbg(dev, "%d messages handled in ISR", n); n 761 drivers/net/can/cc770/cc770.c return (n) ? IRQ_HANDLED : IRQ_NONE; n 789 drivers/net/can/flexcan.c u32 *timestamp, unsigned int n) n 797 drivers/net/can/flexcan.c mb = flexcan_get_mb(priv, n); n 846 drivers/net/can/flexcan.c if (n < 32) n 847 drivers/net/can/flexcan.c priv->write(BIT(n), &regs->iflag1); n 849 drivers/net/can/flexcan.c priv->write(BIT(n - 32), &regs->iflag2); n 208 drivers/net/can/ifi_canfd/ifi_canfd.c #define IFI_CANFD_FILTER_MASK(n) (0x800 + ((n) * 8) + 0) n 213 drivers/net/can/ifi_canfd/ifi_canfd.c #define IFI_CANFD_FILTER_IDENT(n) (0x800 + ((n) * 8) + 4) n 294 drivers/net/can/m_can/m_can.c #define M_CAN_FIFO_DATA(n) (0x8 + ((n) << 2)) n 131 drivers/net/can/mscan/mscan.h #define _MSCAN_RESERVED_(n, num) u8 _res##n[num] n 134 drivers/net/can/mscan/mscan.h #define _MSCAN_RESERVED_(n, num) n 91 drivers/net/can/rcar/rcar_canfd.c #define RCANFD_GAFLCFG_SETRNC(n, x) (((x) & 0xff) << (24 - n * 8)) n 92 drivers/net/can/rcar/rcar_canfd.c #define RCANFD_GAFLCFG_GETRNC(n, x) (((x) >> (24 - n * 8)) & 0xff) n 132 drivers/net/can/rx-offload.c can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n) n 156 drivers/net/can/rx-offload.c &timestamp, n); n 180 drivers/net/can/rx-offload.c ret = offload->mailbox_read(offload, cf, &cb->timestamp, n); n 505 drivers/net/can/sja1000/sja1000.c int n = 0; n 515 drivers/net/can/sja1000/sja1000.c (n < SJA1000_MAX_IRQ)) { n 556 drivers/net/can/sja1000/sja1000.c n++; n 562 drivers/net/can/sja1000/sja1000.c if (n >= SJA1000_MAX_IRQ) n 563 drivers/net/can/sja1000/sja1000.c netdev_dbg(dev, "%d messages handled in ISR", n); n 565 drivers/net/can/sja1000/sja1000.c return (n) ? IRQ_HANDLED : IRQ_NONE; n 47 drivers/net/can/spi/mcp251x.c #define INSTRUCTION_LOAD_TXB(n) (0x40 + 2 * (n)) n 48 drivers/net/can/spi/mcp251x.c #define INSTRUCTION_READ_RXB(n) (((n) == 0) ? 
0x90 : 0x94) n 53 drivers/net/can/spi/mcp251x.c #define INSTRUCTION_RTS(n) (0x80 | ((n) & 0x07)) n 108 drivers/net/can/spi/mcp251x.c #define TXBCTRL(n) (((n) * 0x10) + 0x30 + TXBCTRL_OFF) n 113 drivers/net/can/spi/mcp251x.c #define TXBSIDH(n) (((n) * 0x10) + 0x30 + TXBSIDH_OFF) n 115 drivers/net/can/spi/mcp251x.c #define TXBSIDL(n) (((n) * 0x10) + 0x30 + TXBSIDL_OFF) n 121 drivers/net/can/spi/mcp251x.c #define TXBEID8(n) (((n) * 0x10) + 0x30 + TXBEID8_OFF) n 122 drivers/net/can/spi/mcp251x.c #define TXBEID0(n) (((n) * 0x10) + 0x30 + TXBEID0_OFF) n 123 drivers/net/can/spi/mcp251x.c #define TXBDLC(n) (((n) * 0x10) + 0x30 + TXBDLC_OFF) n 132 drivers/net/can/spi/mcp251x.c #define RXBCTRL(n) (((n) * 0x10) + 0x60 + RXBCTRL_OFF) n 136 drivers/net/can/spi/mcp251x.c #define RXBSIDH(n) (((n) * 0x10) + 0x60 + RXBSIDH_OFF) n 138 drivers/net/can/spi/mcp251x.c #define RXBSIDL(n) (((n) * 0x10) + 0x60 + RXBSIDL_OFF) n 143 drivers/net/can/spi/mcp251x.c #define RXBEID8(n) (((n) * 0x10) + 0x60 + RXBEID8_OFF) n 144 drivers/net/can/spi/mcp251x.c #define RXBEID0(n) (((n) * 0x10) + 0x60 + RXBEID0_OFF) n 145 drivers/net/can/spi/mcp251x.c #define RXBDLC(n) (((n) * 0x10) + 0x60 + RXBDLC_OFF) n 155 drivers/net/can/spi/mcp251x.c #define RXFSID(n) ((n < 3) ? 0 : 4) n 156 drivers/net/can/spi/mcp251x.c #define RXFSIDH(n) ((n) * 4 + RXFSID(n)) n 157 drivers/net/can/spi/mcp251x.c #define RXFSIDL(n) ((n) * 4 + 1 + RXFSID(n)) n 158 drivers/net/can/spi/mcp251x.c #define RXFEID8(n) ((n) * 4 + 2 + RXFSID(n)) n 159 drivers/net/can/spi/mcp251x.c #define RXFEID0(n) ((n) * 4 + 3 + RXFSID(n)) n 160 drivers/net/can/spi/mcp251x.c #define RXMSIDH(n) ((n) * 4 + 0x20) n 161 drivers/net/can/spi/mcp251x.c #define RXMSIDL(n) ((n) * 4 + 0x21) n 162 drivers/net/can/spi/mcp251x.c #define RXMEID8(n) ((n) * 4 + 0x22) n 163 drivers/net/can/spi/mcp251x.c #define RXMEID0(n) ((n) * 4 + 0x23) n 643 drivers/net/can/sun4i_can.c int n = 0; n 646 drivers/net/can/sun4i_can.c (n < SUN4I_CAN_MAX_IRQ)) { n 647 drivers/net/can/sun4i_can.c n++; n 683 drivers/net/can/sun4i_can.c if (n >= SUN4I_CAN_MAX_IRQ) n 684 drivers/net/can/sun4i_can.c netdev_dbg(dev, "%d messages handled in ISR", n); n 686 drivers/net/can/sun4i_can.c return (n) ? 
IRQ_HANDLED : IRQ_NONE; n 111 drivers/net/can/usb/peak_usb/pcan_usb.c static int pcan_usb_send_cmd(struct peak_usb_device *dev, u8 f, u8 n, u8 *p) n 121 drivers/net/can/usb/peak_usb/pcan_usb.c dev->cmd_buf[PCAN_USB_CMD_NUM] = n; n 134 drivers/net/can/usb/peak_usb/pcan_usb.c f, n, err); n 141 drivers/net/can/usb/peak_usb/pcan_usb.c static int pcan_usb_wait_rsp(struct peak_usb_device *dev, u8 f, u8 n, u8 *p) n 151 drivers/net/can/usb/peak_usb/pcan_usb.c err = pcan_usb_send_cmd(dev, f, n, NULL); n 161 drivers/net/can/usb/peak_usb/pcan_usb.c "waiting rsp f=0x%x n=0x%x failure: %d\n", f, n, err); n 398 drivers/net/can/usb/peak_usb/pcan_usb.c static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n, n 406 drivers/net/can/usb/peak_usb/pcan_usb.c if (n == PCAN_USB_ERROR_QOVR) n 414 drivers/net/can/usb/peak_usb/pcan_usb.c if (n & PCAN_USB_ERROR_BUS_LIGHT) { n 421 drivers/net/can/usb/peak_usb/pcan_usb.c if (n & PCAN_USB_ERROR_BUS_HEAVY) { n 425 drivers/net/can/usb/peak_usb/pcan_usb.c if (n & PCAN_USB_ERROR_BUS_OFF) { n 429 drivers/net/can/usb/peak_usb/pcan_usb.c if (n & (PCAN_USB_ERROR_RXQOVR | PCAN_USB_ERROR_QOVR)) { n 437 drivers/net/can/usb/peak_usb/pcan_usb.c if ((n & PCAN_USB_ERROR_BUS_LIGHT) == 0) { n 445 drivers/net/can/usb/peak_usb/pcan_usb.c if (n & PCAN_USB_ERROR_BUS_OFF) { n 449 drivers/net/can/usb/peak_usb/pcan_usb.c if (n & PCAN_USB_ERROR_BUS_LIGHT) { n 453 drivers/net/can/usb/peak_usb/pcan_usb.c if (n & (PCAN_USB_ERROR_RXQOVR | PCAN_USB_ERROR_QOVR)) { n 462 drivers/net/can/usb/peak_usb/pcan_usb.c if ((n & PCAN_USB_ERROR_BUS_HEAVY) == 0) { n 543 drivers/net/can/usb/peak_usb/pcan_usb.c u8 f, n; n 551 drivers/net/can/usb/peak_usb/pcan_usb.c n = mc->ptr[PCAN_USB_CMD_NUM]; n 568 drivers/net/can/usb/peak_usb/pcan_usb.c err = pcan_usb_decode_error(mc, n, status_len); n 591 drivers/net/can/usb/peak_usb/pcan_usb.c if (n & PCAN_USB_ERROR_TXQFULL) n 316 drivers/net/can/usb/peak_usb/pcan_usb_fd.c int i, n; n 320 drivers/net/can/usb/peak_usb/pcan_usb_fd.c n = 1 << PUCAN_FLTSTD_ROW_IDX_BITS; n 325 drivers/net/can/usb/peak_usb/pcan_usb_fd.c n = idx + 1; n 328 drivers/net/can/usb/peak_usb/pcan_usb_fd.c for (i = idx; i < n; i++, cmd++) { n 74 drivers/net/can/xilinx_can.c #define XCAN_TXMSG_FRAME_OFFSET(n) (XCAN_TXMSG_BASE_OFFSET + \ n 75 drivers/net/can/xilinx_can.c XCAN_CANFD_FRAME_SIZE * (n)) n 76 drivers/net/can/xilinx_can.c #define XCAN_RXMSG_FRAME_OFFSET(n) (XCAN_RXMSG_BASE_OFFSET + \ n 77 drivers/net/can/xilinx_can.c XCAN_CANFD_FRAME_SIZE * (n)) n 78 drivers/net/can/xilinx_can.c #define XCAN_RXMSG_2_FRAME_OFFSET(n) (XCAN_RXMSG_2_BASE_OFFSET + \ n 79 drivers/net/can/xilinx_can.c XCAN_CANFD_FRAME_SIZE * (n)) n 308 drivers/net/dsa/b53/b53_regs.h #define B53_ARLTBL_MAC_VID_ENTRY(n) ((0x10 * (n)) + 0x10) n 320 drivers/net/dsa/b53/b53_regs.h #define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x18) n 1159 drivers/net/dsa/bcm_sf2_cfp.c struct cfp_rule *rule, *n; n 1164 drivers/net/dsa/bcm_sf2_cfp.c list_for_each_entry_safe_reverse(rule, n, &priv->cfp.rules_list, next) n 129 drivers/net/dsa/mv88e6xxx/chip.c unsigned int n = d->hwirq; n 131 drivers/net/dsa/mv88e6xxx/chip.c chip->g1_irq.masked |= (1 << n); n 137 drivers/net/dsa/mv88e6xxx/chip.c unsigned int n = d->hwirq; n 139 drivers/net/dsa/mv88e6xxx/chip.c chip->g1_irq.masked &= ~(1 << n); n 146 drivers/net/dsa/mv88e6xxx/chip.c unsigned int n; n 159 drivers/net/dsa/mv88e6xxx/chip.c for (n = 0; n < chip->g1_irq.nirqs; ++n) { n 160 drivers/net/dsa/mv88e6xxx/chip.c if (reg & (1 << n)) { n 162 drivers/net/dsa/mv88e6xxx/chip.c n); n 981 
drivers/net/dsa/mv88e6xxx/global2.c unsigned int n = d->hwirq; n 983 drivers/net/dsa/mv88e6xxx/global2.c chip->g2_irq.masked |= (1 << n); n 989 drivers/net/dsa/mv88e6xxx/global2.c unsigned int n = d->hwirq; n 991 drivers/net/dsa/mv88e6xxx/global2.c chip->g2_irq.masked &= ~(1 << n); n 999 drivers/net/dsa/mv88e6xxx/global2.c unsigned int n; n 1009 drivers/net/dsa/mv88e6xxx/global2.c for (n = 0; n < 16; ++n) { n 1010 drivers/net/dsa/mv88e6xxx/global2.c if (reg & (1 << n)) { n 1011 drivers/net/dsa/mv88e6xxx/global2.c sub_irq = irq_find_mapping(chip->g2_irq.domain, n); n 351 drivers/net/ethernet/3com/3c509.c static int el3_isa_suspend(struct device *dev, unsigned int n, n 358 drivers/net/ethernet/3com/3c509.c static int el3_isa_resume(struct device *dev, unsigned int n) n 106 drivers/net/ethernet/3com/3c574_cs.c #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) n 153 drivers/net/ethernet/3com/3c589_cs.c #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) n 1316 drivers/net/ethernet/3com/3c59x.c unsigned short n; n 1331 drivers/net/ethernet/3com/3c59x.c n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010; n 1333 drivers/net/ethernet/3com/3c59x.c n |= 0x10; n 1335 drivers/net/ethernet/3com/3c59x.c n |= 0x4000; n 1336 drivers/net/ethernet/3com/3c59x.c window_write16(vp, n, 2, Wn2_ResetOptions); n 1639 drivers/net/ethernet/3com/3c59x.c unsigned short n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010; n 1641 drivers/net/ethernet/3com/3c59x.c n |= 0x10; n 1643 drivers/net/ethernet/3com/3c59x.c n |= 0x4000; n 1644 drivers/net/ethernet/3com/3c59x.c window_write16(vp, n, 2, Wn2_ResetOptions); n 78 drivers/net/ethernet/8390/pcnet_cs.c #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) n 796 drivers/net/ethernet/agere/et131x.c static inline void add_10bit(u32 *v, int n) n 798 drivers/net/ethernet/agere/et131x.c *v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP); n 801 drivers/net/ethernet/agere/et131x.c static inline void add_12bit(u32 *v, int n) n 803 drivers/net/ethernet/agere/et131x.c *v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP); n 1091 drivers/net/ethernet/altera/altera_tse_main.c int n; n 1133 drivers/net/ethernet/altera/altera_tse_main.c for (n = 0; n < SGMII_PCS_SW_RESET_TIMEOUT; n++) { n 125 drivers/net/ethernet/amazon/ena/ena_netdev.h #define ENA_RX_RING_IDX_ADD(idx, n, ring_size) \ n 126 drivers/net/ethernet/amazon/ena/ena_netdev.h (((idx) + (n)) & ((ring_size) - 1)) n 589 drivers/net/ethernet/amd/am79c961a.c u_int status, n = 100; n 614 drivers/net/ethernet/amd/am79c961a.c } while (--n && status & (CSR0_RINT | CSR0_TINT)); n 87 drivers/net/ethernet/amd/atarilance.c #define DPRINTK(n,a) \ n 89 drivers/net/ethernet/amd/atarilance.c if (lance_debug >= n) \ n 391 drivers/net/ethernet/amd/nmclan_cs.c #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) n 79 drivers/net/ethernet/amd/sun3lance.c #define DPRINTK(n,a) \ n 81 drivers/net/ethernet/amd/sun3lance.c if (lance_debug >= n) \ n 13 drivers/net/ethernet/apm/xgene-v2/ethtool.c #define XGE_EXTD_STAT(m, n) \ n 16 drivers/net/ethernet/apm/xgene-v2/ethtool.c n, \ n 824 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U); n 826 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c if (n < self->aq_link_status.mbps) { n 632 drivers/net/ethernet/atheros/ag71xx.c int sent = 0, bytes_compl = 0, n = 0; n 642 drivers/net/ethernet/atheros/ag71xx.c while (ring->dirty + n != ring->curr) { n 647 
drivers/net/ethernet/atheros/ag71xx.c i = (ring->dirty + n) & ring_mask; n 664 drivers/net/ethernet/atheros/ag71xx.c n++; n 674 drivers/net/ethernet/atheros/ag71xx.c ring->dirty += n; n 676 drivers/net/ethernet/atheros/ag71xx.c while (n > 0) { n 678 drivers/net/ethernet/atheros/ag71xx.c n--; n 1330 drivers/net/ethernet/atheros/ag71xx.c int i, n, ring_min, ring_mask, ring_size; n 1352 drivers/net/ethernet/atheros/ag71xx.c n = ag71xx_fill_dma_desc(ring, (u32)dma_addr, n 1354 drivers/net/ethernet/atheros/ag71xx.c if (n < 0) n 1357 drivers/net/ethernet/atheros/ag71xx.c i = (ring->curr + n - 1) & ring_mask; n 1366 drivers/net/ethernet/atheros/ag71xx.c ring->curr += n; n 852 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c u32 i, j, k, n; n 876 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c for (n = 0; n < size; n++) { n 877 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c addr = read_addr[k].addr + n*4; n 2516 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1); n 2533 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp); n 6598 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c int n, rc; n 6606 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c n = 10; n 6611 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c while ((zbuf[n++] != 0) && (n < len)); n 6613 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n; n 6614 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c bp->strm->avail_in = len - n; n 13462 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n) n 13468 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c for (i = 0; i < n/4; i++) n 13476 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n) n 13482 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c for (i = 0, j = 0; i < n/8; i++, j += 2) { n 13493 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n) n 13499 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c for (i = 0, j = 0; i < n/sizeof(struct iro); i++) { n 13513 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n) n 13519 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c for (i = 0; i < n/2; i++) n 607 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c int n, u8 *base, u8 stride, u8 size) n 621 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c if (counter < n) { n 3965 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c int n) n 3967 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c o->registry.exact_match.num_macs_set = n; n 3971 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c int n) n 3973 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c o->registry.aprox_match.num_bins_set = n; n 329 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h struct bnx2x_vlan_mac_obj *o, int n, u8 *base, n 653 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h void (*set_registry_size)(struct bnx2x_mcast_obj *o, int n); n 1071 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c int i, n; n 1075 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) { n 1080 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c vf->bars[n].bar = start + size * vf->abs_vfid; n 1081 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c vf->bars[n].size = size; n 4101 drivers/net/ethernet/broadcom/bnxt/bnxt.c static int 
bnxt_cp_num_to_irq_num(struct bnxt *bp, int n) n 4103 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct bnxt_napi *bnapi = bp->bnapi[n]; n 4751 drivers/net/ethernet/broadcom/bnxt/bnxt.c u32 nsegs, n, segs = 0, flags; n 4772 drivers/net/ethernet/broadcom/bnxt/bnxt.c n = BNXT_RX_PAGE_SIZE / mss; n 4773 drivers/net/ethernet/broadcom/bnxt/bnxt.c nsegs = (MAX_SKB_FRAGS - 1) * n; n 4775 drivers/net/ethernet/broadcom/bnxt/bnxt.c n = mss / BNXT_RX_PAGE_SIZE; n 4777 drivers/net/ethernet/broadcom/bnxt/bnxt.c n++; n 4778 drivers/net/ethernet/broadcom/bnxt/bnxt.c nsegs = (MAX_SKB_FRAGS - n) / n; n 9242 drivers/net/ethernet/broadcom/bnxt/bnxt.c int n = pf->active_vfs; n 9244 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (n) n 9245 drivers/net/ethernet/broadcom/bnxt/bnxt.c bnxt_cfg_hw_sriov(bp, &n, true); n 10162 drivers/net/ethernet/broadcom/bnxt/bnxt.c int n = 0, tmo; n 10167 drivers/net/ethernet/broadcom/bnxt/bnxt.c n = bnxt_get_registered_vfs(bp); n 10168 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (n < 0) { n 10170 drivers/net/ethernet/broadcom/bnxt/bnxt.c n); n 10174 drivers/net/ethernet/broadcom/bnxt/bnxt.c } else if (n > 0) { n 10175 drivers/net/ethernet/broadcom/bnxt/bnxt.c u16 vf_tmo_dsecs = n * 10; n 10659 drivers/net/ethernet/broadcom/bnxt/bnxt.c int n = bnxt_get_registered_vfs(bp); n 10662 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (n < 0) { n 10664 drivers/net/ethernet/broadcom/bnxt/bnxt.c n, jiffies_to_msecs(jiffies - n 10667 drivers/net/ethernet/broadcom/bnxt/bnxt.c } else if (n > 0) { n 10673 drivers/net/ethernet/broadcom/bnxt/bnxt.c n); n 640 drivers/net/ethernet/broadcom/bnxt/bnxt.h #define ADV_RAW_CMP(idx, n) ((idx) + (n)) n 667 drivers/net/ethernet/broadcom/bnxt/bnxt.h #define HWRM_TOTAL_TIMEOUT(n) (((n) <= HWRM_SHORT_TIMEOUT_COUNTER) ? \ n 668 drivers/net/ethernet/broadcom/bnxt/bnxt.h ((n) * HWRM_SHORT_MIN_TIMEOUT) : \ n 670 drivers/net/ethernet/broadcom/bnxt/bnxt.h ((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT)) n 312 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c int rc, n, i; n 317 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c n = IEEE_8021QAZ_MAX_TCS; n 318 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c data_len = sizeof(*data) + sizeof(*fw_app) * n; n 340 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c n = data->count; n 341 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c for (i = 0; i < n; i++, fw_app++) { n 353 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c n++; n 362 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c if (n == i) n 365 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c len = (n - 1 - i) * sizeof(*fw_app); n 368 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c n--; n 369 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c memset(fw_app + n, 0, sizeof(*fw_app)); n 371 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c data->count = n; n 372 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c data->len = cpu_to_le16(sizeof(*fw_app) * n); n 377 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c set.data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n); n 192 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c #define BNXT_RX_STATS_EXT_PFC_ENTRY(n) \ n 193 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us), \ n 194 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions) n 196 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c #define BNXT_TX_STATS_EXT_PFC_ENTRY(n) \ n 197 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us), \ n 198 
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions) n 220 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c #define BNXT_RX_STATS_EXT_COS_ENTRY(n) \ n 221 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n), \ n 222 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n) n 224 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c #define BNXT_TX_STATS_EXT_COS_ENTRY(n) \ n 225 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n), \ n 226 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n) n 248 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c #define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n) \ n 249 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n), \ n 250 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n) n 262 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c #define BNXT_RX_STATS_PRI_ENTRY(counter, n) \ n 264 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c __stringify(counter##_pri##n) } n 266 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c #define BNXT_TX_STATS_PRI_ENTRY(counter, n) \ n 268 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c __stringify(counter##_pri##n) } n 591 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c long n = bnxt_rx_bytes_pri_arr[i].base_off + n 594 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c buf[j] = le64_to_cpu(*(rx_port_stats_ext + n)); n 597 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c long n = bnxt_rx_pkts_pri_arr[i].base_off + n 600 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c buf[j] = le64_to_cpu(*(rx_port_stats_ext + n)); n 603 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c long n = bnxt_tx_bytes_pri_arr[i].base_off + n 606 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c buf[j] = le64_to_cpu(*(tx_port_stats_ext + n)); n 609 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c long n = bnxt_tx_pkts_pri_arr[i].base_off + n 612 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c buf[j] = le64_to_cpu(*(tx_port_stats_ext + n)); n 588 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c u16 n = pf->active_vfs; n 590 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n; n 591 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n; n 593 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c n; n 594 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n; n 596 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n; n 597 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n; n 599 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c hw_resc->max_irqs -= vf_msix * n; n 14 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h #define BNXT_FWD_RESP_SIZE_ERR(n) \ n 15 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h ((offsetof(struct hwrm_fwd_resp_input, encap_resp) + n) > \ n 18 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h #define BNXT_EXEC_FWD_RESP_SIZE_ERR(n) \ n 19 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h ((offsetof(struct hwrm_exec_fwd_resp_input, encap_request) + n) >\ n 22 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h #define BNXT_REJ_FWD_RESP_SIZE_ERR(n) \ n 23 
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h ((offsetof(struct hwrm_reject_fwd_resp_input, encap_request) + n) >\ n 1244 drivers/net/ethernet/broadcom/cnic.c int i, j, n, ret, pages; n 1283 drivers/net/ethernet/broadcom/cnic.c n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; n 1285 drivers/net/ethernet/broadcom/cnic.c long off = CNIC_KWQ16_DATA_SIZE * (i % n); n 1291 drivers/net/ethernet/broadcom/cnic.c if ((i % n) == (n - 1)) n 1701 drivers/net/ethernet/broadcom/cnic.c int i, j, n = 2, n_max; n 1714 drivers/net/ethernet/broadcom/cnic.c req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++]; n 1809 drivers/net/ethernet/broadcom/cnic.c if (n >= n_max) n 1811 drivers/net/ethernet/broadcom/cnic.c req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++]; n 4643 drivers/net/ethernet/broadcom/cnic.c int n = (i % cp->l2_rx_ring_size) + 1; n 4645 drivers/net/ethernet/broadcom/cnic.c buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size); n 5009 drivers/net/ethernet/broadcom/cnic.c int n = (i % cp->l2_rx_ring_size) + 1; n 5011 drivers/net/ethernet/broadcom/cnic.c buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size); n 91 drivers/net/ethernet/broadcom/sb1250-mac.c #define UNIT_INT(n) (K_BCM1480_INT_MAC_0 + ((n) * 2)) n 93 drivers/net/ethernet/broadcom/sb1250-mac.c #define UNIT_INT(n) (K_INT_MAC_0 + (n)) n 1675 drivers/net/ethernet/brocade/bna/bfa_ioc.c u32 n = FLASH_BLOCKING_OP_MAX; n 1678 drivers/net/ethernet/brocade/bna/bfa_ioc.c if (--n <= 0) n 1695 drivers/net/ethernet/brocade/bna/bfa_ioc.c u32 n; n 1708 drivers/net/ethernet/brocade/bna/bfa_ioc.c n = s / fifo_sz; n 1709 drivers/net/ethernet/brocade/bna/bfa_ioc.c l = (n + 1) * fifo_sz - s; n 1720 drivers/net/ethernet/brocade/bna/bfa_ioc.c n = BFA_FLASH_BLOCKING_OP_MAX; n 1722 drivers/net/ethernet/brocade/bna/bfa_ioc.c if (--n <= 0) { n 25 drivers/net/ethernet/brocade/bna/cna_fwimg.c u32 n; n 39 drivers/net/ethernet/brocade/bna/cna_fwimg.c for (n = 0; n < *bfi_image_size; n++) n 40 drivers/net/ethernet/brocade/bna/cna_fwimg.c le32_to_cpus(*bfi_image + n); n 34 drivers/net/ethernet/calxeda/xgmac.c #define XGMAC_HASH(n) (0x00000300 + (n) * 4) /* HASH table regs */ n 396 drivers/net/ethernet/calxeda/xgmac.c #define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1)) n 1087 drivers/net/ethernet/cavium/liquidio/lio_ethtool.c struct napi_struct *napi, *n; n 1108 drivers/net/ethernet/cavium/liquidio/lio_ethtool.c list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) n 1248 drivers/net/ethernet/cavium/liquidio/lio_main.c struct napi_struct *napi, *n; n 1265 drivers/net/ethernet/cavium/liquidio/lio_main.c list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) n 1275 drivers/net/ethernet/cavium/liquidio/lio_main.c list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) n 1818 drivers/net/ethernet/cavium/liquidio/lio_main.c struct napi_struct *napi, *n; n 1823 drivers/net/ethernet/cavium/liquidio/lio_main.c list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) n 1878 drivers/net/ethernet/cavium/liquidio/lio_main.c struct napi_struct *napi, *n; n 1917 drivers/net/ethernet/cavium/liquidio/lio_main.c list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) n 661 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c struct napi_struct *napi, *n; n 678 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) n 687 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) n 911 
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c struct napi_struct *napi, *n; n 916 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) n 955 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c struct napi_struct *napi, *n; n 978 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) n 1270 drivers/net/ethernet/cavium/thunder/nicvf_queues.c long n; /* size of the current piece of payload */ n 1287 drivers/net/ethernet/cavium/thunder/nicvf_queues.c n = p_len - p_used; n 1288 drivers/net/ethernet/cavium/thunder/nicvf_queues.c if (n > f_size - f_used) n 1289 drivers/net/ethernet/cavium/thunder/nicvf_queues.c n = f_size - f_used; n 1290 drivers/net/ethernet/cavium/thunder/nicvf_queues.c f_used += n; n 1291 drivers/net/ethernet/cavium/thunder/nicvf_queues.c p_used += n; n 617 drivers/net/ethernet/chelsio/cxgb/sge.c static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n) n 623 drivers/net/ethernet/chelsio/cxgb/sge.c q->in_use -= n; n 625 drivers/net/ethernet/chelsio/cxgb/sge.c while (n--) { n 114 drivers/net/ethernet/chelsio/cxgb/vsc7326_reg.h #define REG_SPI4_DBG_CNT(n) CRA(0x5,0x0,0x10+n) /* Debug counters 0-9 */ n 645 drivers/net/ethernet/chelsio/cxgb3/common.h int n, unsigned int offset); n 701 drivers/net/ethernet/chelsio/cxgb3/common.h unsigned int n, unsigned int *valp); n 702 drivers/net/ethernet/chelsio/cxgb3/common.h int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n, n 714 drivers/net/ethernet/chelsio/cxgb3/common.h int t3_mac_set_num_ucast(struct cmac *mac, int n); n 384 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1; n 386 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c snprintf(adap->msix_info[0].desc, n, "%s", adap->name); n 387 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c adap->msix_info[0].desc[n] = 0; n 394 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c snprintf(adap->msix_info[msi_idx].desc, n, n 396 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c adap->msix_info[msi_idx].desc[n] = 0; n 430 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c int i, n = 0; n 434 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c n += adap2pinfo(adapter, i)->nqsets; n 436 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c for (i = 0; i < n; ++i) n 444 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c unsigned long n) n 448 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) { n 481 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c int n) n 483 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c while (n--) n 484 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c dev_kfree_skb_any(skbs[n]); n 1019 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c static int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n) n 1021 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c while (n--) { n 74 drivers/net/ethernet/chelsio/cxgb3/l2t.c static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n) n 76 drivers/net/ethernet/chelsio/cxgb3/l2t.c neigh_hold(n); n 79 drivers/net/ethernet/chelsio/cxgb3/l2t.c e->neigh = n; n 286 drivers/net/ethernet/chelsio/cxgb3/sge.c unsigned int n) n 296 drivers/net/ethernet/chelsio/cxgb3/sge.c while (n--) { n 505 drivers/net/ethernet/chelsio/cxgb3/sge.c static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp) n 511 drivers/net/ethernet/chelsio/cxgb3/sge.c while 
(n--) { n 749 drivers/net/ethernet/chelsio/cxgb3/sge.c static inline unsigned int sgl_len(unsigned int n) n 752 drivers/net/ethernet/chelsio/cxgb3/sge.c return (3 * n) / 2 + (n & 1); n 762 drivers/net/ethernet/chelsio/cxgb3/sge.c static inline unsigned int flits_to_desc(unsigned int n) n 764 drivers/net/ethernet/chelsio/cxgb3/sge.c BUG_ON(n >= ARRAY_SIZE(flit_desc_map)); n 765 drivers/net/ethernet/chelsio/cxgb3/sge.c return flit_desc_map[n]; n 1876 drivers/net/ethernet/chelsio/cxgb3/sge.c struct sk_buff *skbs[], int n) n 1878 drivers/net/ethernet/chelsio/cxgb3/sge.c if (n) { n 1880 drivers/net/ethernet/chelsio/cxgb3/sge.c tdev->recv(tdev, skbs, n); n 2463 drivers/net/ethernet/chelsio/cxgb3/sge.c __be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID); n 2465 drivers/net/ethernet/chelsio/cxgb3/sge.c return (n | r->len_cq) == 0; n 85 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c int n, unsigned int offset) n 87 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c while (n--) { n 145 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n, n 154 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c if (start >= size64 || start + n > size64) n 158 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c while (n--) { n 914 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c unsigned int n, const u8 *data) n 920 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c if (addr + n > SF_SIZE || offset + n > 256) n 929 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c for (left = n; left; left -= c) { n 946 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c if (memcmp(data - n, (u8 *) buf + offset, n)) n 1181 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c unsigned int n, unsigned int *valp) n 1188 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c for ( ; !ret && n--; addr += 4) { n 2588 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c unsigned int n = mem_size / pg_size; n 2590 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c return n - n % 24; n 59 drivers/net/ethernet/chelsio/cxgb3/t3cdev.h int (*recv)(struct t3cdev *dev, struct sk_buff **skb, int n); n 256 drivers/net/ethernet/chelsio/cxgb3/xgmac.c int t3_mac_set_num_ucast(struct cmac *mac, int n) n 258 drivers/net/ethernet/chelsio/cxgb3/xgmac.c if (n > EXACT_ADDR_FILTERS) n 260 drivers/net/ethernet/chelsio/cxgb3/xgmac.c mac->nucast = n; n 134 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c int n, i; n 316 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c for (n = 0; n < i - 1; n++) n 317 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c if (meminfo_buff->avail[n].limit < n 318 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c meminfo_buff->avail[n + 1].base) n 319 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c (md++)->base = meminfo_buff->avail[n].limit; n 321 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c if (meminfo_buff->avail[n].limit) n 322 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c (md++)->base = meminfo_buff->avail[n].limit; n 324 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c n = md - meminfo_buff->mem; n 325 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c meminfo_buff->mem_c = n; n 327 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c sort(meminfo_buff->mem, n, sizeof(struct cudbg_mem_desc), n 1261 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c int i, rc, n = 0; n 1265 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c n = sizeof(t5_tp_pio_array) + n 1269 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c n = sizeof(t6_tp_pio_array) + n 1273 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c n = n / (IREG_NUM_ELEM * sizeof(u32)); n 1274 
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c size = sizeof(struct ireg_buf) * n; n 1283 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c n = sizeof(t5_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32)); n 1285 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c n = sizeof(t6_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32)); n 1287 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c for (i = 0; i < n; i++) { n 1309 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c n = sizeof(t5_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32)); n 1311 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c n = sizeof(t6_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32)); n 1313 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c for (i = 0; i < n; i++) { n 1335 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c n = sizeof(t5_tp_mib_index_array) / n 1338 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c n = sizeof(t6_tp_mib_index_array) / n 1341 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c for (i = 0; i < n ; i++) { n 1598 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c int i, rc, n; n 1601 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32)); n 1602 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c size = sizeof(struct ireg_buf) * n * 2; n 1609 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c for (i = 0; i < n; i++) { n 1627 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c n = sizeof(t5_pcie_cdbg_array) / (IREG_NUM_ELEM * sizeof(u32)); n 1628 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c for (i = 0; i < n; i++) { n 1654 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c int i, rc, n; n 1657 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32)); n 1658 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c size = sizeof(struct ireg_buf) * n * 2; n 1665 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c for (i = 0; i < n; i++) { n 1683 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c n = sizeof(t5_pm_tx_array) / (IREG_NUM_ELEM * sizeof(u32)); n 1684 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c for (i = 0; i < n; i++) { n 1802 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c int i, rc, n; n 1805 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32)); n 1811 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c for (i = 0; i < n; i++) { n 2248 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c u32 size = 0, i, n, total_size = 0; n 2252 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c n = padap->params.arch.mps_tcam_size; n 2253 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c size = sizeof(struct cudbg_mps_tcam) * n; n 2259 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c for (i = 0; i < n; i++) { n 2569 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c int i, rc, n; n 2575 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32)); n 2576 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c size = sizeof(struct ireg_buf) * n * 2; n 2582 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c for (i = 0; i < n; i++) { n 2596 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c n = sizeof(t6_ma_ireg_array2) / (IREG_NUM_ELEM * sizeof(u32)); n 2597 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c for (i = 0; i < n; i++) { n 2688 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c int i, rc, n; n 2691 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c n = sizeof(t5_up_cim_reg_array) / n 2694 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c n = sizeof(t6_up_cim_reg_array) / n 2699 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c size = 
sizeof(struct ireg_buf) * n; n 2705 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c for (i = 0; i < n; i++) { n 2878 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c int i, rc, n; n 2884 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32)); n 2885 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c size = sizeof(struct ireg_buf) * n; n 2891 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c for (i = 0; i < n; i++) { n 1408 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q); n 1688 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h int start, int n, const u16 *rspq, unsigned int nrspq); n 1709 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h size_t n); n 1711 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h size_t n); n 1712 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n, n 1714 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n, n 1893 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h unsigned int n, bool unmap); n 1904 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n); n 72 drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c u32 value, n = 0, len = 0; n 190 drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c n = sizeof(t5_tp_pio_array) + n 195 drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c n = sizeof(t6_tp_pio_array) + n 202 drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c n = n / (IREG_NUM_ELEM * sizeof(u32)); n 203 drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c len = sizeof(struct ireg_buf) * n; n 227 drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32)); n 228 drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c len = sizeof(struct ireg_buf) * n * 2; n 231 drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32)); n 232 drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c len = sizeof(struct ireg_buf) * n * 2; n 260 drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c n = sizeof(t6_ma_ireg_array) / n 262 drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c len = sizeof(struct ireg_buf) * n * 2; n 270 drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c n = 0; n 272 drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c n = sizeof(t5_up_cim_reg_array) / n 275 drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c n = sizeof(t6_up_cim_reg_array) / n 277 drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c len = sizeof(struct ireg_buf) * n; n 287 drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c n = sizeof(t6_hma_ireg_array) / n 289 drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c len = sizeof(struct ireg_buf) * n; n 2265 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c #define G_PFnLKPIDX(map, n) \ n 2266 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c (((map) >> PF1LKPIDX_S*(n)) & PF0LKPIDX_M) n 2267 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c #define G_PFnMSKSIZE(mask, n) \ n 2268 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c (((mask) >> PF1MSKSIZE_S*(n)) & PF1MSKSIZE_M) n 2666 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c int i, n, r = (uintptr_t)v - 1; n 2691 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c for (i = 0; i < n; ++i) \ n 2708 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c n = min(4, s->ethqsets - 4 * r); n 2768 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c n = min(4, utxq_info->ntxq - 4 * r); n 
2786 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c n = min(4, urxq_info->nrxq - 4 * r); n 2816 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c n = min(4, urxq_info->nciq - 4 * r); n 2838 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c n = min(4, urxq_info->nrxq - 4 * r); n 2864 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c n = min(4, urxq_info->nrxq - 4 * r); n 2890 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c n = min(4, urxq_info->nrxq - 4 * r); n 2919 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c n = min(4, utxq_info->ntxq - 4 * r); n 2948 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c n = min(4, adap->params.nports - 4 * r); n 1293 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c unsigned int n = pi->rss_size; n 1299 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c while (n--) n 1300 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c p[n] = pi->rss[n]; n 691 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc); n 694 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name); n 697 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", n 706 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d", n 5208 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c static void reduce_ethqs(struct adapter *adap, int n) n 5213 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c while (n < adap->sge.ethqsets) n 5219 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c if (adap->sge.ethqsets <= n) n 5224 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c n = 0; n 5227 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c pi->first_qset = n; n 5228 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c n += pi->nqsets; n 231 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c static void t4_free_uld_rxqs(struct adapter *adap, int n, n 234 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c for ( ; n; n--, q++) { n 408 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c int n = sizeof(adap->msix_info_ulds[0].desc); n 414 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d", n 126 drivers/net/ethernet/chelsio/cxgb4/l2t.c static void neigh_replace(struct l2t_entry *e, struct neighbour *n) n 128 drivers/net/ethernet/chelsio/cxgb4/l2t.c neigh_hold(n); n 131 drivers/net/ethernet/chelsio/cxgb4/l2t.c e->neigh = n; n 371 drivers/net/ethernet/chelsio/cxgb4/sge.c unsigned int n, bool unmap) n 378 drivers/net/ethernet/chelsio/cxgb4/sge.c while (n--) { n 494 drivers/net/ethernet/chelsio/cxgb4/sge.c static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n) n 496 drivers/net/ethernet/chelsio/cxgb4/sge.c while (n--) { n 591 drivers/net/ethernet/chelsio/cxgb4/sge.c static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, n 616 drivers/net/ethernet/chelsio/cxgb4/sge.c while (n) { n 643 drivers/net/ethernet/chelsio/cxgb4/sge.c n--; n 647 drivers/net/ethernet/chelsio/cxgb4/sge.c while (n--) { n 743 drivers/net/ethernet/chelsio/cxgb4/sge.c static inline unsigned int sgl_len(unsigned int n) n 761 drivers/net/ethernet/chelsio/cxgb4/sge.c n--; n 762 drivers/net/ethernet/chelsio/cxgb4/sge.c return (3 * n) / 2 + (n & 1) + 2; n 772 drivers/net/ethernet/chelsio/cxgb4/sge.c static inline unsigned int flits_to_desc(unsigned int n) n 774 drivers/net/ethernet/chelsio/cxgb4/sge.c BUG_ON(n > SGE_MAX_WR_LEN / 8); n 775 drivers/net/ethernet/chelsio/cxgb4/sge.c return 
DIV_ROUND_UP(n, 8); n 960 drivers/net/ethernet/chelsio/cxgb4/sge.c inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) n 971 drivers/net/ethernet/chelsio/cxgb4/sge.c u32 val = PIDX_V(n); n 982 drivers/net/ethernet/chelsio/cxgb4/sge.c q->db_pidx_inc += n; n 986 drivers/net/ethernet/chelsio/cxgb4/sge.c u32 val = PIDX_T5_V(n); n 1000 drivers/net/ethernet/chelsio/cxgb4/sge.c if (n == 1 && q->bar2_qid == 0) { n 1173 drivers/net/ethernet/chelsio/cxgb4/sge.c static inline void txq_advance(struct sge_txq *q, unsigned int n) n 1175 drivers/net/ethernet/chelsio/cxgb4/sge.c q->in_use += n; n 1176 drivers/net/ethernet/chelsio/cxgb4/sge.c q->pidx += n; n 2683 drivers/net/ethernet/chelsio/cxgb4/sge.c int n; n 2686 drivers/net/ethernet/chelsio/cxgb4/sge.c for (p = gl->frags, n = gl->nfrags - 1; n--; p++) n 3994 drivers/net/ethernet/chelsio/cxgb4/sge.c void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q) n 3996 drivers/net/ethernet/chelsio/cxgb4/sge.c for ( ; n; n--, q++) n 3080 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c unsigned int n, const u8 *data) n 3086 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE) n 3095 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c for (left = n; left; left -= c) { n 3115 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c if (memcmp(data - n, (u8 *)buf + offset, n)) { n 5158 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c int start, int n, const u16 *rspq, unsigned int nrspq) n 5172 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c while (n > 0) { n 5173 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c int nq = min(n, 32); n 5180 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c n -= nq; n 9756 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n) n 9762 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c if (qid > 5 || (n & 3)) n 9766 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c if (n > nwords) n 9767 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c n = nwords; n 9774 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c for (i = 0; i < n; i++, addr++) { n 9798 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n) n 9805 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c if ((qid > (cim_num_obq - 1)) || (n & 3)) n 9814 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c if (n > nwords) n 9815 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c n = nwords; n 9817 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c for (i = 0; i < n; i++, addr++) { n 9839 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n, n 9847 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c for ( ; !ret && n--; addr += 4) { n 9866 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n, n 9874 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c for ( ; !ret && n--; addr += 4) { n 10142 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c int ret, i, n, cfg_addr; n 10173 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c n = size - i; n 10175 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c n = SF_PAGE_SIZE; n 10176 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c ret = t4_write_flash(adap, addr, n, cfg_data); n 2065 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c for (qs = 0; qs < n; ++qs) \ n 2076 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c int n = min(QPL, adapter->sge.ethqsets - QPL * r); n 2218 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c for (qs = 0; qs < n; 
++qs) \ n 2233 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c int n = min(QPL, adapter->sge.ethqsets - QPL * r); n 2827 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c static void reduce_ethqs(struct adapter *adapter, int n) n 2836 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c BUG_ON(n < adapter->params.nports); n 2837 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c while (n < adapter->sge.ethqsets) n 2843 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c if (adapter->sge.ethqsets <= n) n 2851 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c n = 0; n 2854 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c pi->first_qset = n; n 2855 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c n += pi->nqsets; n 378 drivers/net/ethernet/chelsio/cxgb4vf/sge.c unsigned int n, bool unmap) n 387 drivers/net/ethernet/chelsio/cxgb4vf/sge.c while (n--) { n 473 drivers/net/ethernet/chelsio/cxgb4vf/sge.c static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n) n 475 drivers/net/ethernet/chelsio/cxgb4vf/sge.c while (n--) { n 605 drivers/net/ethernet/chelsio/cxgb4vf/sge.c int n, gfp_t gfp) n 619 drivers/net/ethernet/chelsio/cxgb4vf/sge.c BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT); n 632 drivers/net/ethernet/chelsio/cxgb4vf/sge.c while (n) { n 672 drivers/net/ethernet/chelsio/cxgb4vf/sge.c n--; n 676 drivers/net/ethernet/chelsio/cxgb4vf/sge.c while (n--) { n 789 drivers/net/ethernet/chelsio/cxgb4vf/sge.c static inline unsigned int sgl_len(unsigned int n) n 808 drivers/net/ethernet/chelsio/cxgb4vf/sge.c n--; n 809 drivers/net/ethernet/chelsio/cxgb4vf/sge.c return (3 * n) / 2 + (n & 1) + 2; n 965 drivers/net/ethernet/chelsio/cxgb4vf/sge.c int n) n 976 drivers/net/ethernet/chelsio/cxgb4vf/sge.c u32 val = PIDX_V(n); n 981 drivers/net/ethernet/chelsio/cxgb4vf/sge.c u32 val = PIDX_T5_V(n); n 995 drivers/net/ethernet/chelsio/cxgb4vf/sge.c if (n == 1 && tq->bar2_qid == 0) { n 1142 drivers/net/ethernet/chelsio/cxgb4vf/sge.c static inline void txq_advance(struct sge_txq *tq, unsigned int n) n 1144 drivers/net/ethernet/chelsio/cxgb4vf/sge.c tq->in_use += n; n 1145 drivers/net/ethernet/chelsio/cxgb4vf/sge.c tq->pidx += n; n 1249 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c int start, int n, const u16 *rspq, int nrspq) n 1271 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c while (n > 0) { n 1273 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c int nq = min(n, 32); n 1287 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c n -= nq; n 98 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c struct neighbour *n; n 105 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c n = dst_neigh_lookup(&rt->dst, &peer_ip); n 106 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c if (!n) n 108 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c if (!cxgb_our_interface(lldi, get_real_dev, n->dev) && n 109 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c !(n->dev->flags & IFF_LOOPBACK)) { n 110 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c neigh_release(n); n 114 drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c neigh_release(n); n 94 drivers/net/ethernet/cisco/enic/enic_clsf.c struct enic_rfs_fltr_node *n; n 97 drivers/net/ethernet/cisco/enic/enic_clsf.c hlist_for_each_entry_safe(n, tmp, hhead, node) { n 98 drivers/net/ethernet/cisco/enic/enic_clsf.c enic_delfltr(enic, n->fltr_id); n 99 drivers/net/ethernet/cisco/enic/enic_clsf.c hlist_del(&n->node); n 100 drivers/net/ethernet/cisco/enic/enic_clsf.c kfree(n); n 114 drivers/net/ethernet/cisco/enic/enic_clsf.c struct enic_rfs_fltr_node *n; n 117 
drivers/net/ethernet/cisco/enic/enic_clsf.c hlist_for_each_entry_safe(n, tmp, hhead, node) n 118 drivers/net/ethernet/cisco/enic/enic_clsf.c if (n->fltr_id == fltr_id) n 119 drivers/net/ethernet/cisco/enic/enic_clsf.c return n; n 136 drivers/net/ethernet/cisco/enic/enic_clsf.c struct enic_rfs_fltr_node *n; n 139 drivers/net/ethernet/cisco/enic/enic_clsf.c hlist_for_each_entry_safe(n, tmp, hhead, node) { n 140 drivers/net/ethernet/cisco/enic/enic_clsf.c res = rps_may_expire_flow(enic->netdev, n->rq_id, n 141 drivers/net/ethernet/cisco/enic/enic_clsf.c n->flow_id, n->fltr_id); n 143 drivers/net/ethernet/cisco/enic/enic_clsf.c res = enic_delfltr(enic, n->fltr_id); n 146 drivers/net/ethernet/cisco/enic/enic_clsf.c hlist_del(&n->node); n 147 drivers/net/ethernet/cisco/enic/enic_clsf.c kfree(n); n 175 drivers/net/ethernet/cisco/enic/enic_clsf.c struct enic_rfs_fltr_node *n; n 189 drivers/net/ethernet/cisco/enic/enic_clsf.c n = htbl_key_search(&enic->rfs_h.ht_head[tbl_idx], &keys); n 191 drivers/net/ethernet/cisco/enic/enic_clsf.c if (n) { /* entry already present */ n 192 drivers/net/ethernet/cisco/enic/enic_clsf.c if (rxq_index == n->rq_id) { n 211 drivers/net/ethernet/cisco/enic/enic_clsf.c res = enic_delfltr(enic, n->fltr_id); n 216 drivers/net/ethernet/cisco/enic/enic_clsf.c hlist_del(&n->node); n 229 drivers/net/ethernet/cisco/enic/enic_clsf.c ret = enic_delfltr(enic, n->fltr_id); n 240 drivers/net/ethernet/cisco/enic/enic_clsf.c d->fltr_id = n->fltr_id; n 248 drivers/net/ethernet/cisco/enic/enic_clsf.c n->rq_id = rxq_index; n 249 drivers/net/ethernet/cisco/enic/enic_clsf.c n->fltr_id = res; n 250 drivers/net/ethernet/cisco/enic/enic_clsf.c n->flow_id = flow_id; n 260 drivers/net/ethernet/cisco/enic/enic_clsf.c n = kmalloc(sizeof(*n), GFP_ATOMIC); n 261 drivers/net/ethernet/cisco/enic/enic_clsf.c if (!n) { n 269 drivers/net/ethernet/cisco/enic/enic_clsf.c kfree(n); n 273 drivers/net/ethernet/cisco/enic/enic_clsf.c n->rq_id = rxq_index; n 274 drivers/net/ethernet/cisco/enic/enic_clsf.c n->fltr_id = res; n 275 drivers/net/ethernet/cisco/enic/enic_clsf.c n->flow_id = flow_id; n 276 drivers/net/ethernet/cisco/enic/enic_clsf.c n->keys = keys; n 277 drivers/net/ethernet/cisco/enic/enic_clsf.c INIT_HLIST_NODE(&n->node); n 278 drivers/net/ethernet/cisco/enic/enic_clsf.c hlist_add_head(&n->node, &enic->rfs_h.ht_head[tbl_idx]); n 424 drivers/net/ethernet/cisco/enic/enic_ethtool.c struct enic_rfs_fltr_node *n; n 427 drivers/net/ethernet/cisco/enic/enic_ethtool.c hlist_for_each_entry_safe(n, tmp, hhead, node) { n 430 drivers/net/ethernet/cisco/enic/enic_ethtool.c rule_locs[cnt] = n->fltr_id; n 443 drivers/net/ethernet/cisco/enic/enic_ethtool.c struct enic_rfs_fltr_node *n; n 445 drivers/net/ethernet/cisco/enic/enic_ethtool.c n = htbl_fltr_search(enic, (u16)fsp->location); n 446 drivers/net/ethernet/cisco/enic/enic_ethtool.c if (!n) n 448 drivers/net/ethernet/cisco/enic/enic_ethtool.c switch (n->keys.basic.ip_proto) { n 460 drivers/net/ethernet/cisco/enic/enic_ethtool.c fsp->h_u.tcp_ip4_spec.ip4src = flow_get_u32_src(&n->keys); n 463 drivers/net/ethernet/cisco/enic/enic_ethtool.c fsp->h_u.tcp_ip4_spec.ip4dst = flow_get_u32_dst(&n->keys); n 466 drivers/net/ethernet/cisco/enic/enic_ethtool.c fsp->h_u.tcp_ip4_spec.psrc = n->keys.ports.src; n 469 drivers/net/ethernet/cisco/enic/enic_ethtool.c fsp->h_u.tcp_ip4_spec.pdst = n->keys.ports.dst; n 472 drivers/net/ethernet/cisco/enic/enic_ethtool.c fsp->ring_cookie = n->rq_id; n 2380 drivers/net/ethernet/cisco/enic/enic_main.c unsigned int n = min_t(unsigned int, 
enic->rq_count, ENIC_RQ_MAX); n 2394 drivers/net/ethernet/cisco/enic/enic_main.c BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2); n 2395 drivers/net/ethernet/cisco/enic/enic_main.c for (i = 0; i < n + m + 2; i++) n 2403 drivers/net/ethernet/cisco/enic/enic_main.c enic->rq_count >= n && n 2405 drivers/net/ethernet/cisco/enic/enic_main.c enic->cq_count >= n + m && n 2406 drivers/net/ethernet/cisco/enic/enic_main.c enic->intr_count >= n + m + 2) { n 2409 drivers/net/ethernet/cisco/enic/enic_main.c n + m + 2, n + m + 2) > 0) { n 2411 drivers/net/ethernet/cisco/enic/enic_main.c enic->rq_count = n; n 2413 drivers/net/ethernet/cisco/enic/enic_main.c enic->cq_count = n + m; n 2414 drivers/net/ethernet/cisco/enic/enic_main.c enic->intr_count = n + m + 2; n 925 drivers/net/ethernet/cortina/gemini.h #define TOE_QUEUE_HDR_ADDR(n) (TOE_TOE_QUE_HDR_BASE + n * 32) n 930 drivers/net/ethernet/cortina/gemini.h #define INTERRUPT_QUEUE_HDR_ADDR(n) (TOE_INTR_Q_HDR_BASE + n * 8) n 4973 drivers/net/ethernet/dec/tulip/de4x5.c int i, j, k, n, limit=ARRAY_SIZE(phy_info); n 4980 drivers/net/ethernet/dec/tulip/de4x5.c for (n=0, lp->mii_cnt=0, i=1; !((i==1) && (n==1)); i=(i+1)%DE4X5_MAX_MII) { n 4982 drivers/net/ethernet/dec/tulip/de4x5.c if (i==0) n++; /* Count cycles */ n 1920 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n, n 1932 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c for (i = 0; i < n; i++) { n 1942 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c return n - drops; n 1609 drivers/net/ethernet/freescale/enetc/enetc.c int i, n, err, nvec; n 1613 drivers/net/ethernet/freescale/enetc/enetc.c n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX); n 1615 drivers/net/ethernet/freescale/enetc/enetc.c if (n < 0) n 1616 drivers/net/ethernet/freescale/enetc/enetc.c return n; n 1618 drivers/net/ethernet/freescale/enetc/enetc.c if (n != nvec) n 50 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define ENETC_PSIMSGRR_MR(n) BIT((n) + 1) /* n = VSI index */ n 51 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define ENETC_PSIVMSGRCVAR0(n) (0x210 + (n) * 0x8) /* n = VSI index */ n 52 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define ENETC_PSIVMSGRCVAR1(n) (0x214 + (n) * 0x8) n 72 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define ENETC_RBDCR(n) (0x8180 + (n) * 0x200) n 93 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define ENETC_SIMSITRV(n) (0xB00 + (n) * 0x4) n 94 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define ENETC_SIMSIRRV(n) (0xB80 + (n) * 0x4) n 145 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define ENETC_RTBLENR_LEN(n) ((n) & ~0x7) n 153 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define ENETC_PSIPMR_SET_UP(n) BIT(n) /* n = SI index */ n 154 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define ENETC_PSIPMR_SET_MP(n) BIT((n) + 16) n 159 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define ENETC_PSIPMAR0(n) (0x0100 + (n) * 0x8) /* n = SI index */ n 160 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define ENETC_PSIPMAR1(n) (0x0104 + (n) * 0x8) n 165 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define ENETC_PSIVLANR(n) (0x0240 + (n) * 4) /* n = SI index */ n 173 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define ENETC_PSICFGR0(n) (0x0940 + (n) * 0xc) /* n = SI index */ n 181 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define ENETC_PTCCBSR0(n) (0x1110 + (n) * 8) /* n = 0 to 7*/ n 182 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define ENETC_PTCCBSR1(n) (0x1114 + (n) * 8) /* n = 0 to 7*/ n 
184 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define ENETC_PRSSK(n) (0x1410 + (n) * 4) /* n = [0..9] */ n 191 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define ENETC_PSIRFSCFGR(n) (0x1814 + (n) * 4) /* n = SI index */ n 195 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define ENETC_PSIUMHFR0(n, err) (((err) ? 0x1d08 : 0x1d00) + (n) * 0x10) n 196 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define ENETC_PSIUMHFR1(n) (0x1d04 + (n) * 0x10) n 197 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define ENETC_PSIMMHFR0(n, err) (((err) ? 0x1d00 : 0x1d08) + (n) * 0x10) n 198 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define ENETC_PSIMMHFR1(n) (0x1d0c + (n) * 0x10) n 199 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define ENETC_PSIVHFR0(n) (0x1e00 + (n) * 8) /* n = SI index */ n 200 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define ENETC_PSIVHFR1(n) (0x1e04 + (n) * 8) /* n = SI index */ n 203 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define ENETC_PTCMSDUR(n) (0x2020 + (n) * 4) /* n = TC index [0..7] */ n 273 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define ENETC_PICDR(n) (0x0700 + (n) * 8) /* n = [0..3] */ n 286 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define ENETC_G_EPFBLPR(n) (0xd00 + 4 * (n)) n 330 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define enetc_bdr_rd(hw, t, n, off) \ n 331 drivers/net/ethernet/freescale/enetc/enetc_hw.h enetc_rd(hw, ENETC_BDR(t, n, off)) n 332 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define enetc_bdr_wr(hw, t, n, off, val) \ n 333 drivers/net/ethernet/freescale/enetc/enetc_hw.h enetc_wr(hw, ENETC_BDR(t, n, off), val) n 334 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define enetc_txbdr_rd(hw, n, off) enetc_bdr_rd(hw, TX, n, off) n 335 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define enetc_rxbdr_rd(hw, n, off) enetc_bdr_rd(hw, RX, n, off) n 336 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define enetc_txbdr_wr(hw, n, off, val) \ n 337 drivers/net/ethernet/freescale/enetc/enetc_hw.h enetc_bdr_wr(hw, TX, n, off, val) n 338 drivers/net/ethernet/freescale/enetc/enetc_hw.h #define enetc_rxbdr_wr(hw, n, off, val) \ n 339 drivers/net/ethernet/freescale/enetc/enetc_hw.h enetc_bdr_wr(hw, RX, n, off, val) n 34 drivers/net/ethernet/freescale/enetc/enetc_ptp.c int err, len, n; n 82 drivers/net/ethernet/freescale/enetc/enetc_ptp.c n = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX); n 83 drivers/net/ethernet/freescale/enetc/enetc_ptp.c if (n != 1) { n 327 drivers/net/ethernet/freescale/fec.h #define RCMR_CMP_CFG(v, n) (((v) & 0x7) << (n << 2)) n 79 drivers/net/ethernet/freescale/fec_ptp.c #define FEC_TCSR(n) (0x608 + n * 0x08) n 80 drivers/net/ethernet/freescale/fec_ptp.c #define FEC_TCCR(n) (0x60C + n * 0x08) n 67 drivers/net/ethernet/fujitsu/fmvj18x_cs.c #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) n 188 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c #define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1)) n 189 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c #define dma_cnt(n) ((n) >> 5) n 190 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c #define dma_byte(n) ((n) << 5) n 421 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c int i, k, n; n 437 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c n = HCLGE_RD_FIRST_STATS_NUM; n 440 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c n = HCLGE_RD_OTHER_STATS_NUM; n 443 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c for (k = 0; k < n; k++) { n 458 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c u16 
i, k, n; n 479 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c n = HCLGE_RD_FIRST_STATS_NUM; n 482 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c n = HCLGE_RD_OTHER_STATS_NUM; n 485 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c for (k = 0; k < n; k++) { n 9657 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c int i, k, n; n 9682 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num; n 9685 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c n = HCLGE_32_BIT_REG_RTN_DATANUM; n 9687 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c for (k = 0; k < n; k++) { n 9711 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c int i, k, n; n 9736 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len; n 9739 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c n = HCLGE_64_BIT_REG_RTN_DATANUM; n 9741 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c for (k = 0; k < n; k++) { n 232 drivers/net/ethernet/ibm/emac/core.c int n = dev->stop_timeout; n 234 drivers/net/ethernet/ibm/emac/core.c while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) { n 236 drivers/net/ethernet/ibm/emac/core.c --n; n 238 drivers/net/ethernet/ibm/emac/core.c if (unlikely(!n)) n 257 drivers/net/ethernet/ibm/emac/core.c int n = dev->stop_timeout; n 258 drivers/net/ethernet/ibm/emac/core.c while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) { n 260 drivers/net/ethernet/ibm/emac/core.c --n; n 262 drivers/net/ethernet/ibm/emac/core.c if (unlikely(!n)) n 281 drivers/net/ethernet/ibm/emac/core.c int n = dev->stop_timeout; n 283 drivers/net/ethernet/ibm/emac/core.c while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) { n 285 drivers/net/ethernet/ibm/emac/core.c --n; n 287 drivers/net/ethernet/ibm/emac/core.c if (unlikely(!n)) n 339 drivers/net/ethernet/ibm/emac/core.c int n = 20; n 386 drivers/net/ethernet/ibm/emac/core.c while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n) n 387 drivers/net/ethernet/ibm/emac/core.c --n; n 391 drivers/net/ethernet/ibm/emac/core.c if (!n && !try_internal_clock) { n 393 drivers/net/ethernet/ibm/emac/core.c n = 20; n 407 drivers/net/ethernet/ibm/emac/core.c if (n) { n 803 drivers/net/ethernet/ibm/emac/core.c int n, err = -ETIMEDOUT; n 816 drivers/net/ethernet/ibm/emac/core.c n = 20; n 819 drivers/net/ethernet/ibm/emac/core.c if (!--n) { n 841 drivers/net/ethernet/ibm/emac/core.c n = 200; n 844 drivers/net/ethernet/ibm/emac/core.c if (!--n) { n 875 drivers/net/ethernet/ibm/emac/core.c int n, err = -ETIMEDOUT; n 888 drivers/net/ethernet/ibm/emac/core.c n = 20; n 891 drivers/net/ethernet/ibm/emac/core.c if (!--n) { n 914 drivers/net/ethernet/ibm/emac/core.c n = 200; n 917 drivers/net/ethernet/ibm/emac/core.c if (!--n) { n 1640 drivers/net/ethernet/ibm/emac/core.c int slot = dev->ack_slot, n = 0; n 1645 drivers/net/ethernet/ibm/emac/core.c ++n; n 1659 drivers/net/ethernet/ibm/emac/core.c if (n) { n 1665 drivers/net/ethernet/ibm/emac/core.c DBG2(dev, "tx %d pkts" NL, n); n 416 drivers/net/ethernet/ibm/emac/mal.c int n; n 419 drivers/net/ethernet/ibm/emac/mal.c n = mc->ops->poll_rx(mc->dev, budget - received); n 420 drivers/net/ethernet/ibm/emac/mal.c if (n) { n 421 drivers/net/ethernet/ibm/emac/mal.c received += n; n 460 drivers/net/ethernet/ibm/emac/mal.c int n = 10; n 467 drivers/net/ethernet/ibm/emac/mal.c while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n) n 468 drivers/net/ethernet/ibm/emac/mal.c --n; n 470 drivers/net/ethernet/ibm/emac/mal.c if (unlikely(!n)) n 116 
drivers/net/ethernet/ibm/emac/mal.h #define MAL_TXCTPR(n) ((n) + 0x20) n 117 drivers/net/ethernet/ibm/emac/mal.h #define MAL_RXCTPR(n) ((n) + 0x40) n 118 drivers/net/ethernet/ibm/emac/mal.h #define MAL_RCBS(n) ((n) + 0x60) n 137 drivers/net/ethernet/ibm/emac/mal.h #define MAL_CHAN_MASK(n) (0x80000000 >> (n)) n 48 drivers/net/ethernet/ibm/emac/tah.c int n; n 52 drivers/net/ethernet/ibm/emac/tah.c n = 100; n 53 drivers/net/ethernet/ibm/emac/tah.c while ((in_be32(&p->mr) & TAH_MR_SR) && n) n 54 drivers/net/ethernet/ibm/emac/tah.c --n; n 56 drivers/net/ethernet/ibm/emac/tah.c if (unlikely(!n)) n 147 drivers/net/ethernet/intel/e1000e/netdev.c int n = 0; n 153 drivers/net/ethernet/intel/e1000e/netdev.c for (n = 0; n < 2; n++) n 154 drivers/net/ethernet/intel/e1000e/netdev.c regs[n] = __er32(hw, E1000_RXDCTL(n)); n 157 drivers/net/ethernet/intel/e1000e/netdev.c for (n = 0; n < 2; n++) n 158 drivers/net/ethernet/intel/e1000e/netdev.c regs[n] = __er32(hw, E1000_TXDCTL(n)); n 161 drivers/net/ethernet/intel/e1000e/netdev.c for (n = 0; n < 2; n++) n 162 drivers/net/ethernet/intel/e1000e/netdev.c regs[n] = __er32(hw, E1000_TARC(n)); n 980 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c u32 reta, n; n 985 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c n = indir[4 * i + j]; n 987 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c n = ethtool_rxfh_indir_default(4 * i + j, n 990 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c table[j] = n; n 310 drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h #define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \ n 311 drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } n 3427 drivers/net/ethernet/intel/i40e/i40e_main.c int i, n; n 3440 drivers/net/ethernet/intel/i40e/i40e_main.c for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { n 3441 drivers/net/ethernet/intel/i40e/i40e_main.c if (!(vsi->tc_config.enabled_tc & BIT_ULL(n))) n 3444 drivers/net/ethernet/intel/i40e/i40e_main.c qoffset = vsi->tc_config.tc_info[n].qoffset; n 3445 drivers/net/ethernet/intel/i40e/i40e_main.c qcount = vsi->tc_config.tc_info[n].qcount; n 3449 drivers/net/ethernet/intel/i40e/i40e_main.c rx_ring->dcb_tc = n; n 3450 drivers/net/ethernet/intel/i40e/i40e_main.c tx_ring->dcb_tc = n; n 13331 drivers/net/ethernet/intel/i40e/i40e_main.c int i, n, bkt; n 13394 drivers/net/ethernet/intel/i40e/i40e_main.c for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) { n 13398 drivers/net/ethernet/intel/i40e/i40e_main.c n++; /* count the VSIs */ n 13405 drivers/net/ethernet/intel/i40e/i40e_main.c n++; /* count the VEBs */ n 13409 drivers/net/ethernet/intel/i40e/i40e_main.c if (n == 0 && veb && veb->uplink_seid != 0) n 13906 drivers/net/ethernet/intel/i40e/i40e_main.c int i, n = 0; n 13913 drivers/net/ethernet/intel/i40e/i40e_main.c n++; n 13917 drivers/net/ethernet/intel/i40e/i40e_main.c if (n != 1) { n 13920 drivers/net/ethernet/intel/i40e/i40e_main.c veb->seid, n); n 3702 drivers/net/ethernet/intel/i40e/i40e_txrx.c int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, n 3725 drivers/net/ethernet/intel/i40e/i40e_txrx.c for (i = 0; i < n; i++) { n 3739 drivers/net/ethernet/intel/i40e/i40e_txrx.c return n - drops; n 204 drivers/net/ethernet/intel/i40e/i40e_txrx.h #define I40E_RX_NEXT_DESC(r, i, n) \ n 209 drivers/net/ethernet/intel/i40e/i40e_txrx.h (n) = I40E_RX_DESC((r), (i)); \ n 212 drivers/net/ethernet/intel/i40e/i40e_txrx.h #define I40E_RX_NEXT_DESC_PREFETCH(r, i, n) \ n 214 
drivers/net/ethernet/intel/i40e/i40e_txrx.h I40E_RX_NEXT_DESC((r), (i), (n)); \ n 215 drivers/net/ethernet/intel/i40e/i40e_txrx.h prefetch((n)); \ n 496 drivers/net/ethernet/intel/i40e/i40e_txrx.h int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, n 302 drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h #define IAVF_CHECK_STRUCT_LEN(n, X) enum iavf_static_assert_enum_##X \ n 303 drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h { iavf_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) } n 183 drivers/net/ethernet/intel/iavf/iavf_txrx.h #define IAVF_RX_NEXT_DESC(r, i, n) \ n 188 drivers/net/ethernet/intel/iavf/iavf_txrx.h (n) = IAVF_RX_DESC((r), (i)); \ n 191 drivers/net/ethernet/intel/iavf/iavf_txrx.h #define IAVF_RX_NEXT_DESC_PREFETCH(r, i, n) \ n 193 drivers/net/ethernet/intel/iavf/iavf_txrx.h IAVF_RX_NEXT_DESC((r), (i), (n)); \ n 194 drivers/net/ethernet/intel/iavf/iavf_txrx.h prefetch((n)); \ n 486 drivers/net/ethernet/intel/ice/ice_common.c #define ICE_FW_LOG_DESC_SIZE(n) (sizeof(struct ice_aqc_fw_logging_data) + \ n 487 drivers/net/ethernet/intel/ice/ice_common.c (((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry))) n 110 drivers/net/ethernet/intel/ice/ice_dcb_lib.c int i, n; n 125 drivers/net/ethernet/intel/ice/ice_dcb_lib.c ice_for_each_traffic_class(n) { n 126 drivers/net/ethernet/intel/ice/ice_dcb_lib.c if (!(vsi->tc_cfg.ena_tc & BIT(n))) n 129 drivers/net/ethernet/intel/ice/ice_dcb_lib.c qoffset = vsi->tc_cfg.tc_info[n].qoffset; n 130 drivers/net/ethernet/intel/ice/ice_dcb_lib.c qcount = vsi->tc_cfg.tc_info[n].qcount_tx; n 134 drivers/net/ethernet/intel/ice/ice_dcb_lib.c tx_ring->dcb_tc = n; n 135 drivers/net/ethernet/intel/ice/ice_dcb_lib.c rx_ring->dcb_tc = n; n 44 drivers/net/ethernet/intel/ice/ice_ethtool.c #define ICE_ALL_STATS_LEN(n) (ICE_PF_STATS_LEN + ICE_PFC_STATS_LEN + \ n 45 drivers/net/ethernet/intel/ice/ice_ethtool.c ICE_VSI_STATS_LEN + ice_q_stats_len(n)) n 39 drivers/net/ethernet/intel/ice/ice_switch.c #define ICE_SW_RULE_LG_ACT_SIZE(n) \ n 44 drivers/net/ethernet/intel/ice/ice_switch.c ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act))) n 45 drivers/net/ethernet/intel/ice/ice_switch.c #define ICE_SW_RULE_VSI_LIST_SIZE(n) \ n 50 drivers/net/ethernet/intel/ice/ice_switch.c ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi))) n 294 drivers/net/ethernet/intel/igb/igb_main.c int n = 0; n 300 drivers/net/ethernet/intel/igb/igb_main.c for (n = 0; n < 4; n++) n 301 drivers/net/ethernet/intel/igb/igb_main.c regs[n] = rd32(E1000_RDLEN(n)); n 304 drivers/net/ethernet/intel/igb/igb_main.c for (n = 0; n < 4; n++) n 305 drivers/net/ethernet/intel/igb/igb_main.c regs[n] = rd32(E1000_RDH(n)); n 308 drivers/net/ethernet/intel/igb/igb_main.c for (n = 0; n < 4; n++) n 309 drivers/net/ethernet/intel/igb/igb_main.c regs[n] = rd32(E1000_RDT(n)); n 312 drivers/net/ethernet/intel/igb/igb_main.c for (n = 0; n < 4; n++) n 313 drivers/net/ethernet/intel/igb/igb_main.c regs[n] = rd32(E1000_RXDCTL(n)); n 316 drivers/net/ethernet/intel/igb/igb_main.c for (n = 0; n < 4; n++) n 317 drivers/net/ethernet/intel/igb/igb_main.c regs[n] = rd32(E1000_RDBAL(n)); n 320 drivers/net/ethernet/intel/igb/igb_main.c for (n = 0; n < 4; n++) n 321 drivers/net/ethernet/intel/igb/igb_main.c regs[n] = rd32(E1000_RDBAH(n)); n 324 drivers/net/ethernet/intel/igb/igb_main.c for (n = 0; n < 4; n++) n 325 drivers/net/ethernet/intel/igb/igb_main.c regs[n] = rd32(E1000_RDBAL(n)); n 328 drivers/net/ethernet/intel/igb/igb_main.c for (n = 0; n < 4; n++) n 329 
drivers/net/ethernet/intel/igb/igb_main.c regs[n] = rd32(E1000_TDBAH(n)); n 332 drivers/net/ethernet/intel/igb/igb_main.c for (n = 0; n < 4; n++) n 333 drivers/net/ethernet/intel/igb/igb_main.c regs[n] = rd32(E1000_TDLEN(n)); n 336 drivers/net/ethernet/intel/igb/igb_main.c for (n = 0; n < 4; n++) n 337 drivers/net/ethernet/intel/igb/igb_main.c regs[n] = rd32(E1000_TDH(n)); n 340 drivers/net/ethernet/intel/igb/igb_main.c for (n = 0; n < 4; n++) n 341 drivers/net/ethernet/intel/igb/igb_main.c regs[n] = rd32(E1000_TDT(n)); n 344 drivers/net/ethernet/intel/igb/igb_main.c for (n = 0; n < 4; n++) n 345 drivers/net/ethernet/intel/igb/igb_main.c regs[n] = rd32(E1000_TXDCTL(n)); n 369 drivers/net/ethernet/intel/igb/igb_main.c u16 i, n; n 396 drivers/net/ethernet/intel/igb/igb_main.c for (n = 0; n < adapter->num_tx_queues; n++) { n 398 drivers/net/ethernet/intel/igb/igb_main.c tx_ring = adapter->tx_ring[n]; n 401 drivers/net/ethernet/intel/igb/igb_main.c n, tx_ring->next_to_use, tx_ring->next_to_clean, n 425 drivers/net/ethernet/intel/igb/igb_main.c for (n = 0; n < adapter->num_tx_queues; n++) { n 426 drivers/net/ethernet/intel/igb/igb_main.c tx_ring = adapter->tx_ring[n]; n 470 drivers/net/ethernet/intel/igb/igb_main.c for (n = 0; n < adapter->num_rx_queues; n++) { n 471 drivers/net/ethernet/intel/igb/igb_main.c rx_ring = adapter->rx_ring[n]; n 473 drivers/net/ethernet/intel/igb/igb_main.c n, rx_ring->next_to_use, rx_ring->next_to_clean); n 503 drivers/net/ethernet/intel/igb/igb_main.c for (n = 0; n < adapter->num_rx_queues; n++) { n 504 drivers/net/ethernet/intel/igb/igb_main.c rx_ring = adapter->rx_ring[n]; n 6792 drivers/net/ethernet/intel/igb/igb_main.c int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; n 6801 drivers/net/ethernet/intel/igb/igb_main.c vf_data->num_vf_mc_hashes = n; n 6804 drivers/net/ethernet/intel/igb/igb_main.c if (n > 30) n 6805 drivers/net/ethernet/intel/igb/igb_main.c n = 30; n 6808 drivers/net/ethernet/intel/igb/igb_main.c for (i = 0; i < n; i++) n 1271 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c u32 n = (_n); \ n 1272 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n)) \ n 1273 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c common_hash ^= lo_hash_dword >> n; \ n 1274 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \ n 1275 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c bucket_hash ^= lo_hash_dword >> n; \ n 1276 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n)) \ n 1277 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c sig_hash ^= lo_hash_dword << (16 - n); \ n 1278 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n + 16)) \ n 1279 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c common_hash ^= hi_hash_dword >> n; \ n 1280 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \ n 1281 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c bucket_hash ^= hi_hash_dword >> n; \ n 1282 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n + 16)) \ n 1283 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c sig_hash ^= hi_hash_dword << (16 - n); \ n 1416 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c u32 n = (_n); \ n 1417 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \ n 1418 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c bucket_hash ^= lo_hash_dword >> n; \ n 1419 
drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \ n 1420 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c bucket_hash ^= hi_hash_dword >> n; \ n 550 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static void ixgbe_print_buffer(struct ixgbe_ring *ring, int n) n 556 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c n, ring->next_to_use, ring->next_to_clean, n 571 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c int n = 0; n 611 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c for (n = 0; n < adapter->num_tx_queues; n++) { n 612 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ring = adapter->tx_ring[n]; n 613 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_print_buffer(ring, n); n 616 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c for (n = 0; n < adapter->num_xdp_queues; n++) { n 617 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ring = adapter->xdp_ring[n]; n 618 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ixgbe_print_buffer(ring, n); n 662 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c for (n = 0; n < adapter->num_tx_queues; n++) { n 663 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ring = adapter->tx_ring[n]; n 714 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c for (n = 0; n < adapter->num_rx_queues; n++) { n 715 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring = adapter->rx_ring[n]; n 717 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c n, rx_ring->next_to_use, rx_ring->next_to_clean); n 771 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c for (n = 0; n < adapter->num_rx_queues; n++) { n 772 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_ring = adapter->rx_ring[n]; n 10329 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static int ixgbe_xdp_xmit(struct net_device *dev, int n, n 10353 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c for (i = 0; i < n; i++) { n 10367 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c return n - drops; n 729 drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c int n; n 741 drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c for (n = 0; n < adapter->num_rx_queues; n++) { n 742 drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c rx_ring = adapter->rx_ring[n]; n 2813 drivers/net/ethernet/marvell/mv643xx_eth.c int n; n 2815 drivers/net/ethernet/marvell/mv643xx_eth.c for (n = 0; n < 3; n++) { n 2816 drivers/net/ethernet/marvell/mv643xx_eth.c platform_device_del(port_platdev[n]); n 2817 drivers/net/ethernet/marvell/mv643xx_eth.c port_platdev[n] = NULL; n 112 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP2_CLS_FLOW_TBL2_FLD_OFFS(n) ((n) * 6) n 113 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP2_CLS_FLOW_TBL2_FLD(n, x) ((x) << ((n) * 6)) n 507 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP22_MPCS_CLK_RESET_DIV_RATIO(n) ((n) << 4) n 514 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP22_XPCS_CFG0_PCS_MODE(n) ((n) << 3) n 515 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP22_XPCS_CFG0_ACTIVE_LANE(n) ((n) << 5) n 31 drivers/net/ethernet/marvell/octeontx2/af/common.h #define Q_SIZE(x, n) ((ilog2(x) - (n)) / 2) n 128 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c #define CAM_MASK(n) (BIT_ULL(n) - 1) n 836 drivers/net/ethernet/marvell/skge.c int n = min_t(int, length, sizeof(val)); n 838 drivers/net/ethernet/marvell/skge.c memcpy(data, &val, n); n 839 drivers/net/ethernet/marvell/skge.c length -= n; n 840 drivers/net/ethernet/marvell/skge.c data += n; n 841 drivers/net/ethernet/marvell/skge.c offset += n; n 863 drivers/net/ethernet/marvell/skge.c int n = min_t(int, length, sizeof(val)); n 865 
drivers/net/ethernet/marvell/skge.c if (n < sizeof(val)) n 867 drivers/net/ethernet/marvell/skge.c memcpy(&val, data, n); n 871 drivers/net/ethernet/marvell/skge.c length -= n; n 872 drivers/net/ethernet/marvell/skge.c data += n; n 873 drivers/net/ethernet/marvell/skge.c offset += n; n 2795 drivers/net/ethernet/mellanox/mlx4/cmd.c unsigned n; n 2802 drivers/net/ethernet/mellanox/mlx4/cmd.c n = find_first_bit(actv_ports.ports, dev->caps.num_ports); n 2803 drivers/net/ethernet/mellanox/mlx4/cmd.c if (port <= n) n 2804 drivers/net/ethernet/mellanox/mlx4/cmd.c port = n + 1; n 1236 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c u32 n = mlx4_en_get_rxfh_indir_size(dev); n 1240 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c rss_rings = priv->prof->rss_rings ?: n; n 1243 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c for (i = 0; i < n; i++) { n 1259 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c u32 n = mlx4_en_get_rxfh_indir_size(dev); n 1269 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c for (i = 0; i < n; i++) { n 1275 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c if (ring_index[i] != (i % (rss_rings ?: n))) n 1280 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c rss_rings = n; n 129 drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h #define MLX4_FIND_NETDEV_STAT(n) (offsetof(struct net_device_stats, n) / \ n 130 drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h sizeof(((struct net_device_stats *)0)->n)) n 187 drivers/net/ethernet/mellanox/mlx5/core/cmd.c int n = mlx5_calc_cmd_blocks(msg); n 190 drivers/net/ethernet/mellanox/mlx5/core/cmd.c for (i = 0; i < n && next; i++) { n 230 drivers/net/ethernet/mellanox/mlx5/core/cmd.c int n = mlx5_calc_cmd_blocks(ent->out); n 239 drivers/net/ethernet/mellanox/mlx5/core/cmd.c for (i = 0; i < n && next; i++) { n 775 drivers/net/ethernet/mellanox/mlx5/core/cmd.c int n = mlx5_calc_cmd_blocks(msg); n 806 drivers/net/ethernet/mellanox/mlx5/core/cmd.c for (i = 0; i < n && next; i++) { n 1069 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ++stats->n; n 1219 drivers/net/ethernet/mellanox/mlx5/core/cmd.c int n; n 1227 drivers/net/ethernet/mellanox/mlx5/core/cmd.c n = mlx5_calc_cmd_blocks(msg); n 1229 drivers/net/ethernet/mellanox/mlx5/core/cmd.c for (i = 0; i < n; i++) { n 1240 drivers/net/ethernet/mellanox/mlx5/core/cmd.c block->block_num = cpu_to_be32(n - i - 1); n 1532 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ++stats->n; n 1822 drivers/net/ethernet/mellanox/mlx5/core/cmd.c struct mlx5_cmd_msg *n; n 1827 drivers/net/ethernet/mellanox/mlx5/core/cmd.c list_for_each_entry_safe(msg, n, &ch->head, list) { n 134 drivers/net/ethernet/mellanox/mlx5/core/debugfs.c if (stats->n) n 135 drivers/net/ethernet/mellanox/mlx5/core/debugfs.c field = div64_u64(stats->sum, stats->n); n 149 drivers/net/ethernet/mellanox/mlx5/core/debugfs.c stats->n = 0; n 182 drivers/net/ethernet/mellanox/mlx5/core/debugfs.c debugfs_create_u64("n", 0400, stats->root, &stats->n); n 520 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c struct hlist_node *n; n 524 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c hlist_for_each_entry_safe(str_frmt, n, &tracer->hash[i], hlist) n 81 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c struct neighbour *n = NULL; n 116 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c n = dst_neigh_lookup(&rt->dst, &fl4->daddr); n 118 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c if (!n) n 121 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c *out_n = n; n 141 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c struct neighbour *n = NULL; n 164 
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c n = dst_neigh_lookup(dst, &fl6->daddr); n 166 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c if (!n) n 169 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c *out_n = n; n 215 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c struct neighbour *n = NULL; n 230 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c &fl4, &n, &ttl); n 255 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c e->m_neigh.dev = n->dev; n 256 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c e->m_neigh.family = n->ops->family; n 257 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); n 270 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c read_lock_bh(&n->lock); n 271 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c nud_state = n->nud_state; n 272 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c ether_addr_copy(e->h_dest, n->ha); n 273 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c read_unlock_bh(&n->lock); n 297 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c neigh_event_send(n, NULL); n 314 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c neigh_release(n); n 322 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c if (n) n 323 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c neigh_release(n); n 334 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c struct neighbour *n = NULL; n 349 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c &fl6, &n, &ttl); n 374 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c e->m_neigh.dev = n->dev; n 375 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c e->m_neigh.family = n->ops->family; n 376 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); n 389 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c read_lock_bh(&n->lock); n 390 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c nud_state = n->nud_state; n 391 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c ether_addr_copy(e->h_dest, n->ha); n 392 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c read_unlock_bh(&n->lock); n 415 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c neigh_event_send(n, NULL); n 433 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c neigh_release(n); n 441 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c if (n) n 442 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c neigh_release(n); n 31 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n) n 33 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc); n 471 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, n 494 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c for (i = 0; i < n; i++) { n 528 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c return n - drops; n 72 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, n 387 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c unsigned int orig_fsz, frag_offset = 0, n = 0; n 396 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c n++; n 400 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c page_ref_add(skb_frag_page(f), n - 1); n 408 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c page_ref_add(skb_frag_page(f), n - 1); n 471 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c int n, tc, num_sqs 
= 0; n 479 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c for (n = 0; n < priv->channels.num; n++) { n 480 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c c = priv->channels.c[n]; n 643 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c struct neighbour *n = nhe->n; n 657 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c read_lock_bh(&n->lock); n 658 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c memcpy(ha, n->ha, ETH_ALEN); n 659 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c nud_state = n->nud_state; n 660 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c dead = n->dead; n 661 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c read_unlock_bh(&n->lock); n 677 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c neigh_release(n); n 882 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c struct neighbour *n) n 888 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c neigh_hold(n); n 893 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c nhe->n = n; n 897 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c neigh_release(n); n 916 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c struct neighbour *n; n 921 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c n = ptr; n 923 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl) n 925 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c if (n->tbl != &arp_tbl) n 929 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c m_neigh.dev = n->dev; n 930 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c m_neigh.family = n->ops->family; n 931 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len); n 939 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c mlx5e_rep_queue_neigh_update_work(priv, nhe, n); n 130 drivers/net/ethernet/mellanox/mlx5/core/en_rep.h struct neighbour *n; n 87 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static inline void mlx5e_cqes_update_owner(struct mlx5_cqwq *wq, int n) n 93 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c u32 ci_top = min_t(u32, wq_sz, ci + n); n 95 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c for (; ci < ci_top; ci++, n--) { n 103 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c for (ci = 0; ci < n; ci++) { n 453 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n) n 461 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c } while (--n); n 1487 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c struct neighbour *n; n 1544 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev); n 1545 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c if (!n) n 1548 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c neigh_event_send(n, NULL); n 1549 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c neigh_release(n); n 767 drivers/net/ethernet/mellanox/mlx5/core/eq.c struct mlx5_eq_comp *eq, *n; n 769 drivers/net/ethernet/mellanox/mlx5/core/eq.c list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { n 841 drivers/net/ethernet/mellanox/mlx5/core/eq.c struct mlx5_eq_comp *eq, *n; n 845 drivers/net/ethernet/mellanox/mlx5/core/eq.c list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { n 2058 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c int n) n 2065 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c return steering->fdb_sub_ns[n]; n 162 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c unsigned n; n 168 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask)); n 169 
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c if (n >= MLX5_NUM_4K_IN_PAGE) { n 173 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c clear_bit(n, &fp->bitmask); n 178 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c *addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE; n 188 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c int n; n 196 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT; n 198 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c set_bit(n, &fwp->bitmask); n 124 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline void mlx5_wq_cyc_push_n(struct mlx5_wq_cyc *wq, u8 n) n 126 drivers/net/ethernet/mellanox/mlx5/core/wq.h wq->wqe_ctr += n; n 127 drivers/net/ethernet/mellanox/mlx5/core/wq.h wq->cur_sz += n; n 1868 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct neighbour *n; n 1911 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c return neigh_entry->key.n->tbl->family; n 1922 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct neighbour *n; n 1924 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c n = neigh_entry->key.n; n 1925 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c return ntohl(*((__be32 *) n->primary_key)); n 1931 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct neighbour *n; n 1933 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c n = neigh_entry->key.n; n 1934 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c return (struct in6_addr *) &n->primary_key; n 1949 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n, n 1958 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c neigh_entry->key.n = n; n 2036 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n) n 2042 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev); n 2046 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index); n 2075 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n) n 2079 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c key.n = n; n 2104 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct neighbour *n; n 2118 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c n = neigh_lookup(&arp_tbl, &dipn, dev); n 2119 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (!n) n 2123 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c neigh_event_send(n, NULL); n 2124 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c neigh_release(n); n 2133 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct neighbour *n; n 2146 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c n = neigh_lookup(&nd_tbl, &dip, dev); n 2147 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (!n) n 2151 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c neigh_event_send(n, NULL); n 2152 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c neigh_release(n); n 2292 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c neigh_event_send(neigh_entry->key.n, NULL); n 2340 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c neigh_event_send(neigh_entry->key.n, NULL); n 2363 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct neighbour *n = neigh_entry->key.n; n 2364 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c u32 
dip = ntohl(*((__be32 *) n->primary_key)); n 2380 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct neighbour *n = neigh_entry->key.n; n 2382 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c const char *dip = n->primary_key; n 2394 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct neighbour *n = neigh_entry->key.n; n 2400 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (ipv6_addr_type((struct in6_addr *) &n->primary_key) & n 2417 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (neigh_entry->key.n->tbl->family == AF_INET) { n 2422 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c } else if (neigh_entry->key.n->tbl->family == AF_INET6) { n 2435 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c neigh_entry->key.n->flags |= NTF_OFFLOADED; n 2437 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c neigh_entry->key.n->flags &= ~NTF_OFFLOADED; n 2455 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct neighbour *n; n 2464 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct neighbour *n = net_work->n; n 2473 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c read_lock_bh(&n->lock); n 2474 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c memcpy(ha, n->ha, ETH_ALEN); n 2475 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nud_state = n->nud_state; n 2476 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c dead = n->dead; n 2477 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c read_unlock_bh(&n->lock); n 2483 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); n 2487 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n); n 2502 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c neigh_release(n); n 2559 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct neighbour *n; n 2584 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c n = ptr; n 2586 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6) n 2589 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev); n 2601 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c net_work->n = n; n 2607 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c neigh_clone(n); n 3464 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct neighbour *n, *old_n = neigh_entry->key.n; n 3473 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev); n 3474 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (!n) { n 3475 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr, n 3477 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (IS_ERR(n)) n 3478 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c return PTR_ERR(n); n 3479 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c neigh_event_send(n, NULL); n 3483 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c neigh_entry->key.n = n; n 3488 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c read_lock_bh(&n->lock); n 3489 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nud_state = n->nud_state; n 3490 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c dead = n->dead; n 3491 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c read_unlock_bh(&n->lock); n 3497 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c neigh_clone(n); n 3502 
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c neigh_release(n); n 3507 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c neigh_entry->key.n = old_n; n 3509 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c neigh_release(n); n 3563 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct neighbour *n; n 3575 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev); n 3576 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (!n) { n 3577 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr, n 3579 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c if (IS_ERR(n)) n 3580 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c return PTR_ERR(n); n 3581 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c neigh_event_send(n, NULL); n 3583 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); n 3585 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n); n 3601 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c read_lock_bh(&n->lock); n 3602 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c nud_state = n->nud_state; n 3603 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c dead = n->dead; n 3604 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c read_unlock_bh(&n->lock); n 3610 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c neigh_release(n); n 3618 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct neighbour *n; n 3622 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c n = neigh_entry->key.n; n 3637 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c neigh_release(n); n 4780 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list); n 4782 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_fib_entry_offload_refresh(n, op, 0); n 4798 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list); n 4801 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c mlxsw_sp_fib_entry_update(mlxsw_sp, n); n 6518 drivers/net/ethernet/micrel/ksz884x.c int n; n 6524 drivers/net/ethernet/micrel/ksz884x.c n = SWITCH_PORT_NUM; n 6530 drivers/net/ethernet/micrel/ksz884x.c if (n == SWITCH_PORT_NUM) n 6531 drivers/net/ethernet/micrel/ksz884x.c n = p; n 6536 drivers/net/ethernet/micrel/ksz884x.c if (n < SWITCH_PORT_NUM) n 6539 drivers/net/ethernet/micrel/ksz884x.c if (1 == port->mib_port_cnt && n < SWITCH_PORT_NUM) { n 6540 drivers/net/ethernet/micrel/ksz884x.c p = n; n 6546 drivers/net/ethernet/micrel/ksz884x.c for (i = 0, p = n; i < port->mib_port_cnt - n; i++, p++) { n 6561 drivers/net/ethernet/micrel/ksz884x.c n = hw->mib_cnt; n 6562 drivers/net/ethernet/micrel/ksz884x.c if (n > n_stats) n 6563 drivers/net/ethernet/micrel/ksz884x.c n = n_stats; n 6564 drivers/net/ethernet/micrel/ksz884x.c n_stats -= n; n 6565 drivers/net/ethernet/micrel/ksz884x.c for (i = 0; i < n; i++) n 1654 drivers/net/ethernet/mscc/ocelot.c int n = __ffs(ocelot->lags[p]); n 1656 drivers/net/ethernet/mscc/ocelot.c ocelot->lags[n] = ocelot->lags[p]; n 1659 drivers/net/ethernet/mscc/ocelot.c ocelot_setup_lag(ocelot, n); n 255 drivers/net/ethernet/mscc/ocelot_ace.c u32 i, j, n = 0, value = 0, mask = 0; n 263 drivers/net/ethernet/mscc/ocelot_ace.c value += (val[j] << n); n 264 drivers/net/ethernet/mscc/ocelot_ace.c mask += (msk[j] << n); 
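The ocelot_ace.c lines directly above show a recurring packing idiom: key and mask bytes are shifted into 32-bit accumulators eight bits at a time and flushed once a full entry width (or the end of the input) has been collected. The following is a minimal, self-contained sketch of that idiom in plain C; it is not taken from the driver, and emit_word() is a hypothetical stand-in for the driver's vcap_key_set() (the real code also walks its bit offset downward, which the sketch simplifies).

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical sink for one packed value/mask word; in the driver this
     * role is played by vcap_key_set(), here it only prints the result. */
    static void emit_word(unsigned int offset, unsigned int width,
                          uint32_t value, uint32_t mask)
    {
            printf("off=%u width=%u value=0x%08x mask=0x%08x\n",
                   offset, width, (unsigned int)value, (unsigned int)mask);
    }

    /* Shift key/mask bytes into 32-bit accumulators, eight bits per step,
     * and flush whenever 32 bits are collected or the input ends. */
    static void pack_bytes(const uint8_t *val, const uint8_t *msk,
                           unsigned int count)
    {
            uint32_t value = 0, mask = 0;
            unsigned int i, n = 0, offset = 0;

            for (i = 0; i < count; i++) {
                    value |= (uint32_t)val[i] << n;
                    mask  |= (uint32_t)msk[i] << n;
                    n += 8;
                    if (n == 32 || i + 1 == count) {
                            emit_word(offset, n, value, mask);
                            offset += n;
                            value = 0;
                            mask = 0;
                            n = 0;
                    }
            }
    }

    int main(void)
    {
            static const uint8_t key[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
            static const uint8_t msk[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

            pack_bytes(key, msk, 6);
            return 0;
    }
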
n 265 drivers/net/ethernet/mscc/ocelot_ace.c n += 8; n 266 drivers/net/ethernet/mscc/ocelot_ace.c if (n == ENTRY_WIDTH || (i + 1) == count) { n 267 drivers/net/ethernet/mscc/ocelot_ace.c offset -= n; n 268 drivers/net/ethernet/mscc/ocelot_ace.c vcap_key_set(data, offset, n, value, mask); n 269 drivers/net/ethernet/mscc/ocelot_ace.c n = 0; n 598 drivers/net/ethernet/mscc/ocelot_ace.c struct list_head *pos, *n; n 607 drivers/net/ethernet/mscc/ocelot_ace.c list_for_each_safe(pos, n, &block->rules) { n 108 drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h #define MXGEFW_ETH_SEND(n) (0x200000 + (((n) & 0x03) * 0x40000)) n 109 drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h #define MXGEFW_ETH_SEND_OFFSET(n) (MXGEFW_ETH_SEND(n) - MXGEFW_ETH_SEND_4) n 182 drivers/net/ethernet/neterion/s2io-regs.h #define TX_TRAFFIC_INT_n(n) s2BIT(n) n 186 drivers/net/ethernet/neterion/s2io-regs.h #define RX_TRAFFIC_INT_n(n) s2BIT(n) n 192 drivers/net/ethernet/neterion/s2io-regs.h #define PIC_CNTL_SHARED_SPLITS(n) vBIT(n,11,5) n 232 drivers/net/ethernet/neterion/s2io-regs.h #define STATREQTO_VAL(n) TBD n 255 drivers/net/ethernet/neterion/s2io-regs.h #define STAT_BC(n) vBIT(n,4,12) n 263 drivers/net/ethernet/neterion/s2io-regs.h #define STAT_TRSF_PER(n) TBD n 265 drivers/net/ethernet/neterion/s2io-regs.h #define SET_UPDT_PERIOD(n) vBIT((PER_SEC*n),32,32) n 439 drivers/net/ethernet/neterion/s2io-regs.h #define TTI_CMD_MEM_OFFSET(n) vBIT(n,26,6) n 442 drivers/net/ethernet/neterion/s2io-regs.h #define TTI_DATA1_MEM_TX_TIMER_VAL(n) vBIT(n,6,26) n 443 drivers/net/ethernet/neterion/s2io-regs.h #define TTI_DATA1_MEM_TX_TIMER_AC_CI(n) vBIT(n,38,2) n 446 drivers/net/ethernet/neterion/s2io-regs.h #define TTI_DATA1_MEM_TX_URNG_A(n) vBIT(n,41,7) n 447 drivers/net/ethernet/neterion/s2io-regs.h #define TTI_DATA1_MEM_TX_URNG_B(n) vBIT(n,49,7) n 448 drivers/net/ethernet/neterion/s2io-regs.h #define TTI_DATA1_MEM_TX_URNG_C(n) vBIT(n,57,7) n 451 drivers/net/ethernet/neterion/s2io-regs.h #define TTI_DATA2_MEM_TX_UFC_A(n) vBIT(n,0,16) n 452 drivers/net/ethernet/neterion/s2io-regs.h #define TTI_DATA2_MEM_TX_UFC_B(n) vBIT(n,16,16) n 453 drivers/net/ethernet/neterion/s2io-regs.h #define TTI_DATA2_MEM_TX_UFC_C(n) vBIT(n,32,16) n 454 drivers/net/ethernet/neterion/s2io-regs.h #define TTI_DATA2_MEM_TX_UFC_D(n) vBIT(n,48,16) n 604 drivers/net/ethernet/neterion/s2io-regs.h #define RTI_CMD_MEM_OFFSET(n) vBIT(n,29,3) n 607 drivers/net/ethernet/neterion/s2io-regs.h #define RTI_DATA1_MEM_RX_TIMER_VAL(n) vBIT(n,3,29) n 610 drivers/net/ethernet/neterion/s2io-regs.h #define RTI_DATA1_MEM_RX_URNG_A(n) vBIT(n,41,7) n 611 drivers/net/ethernet/neterion/s2io-regs.h #define RTI_DATA1_MEM_RX_URNG_B(n) vBIT(n,49,7) n 612 drivers/net/ethernet/neterion/s2io-regs.h #define RTI_DATA1_MEM_RX_URNG_C(n) vBIT(n,57,7) n 615 drivers/net/ethernet/neterion/s2io-regs.h #define RTI_DATA2_MEM_RX_UFC_A(n) vBIT(n,0,16) n 616 drivers/net/ethernet/neterion/s2io-regs.h #define RTI_DATA2_MEM_RX_UFC_B(n) vBIT(n,16,16) n 617 drivers/net/ethernet/neterion/s2io-regs.h #define RTI_DATA2_MEM_RX_UFC_C(n) vBIT(n,32,16) n 618 drivers/net/ethernet/neterion/s2io-regs.h #define RTI_DATA2_MEM_RX_UFC_D(n) vBIT(n,48,16) n 737 drivers/net/ethernet/neterion/s2io-regs.h #define RMAC_ADDR_CMD_MEM_OFFSET(n) vBIT(n,26,6) n 740 drivers/net/ethernet/neterion/s2io-regs.h #define RMAC_ADDR_DATA0_MEM_ADDR(n) vBIT(n,0,48) n 744 drivers/net/ethernet/neterion/s2io-regs.h #define RMAC_ADDR_DATA1_MEM_MASK(n) vBIT(n,0,48) n 773 drivers/net/ethernet/neterion/s2io-regs.h #define MAC_TX_LINK_UTIL_VAL( n ) 
vBIT(n,8,4) n 776 drivers/net/ethernet/neterion/s2io-regs.h #define MAC_RX_LINK_UTIL_VAL( n ) vBIT(n,40,4) n 805 drivers/net/ethernet/neterion/s2io-regs.h #define RTS_PN_CAM_CTRL_OFFSET(n) vBIT(n,24,8) n 815 drivers/net/ethernet/neterion/s2io-regs.h #define RTS_DS_MEM_CTRL_OFFSET(n) vBIT(n,26,6) n 817 drivers/net/ethernet/neterion/s2io-regs.h #define RTS_DS_MEM_DATA(n) vBIT(n,0,8) n 852 drivers/net/ethernet/neterion/s2io-regs.h #define RX_QUEUE_CFG_Q0_SZ(n) vBIT(n,0,8) n 853 drivers/net/ethernet/neterion/s2io-regs.h #define RX_QUEUE_CFG_Q1_SZ(n) vBIT(n,8,8) n 854 drivers/net/ethernet/neterion/s2io-regs.h #define RX_QUEUE_CFG_Q2_SZ(n) vBIT(n,16,8) n 855 drivers/net/ethernet/neterion/s2io-regs.h #define RX_QUEUE_CFG_Q3_SZ(n) vBIT(n,24,8) n 856 drivers/net/ethernet/neterion/s2io-regs.h #define RX_QUEUE_CFG_Q4_SZ(n) vBIT(n,32,8) n 857 drivers/net/ethernet/neterion/s2io-regs.h #define RX_QUEUE_CFG_Q5_SZ(n) vBIT(n,40,8) n 858 drivers/net/ethernet/neterion/s2io-regs.h #define RX_QUEUE_CFG_Q6_SZ(n) vBIT(n,48,8) n 859 drivers/net/ethernet/neterion/s2io-regs.h #define RX_QUEUE_CFG_Q7_SZ(n) vBIT(n,56,8) n 46 drivers/net/ethernet/neterion/s2io.h #define XENA_MAX_OUTSTANDING_SPLITS(n) (n << 4) n 1097 drivers/net/ethernet/neterion/vxge/vxge-config.c struct list_head *p, *n; n 1104 drivers/net/ethernet/neterion/vxge/vxge-config.c list_for_each_safe(p, n, &blockpool->free_block_list) { n 1119 drivers/net/ethernet/neterion/vxge/vxge-config.c list_for_each_safe(p, n, &blockpool->free_entry_list) { n 2406 drivers/net/ethernet/neterion/vxge/vxge-config.c struct list_head *p, *n; n 2408 drivers/net/ethernet/neterion/vxge/vxge-config.c list_for_each_safe(p, n, &blockpool->free_block_list) { n 31 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define vxge_bVALn(bits, loc, n) \ n 32 drivers/net/ethernet/neterion/vxge/vxge-reg.h ((((u64)bits) >> (64-(loc+n))) & ((0x1ULL << n) - 1)) n 662 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_PRC_STATUS1_PRC_VP_QUIESCENT(n) vxge_mBIT(n) n 664 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_RXDCM_RESET_IN_PROGRESS_PRC_VP(n) vxge_mBIT(n) n 666 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_REPLICQ_FLUSH_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n) n 668 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_RXPE_CMDS_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n) n 670 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_MXP_CMDS_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n) n 672 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_NOFFLOAD_RESET_IN_PROGRESS_PRC_VP(n) vxge_mBIT(n) n 674 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_RD_REQ_IN_PROGRESS_VP(n) vxge_mBIT(n) n 676 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_RD_REQ_OUTSTANDING_VP(n) vxge_mBIT(n) n 678 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_KDFC_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n) n 682 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_ONE_CFG_VP_RDY(n) vxge_mBIT(n) n 684 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_ONE_COMMON_PET_VPATH_RESET_IN_PROGRESS(n) vxge_mBIT(n) n 688 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_TIM_INT_EN_TIM_VP(n) vxge_mBIT(n) n 690 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_TIM_SET_INT_EN_VP(n) vxge_mBIT(n) n 692 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_TIM_CLR_INT_EN_VP(n) vxge_mBIT(n) n 694 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_TIM_MASK_INT_DURING_RESET_VPATH(n) 
vxge_mBIT(n) n 696 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_TIM_RESET_IN_PROGRESS_TIM_VPATH(n) vxge_mBIT(n) n 698 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_TIM_OUTSTANDING_BMAP_TIM_VPATH(n) vxge_mBIT(n) n 704 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_MSG_MXP_MR_READY_MP_BOOTED(n) vxge_mBIT(n) n 706 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_MSG_UXP_MR_READY_UP_BOOTED(n) vxge_mBIT(n) n 708 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_MSG_DMQ_NONI_RTL_PREFETCH_BYPASS_ENABLE(n) vxge_mBIT(n) n 710 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_MSG_UMQ_RTL_BWR_PREFETCH_DISABLE(n) vxge_mBIT(n) n 816 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_CP_RESET_IN_PROGRESS_CP_VPATH(n) vxge_mBIT(n) n 937 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_RXDRM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n) n 941 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_RXDCM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n) n 945 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_RXDWM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n) n 960 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_RDA_ECC_DB_REG_RDA_RXD_ERR(n) vxge_mBIT(n) n 964 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_RDA_ECC_SG_REG_RDA_RXD_ERR(n) vxge_mBIT(n) n 972 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_FRF_ALARM_REG_PRC_VP_FRF_SM_ERR(n) vxge_mBIT(n) n 1408 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_RX_QUEUE_SELECT_NUMBER(n) vxge_mBIT(n) n 1632 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(n) vxge_mBIT(n) n 1634 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_RXMAC_AUTHORIZE_ALL_VID_VP(n) vxge_mBIT(n) n 1676 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_RXMAC_RED_CFG0_PORT_RED_EN_VP(n) vxge_mBIT(n) n 1681 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_RXMAC_RED_CFG2_PORT_TRICKLE_EN_VP(n) vxge_mBIT(n) n 1848 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_ASIC_GPIO_ERR_REG_XMACJ_GPIO_INT(n) vxge_mBIT(n) n 1896 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_ASIC_NTWK_CFG_SHOW_PORT_INFO_VP(n) vxge_mBIT(n) n 1898 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_ASIC_NTWK_CFG_PORT_NUM_VP(n) vxge_mBIT(n) n 2104 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FRM_BUF_SBE(n) vxge_mBIT(n) n 2105 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_PCC_ERROR_REG_PCC_PCC_TXDO_SBE(n) vxge_mBIT(n) n 2106 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FRM_BUF_DBE(n) vxge_mBIT(n) n 2107 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_PCC_ERROR_REG_PCC_PCC_TXDO_DBE(n) vxge_mBIT(n) n 2108 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FSM_ERR_ALARM(n) vxge_mBIT(n) n 2109 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_PCC_ERROR_REG_PCC_PCC_SERR(n) vxge_mBIT(n) n 2113 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_LSO_ERROR_REG_PCC_LSO_ABORT(n) vxge_mBIT(n) n 2114 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_LSO_ERROR_REG_PCC_LSO_FSM_ERR_ALARM(n) vxge_mBIT(n) n 2127 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_PCC_CFG_PCC_ENABLE(n) vxge_mBIT(n) n 2128 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_PCC_CFG_PCC_ECC_ENABLE_N(n) vxge_mBIT(n) n 2540 
drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MAPPING_VP_ERR(n) vxge_mBIT(n) n 2875 drivers/net/ethernet/neterion/vxge/vxge-reg.h #define VXGE_HW_LAG_DISTRIB_DEST_MAP_VPATH(n) vxge_mBIT(n) n 29 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c nfp_bpf_cmsg_map_req_size(struct nfp_app_bpf *bpf, unsigned int n) n 34 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n; n 40 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n) n 42 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c return nfp_bpf_cmsg_alloc(bpf, nfp_bpf_cmsg_map_req_size(bpf, n)); n 46 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c nfp_bpf_cmsg_map_reply_size(struct nfp_app_bpf *bpf, unsigned int n) n 51 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n; n 153 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c unsigned int n) n 155 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c return &req->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n]; n 160 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c unsigned int n) n 162 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c return &req->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n]; n 167 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c unsigned int n) n 169 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c return &reply->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n]; n 174 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c unsigned int n) n 176 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c return &reply->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n]; n 1610 drivers/net/ethernet/netronome/nfp/bpf/jit.c if (WARN_ON_ONCE(nfp_prog->adjust_head_location != meta->n)) n 4509 drivers/net/ethernet/netronome/nfp/bpf/jit.c dst_idx = meta->n + 1 + meta->insn.imm; n 4511 drivers/net/ethernet/netronome/nfp/bpf/jit.c dst_idx = meta->n + 1 + meta->insn.off; n 341 drivers/net/ethernet/netronome/nfp/bpf/main.h unsigned short n; n 159 drivers/net/ethernet/netronome/nfp/bpf/offload.c meta->n = i; n 25 drivers/net/ethernet/netronome/nfp/bpf/verifier.c backward = meta->n - insn_idx; n 26 drivers/net/ethernet/netronome/nfp/bpf/verifier.c forward = insn_idx - meta->n; n 71 drivers/net/ethernet/netronome/nfp/bpf/verifier.c if (nfp_prog->adjust_head_location != meta->n) n 78 drivers/net/ethernet/netronome/nfp/bpf/verifier.c location = meta->n; n 728 drivers/net/ethernet/netronome/nfp/bpf/verifier.c meta->n + 1 + meta->insn.imm); n 753 drivers/net/ethernet/netronome/nfp/bpf/verifier.c if (aux[meta->n].zext_dst) n 822 drivers/net/ethernet/netronome/nfp/bpf/verifier.c } else if (meta->jmp_dst->n != aux_data[tgt_off].orig_idx) { n 824 drivers/net/ethernet/netronome/nfp/bpf/verifier.c off, meta->jmp_dst->n, n 173 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c struct neighbour *n; n 198 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev); n 199 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c if (!n) n 203 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c neigh_event_send(n, NULL); n 204 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c neigh_release(n); n 332 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c struct neighbour *n; n 340 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c n = redir->neigh; n 343 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c n = (struct neighbour *)ptr; n 349 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c 
flow.daddr = *(__be32 *)n->primary_key; n 354 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c if (!nfp_netdev_is_nfp_repr(n->dev) && n 355 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c !nfp_flower_internal_port_can_offload(app, n->dev)) n 364 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c rt = ip_route_output_key(dev_net(n->dev), &flow); n 375 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC); n 385 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c struct neighbour *n; n 410 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c n = dst_neigh_lookup(&rt->dst, &flow.daddr); n 412 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c if (!n) n 414 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC); n 415 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c neigh_release(n); n 92 drivers/net/ethernet/netronome/nfp/nfp_main.c int n, err; n 108 drivers/net/ethernet/netronome/nfp/nfp_main.c n = nfp_rtsym_write(pf->cpp, pf->mbox, NFP_MBOX_DATA, in_data, n 110 drivers/net/ethernet/netronome/nfp/nfp_main.c if (n != in_length) n 148 drivers/net/ethernet/netronome/nfp/nfp_main.c n = nfp_rtsym_read(pf->cpp, pf->mbox, NFP_MBOX_DATA, n 150 drivers/net/ethernet/netronome/nfp/nfp_main.c if (n != out_length) n 956 drivers/net/ethernet/netronome/nfp/nfp_net.h unsigned int n); n 434 drivers/net/ethernet/netronome/nfp/nfp_net_common.c unsigned int n) n 438 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS; n 441 drivers/net/ethernet/netronome/nfp/nfp_net_common.c memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n); n 3545 drivers/net/ethernet/netronome/nfp/nfp_net_common.c int n; n 3556 drivers/net/ethernet/netronome/nfp/nfp_net_common.c n = snprintf(name, len, "n%d", nn->id); n 3557 drivers/net/ethernet/netronome/nfp/nfp_net_common.c if (n >= len) n 261 drivers/net/ethernet/netronome/nfp/nfp_net_main.c unsigned int n; n 263 drivers/net/ethernet/netronome/nfp/nfp_net_main.c n = min(NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs, n 266 drivers/net/ethernet/netronome/nfp/nfp_net_main.c n); n 267 drivers/net/ethernet/netronome/nfp/nfp_net_main.c irqs_left -= n; n 122 drivers/net/ethernet/netronome/nfp/nfp_port.c int n; n 135 drivers/net/ethernet/netronome/nfp/nfp_port.c n = snprintf(name, len, "p%d", eth_port->label_port); n 137 drivers/net/ethernet/netronome/nfp/nfp_port.c n = snprintf(name, len, "p%ds%d", eth_port->label_port, n 142 drivers/net/ethernet/netronome/nfp/nfp_port.c n = snprintf(name, len, "pf%d", port->pf_id); n 144 drivers/net/ethernet/netronome/nfp/nfp_port.c n = snprintf(name, len, "pf%ds%d", port->pf_id, n 148 drivers/net/ethernet/netronome/nfp/nfp_port.c n = snprintf(name, len, "pf%dvf%d", port->pf_id, port->vf_id); n 154 drivers/net/ethernet/netronome/nfp/nfp_port.c if (n >= len) n 35 drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c int n; n 41 drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c n = nfp_mbox_cmd(pf, NFP_MBOX_POOL_GET, &id, sizeof(id), n 43 drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c if (n < 0) n 44 drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c return n; n 45 drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c if (n < sizeof(get_data)) n 84 drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c int n, err; n 89 drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c n = nfp_pf_rtsym_read_optional(pf, NFP_SHARED_BUF_COUNT_SYM_NAME, 0); n 90 
drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c if (n <= 0) n 91 drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c return n; n 92 drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c num_entries = n; n 375 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c int n; n 377 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c for (n = 0; n < nfp->bars; n++) { n 378 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c struct nfp_bar *bar = &nfp->bar[n]; n 381 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c return n; n 393 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c int n, busy = 0; n 395 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c for (n = 0; n < nfp->bars; n++) { n 396 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c const struct nfp_bar *bar = &nfp->bar[n]; n 409 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c return n; n 427 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c int n; n 431 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c n = find_unused_bar_noblock(nfp, tgt, act, tok, offset, size, width); n 432 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c if (n < 0) n 437 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c return n; n 715 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c int n; n 717 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c for (n = 0; n < nfp->bars; n++, bar++) { n 931 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c int n, width; n 967 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c for (n = 0; n < length; n += sizeof(u32)) n 969 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c return n; n 975 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c for (n = 0; n < length; n += sizeof(u64)) n 977 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c return n; n 994 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c int n, width; n 1030 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c for (n = 0; n < length; n += sizeof(u32)) { n 1034 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c return n; n 1040 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c for (n = 0; n < length; n += sizeof(u64)) { n 1044 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c return n; n 683 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c int n; n 685 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c n = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp)); n 686 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c if (n != sizeof(tmp)) n 687 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c return n < 0 ? n : -EIO; n 705 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c int n; n 708 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c n = nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp)); n 710 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO; n 725 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c int n; n 727 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c n = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp)); n 728 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c if (n != sizeof(tmp)) n 729 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c return n < 0 ? 
n : -EIO; n 747 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c int n; n 750 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c n = nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp)); n 752 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO; n 962 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c size_t n, offset; n 965 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c for (offset = 0; offset < length; offset += n) { n 969 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c n = min_t(size_t, length - offset, n 973 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c kernel_vaddr + offset, n); n 976 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c if (ret != n) n 977 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c return offset + n; n 1031 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c size_t n, offset; n 1034 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c for (offset = 0; offset < length; offset += n) { n 1038 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c n = min_t(size_t, length - offset, n 1042 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c kernel_vaddr + offset, n); n 1045 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c if (ret != n) n 1046 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c return offset + n; n 44 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c int n; n 46 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c n = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp)); n 47 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c if (n != sizeof(tmp)) n 48 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c return n < 0 ? n : -EIO; n 67 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c int n; n 70 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c n = nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp)); n 72 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO; n 88 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c int n; n 90 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c n = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp)); n 91 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c if (n != sizeof(tmp)) n 92 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c return n < 0 ? n : -EIO; n 111 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c int n; n 114 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c n = nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp)); n 116 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c return n == sizeof(tmp) ? 0 : n < 0 ? 
n : -EIO; n 102 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c int err, n, size; n 141 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c for (n = 0; n < cache->num; n++) n 143 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c &cache->symtab[n], &rtsymtab[n]); n 196 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c int n; n 201 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c for (n = 0; n < rtbl->num; n++) n 202 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c if (strcmp(name, rtbl->symtab[n].name) == 0) n 203 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c return &rtbl->symtab[n]; n 86 drivers/net/ethernet/ni/nixge.c #define NIXGE_MDIO_OP(n) (((n) & 0x3) << 10) n 92 drivers/net/ethernet/ni/nixge.c #define NIXGE_MDIO_ADDR(n) (((n) & 0x1f) << 5) n 93 drivers/net/ethernet/ni/nixge.c #define NIXGE_MDIO_MMD(n) (((n) & 0x1f) << 0) n 116 drivers/net/ethernet/nxp/lpc_eth.c #define LPC_IPGT_LOAD(n) ((n) & 0x7F) n 121 drivers/net/ethernet/nxp/lpc_eth.c #define LPC_IPGR_LOAD_PART2(n) ((n) & 0x7F) n 122 drivers/net/ethernet/nxp/lpc_eth.c #define LPC_IPGR_LOAD_PART1(n) (((n) & 0x7F) << 8) n 127 drivers/net/ethernet/nxp/lpc_eth.c #define LPC_CLRT_LOAD_RETRY_MAX(n) ((n) & 0xF) n 128 drivers/net/ethernet/nxp/lpc_eth.c #define LPC_CLRT_LOAD_COLLISION_WINDOW(n) (((n) & 0x3F) << 8) n 133 drivers/net/ethernet/nxp/lpc_eth.c #define LPC_MAXF_LOAD_MAX_FRAME_LEN(n) ((n) & 0xFFFF) n 153 drivers/net/ethernet/nxp/lpc_eth.c #define LPC_MCFG_CLOCK_SELECT(n) (((n) & 0x7) << 2) n 172 drivers/net/ethernet/nxp/lpc_eth.c #define LPC_MADR_REGISTER_ADDRESS(n) ((n) & 0x1F) n 173 drivers/net/ethernet/nxp/lpc_eth.c #define LPC_MADR_PHY_0ADDRESS(n) (((n) & 0x1F) << 8) n 178 drivers/net/ethernet/nxp/lpc_eth.c #define LPC_MWDT_WRITE(n) ((n) & 0xFFFF) n 228 drivers/net/ethernet/nxp/lpc_eth.c #define LPC_TSV0_TOTAL_BYTES(n) (((n) >> 12) & 0xFFFF) n 237 drivers/net/ethernet/nxp/lpc_eth.c #define LPC_TSV1_TRANSMIT_BYTE_COUNT(n) ((n) & 0xFFFF) n 238 drivers/net/ethernet/nxp/lpc_eth.c #define LPC_TSV1_COLLISION_COUNT(n) (((n) >> 16) & 0xF) n 243 drivers/net/ethernet/nxp/lpc_eth.c #define LPC_RSV_RECEIVED_BYTE_COUNT(n) ((n) & 0xFFFF) n 263 drivers/net/ethernet/nxp/lpc_eth.c #define LPC_FCCR_MIRRORCOUNTER(n) ((n) & 0xFFFF) n 264 drivers/net/ethernet/nxp/lpc_eth.c #define LPC_FCCR_PAUSETIMER(n) (((n) >> 16) & 0xFFFF) n 269 drivers/net/ethernet/nxp/lpc_eth.c #define LPC_FCCR_MIRRORCOUNTERCURRENT(n) ((n) & 0xFFFF) n 699 drivers/net/ethernet/pasemi/pasemi_mac.c unsigned int n; n 712 drivers/net/ethernet/pasemi/pasemi_mac.c n = rx->next_to_clean; n 714 drivers/net/ethernet/pasemi/pasemi_mac.c prefetch(&RX_DESC(rx, n)); n 717 drivers/net/ethernet/pasemi/pasemi_mac.c macrx = RX_DESC(rx, n); n 718 drivers/net/ethernet/pasemi/pasemi_mac.c prefetch(&RX_DESC(rx, n+4)); n 731 drivers/net/ethernet/pasemi/pasemi_mac.c eval = (RX_DESC(rx, n+1) & XCT_RXRES_8B_EVAL_M) >> n 735 drivers/net/ethernet/pasemi/pasemi_mac.c dma = (RX_DESC(rx, n+2) & XCT_PTR_ADDR_M); n 776 drivers/net/ethernet/pasemi/pasemi_mac.c RX_DESC(rx, n) = 0; n 777 drivers/net/ethernet/pasemi/pasemi_mac.c RX_DESC(rx, n+1) = 0; n 784 drivers/net/ethernet/pasemi/pasemi_mac.c n += 4; n 787 drivers/net/ethernet/pasemi/pasemi_mac.c if (n > RX_RING_SIZE) { n 790 drivers/net/ethernet/pasemi/pasemi_mac.c n &= (RX_RING_SIZE-1); n 793 drivers/net/ethernet/pasemi/pasemi_mac.c rx_ring(mac)->next_to_clean = n; n 361 drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h #define NETXEN_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n))) n 15 
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c #define MASK(n) ((1ULL<<(n))-1) n 419 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c int i, n, init_delay = 0; n 430 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c if (netxen_rom_fast_read(adapter, 0, &n) != 0 || n 431 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c (n != 0xcafecafe) || n 432 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c netxen_rom_fast_read(adapter, 4, &n) != 0) { n 434 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c "n: %08x\n", netxen_nic_driver_name, n); n 437 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c offset = n & 0xffffU; n 438 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c n = (n >> 16) & 0xffffU; n 440 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c if (netxen_rom_fast_read(adapter, 0, &n) != 0 || n 441 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c !(n & 0x80000000)) { n 443 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c "n: %08x\n", netxen_nic_driver_name, n); n 447 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c n &= ~0x80000000; n 450 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c if (n >= 1024) { n 452 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c " initialized.\n", __func__, n); n 456 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL); n 460 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c for (i = 0; i < n; i++) { n 472 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c for (i = 0; i < n; i++) { n 147 drivers/net/ethernet/qlogic/qed/qed_cxt.c #define CDUT_SEG_BLK(n) (1 + (u8)(n)) n 148 drivers/net/ethernet/qlogic/qed/qed_cxt.c #define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_ ## X ## _SEGMENTS) n 253 drivers/net/ethernet/qlogic/qed/qed_hw.c void *addr, u32 hw_addr, size_t n, bool to_device) n 259 drivers/net/ethernet/qlogic/qed/qed_hw.c while (done < n) { n 260 drivers/net/ethernet/qlogic/qed/qed_hw.c quota = min_t(size_t, n - done, n 285 drivers/net/ethernet/qlogic/qed/qed_hw.c struct qed_ptt *p_ptt, void *dest, u32 hw_addr, size_t n) n 289 drivers/net/ethernet/qlogic/qed/qed_hw.c hw_addr, dest, hw_addr, (unsigned long)n); n 291 drivers/net/ethernet/qlogic/qed/qed_hw.c qed_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false); n 295 drivers/net/ethernet/qlogic/qed/qed_hw.c struct qed_ptt *p_ptt, u32 hw_addr, void *src, size_t n) n 299 drivers/net/ethernet/qlogic/qed/qed_hw.c hw_addr, hw_addr, src, (unsigned long)n); n 301 drivers/net/ethernet/qlogic/qed/qed_hw.c qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true); n 193 drivers/net/ethernet/qlogic/qed/qed_hw.h size_t n); n 209 drivers/net/ethernet/qlogic/qed/qed_hw.h size_t n); n 114 drivers/net/ethernet/qlogic/qede/qede_filter.c struct qede_arfs_fltr_node *n, n 120 drivers/net/ethernet/qlogic/qede/qede_filter.c if (n->used) n 125 drivers/net/ethernet/qlogic/qede/qede_filter.c params.addr = n->mapping; n 126 drivers/net/ethernet/qlogic/qede/qede_filter.c params.length = n->buf_len; n 129 drivers/net/ethernet/qlogic/qede/qede_filter.c params.b_is_drop = n->b_is_drop; n 131 drivers/net/ethernet/qlogic/qede/qede_filter.c if (n->vfid) { n 133 drivers/net/ethernet/qlogic/qede/qede_filter.c params.vf_id = n->vfid - 1; n 136 drivers/net/ethernet/qlogic/qede/qede_filter.c if (n->tuple.stringify) { n 139 drivers/net/ethernet/qlogic/qede/qede_filter.c n->tuple.stringify(&n->tuple, tuple_buffer); n 143 drivers/net/ethernet/qlogic/qede/qede_filter.c n->sw_id, tuple_buffer, n->vfid, rxq_id); n 146 
drivers/net/ethernet/qlogic/qede/qede_filter.c n->used = true; n 147 drivers/net/ethernet/qlogic/qede/qede_filter.c n->filter_op = add_fltr; n 148 drivers/net/ethernet/qlogic/qede/qede_filter.c op->ntuple_filter_config(edev->cdev, n, &params); n 429 drivers/net/ethernet/qlogic/qede/qede_filter.c struct qede_arfs_fltr_node *n; n 438 drivers/net/ethernet/qlogic/qede/qede_filter.c n = kzalloc(sizeof(*n), GFP_ATOMIC); n 439 drivers/net/ethernet/qlogic/qede/qede_filter.c if (!n) n 442 drivers/net/ethernet/qlogic/qede/qede_filter.c n->data = kzalloc(min_hlen, GFP_ATOMIC); n 443 drivers/net/ethernet/qlogic/qede/qede_filter.c if (!n->data) { n 444 drivers/net/ethernet/qlogic/qede/qede_filter.c kfree(n); n 448 drivers/net/ethernet/qlogic/qede/qede_filter.c n->sw_id = (u16)bit_id; n 450 drivers/net/ethernet/qlogic/qede/qede_filter.c return n; n 457 drivers/net/ethernet/qlogic/qede/qede_filter.c struct qede_arfs_fltr_node *n; n 487 drivers/net/ethernet/qlogic/qede/qede_filter.c n = qede_arfs_htbl_key_search(QEDE_ARFS_BUCKET_HEAD(edev, tbl_idx), n 489 drivers/net/ethernet/qlogic/qede/qede_filter.c if (n) { n 491 drivers/net/ethernet/qlogic/qede/qede_filter.c n->next_rxq_id = rxq_index; n 493 drivers/net/ethernet/qlogic/qede/qede_filter.c if (test_bit(QEDE_FLTR_VALID, &n->state)) { n 494 drivers/net/ethernet/qlogic/qede/qede_filter.c if (n->rxq_id != rxq_index) n 495 drivers/net/ethernet/qlogic/qede/qede_filter.c qede_configure_arfs_fltr(edev, n, n->rxq_id, n 498 drivers/net/ethernet/qlogic/qede/qede_filter.c if (!n->used) { n 499 drivers/net/ethernet/qlogic/qede/qede_filter.c n->rxq_id = rxq_index; n 500 drivers/net/ethernet/qlogic/qede/qede_filter.c qede_configure_arfs_fltr(edev, n, n->rxq_id, n 505 drivers/net/ethernet/qlogic/qede/qede_filter.c rc = n->sw_id; n 511 drivers/net/ethernet/qlogic/qede/qede_filter.c n = qede_alloc_filter(edev, min_hlen); n 512 drivers/net/ethernet/qlogic/qede/qede_filter.c if (!n) { n 517 drivers/net/ethernet/qlogic/qede/qede_filter.c n->buf_len = min_hlen; n 518 drivers/net/ethernet/qlogic/qede/qede_filter.c n->rxq_id = rxq_index; n 519 drivers/net/ethernet/qlogic/qede/qede_filter.c n->next_rxq_id = rxq_index; n 520 drivers/net/ethernet/qlogic/qede/qede_filter.c n->tuple.src_port = ports[0]; n 521 drivers/net/ethernet/qlogic/qede/qede_filter.c n->tuple.dst_port = ports[1]; n 522 drivers/net/ethernet/qlogic/qede/qede_filter.c n->flow_id = flow_id; n 525 drivers/net/ethernet/qlogic/qede/qede_filter.c n->tuple.src_ipv4 = ip_hdr(skb)->saddr; n 526 drivers/net/ethernet/qlogic/qede/qede_filter.c n->tuple.dst_ipv4 = ip_hdr(skb)->daddr; n 528 drivers/net/ethernet/qlogic/qede/qede_filter.c memcpy(&n->tuple.src_ipv6, &ipv6_hdr(skb)->saddr, n 530 drivers/net/ethernet/qlogic/qede/qede_filter.c memcpy(&n->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr, n 534 drivers/net/ethernet/qlogic/qede/qede_filter.c eth = (struct ethhdr *)n->data; n 536 drivers/net/ethernet/qlogic/qede/qede_filter.c n->tuple.eth_proto = skb->protocol; n 537 drivers/net/ethernet/qlogic/qede/qede_filter.c n->tuple.ip_proto = ip_proto; n 538 drivers/net/ethernet/qlogic/qede/qede_filter.c n->tuple.mode = QED_FILTER_CONFIG_MODE_5_TUPLE; n 539 drivers/net/ethernet/qlogic/qede/qede_filter.c memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb)); n 541 drivers/net/ethernet/qlogic/qede/qede_filter.c rc = qede_enqueue_fltr_and_config_searcher(edev, n, tbl_idx); n 545 drivers/net/ethernet/qlogic/qede/qede_filter.c qede_configure_arfs_fltr(edev, n, n->rxq_id, true); n 552 drivers/net/ethernet/qlogic/qede/qede_filter.c return n->sw_id;
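The qede_filter.c entries above outline how an aRFS/n-tuple filter node is built: the node itself is allocated first, its packed-header buffer second, and the node is freed again if that second allocation fails, so callers never see a half-initialised filter. Below is a minimal plain-C sketch of that allocate-with-rollback shape; struct fltr_node, alloc_filter() and free_filter() are simplified stand-ins invented for illustration, not the driver's types.

    #include <stdlib.h>
    #include <stdint.h>

    /* Simplified stand-in for the driver's filter node: only the fields
     * needed to show the two-stage allocation from the listing above. */
    struct fltr_node {
            void *data;      /* packed header buffer */
            size_t buf_len;  /* length of that buffer */
            uint16_t sw_id;  /* software slot id chosen by the caller */
    };

    /* Allocate the node, then its data buffer; undo the first allocation
     * if the second one fails (mirrors the kzalloc/kfree pairing above). */
    static struct fltr_node *alloc_filter(size_t min_hlen, uint16_t sw_id)
    {
            struct fltr_node *n = calloc(1, sizeof(*n));

            if (!n)
                    return NULL;

            n->data = calloc(1, min_hlen);
            if (!n->data) {
                    free(n);  /* rollback: no half-built node escapes */
                    return NULL;
            }

            n->buf_len = min_hlen;
            n->sw_id = sw_id;
            return n;
    }

    static void free_filter(struct fltr_node *n)
    {
            if (!n)
                    return;
            free(n->data);
            free(n);
    }

    int main(void)
    {
            struct fltr_node *n = alloc_filter(64, 3);

            free_filter(n);
            return 0;
    }
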
n 1690 drivers/net/ethernet/qlogic/qede/qede_filter.c struct qede_arfs_fltr_node *n, n 1694 drivers/net/ethernet/qlogic/qede/qede_filter.c n->b_is_drop = true; n 1698 drivers/net/ethernet/qlogic/qede/qede_filter.c n->vfid = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); n 1699 drivers/net/ethernet/qlogic/qede/qede_filter.c n->rxq_id = ethtool_get_flow_spec_ring(fs->ring_cookie); n 1700 drivers/net/ethernet/qlogic/qede/qede_filter.c n->next_rxq_id = n->rxq_id; n 1702 drivers/net/ethernet/qlogic/qede/qede_filter.c if (n->vfid) n 1704 drivers/net/ethernet/qlogic/qede/qede_filter.c "Configuring N-tuple for VF 0x%02x\n", n->vfid - 1); n 1948 drivers/net/ethernet/qlogic/qede/qede_filter.c struct qede_arfs_fltr_node *n; n 1981 drivers/net/ethernet/qlogic/qede/qede_filter.c n = kzalloc(sizeof(*n), GFP_KERNEL); n 1982 drivers/net/ethernet/qlogic/qede/qede_filter.c if (!n) { n 1989 drivers/net/ethernet/qlogic/qede/qede_filter.c n->data = kzalloc(min_hlen, GFP_KERNEL); n 1990 drivers/net/ethernet/qlogic/qede/qede_filter.c if (!n->data) { n 1991 drivers/net/ethernet/qlogic/qede/qede_filter.c kfree(n); n 1996 drivers/net/ethernet/qlogic/qede/qede_filter.c memcpy(&n->tuple, &t, sizeof(n->tuple)); n 1998 drivers/net/ethernet/qlogic/qede/qede_filter.c n->buf_len = min_hlen; n 1999 drivers/net/ethernet/qlogic/qede/qede_filter.c n->b_is_drop = true; n 2000 drivers/net/ethernet/qlogic/qede/qede_filter.c n->sw_id = f->cookie; n 2002 drivers/net/ethernet/qlogic/qede/qede_filter.c n->tuple.build_hdr(&n->tuple, n->data); n 2004 drivers/net/ethernet/qlogic/qede/qede_filter.c rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0); n 2008 drivers/net/ethernet/qlogic/qede/qede_filter.c qede_configure_arfs_fltr(edev, n, n->rxq_id, true); n 2009 drivers/net/ethernet/qlogic/qede/qede_filter.c rc = qede_poll_arfs_filter_config(edev, n); n 2096 drivers/net/ethernet/qlogic/qede/qede_filter.c struct qede_arfs_fltr_node *n; n 2117 drivers/net/ethernet/qlogic/qede/qede_filter.c n = kzalloc(sizeof(*n), GFP_KERNEL); n 2118 drivers/net/ethernet/qlogic/qede/qede_filter.c if (!n) { n 2124 drivers/net/ethernet/qlogic/qede/qede_filter.c n->data = kzalloc(min_hlen, GFP_KERNEL); n 2125 drivers/net/ethernet/qlogic/qede/qede_filter.c if (!n->data) { n 2126 drivers/net/ethernet/qlogic/qede/qede_filter.c kfree(n); n 2131 drivers/net/ethernet/qlogic/qede/qede_filter.c n->sw_id = fsp->location; n 2132 drivers/net/ethernet/qlogic/qede/qede_filter.c set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap); n 2133 drivers/net/ethernet/qlogic/qede/qede_filter.c n->buf_len = min_hlen; n 2135 drivers/net/ethernet/qlogic/qede/qede_filter.c memcpy(&n->tuple, &t, sizeof(n->tuple)); n 2137 drivers/net/ethernet/qlogic/qede/qede_filter.c qede_flow_set_destination(edev, n, fsp); n 2140 drivers/net/ethernet/qlogic/qede/qede_filter.c n->tuple.build_hdr(&n->tuple, n->data); n 2142 drivers/net/ethernet/qlogic/qede/qede_filter.c rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0); n 2146 drivers/net/ethernet/qlogic/qede/qede_filter.c qede_configure_arfs_fltr(edev, n, n->rxq_id, true); n 2147 drivers/net/ethernet/qlogic/qede/qede_filter.c rc = qede_poll_arfs_filter_config(edev, n); n 383 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h #define QLCNIC_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n))) n 15 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c #define MASK(n) ((1ULL<<(n))-1) n 637 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c struct hlist_node *n; n 645 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { n 665 
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c hlist_for_each_entry_safe(tmp_fil, n, head, fnode) n 682 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c struct hlist_node *n; n 689 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { n 388 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c int i, n, init_delay; n 448 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) || n 449 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c qlcnic_rom_fast_read(adapter, 4, &n) != 0) { n 450 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n); n 453 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c offset = n & 0xffffU; n 454 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c n = (n >> 16) & 0xffffU; n 456 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c if (n >= 1024) { n 461 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL); n 465 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c for (i = 0; i < n; i++) { n 476 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c for (i = 0; i < n; i++) { n 180 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c struct hlist_node *n; n 182 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { n 311 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c struct hlist_node *n; n 333 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { n 789 drivers/net/ethernet/realtek/r8169_main.c void (*delay)(unsigned int), unsigned int d, int n, n 794 drivers/net/ethernet/realtek/r8169_main.c for (i = 0; i < n; i++) { n 800 drivers/net/ethernet/realtek/r8169_main.c c->msg, !high, n, d); n 806 drivers/net/ethernet/realtek/r8169_main.c unsigned int d, int n) n 808 drivers/net/ethernet/realtek/r8169_main.c return rtl_loop_wait(tp, c, rtl_udelay, d, n, true); n 813 drivers/net/ethernet/realtek/r8169_main.c unsigned int d, int n) n 815 drivers/net/ethernet/realtek/r8169_main.c return rtl_loop_wait(tp, c, rtl_udelay, d, n, false); n 820 drivers/net/ethernet/realtek/r8169_main.c unsigned int d, int n) n 822 drivers/net/ethernet/realtek/r8169_main.c return rtl_loop_wait(tp, c, msleep, d, n, true); n 827 drivers/net/ethernet/realtek/r8169_main.c unsigned int d, int n) n 829 drivers/net/ethernet/realtek/r8169_main.c return rtl_loop_wait(tp, c, msleep, d, n, false); n 5628 drivers/net/ethernet/realtek/r8169_main.c unsigned int n) n 5632 drivers/net/ethernet/realtek/r8169_main.c for (i = 0; i < n; i++) { n 128 drivers/net/ethernet/rocker/rocker.h struct neighbour *n); n 130 drivers/net/ethernet/rocker/rocker.h struct neighbour *n); n 1710 drivers/net/ethernet/rocker/rocker_main.c struct neighbour *n) n 1716 drivers/net/ethernet/rocker/rocker_main.c return wops->port_neigh_update(rocker_port, n); n 1720 drivers/net/ethernet/rocker/rocker_main.c struct neighbour *n) n 1726 drivers/net/ethernet/rocker/rocker_main.c return wops->port_neigh_destroy(rocker_port, n); n 2026 drivers/net/ethernet/rocker/rocker_main.c struct neighbour *n) n 2028 drivers/net/ethernet/rocker/rocker_main.c struct rocker_port *rocker_port = netdev_priv(n->dev); n 2031 drivers/net/ethernet/rocker/rocker_main.c err = rocker_world_port_neigh_destroy(rocker_port, n); n 3181 drivers/net/ethernet/rocker/rocker_main.c struct neighbour *n = ptr; n 3186 drivers/net/ethernet/rocker/rocker_main.c if (n->tbl != &arp_tbl) n 3188 
drivers/net/ethernet/rocker/rocker_main.c dev = n->dev; n 3192 drivers/net/ethernet/rocker/rocker_main.c err = rocker_world_port_neigh_update(rocker_port, n); n 1352 drivers/net/ethernet/rocker/rocker_ofdpa.c struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr); n 1355 drivers/net/ethernet/rocker/rocker_ofdpa.c if (!n) { n 1356 drivers/net/ethernet/rocker/rocker_ofdpa.c n = neigh_create(&arp_tbl, &ip_addr, dev); n 1357 drivers/net/ethernet/rocker/rocker_ofdpa.c if (IS_ERR(n)) n 1358 drivers/net/ethernet/rocker/rocker_ofdpa.c return PTR_ERR(n); n 1366 drivers/net/ethernet/rocker/rocker_ofdpa.c if (n->nud_state & NUD_VALID) n 1368 drivers/net/ethernet/rocker/rocker_ofdpa.c ip_addr, n->ha); n 1370 drivers/net/ethernet/rocker/rocker_ofdpa.c neigh_event_send(n, NULL); n 1372 drivers/net/ethernet/rocker/rocker_ofdpa.c neigh_release(n); n 2689 drivers/net/ethernet/rocker/rocker_ofdpa.c struct neighbour *n) n 2692 drivers/net/ethernet/rocker/rocker_ofdpa.c int flags = (n->nud_state & NUD_VALID ? 0 : OFDPA_OP_FLAG_REMOVE) | n 2694 drivers/net/ethernet/rocker/rocker_ofdpa.c __be32 ip_addr = *(__be32 *) n->primary_key; n 2696 drivers/net/ethernet/rocker/rocker_ofdpa.c return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha); n 2700 drivers/net/ethernet/rocker/rocker_ofdpa.c struct neighbour *n) n 2704 drivers/net/ethernet/rocker/rocker_ofdpa.c __be32 ip_addr = *(__be32 *) n->primary_key; n 2706 drivers/net/ethernet/rocker/rocker_ofdpa.c return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha); n 872 drivers/net/ethernet/sfc/ef10.c static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) n 882 drivers/net/ethernet/sfc/ef10.c for (i = 0; i < n; i++) { n 1034 drivers/net/ethernet/sfc/ef10.c static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) n 1036 drivers/net/ethernet/sfc/ef10.c return n == 0 ? 
0 : -ENOBUFS; n 319 drivers/net/ethernet/sfc/ethtool.c unsigned int n = 0, i; n 322 drivers/net/ethernet/sfc/ethtool.c efx_fill_test(n++, strings, data, &tests->phy_alive, n 324 drivers/net/ethernet/sfc/ethtool.c efx_fill_test(n++, strings, data, &tests->nvram, n 326 drivers/net/ethernet/sfc/ethtool.c efx_fill_test(n++, strings, data, &tests->interrupt, n 331 drivers/net/ethernet/sfc/ethtool.c efx_fill_test(n++, strings, data, n 335 drivers/net/ethernet/sfc/ethtool.c efx_fill_test(n++, strings, data, n 341 drivers/net/ethernet/sfc/ethtool.c efx_fill_test(n++, strings, data, &tests->memory, n 343 drivers/net/ethernet/sfc/ethtool.c efx_fill_test(n++, strings, data, &tests->registers, n 357 drivers/net/ethernet/sfc/ethtool.c efx_fill_test(n++, strings, data, &tests->phy_ext[i], n 366 drivers/net/ethernet/sfc/ethtool.c n = efx_fill_loopback_test(efx, n 367 drivers/net/ethernet/sfc/ethtool.c &tests->loopback[mode], mode, n, n 371 drivers/net/ethernet/sfc/ethtool.c return n; n 301 drivers/net/ethernet/sfc/falcon/ethtool.c unsigned int n = 0, i; n 304 drivers/net/ethernet/sfc/falcon/ethtool.c ef4_fill_test(n++, strings, data, &tests->phy_alive, n 306 drivers/net/ethernet/sfc/falcon/ethtool.c ef4_fill_test(n++, strings, data, &tests->nvram, n 308 drivers/net/ethernet/sfc/falcon/ethtool.c ef4_fill_test(n++, strings, data, &tests->interrupt, n 313 drivers/net/ethernet/sfc/falcon/ethtool.c ef4_fill_test(n++, strings, data, n 317 drivers/net/ethernet/sfc/falcon/ethtool.c ef4_fill_test(n++, strings, data, n 323 drivers/net/ethernet/sfc/falcon/ethtool.c ef4_fill_test(n++, strings, data, &tests->memory, n 325 drivers/net/ethernet/sfc/falcon/ethtool.c ef4_fill_test(n++, strings, data, &tests->registers, n 339 drivers/net/ethernet/sfc/falcon/ethtool.c ef4_fill_test(n++, strings, data, &tests->phy_ext[i], n 348 drivers/net/ethernet/sfc/falcon/ethtool.c n = ef4_fill_loopback_test(efx, n 349 drivers/net/ethernet/sfc/falcon/ethtool.c &tests->loopback[mode], mode, n, n 353 drivers/net/ethernet/sfc/falcon/ethtool.c return n; n 340 drivers/net/ethernet/sfc/mcdi_pcol.h #define MC_CMD_DBI_WRITE_IN_ADDRESS_OFST(n) \ n 343 drivers/net/ethernet/sfc/mcdi_pcol.h (n) * MC_CMD_DBIWROP_TYPEDEF_LEN) n 345 drivers/net/ethernet/sfc/mcdi_pcol.h #define MC_CMD_DBI_WRITE_IN_BYTE_MASK_OFST(n) \ n 348 drivers/net/ethernet/sfc/mcdi_pcol.h (n) * MC_CMD_DBIWROP_TYPEDEF_LEN) n 350 drivers/net/ethernet/sfc/mcdi_pcol.h #define MC_CMD_DBI_WRITE_IN_VALUE_OFST(n) \ n 353 drivers/net/ethernet/sfc/mcdi_pcol.h (n) * MC_CMD_DBIWROP_TYPEDEF_LEN) n 358 drivers/net/ethernet/sfc/mcdi_pcol.h #define EVB_STACK_ID(n) (((n) & 0xff) << 16) n 237 drivers/net/ethernet/sfc/tx_tso.c int n; n 247 drivers/net/ethernet/sfc/tx_tso.c n = min(st->in_len, st->packet_space); n 249 drivers/net/ethernet/sfc/tx_tso.c st->packet_space -= n; n 250 drivers/net/ethernet/sfc/tx_tso.c st->out_len -= n; n 251 drivers/net/ethernet/sfc/tx_tso.c st->in_len -= n; n 253 drivers/net/ethernet/sfc/tx_tso.c efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer); n 270 drivers/net/ethernet/sfc/tx_tso.c st->dma_addr += n; n 97 drivers/net/ethernet/smsc/smc911x.c #define DBG(n, dev, args...) \ n 99 drivers/net/ethernet/smsc/smc911x.c if (SMC_DEBUG & (n)) \ n 105 drivers/net/ethernet/smsc/smc911x.c #define DBG(n, dev, args...) do { } while (0) n 70 drivers/net/ethernet/smsc/smc91c92_cs.c #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) n 144 drivers/net/ethernet/smsc/smc91x.c #define DBG(n, dev, fmt, ...) 
\ n 146 drivers/net/ethernet/smsc/smc91x.c if (SMC_DEBUG >= (n)) \ n 104 drivers/net/ethernet/socionext/netsec.c #define MHZ(n) ((n) * 1000 * 1000) n 1739 drivers/net/ethernet/socionext/netsec.c static int netsec_xdp_xmit(struct net_device *ndev, int n, n 1751 drivers/net/ethernet/socionext/netsec.c for (i = 0; i < n; i++) { n 1770 drivers/net/ethernet/socionext/netsec.c return n - drops; n 45 drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c size_t n; n 54 drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c for (n = 0; n < dmi_data->nfuncs; n++, func_data++) n 2587 drivers/net/ethernet/sun/niu.h #define MRVL88X2011_LED(n,v) ((v)<<((n)*4)) n 2588 drivers/net/ethernet/sun/niu.h #define MRVL88X2011_LED_STAT(n,v) ((v)>>((n)*4)) n 739 drivers/net/ethernet/synopsys/dwc-xlgmac-reg.h #define XLGMAC_MTL_REG(pdata, n, reg) \ n 740 drivers/net/ethernet/synopsys/dwc-xlgmac-reg.h ((pdata)->mac_regs + MTL_Q_BASE + ((n) * MTL_Q_INC) + (reg)) n 950 drivers/net/ethernet/tehuti/tehuti.c static inline void *bdx_rxdb_addr_elem(struct rxdb *db, int n) n 952 drivers/net/ethernet/tehuti/tehuti.c BDX_ASSERT((n < 0) || (n >= db->nelem)); n 953 drivers/net/ethernet/tehuti/tehuti.c return db->elems + n; n 961 drivers/net/ethernet/tehuti/tehuti.c static inline void bdx_rxdb_free_elem(struct rxdb *db, int n) n 963 drivers/net/ethernet/tehuti/tehuti.c BDX_ASSERT((n >= db->nelem) || (n < 0)); n 964 drivers/net/ethernet/tehuti/tehuti.c db->stack[(db->top)++] = n; n 75 drivers/net/ethernet/ti/cpsw.c int n; \ n 79 drivers/net/ethernet/ti/cpsw.c for (n = cpsw->data.slaves, \ n 81 drivers/net/ethernet/ti/cpsw.c n; n--) \ n 2374 drivers/net/ethernet/ti/cpsw.c static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n, n 2384 drivers/net/ethernet/ti/cpsw.c for (i = 0; i < n; i++) { n 2396 drivers/net/ethernet/ti/cpsw.c return n - drops; n 501 drivers/net/ethernet/ti/netcp_core.c struct netcp_hook_list *next, *n; n 505 drivers/net/ethernet/ti/netcp_core.c list_for_each_entry_safe(next, n, &netcp_priv->txhook_list_head, list) { n 550 drivers/net/ethernet/ti/netcp_core.c struct netcp_hook_list *next, *n; n 554 drivers/net/ethernet/ti/netcp_core.c list_for_each_entry_safe(next, n, &netcp_priv->rxhook_list_head, list) { n 663 drivers/net/ethernet/via/via-rhine.c int n; n 668 drivers/net/ethernet/via/via-rhine.c n = inb(pioaddr + ConfigA) | 0x20; n 669 drivers/net/ethernet/via/via-rhine.c outb(n, pioaddr + ConfigA); n 671 drivers/net/ethernet/via/via-rhine.c n = inb(pioaddr + ConfigD) | 0x80; n 672 drivers/net/ethernet/via/via-rhine.c outb(n, pioaddr + ConfigD); n 1741 drivers/net/ethernet/via/via-velocity.c int q, int n) n 1743 drivers/net/ethernet/via/via-velocity.c struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]); n 1156 drivers/net/ethernet/via/via-velocity.h #define mac_set_dma_length(regs, n) {\ n 1157 drivers/net/ethernet/via/via-velocity.h BYTE_REG_BITS_SET((n),0x07,&((regs)->DCFG));\ n 1160 drivers/net/ethernet/via/via-velocity.h #define mac_set_rx_thresh(regs, n) {\ n 1161 drivers/net/ethernet/via/via-velocity.h BYTE_REG_BITS_SET((n),(MCFG_RFT0|MCFG_RFT1),&((regs)->MCFG));\ n 1172 drivers/net/ethernet/via/via-velocity.h #define mac_tx_queue_run(regs, n) {\ n 1173 drivers/net/ethernet/via/via-velocity.h writew(TRDCSR_RUN<<((n)*4),&((regs)->TDCSRSet));\ n 1176 drivers/net/ethernet/via/via-velocity.h #define mac_tx_queue_wake(regs, n) {\ n 1177 drivers/net/ethernet/via/via-velocity.h writew(TRDCSR_WAK<<(n*4),&((regs)->TDCSRSet));\ n 111 drivers/net/ethernet/wiznet/w5100.c #define W5200_Sn_RXMEM_SIZE(n) (0x401e + (n) * 
0x0100) /* Sn RX Memory Size */ n 112 drivers/net/ethernet/wiznet/w5100.c #define W5200_Sn_TXMEM_SIZE(n) (0x401f + (n) * 0x0100) /* Sn TX Memory Size */ n 132 drivers/net/ethernet/wiznet/w5100.c #define W5500_Sn_RXMEM_SIZE(n) \ n 133 drivers/net/ethernet/wiznet/w5100.c (0x1001e + (n) * 0x40000) /* Sn RX Memory Size */ n 134 drivers/net/ethernet/wiznet/w5100.c #define W5500_Sn_TXMEM_SIZE(n) \ n 135 drivers/net/ethernet/wiznet/w5100.c (0x1001f + (n) * 0x40000) /* Sn TX Memory Size */ n 43 drivers/net/ethernet/wiznet/w5300.c #define MR_WDF(n) (((n)&7)<<11) /* Write data fetch time */ n 235 drivers/net/ethernet/xircom/xirc2ps_cs.c #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) n 1133 drivers/net/ethernet/xircom/xirc2ps_cs.c unsigned n, nn; n 1135 drivers/net/ethernet/xircom/xirc2ps_cs.c n = lp->last_ptr_value; n 1138 drivers/net/ethernet/xircom/xirc2ps_cs.c if (nn < n) /* rollover */ n 1139 drivers/net/ethernet/xircom/xirc2ps_cs.c dev->stats.tx_packets += 256 - n; n 1140 drivers/net/ethernet/xircom/xirc2ps_cs.c else if (n == nn) { /* happens sometimes - don't know why */ n 1143 drivers/net/ethernet/xircom/xirc2ps_cs.c dev->stats.tx_packets += lp->last_ptr_value - n; n 231 drivers/net/ethernet/xscale/ixp4xx_eth.c #define rx_desc_phys(port, n) ((port)->desc_tab_phys + \ n 232 drivers/net/ethernet/xscale/ixp4xx_eth.c (n) * sizeof(struct desc)) n 233 drivers/net/ethernet/xscale/ixp4xx_eth.c #define rx_desc_ptr(port, n) (&(port)->desc_tab[n]) n 235 drivers/net/ethernet/xscale/ixp4xx_eth.c #define tx_desc_phys(port, n) ((port)->desc_tab_phys + \ n 236 drivers/net/ethernet/xscale/ixp4xx_eth.c ((n) + RX_DESCS) * sizeof(struct desc)) n 237 drivers/net/ethernet/xscale/ixp4xx_eth.c #define tx_desc_ptr(port, n) (&(port)->desc_tab[(n) + RX_DESCS]) n 681 drivers/net/ethernet/xscale/ixp4xx_eth.c int n; n 687 drivers/net/ethernet/xscale/ixp4xx_eth.c if ((n = queue_get_desc(rxq, port, 0)) < 0) { n 710 drivers/net/ethernet/xscale/ixp4xx_eth.c desc = rx_desc_ptr(port, n); n 731 drivers/net/ethernet/xscale/ixp4xx_eth.c queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); n 738 drivers/net/ethernet/xscale/ixp4xx_eth.c skb = port->rx_buff_tab[n]; n 744 drivers/net/ethernet/xscale/ixp4xx_eth.c memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n], n 760 drivers/net/ethernet/xscale/ixp4xx_eth.c port->rx_buff_tab[n] = temp; n 765 drivers/net/ethernet/xscale/ixp4xx_eth.c queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); n 828 drivers/net/ethernet/xscale/ixp4xx_eth.c int len, offset, bytes, n; n 871 drivers/net/ethernet/xscale/ixp4xx_eth.c n = queue_get_desc(txreadyq, port, 1); n 872 drivers/net/ethernet/xscale/ixp4xx_eth.c BUG_ON(n < 0); n 873 drivers/net/ethernet/xscale/ixp4xx_eth.c desc = tx_desc_ptr(port, n); n 876 drivers/net/ethernet/xscale/ixp4xx_eth.c port->tx_buff_tab[n] = skb; n 878 drivers/net/ethernet/xscale/ixp4xx_eth.c port->tx_buff_tab[n] = mem; n 885 drivers/net/ethernet/xscale/ixp4xx_eth.c queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc); n 1303 drivers/net/ethernet/xscale/ixp4xx_eth.c int n = queue_get_desc(port->plat->txreadyq, port, 1); n 1304 drivers/net/ethernet/xscale/ixp4xx_eth.c BUG_ON(n < 0); n 1305 drivers/net/ethernet/xscale/ixp4xx_eth.c desc = tx_desc_ptr(port, n); n 1306 drivers/net/ethernet/xscale/ixp4xx_eth.c phys = tx_desc_phys(port, n); n 2930 drivers/net/fddi/defxx.c static void my_skb_align(struct sk_buff *skb, int n) n 2935 drivers/net/fddi/defxx.c v = ALIGN(x, n); /* Where we want to be */ n 1077 drivers/net/fddi/skfp/fplustm.c if 
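The xirc2ps_cs.c hits above (around source lines 1133-1143) read back an 8-bit hardware packet counter and have to detect rollover when the new reading is smaller than the previous one. A minimal, self-contained sketch of that wraparound-delta bookkeeping; counter_delta8() is a hypothetical helper standing in for the driver's inline arithmetic, which spreads the same computation over several branches:

/*
 * Delta of an 8-bit wrapping hardware counter, as suggested by the
 * xirc2ps_cs.c listing above.  Assumes readings are in 0..255.
 */
#include <stdio.h>

static unsigned int counter_delta8(unsigned int prev, unsigned int now)
{
        if (now < prev)                         /* counter wrapped past 255 */
                return 256 - prev + now;
        return now - prev;
}

int main(void)
{
        printf("%u\n", counter_delta8(250, 3)); /* 9 packets across the wrap */
        printf("%u\n", counter_delta8(10, 42)); /* 32 packets, no wrap */
        return 0;
}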
(!tb->n) { /* not used */ n 1109 drivers/net/fddi/skfp/fplustm.c tb->n = 0 ; n 1166 drivers/net/fddi/skfp/fplustm.c tb->n++ ; n 1231 drivers/net/fddi/skfp/fplustm.c if (tb->n) { n 68 drivers/net/fddi/skfp/h/cmtdef.h #define DB_ECMN(n, fmt, ...) \ n 69 drivers/net/fddi/skfp/h/cmtdef.h DB_PR((DB_TEST).d_ecm >= (n), fmt, ##__VA_ARGS__) n 72 drivers/net/fddi/skfp/h/cmtdef.h #define DB_RMTN(n, fmt, ...) \ n 73 drivers/net/fddi/skfp/h/cmtdef.h DB_PR((DB_TEST).d_rmt >= (n), fmt, ##__VA_ARGS__) n 76 drivers/net/fddi/skfp/h/cmtdef.h #define DB_CFMN(n, fmt, ...) \ n 77 drivers/net/fddi/skfp/h/cmtdef.h DB_PR((DB_TEST).d_cfm >= (n), fmt, ##__VA_ARGS__) n 80 drivers/net/fddi/skfp/h/cmtdef.h #define DB_PCMN(n, fmt, ...) \ n 81 drivers/net/fddi/skfp/h/cmtdef.h DB_PR((DB_TEST).d_pcm >= (n), fmt, ##__VA_ARGS__) n 84 drivers/net/fddi/skfp/h/cmtdef.h #define DB_SMTN(n, fmt, ...) \ n 85 drivers/net/fddi/skfp/h/cmtdef.h DB_PR((DB_TEST).d_smtf >= (n), fmt, ##__VA_ARGS__) n 88 drivers/net/fddi/skfp/h/cmtdef.h #define DB_SBAN(n, fmt, ...) \ n 89 drivers/net/fddi/skfp/h/cmtdef.h DB_PR((DB_TEST).d_sba >= (n), fmt, ##__VA_ARGS__) n 92 drivers/net/fddi/skfp/h/cmtdef.h #define DB_ESSN(n, fmt, ...) \ n 93 drivers/net/fddi/skfp/h/cmtdef.h DB_PR((DB_TEST).d_ess >= (n), fmt, ##__VA_ARGS__) n 220 drivers/net/fddi/skfp/h/fplustm.h u_char n ; /* usage counter */ n 1040 drivers/net/fddi/skfp/hwmtm.c int n ; n 1080 drivers/net/fddi/skfp/hwmtm.c n = 0 ; n 1129 drivers/net/fddi/skfp/hwmtm.c n += rbctrl & 0xffff ; n 1212 drivers/net/fddi/skfp/hwmtm.c if (len != (n-4)) { n 1213 drivers/net/fddi/skfp/hwmtm.c DB_RX(4, "BMU: rx len differs: [%d:%d]", len, n); n 1276 drivers/net/fddi/skfp/hwmtm.c n = le32_to_cpu(r->rxd_rbctrl) & RD_LENGTH ; n 1277 drivers/net/fddi/skfp/hwmtm.c DB_RX(6, "cp SMT frame to mb: len = %d", n); n 1278 drivers/net/fddi/skfp/hwmtm.c memcpy(data,r->rxd_virt,n) ; n 1279 drivers/net/fddi/skfp/hwmtm.c data += n ; n 1806 drivers/net/fddi/skfp/hwmtm.c int n ; n 1833 drivers/net/fddi/skfp/hwmtm.c n = SMT_PAGESIZE - ((long)data & (SMT_PAGESIZE-1)) ; n 1834 drivers/net/fddi/skfp/hwmtm.c if (n >= len) { n 1835 drivers/net/fddi/skfp/hwmtm.c n = len ; n 1837 drivers/net/fddi/skfp/hwmtm.c DB_TX(5, "frag: virt/len = 0x%p/%d", data, n); n 1839 drivers/net/fddi/skfp/hwmtm.c frag_len[frag_count] = n ; n 1841 drivers/net/fddi/skfp/hwmtm.c len -= n ; n 1842 drivers/net/fddi/skfp/hwmtm.c data += n ; n 1946 drivers/net/fddi/skfp/hwmtm.c int n ; n 1971 drivers/net/fddi/skfp/hwmtm.c for (n = frag_count; n; n--) { n 530 drivers/net/fddi/skfp/pcmplc.c int n ; n 536 drivers/net/fddi/skfp/pcmplc.c for (i = len-1,n = 0 ; i >= 0 ; i--) { n 537 drivers/net/fddi/skfp/pcmplc.c n = (n<<1) | phy->t_val[phy->bitn+i] ; n 547 drivers/net/fddi/skfp/pcmplc.c outpw(PLC(np,PL_XMIT_VECTOR),n) ; n 1616 drivers/net/fddi/skfp/pcmplc.c int n ; n 1618 drivers/net/fddi/skfp/pcmplc.c for (n = 0 ; n < NUMPHYS ; n++) { n 1619 drivers/net/fddi/skfp/pcmplc.c if (smc->y[n].mib->fddiPORTPCMState == PC8_ACTIVE && n 1620 drivers/net/fddi/skfp/pcmplc.c smc->y[n].mib->fddiPORTNeighborType == TM) n 1634 drivers/net/fddi/skfp/pcmplc.c int n ; n 1784 drivers/net/fddi/skfp/pcmplc.c n = inpw(PLC(np,PL_RCV_VECTOR)) ; n 1786 drivers/net/fddi/skfp/pcmplc.c phy->r_val[plc->p_start+i] = n & 1 ; n 1787 drivers/net/fddi/skfp/pcmplc.c n >>= 1 ; n 1561 drivers/net/fddi/skfp/pmf.c int n ; n 1597 drivers/net/fddi/skfp/pmf.c n = pa->p_len ; n 1598 drivers/net/fddi/skfp/pmf.c if ( (n < 0 ) || (n > (int)(len - PARA_LEN))) { n 1599 drivers/net/fddi/skfp/pmf.c n = len - PARA_LEN ; n 1606 
drivers/net/fddi/skfp/pmf.c if (n < 24) { n 1607 drivers/net/fddi/skfp/pmf.c dump_hex((char *)(pa+1),(int) n) ; n 1615 drivers/net/fddi/skfp/pmf.c n -= 16 ; n 1617 drivers/net/fddi/skfp/pmf.c while (n > 0) { n 1618 drivers/net/fddi/skfp/pmf.c nn = (n > 16) ? 16 : n ; n 1619 drivers/net/fddi/skfp/pmf.c if (n > 64) { n 1629 drivers/net/fddi/skfp/pmf.c n -= nn ; n 1645 drivers/net/fddi/skfp/pmf.c int n = 0 ; n 1647 drivers/net/fddi/skfp/pmf.c n++ ; n 1649 drivers/net/fddi/skfp/pmf.c printf("%x%s",*p++ & 0xff,len ? ( (n & 7) ? " " : "-") : "") ; n 1651 drivers/net/fddi/skfp/pmf.c printf("%02x%s",*p++ & 0xff,len ? ( (n & 7) ? " " : "-") : "") ; n 1615 drivers/net/fddi/skfp/skfddi.c int n; n 1632 drivers/net/fddi/skfp/skfddi.c for (n = FDDI_MAC_HDR_LEN; n; n--) n 47 drivers/net/fddi/skfp/srf.c u_char n ; n 84 drivers/net/fddi/skfp/srf.c for (index = 0 ; index < init->n ; index++) { n 219 drivers/net/hamradio/dmascc.c static int setup_adapter(int card_base, int type, int n) __init; n 297 drivers/net/hamradio/dmascc.c int h, i, j, n; n 307 drivers/net/hamradio/dmascc.c n = 0; n 399 drivers/net/hamradio/dmascc.c (setup_adapter(base[i], h, n) == 0)) n 400 drivers/net/hamradio/dmascc.c n++; n 409 drivers/net/hamradio/dmascc.c if (n) n 439 drivers/net/hamradio/dmascc.c static int __init setup_adapter(int card_base, int type, int n) n 571 drivers/net/hamradio/dmascc.c snprintf(dev->name, sizeof(dev->name), "dmascc%i", 2 * n + i); n 969 drivers/net/hamradio/dmascc.c int i, n; n 973 drivers/net/hamradio/dmascc.c n = (priv->chip == Z85230) ? 3 : 1; n 978 drivers/net/hamradio/dmascc.c (int) priv->tx_buf[priv->tx_tail] + n); n 980 drivers/net/hamradio/dmascc.c priv->tx_len[priv->tx_tail] - n); n 995 drivers/net/hamradio/dmascc.c for (i = 0; i < n; i++) n 357 drivers/net/ieee802154/atusb.c static int atusb_alloc_urbs(struct atusb *atusb, int n) n 361 drivers/net/ieee802154/atusb.c while (n) { n 368 drivers/net/ieee802154/atusb.c n--; n 3487 drivers/net/macsec.c struct macsec_dev *m, *n; n 3491 drivers/net/macsec.c list_for_each_entry_safe(m, n, &rxd->secys, secys) { n 3499 drivers/net/macsec.c struct macsec_dev *m, *n; n 3503 drivers/net/macsec.c list_for_each_entry_safe(m, n, &rxd->secys, secys) { n 383 drivers/net/macvlan.c struct hlist_node *h, *n; n 385 drivers/net/macvlan.c hlist_for_each_safe(h, n, &port->vlan_source_hash[i]) { n 168 drivers/net/netdevsim/dev.c u64 n; n 180 drivers/net/netdevsim/dev.c n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, true); n 181 drivers/net/netdevsim/dev.c err = devlink_resource_register(devlink, "fib", n, n 189 drivers/net/netdevsim/dev.c n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, true); n 190 drivers/net/netdevsim/dev.c err = devlink_resource_register(devlink, "fib-rules", n, n 208 drivers/net/netdevsim/dev.c n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, true); n 209 drivers/net/netdevsim/dev.c err = devlink_resource_register(devlink, "fib", n, n 217 drivers/net/netdevsim/dev.c n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, true); n 218 drivers/net/netdevsim/dev.c err = devlink_resource_register(devlink, "fib-rules", n, n 130 drivers/net/phy/aquantia_main.c #define SGMII_STAT(n, r, s) { n, MDIO_C22EXT_STAT_SGMII_ ## r, s } n 61 drivers/net/phy/mdio-boardinfo.c unsigned int n) n 66 drivers/net/phy/mdio-boardinfo.c be = kcalloc(n, sizeof(*be), GFP_KERNEL); n 70 drivers/net/phy/mdio-boardinfo.c for (i = 0; i < n; i++, be++, info++) { n 74 drivers/net/phy/mdio-mux-meson-g12a.c u32 val, m, n; n 78 drivers/net/phy/mdio-mux-meson-g12a.c n = 
FIELD_GET(PLL_CTL0_N, val); n 80 drivers/net/phy/mdio-mux-meson-g12a.c return parent_rate * m / n; n 403 drivers/net/phy/phy_device.c struct list_head *pos, *n; n 410 drivers/net/phy/phy_device.c list_for_each_safe(pos, n, &phy_fixup_list) { n 2340 drivers/net/phy/phy_device.c int phy_drivers_register(struct phy_driver *new_driver, int n, n 2345 drivers/net/phy/phy_device.c for (i = 0; i < n; i++) { n 2363 drivers/net/phy/phy_device.c void phy_drivers_unregister(struct phy_driver *drv, int n) n 2367 drivers/net/phy/phy_device.c for (i = 0; i < n; i++) n 1384 drivers/net/plip/plip.c int n = simple_strtoul(str+7, NULL, 10); n 1386 drivers/net/plip/plip.c parport[parport_ptr++] = n; n 835 drivers/net/ppp/ppp_async.c int c, i, j, n, s, f; n 855 drivers/net/ppp/ppp_async.c n = 1; n 857 drivers/net/ppp/ppp_async.c n = scan_ordinary(ap, buf, count); n 862 drivers/net/ppp/ppp_async.c for (j = 0; j < n; ++j) n 870 drivers/net/ppp/ppp_async.c } else if (n > 0 && (ap->state & SC_TOSS) == 0) { n 889 drivers/net/ppp/ppp_async.c if (n > skb_tailroom(skb)) { n 893 drivers/net/ppp/ppp_async.c sp = skb_put_data(skb, buf, n); n 901 drivers/net/ppp/ppp_async.c if (n >= count) n 904 drivers/net/ppp/ppp_async.c c = buf[n]; n 905 drivers/net/ppp/ppp_async.c if (flags != NULL && flags[n] != 0) { n 918 drivers/net/ppp/ppp_async.c ++n; n 920 drivers/net/ppp/ppp_async.c buf += n; n 922 drivers/net/ppp/ppp_async.c flags += n; n 923 drivers/net/ppp/ppp_async.c count -= n; n 287 drivers/net/ppp/ppp_generic.c static int unit_set(struct idr *p, void *ptr, int n); n 288 drivers/net/ppp/ppp_generic.c static void unit_put(struct idr *p, int n); n 289 drivers/net/ppp/ppp_generic.c static void *unit_find(struct idr *p, int n); n 3286 drivers/net/ppp/ppp_generic.c static int unit_set(struct idr *p, void *ptr, int n) n 3290 drivers/net/ppp/ppp_generic.c unit = idr_alloc(p, ptr, n, n + 1, GFP_KERNEL); n 3303 drivers/net/ppp/ppp_generic.c static void unit_put(struct idr *p, int n) n 3305 drivers/net/ppp/ppp_generic.c idr_remove(p, n); n 3309 drivers/net/ppp/ppp_generic.c static void *unit_find(struct idr *p, int n) n 3311 drivers/net/ppp/ppp_generic.c return idr_find(p, n); n 267 drivers/net/rionet.c int n; n 276 drivers/net/rionet.c if ((n = rionet_rx_clean(ndev)) != rnet->rx_slot) n 277 drivers/net/rionet.c rionet_rx_fill(ndev, n); n 82 drivers/net/slip/slhc.c static unsigned char *encode(unsigned char *cp, unsigned short n); n 182 drivers/net/slip/slhc.c encode(unsigned char *cp, unsigned short n) n 184 drivers/net/slip/slhc.c if(n >= 256 || n == 0){ n 186 drivers/net/slip/slhc.c cp = put16(cp,n); n 188 drivers/net/slip/slhc.c *cp++ = n; n 1296 drivers/net/tap.c int n = tap->numqueues; n 1299 drivers/net/tap.c rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL); n 1306 drivers/net/tap.c ret = ptr_ring_resize_multiple(rings, n, n 455 drivers/net/tun.c struct hlist_node *n; n 457 drivers/net/tun.c hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) n 470 drivers/net/tun.c struct hlist_node *n; n 472 drivers/net/tun.c hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) { n 493 drivers/net/tun.c struct hlist_node *n; n 495 drivers/net/tun.c hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) { n 750 drivers/net/tun.c int i, n = tun->numqueues; n 752 drivers/net/tun.c for (i = 0; i < n; i++) { n 769 drivers/net/tun.c for (i = 0; i < n; i++) { n 904 drivers/net/tun.c int n = ether_crc(ETH_ALEN, addr) >> 26; n 905 drivers/net/tun.c mask[n >> 5] |= (1 << (n & 31)); n 910 drivers/net/tun.c int n = 
ether_crc(ETH_ALEN, addr) >> 26; n 911 drivers/net/tun.c return mask[n >> 5] & (1 << (n & 31)); n 918 drivers/net/tun.c int err, alen, n, nexact; n 941 drivers/net/tun.c for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++) n 942 drivers/net/tun.c memcpy(filter->addr[n], addr[n].u, ETH_ALEN); n 944 drivers/net/tun.c nexact = n; n 949 drivers/net/tun.c for (; n < uf.count; n++) { n 950 drivers/net/tun.c if (!is_multicast_ether_addr(addr[n].u)) { n 954 drivers/net/tun.c addr_hash_set(filter->mask, addr[n].u); n 1281 drivers/net/tun.c static int tun_xdp_xmit(struct net_device *dev, int n, n 1288 drivers/net/tun.c int cnt = n; n 1309 drivers/net/tun.c for (i = 0; i < n; i++) { n 2545 drivers/net/tun.c int n = ctl->num; n 2553 drivers/net/tun.c for (i = 0; i < n; i++) { n 2936 drivers/net/tun.c static void tun_detach_filter(struct tun_struct *tun, int n) n 2941 drivers/net/tun.c for (i = 0; i < n; i++) { n 3620 drivers/net/tun.c int n = tun->numqueues + tun->numdisabled; n 3623 drivers/net/tun.c rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL); n 3634 drivers/net/tun.c ret = ptr_ring_resize_multiple(rings, n, n 1120 drivers/net/usb/cdc_ncm.c u16 n = 0, index, ndplen; n 1195 drivers/net/usb/cdc_ncm.c for (n = ctx->tx_curr_frame_num; n < ctx->tx_max_datagrams; n++) { n 1215 drivers/net/usb/cdc_ncm.c if (n == 0) { n 1263 drivers/net/usb/cdc_ncm.c ctx->tx_curr_frame_num = n; n 1265 drivers/net/usb/cdc_ncm.c if (n == 0) { n 1271 drivers/net/usb/cdc_ncm.c } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0) && (ctx->timer_interval > 0)) { n 1276 drivers/net/usb/cdc_ncm.c if (n < CDC_NCM_RESTART_TIMER_DATAGRAM_CNT) n 1281 drivers/net/usb/cdc_ncm.c if (n == ctx->tx_max_datagrams) n 1331 drivers/net/usb/cdc_ncm.c usbnet_set_skb_tx_stats(skb_out, n, n 1338 drivers/net/usb/cdc_ncm.c if (ctx->tx_curr_skb != NULL && n > 0) n 214 drivers/net/usb/net1080.c #define STATUS_PACKETS_OTHER(n) (((n) >> 8) & 0x03) n 219 drivers/net/usb/net1080.c #define STATUS_PACKETS_THIS(n) (((n) >> 0) & 0x03) n 392 drivers/net/veth.c static int veth_xdp_xmit(struct net_device *dev, int n, n 397 drivers/net/veth.c int i, ret, drops = n; n 427 drivers/net/veth.c for (i = 0; i < n; i++) { n 443 drivers/net/veth.c return n; n 445 drivers/net/veth.c ret = n - drops; n 486 drivers/net/virtio_net.c int n, struct xdp_frame **frames, u32 flags) n 512 drivers/net/virtio_net.c drops = n; n 532 drivers/net/virtio_net.c for (i = 0; i < n; i++) { n 541 drivers/net/virtio_net.c ret = n - drops; n 551 drivers/net/virtio_net.c sq->stats.xdp_tx += n; n 699 drivers/net/vmxnet3/vmxnet3_ethtool.c unsigned int n = rssConf->indTableSize; n 705 drivers/net/vmxnet3/vmxnet3_ethtool.c while (n--) n 706 drivers/net/vmxnet3/vmxnet3_ethtool.c p[n] = rssConf->indTable[n]; n 1817 drivers/net/vxlan.c struct neighbour *n; n 1847 drivers/net/vxlan.c n = neigh_lookup(&arp_tbl, &tip, dev); n 1849 drivers/net/vxlan.c if (n) { n 1853 drivers/net/vxlan.c if (!(n->nud_state & NUD_CONNECTED)) { n 1854 drivers/net/vxlan.c neigh_release(n); n 1858 drivers/net/vxlan.c f = vxlan_find_mac(vxlan, n->ha, vni); n 1861 drivers/net/vxlan.c neigh_release(n); n 1866 drivers/net/vxlan.c n->ha, sha); n 1868 drivers/net/vxlan.c neigh_release(n); n 1895 drivers/net/vxlan.c struct neighbour *n, bool isrouter) n 1939 drivers/net/vxlan.c ether_addr_copy(eth_hdr(reply)->h_source, n->ha); n 1956 drivers/net/vxlan.c pip6->saddr = *(struct in6_addr *)n->primary_key; n 1968 drivers/net/vxlan.c ether_addr_copy(&na->opt[2], n->ha); n 1991 drivers/net/vxlan.c struct neighbour *n; n 2006 
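The tun.c hits just above (addr_hash_set()/addr_hash_test(), source lines 904-911) reduce a MAC address to a 6-bit index and track multicast membership in a two-word bit mask. A user-space sketch of the same mask indexing, assuming a hypothetical hash6() helper in place of the kernel's ether_crc():

/*
 * Minimal sketch of the bit-mask bookkeeping used by the tun.c
 * addr_hash_set()/addr_hash_test() entries listed above.  hash6() is
 * a placeholder hash, not the kernel's ether_crc(); only the
 * mask[n >> 5] / (1 << (n & 31)) indexing mirrors the driver.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int hash6(const uint8_t *addr)
{
        unsigned int h = 0;
        int i;

        for (i = 0; i < 6; i++)
                h = h * 31 + addr[i];
        return h & 0x3f;                        /* 6 bits -> 0..63 */
}

static void addr_hash_set(uint32_t mask[2], const uint8_t *addr)
{
        unsigned int n = hash6(addr);

        mask[n >> 5] |= 1u << (n & 31);         /* same indexing as tun.c */
}

static int addr_hash_test(const uint32_t mask[2], const uint8_t *addr)
{
        unsigned int n = hash6(addr);

        return !!(mask[n >> 5] & (1u << (n & 31)));
}

int main(void)
{
        uint32_t mask[2] = { 0, 0 };
        uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

        addr_hash_set(mask, mc);
        printf("present: %d\n", addr_hash_test(mask, mc));
        return 0;
}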
drivers/net/vxlan.c n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev); n 2008 drivers/net/vxlan.c if (n) { n 2012 drivers/net/vxlan.c if (!(n->nud_state & NUD_CONNECTED)) { n 2013 drivers/net/vxlan.c neigh_release(n); n 2017 drivers/net/vxlan.c f = vxlan_find_mac(vxlan, n->ha, vni); n 2020 drivers/net/vxlan.c neigh_release(n); n 2024 drivers/net/vxlan.c reply = vxlan_na_create(skb, n, n 2027 drivers/net/vxlan.c neigh_release(n); n 2053 drivers/net/vxlan.c struct neighbour *n; n 2058 drivers/net/vxlan.c n = NULL; n 2067 drivers/net/vxlan.c n = neigh_lookup(&arp_tbl, &pip->daddr, dev); n 2068 drivers/net/vxlan.c if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) { n 2088 drivers/net/vxlan.c n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev); n 2089 drivers/net/vxlan.c if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) { n 2106 drivers/net/vxlan.c if (n) { n 2109 drivers/net/vxlan.c diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha); n 2113 drivers/net/vxlan.c memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len); n 2115 drivers/net/vxlan.c neigh_release(n); n 2729 drivers/net/vxlan.c struct hlist_node *p, *n; n 2732 drivers/net/vxlan.c hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { n 2857 drivers/net/vxlan.c struct hlist_node *p, *n; n 2860 drivers/net/vxlan.c hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { n 925 drivers/net/wan/cosa.c int n; n 929 drivers/net/wan/cosa.c if ((n=iminor(file_inode(file))>>CARD_MINOR_BITS) n 934 drivers/net/wan/cosa.c cosa = cosa_cards+n; n 936 drivers/net/wan/cosa.c if ((n=iminor(file_inode(file)) n 941 drivers/net/wan/cosa.c chan = cosa->chan + n; n 311 drivers/net/wan/ixp4xx_hss.c #define rx_desc_phys(port, n) ((port)->desc_tab_phys + \ n 312 drivers/net/wan/ixp4xx_hss.c (n) * sizeof(struct desc)) n 313 drivers/net/wan/ixp4xx_hss.c #define rx_desc_ptr(port, n) (&(port)->desc_tab[n]) n 315 drivers/net/wan/ixp4xx_hss.c #define tx_desc_phys(port, n) ((port)->desc_tab_phys + \ n 316 drivers/net/wan/ixp4xx_hss.c ((n) + RX_DESCS) * sizeof(struct desc)) n 317 drivers/net/wan/ixp4xx_hss.c #define tx_desc_ptr(port, n) (&(port)->desc_tab[(n) + RX_DESCS]) n 666 drivers/net/wan/ixp4xx_hss.c int n; n 672 drivers/net/wan/ixp4xx_hss.c if ((n = queue_get_desc(rxq, port, 0)) < 0) { n 696 drivers/net/wan/ixp4xx_hss.c desc = rx_desc_ptr(port, n); n 745 drivers/net/wan/ixp4xx_hss.c queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); n 752 drivers/net/wan/ixp4xx_hss.c skb = port->rx_buff_tab[n]; n 758 drivers/net/wan/ixp4xx_hss.c memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n], n 772 drivers/net/wan/ixp4xx_hss.c port->rx_buff_tab[n] = temp; n 777 drivers/net/wan/ixp4xx_hss.c queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); n 831 drivers/net/wan/ixp4xx_hss.c int len, offset, bytes, n; n 876 drivers/net/wan/ixp4xx_hss.c n = queue_get_desc(txreadyq, port, 1); n 877 drivers/net/wan/ixp4xx_hss.c BUG_ON(n < 0); n 878 drivers/net/wan/ixp4xx_hss.c desc = tx_desc_ptr(port, n); n 881 drivers/net/wan/ixp4xx_hss.c port->tx_buff_tab[n] = skb; n 883 drivers/net/wan/ixp4xx_hss.c port->tx_buff_tab[n] = mem; n 889 drivers/net/wan/ixp4xx_hss.c queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc); n 650 drivers/net/wan/lmc/lmc_media.c av->n = 120; n 656 drivers/net/wan/lmc/lmc_media.c write_av9110 (sc, av->n, av->m, av->v, av->x, av->r); n 668 drivers/net/wan/lmc/lmc_media.c write_av9110 (sc, av->n, av->m, av->v, av->x, av->r); n 835 drivers/net/wan/lmc/lmc_media.c static void write_av9110(lmc_softc_t *sc, u32 n, u32 m, u32 v, u32 x, u32 r) n 841 
drivers/net/wan/lmc/lmc_media.c LMC_PRINTF_ARGS, sc->ictl.clock_rate, n, m, v, x, r); n 862 drivers/net/wan/lmc/lmc_media.c write_av9110_bit (sc, n >> i); n 53 drivers/net/wan/lmc/lmc_var.h #define DELAY(n) SLOW_DOWN_IO n 58 drivers/net/wan/lmc/lmc_var.h #define LMC_MII_SYNC(sc) do {int n=32; while( n >= 0 ) { \ n 63 drivers/net/wan/lmc/lmc_var.h n--; }} while(0) n 151 drivers/net/wan/lmc/lmc_var.h u32 n; n 1520 drivers/net/wan/sbni.c int n, parm; n 1525 drivers/net/wan/sbni.c for( n = 0, parm = 0; *p && n < 8; ) { n 1526 drivers/net/wan/sbni.c (*dest[ parm ])[ n ] = simple_strtol( p, &p, 0 ); n 1530 drivers/net/wan/sbni.c ++p, ++n, parm = 0; n 932 drivers/net/wimax/i2400m/tx.c unsigned n; n 945 drivers/net/wimax/i2400m/tx.c n = i2400m->tx_out / I2400M_TX_BUF_SIZE; n 947 drivers/net/wimax/i2400m/tx.c i2400m->tx_in -= n * I2400M_TX_BUF_SIZE; n 149 drivers/net/wireless/ath/ath10k/ce.c unsigned int n) n 152 drivers/net/wireless/ath/ath10k/ce.c ar->hw_ce_regs->dst_wr_index_addr, n); n 164 drivers/net/wireless/ath/ath10k/ce.c unsigned int n) n 167 drivers/net/wireless/ath/ath10k/ce.c ar->hw_ce_regs->sr_wr_index_addr, n); n 249 drivers/net/wireless/ath/ath10k/ce.c unsigned int n) n 252 drivers/net/wireless/ath/ath10k/ce.c ar->hw_ce_regs->sr_size_addr, n); n 257 drivers/net/wireless/ath/ath10k/ce.c unsigned int n) n 266 drivers/net/wireless/ath/ath10k/ce.c ath10k_set_ring_byte(n, ctrl_regs->dmax)); n 271 drivers/net/wireless/ath/ath10k/ce.c unsigned int n) n 280 drivers/net/wireless/ath/ath10k/ce.c ath10k_set_ring_byte(n, ctrl_regs->src_ring)); n 285 drivers/net/wireless/ath/ath10k/ce.c unsigned int n) n 294 drivers/net/wireless/ath/ath10k/ce.c ath10k_set_ring_byte(n, ctrl_regs->dst_ring)); n 359 drivers/net/wireless/ath/ath10k/ce.c unsigned int n) n 362 drivers/net/wireless/ath/ath10k/ce.c ar->hw_ce_regs->dr_size_addr, n); n 367 drivers/net/wireless/ath/ath10k/ce.c unsigned int n) n 374 drivers/net/wireless/ath/ath10k/ce.c (ath10k_set_ring_byte(n, srcr_wm->wm_high))); n 379 drivers/net/wireless/ath/ath10k/ce.c unsigned int n) n 386 drivers/net/wireless/ath/ath10k/ce.c (ath10k_set_ring_byte(n, srcr_wm->wm_low))); n 391 drivers/net/wireless/ath/ath10k/ce.c unsigned int n) n 398 drivers/net/wireless/ath/ath10k/ce.c (ath10k_set_ring_byte(n, dstr_wm->wm_high))); n 403 drivers/net/wireless/ath/ath10k/ce.c unsigned int n) n 410 drivers/net/wireless/ath/ath10k/ce.c (ath10k_set_ring_byte(n, dstr_wm->wm_low))); n 43 drivers/net/wireless/ath/ath10k/htt_rx.c struct hlist_node *n; n 47 drivers/net/wireless/ath/ath10k/htt_rx.c hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) { n 2292 drivers/net/wireless/ath/ath10k/mac.c int i, n; n 2356 drivers/net/wireless/ath/ath10k/mac.c for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++) n 2360 drivers/net/wireless/ath/ath10k/mac.c arg->peer_ht_rates.rates[n++] = i; n 2372 drivers/net/wireless/ath/ath10k/mac.c if (n == 0) { n 2377 drivers/net/wireless/ath/ath10k/mac.c arg->peer_ht_rates.num_rates = n; n 5336 drivers/net/wireless/ath/ath10k/wmi.c size_t i, n; n 5358 drivers/net/wireless/ath/ath10k/wmi.c n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs), n 5360 drivers/net/wireless/ath/ath10k/wmi.c for (i = 0; i < n; i++) n 5375 drivers/net/wireless/ath/ath10k/wmi.c int i, n; n 5396 drivers/net/wireless/ath/ath10k/wmi.c n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs), n 5398 drivers/net/wireless/ath/ath10k/wmi.c for (i = 0; i < n; i++) n 837 drivers/net/wireless/ath/ath5k/debug.c int i, n; n 851 drivers/net/wireless/ath/ath5k/debug.c n 
= 0; n 854 drivers/net/wireless/ath/ath5k/debug.c n++; n 858 drivers/net/wireless/ath/ath5k/debug.c " len: %d bufs: %d\n", txq->txq_len, n); n 951 drivers/net/wireless/ath/ath5k/reg.h #define AR5K_GPIOCR_IN(n) (0 << ((n) * 2)) /* Mode 0 for pin n */ n 952 drivers/net/wireless/ath/ath5k/reg.h #define AR5K_GPIOCR_OUT0(n) (1 << ((n) * 2)) /* Mode 1 for pin n */ n 953 drivers/net/wireless/ath/ath5k/reg.h #define AR5K_GPIOCR_OUT1(n) (2 << ((n) * 2)) /* Mode 2 for pin n */ n 954 drivers/net/wireless/ath/ath5k/reg.h #define AR5K_GPIOCR_OUT(n) (3 << ((n) * 2)) /* Mode 3 for pin n */ n 955 drivers/net/wireless/ath/ath5k/reg.h #define AR5K_GPIOCR_INT_SEL(n) ((n) << 12) /* Interrupt for GPIO pin n */ n 155 drivers/net/wireless/ath/ath9k/hw.c u32 ath9k_hw_reverse_bits(u32 val, u32 n) n 160 drivers/net/wireless/ath/ath9k/hw.c for (i = 0, retval = 0; i < n; i++) { n 1045 drivers/net/wireless/ath/ath9k/hw.h u32 ath9k_hw_reverse_bits(u32 val, u32 n); n 547 drivers/net/wireless/ath/carl9170/debug.c int err = 0, i, n = 0, max_len = 32, res; n 556 drivers/net/wireless/ath/carl9170/debug.c res = sscanf(buf, "0x%X %d", ®, &n); n 563 drivers/net/wireless/ath/carl9170/debug.c n = 1; n 565 drivers/net/wireless/ath/carl9170/debug.c if (n > 15) { n 570 drivers/net/wireless/ath/carl9170/debug.c if ((reg >= 0x280000) || ((reg + (n << 2)) >= 0x280000)) { n 580 drivers/net/wireless/ath/carl9170/debug.c for (i = 0; i < n; i++) { n 1320 drivers/net/wireless/ath/carl9170/phy.c #define EDGES(c, n) (ar->eeprom.ctl_data[c].control_edges[n]) n 1435 drivers/net/wireless/ath/carl9170/phy.c int idx, i, n; n 1470 drivers/net/wireless/ath/carl9170/phy.c for (n = 0; n < ntargets; n++) { n 1471 drivers/net/wireless/ath/carl9170/phy.c if (ctpl[n].freq == 0xff) n 1473 drivers/net/wireless/ath/carl9170/phy.c pwr_freqs[n] = ctpl[n].freq; n 1475 drivers/net/wireless/ath/carl9170/phy.c ntargets = n; n 1477 drivers/net/wireless/ath/carl9170/phy.c for (n = 0; n < 4; n++) n 1478 drivers/net/wireless/ath/carl9170/phy.c ctpres[n] = carl9170_interpolate_u8(f, n 1479 drivers/net/wireless/ath/carl9170/phy.c ctpl[idx + 0].freq, ctpl[idx + 0].power[n], n 1480 drivers/net/wireless/ath/carl9170/phy.c ctpl[idx + 1].freq, ctpl[idx + 1].power[n]); n 1510 drivers/net/wireless/ath/carl9170/phy.c for (n = 0; n < ntargets; n++) { n 1511 drivers/net/wireless/ath/carl9170/phy.c if (ctph[n].freq == 0xff) n 1513 drivers/net/wireless/ath/carl9170/phy.c pwr_freqs[n] = ctph[n].freq; n 1515 drivers/net/wireless/ath/carl9170/phy.c ntargets = n; n 1517 drivers/net/wireless/ath/carl9170/phy.c for (n = 0; n < 8; n++) n 1518 drivers/net/wireless/ath/carl9170/phy.c ctpres[n] = carl9170_interpolate_u8(f, n 1519 drivers/net/wireless/ath/carl9170/phy.c ctph[idx + 0].freq, ctph[idx + 0].power[n], n 1520 drivers/net/wireless/ath/carl9170/phy.c ctph[idx + 1].freq, ctph[idx + 1].power[n]); n 886 drivers/net/wireless/ath/wil6210/cfg80211.c uint i, n; n 970 drivers/net/wireless/ath/wil6210/cfg80211.c n = min(request->n_channels, 4U); n 971 drivers/net/wireless/ath/wil6210/cfg80211.c for (i = 0; i < n; i++) { n 1052 drivers/net/wireless/ath/wil6210/cfg80211.c int i, n; n 1057 drivers/net/wireless/ath/wil6210/cfg80211.c n = min_t(int, c->n_ciphers_pairwise, ARRAY_SIZE(c->ciphers_pairwise)); n 1058 drivers/net/wireless/ath/wil6210/cfg80211.c for (i = 0; i < n; i++) n 1063 drivers/net/wireless/ath/wil6210/cfg80211.c n = min_t(int, c->n_akm_suites, ARRAY_SIZE(c->akm_suites)); n 1064 drivers/net/wireless/ath/wil6210/cfg80211.c for (i = 0; i < n; i++) n 1281 
drivers/net/wireless/ath/wil6210/debugfs.c int n; n 1283 drivers/net/wireless/ath/wil6210/debugfs.c for (n = 0; n < sz / sizeof(*x); n++) n 1284 drivers/net/wireless/ath/wil6210/debugfs.c if (x[n]) n 1518 drivers/net/wireless/ath/wil6210/debugfs.c int n; n 1521 drivers/net/wireless/ath/wil6210/debugfs.c n = snprintf(buf, sizeof(buf), "mode = %s\nstate = %s\n", n 1525 drivers/net/wireless/ath/wil6210/debugfs.c n = min_t(int, n, sizeof(buf)); n 1528 drivers/net/wireless/ath/wil6210/debugfs.c buf, n); n 2073 drivers/net/wireless/ath/wil6210/debugfs.c int n; n 2075 drivers/net/wireless/ath/wil6210/debugfs.c n = snprintf(buf, sizeof(buf), n 2079 drivers/net/wireless/ath/wil6210/debugfs.c n = min_t(int, n, sizeof(buf)); n 2082 drivers/net/wireless/ath/wil6210/debugfs.c buf, n); n 2249 drivers/net/wireless/ath/wil6210/debugfs.c int n, ret, text_size = 500; n 2255 drivers/net/wireless/ath/wil6210/debugfs.c n = snprintf(text, text_size, n 2276 drivers/net/wireless/ath/wil6210/debugfs.c n = min_t(int, n, text_size); n 2278 drivers/net/wireless/ath/wil6210/debugfs.c ret = simple_read_from_buffer(user_buf, count, ppos, text, n); n 375 drivers/net/wireless/ath/wil6210/fw_inc.c int n, i; n 382 drivers/net/wireless/ath/wil6210/fw_inc.c n = size / sizeof(*block); n 384 drivers/net/wireless/ath/wil6210/fw_inc.c for (i = 0; i < n; i++) { n 437 drivers/net/wireless/ath/wil6210/fw_inc.c int n, i; n 450 drivers/net/wireless/ath/wil6210/fw_inc.c n = (size - sizeof(*d)) / sizeof(*block); n 455 drivers/net/wireless/ath/wil6210/fw_inc.c n, gw_cmd); n 474 drivers/net/wireless/ath/wil6210/fw_inc.c for (i = 0; i < n; i++) { n 501 drivers/net/wireless/ath/wil6210/fw_inc.c int n, i, k; n 514 drivers/net/wireless/ath/wil6210/fw_inc.c n = (size - sizeof(*d)) / sizeof(*block); n 519 drivers/net/wireless/ath/wil6210/fw_inc.c n, gw_cmd); n 543 drivers/net/wireless/ath/wil6210/fw_inc.c for (i = 0; i < n; i++) { n 600 drivers/net/wireless/ath/wil6210/txrx.h static inline void wil_ring_advance_head(struct wil_ring *ring, int n) n 602 drivers/net/wireless/ath/wil6210/txrx.h ring->swhead = (ring->swhead + n) % ring->size; n 245 drivers/net/wireless/ath/wil6210/wil6210.h #define BIT_DMA_EP_TX_ICR_TX_DONE_N(n) BIT(n+1) /* n = [0..23] */ n 253 drivers/net/wireless/ath/wil6210/wil6210.h #define BIT_DMA_EP_MISC_ICR_FW_INT(n) BIT(28+n) /* n = [0..3] */ n 1079 drivers/net/wireless/ath/wil6210/wil6210.h #define ndev_to_wil(n) (wdev_to_wil(n->ieee80211_ptr)) n 1080 drivers/net/wireless/ath/wil6210/wil6210.h #define ndev_to_vif(n) (struct wil6210_vif *)(netdev_priv(n)) n 1900 drivers/net/wireless/ath/wil6210/wmi.c unsigned n; n 1913 drivers/net/wireless/ath/wil6210/wmi.c for (n = 0;; n++) { n 2022 drivers/net/wireless/ath/wil6210/wmi.c n - num_immed_reply, num_immed_reply); n 209 drivers/net/wireless/broadcom/b43/phy_common.h struct b43_phy_n *n; n 97 drivers/net/wireless/broadcom/b43/phy_n.c return ((dev->phy.n->ipa2g_on && band == NL80211_BAND_2GHZ) || n 98 drivers/net/wireless/broadcom/b43/phy_n.c (dev->phy.n->ipa5g_on && band == NL80211_BAND_5GHZ)); n 547 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = phy->n; n 578 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 635 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 1173 drivers/net/wireless/broadcom/b43/phy_n.c if (dev->phy.n->ipa2g_on && band == NL80211_BAND_2GHZ) { n 1235 drivers/net/wireless/broadcom/b43/phy_n.c } else if (dev->phy.n->ipa5g_on && band == NL80211_BAND_5GHZ) { n 1437 
drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 1505 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 1578 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 2032 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 2576 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 3143 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 3353 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = phy->n; n 3440 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = phy->n; n 3487 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 3516 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 3602 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 3739 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 3934 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 3984 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 4046 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 4278 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 4397 drivers/net/wireless/broadcom/b43/phy_n.c u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs; n 4421 drivers/net/wireless/broadcom/b43/phy_n.c u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs; n 4592 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 4651 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 4720 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 4775 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 4829 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 4927 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 5008 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 5083 drivers/net/wireless/broadcom/b43/phy_n.c u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs; n 5113 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 5114 drivers/net/wireless/broadcom/b43/phy_n.c u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs; n 5214 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 5285 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 5375 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 5605 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 5641 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 5841 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = phy->n; n 5881 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 6028 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = phy->n; n 6274 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = dev->phy.n; n 6319 drivers/net/wireless/broadcom/b43/phy_n.c dev->phy.n->spur_avoid != B43_SPUR_AVOID_DISABLE) { n 6322 drivers/net/wireless/broadcom/b43/phy_n.c if (dev->phy.n->spur_avoid == B43_SPUR_AVOID_FORCE) { n 6470 drivers/net/wireless/broadcom/b43/phy_n.c dev->phy.n = nphy; n 6478 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = 
phy->n; n 6531 drivers/net/wireless/broadcom/b43/phy_n.c struct b43_phy_n *nphy = phy->n; n 6534 drivers/net/wireless/broadcom/b43/phy_n.c phy->n = NULL; n 3695 drivers/net/wireless/broadcom/b43/tables_nphy.c if ((dev->phy.n->ipa2g_on && band == NL80211_BAND_2GHZ) || n 3696 drivers/net/wireless/broadcom/b43/tables_nphy.c (dev->phy.n->ipa5g_on && band == NL80211_BAND_5GHZ)) { n 2814 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c u32 n, idx, addr; n 2854 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) { n 2861 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c if (c->last >= n) n 2862 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c c->last -= n; n 2864 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c c->last = c->bufsize - n; n 2871 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c line[n] = ch; n 2874 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c if (n > 0) { n 2875 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c if (line[n - 1] == '\r') n 2876 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c n--; n 2877 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c line[n] = 0; n 32 drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c #define QDB(n) ((n) * BRCMS_TXPWR_DB_FACTOR) n 308 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c static uint xxd(uint x, uint n) n 310 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c return x & (n - 1); /* faster than %, but n must be power of 2 */ n 1029 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c uint n; n 1046 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c n = di->nrxpost - nrxdactive(di, rxin, rxout); n 1048 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c brcms_dbg_dma(di->core, "%s: post %d\n", di->name, n); n 1053 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c for (i = 0; i < n; i++) { n 1036 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c uint n = 0; n 1047 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c while (n < max_tx_num) { n 1069 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c n++; n 1072 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c return n >= max_tx_num; n 7713 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c uint n = 0; n 7722 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c if (n >= bound_limit) n 7726 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c n++; n 38 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h #define PHY_SAT(x, n) ((x) > ((1<<((n)-1))-1) ? ((1<<((n)-1))-1) : \ n 39 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h ((x) < -(1<<((n)-1)) ? -(1<<((n)-1)) : (x))) n 40 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h #define PHY_SHIFT_ROUND(x, n) ((x) >= 0 ? 
((x)+(1<<((n)-1)))>>(n) : (x)>>(n)) n 156 drivers/net/wireless/intel/ipw2x00/ipw2100.h #define IPW_BD_QUEUE_LENGTH(n) (1<<n) n 1605 drivers/net/wireless/intel/ipw2x00/ipw2200.c int n = p->eeprom_delay; n 1606 drivers/net/wireless/intel/ipw2x00/ipw2200.c return sprintf(buf, "%i\n", n); n 10308 drivers/net/wireless/intel/ipw2x00/ipw2200.c u8 n; n 10336 drivers/net/wireless/intel/ipw2x00/ipw2200.c for(n=0; n<txb->nr_frags; ++n) { n 10337 drivers/net/wireless/intel/ipw2x00/ipw2200.c struct sk_buff *src = txb->fragments[n]; n 415 drivers/net/wireless/intel/iwlegacy/3945.h #define TFD_CTL_COUNT_SET(n) (n << 24) n 417 drivers/net/wireless/intel/iwlegacy/3945.h #define TFD_CTL_PAD_SET(n) (n << 28) n 2385 drivers/net/wireless/intel/iwlegacy/commands.h #define IL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1)))) n 2404 drivers/net/wireless/intel/iwlegacy/commands.h #define IL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1)))) n 46 drivers/net/wireless/intel/iwlegacy/common.h #define U32_PAD(n) ((4-(n))&0x3) n 2262 drivers/net/wireless/intel/iwlwifi/dvm/commands.h #define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1)))) n 1066 drivers/net/wireless/intel/iwlwifi/iwl-drv.c struct iwl_fw_dbg_mem_seg_tlv *n; n 1076 drivers/net/wireless/intel/iwlwifi/iwl-drv.c n = krealloc(pieces->dbg_mem_tlv, size, GFP_KERNEL); n 1077 drivers/net/wireless/intel/iwlwifi/iwl-drv.c if (!n) n 1079 drivers/net/wireless/intel/iwlwifi/iwl-drv.c pieces->dbg_mem_tlv = n; n 711 drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c int n = 0, idx = 0; n 720 drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c n++; n 723 drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c sband->n_channels = n; n 725 drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c return n; n 448 drivers/net/wireless/intersil/hostap/hostap_ap.c struct list_head *ptr, *n; n 455 drivers/net/wireless/intersil/hostap/hostap_ap.c for (ptr = mac_restrictions->mac_list.next, n = ptr->next; n 457 drivers/net/wireless/intersil/hostap/hostap_ap.c ptr = n, n = ptr->next) { n 500 drivers/net/wireless/intersil/hostap/hostap_ap.c struct list_head *ptr, *n; n 504 drivers/net/wireless/intersil/hostap/hostap_ap.c for (ptr = ap->sta_list.next, n = ptr->next; ptr != &ap->sta_list; n 505 drivers/net/wireless/intersil/hostap/hostap_ap.c ptr = n, n = ptr->next) { n 865 drivers/net/wireless/intersil/hostap/hostap_ap.c struct sta_info *n, *sta; n 882 drivers/net/wireless/intersil/hostap/hostap_ap.c list_for_each_entry_safe(sta, n, &ap->sta_list, list) { n 213 drivers/net/wireless/intersil/hostap/hostap_hw.c struct list_head *ptr, *n; n 218 drivers/net/wireless/intersil/hostap/hostap_hw.c list_for_each_safe(ptr, n, &local->cmd_queue) { n 2899 drivers/net/wireless/intersil/hostap/hostap_hw.c #define SHOW_REG(n) \ n 2900 drivers/net/wireless/intersil/hostap/hostap_hw.c seq_printf(m, #n "=%04x\n", hfa384x_read_reg(local->dev, HFA384X_##n##_OFF)) n 3034 drivers/net/wireless/intersil/hostap/hostap_hw.c struct list_head *ptr, *n; n 3036 drivers/net/wireless/intersil/hostap/hostap_hw.c list_for_each_safe(ptr, n, &local->set_tim_list) { n 3254 drivers/net/wireless/intersil/hostap/hostap_hw.c struct list_head *ptr, *n; n 3263 drivers/net/wireless/intersil/hostap/hostap_hw.c list_for_each_safe(ptr, n, &local->hostap_interfaces) { n 3330 drivers/net/wireless/intersil/hostap/hostap_hw.c list_for_each_safe(ptr, n, &local->bss_list) { n 951 drivers/net/wireless/intersil/hostap/hostap_wlan.h #define PDEBUG(n, args...) 
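The brcmsmac phy_int.h hits a little earlier in this block define PHY_SAT() and PHY_SHIFT_ROUND() for fixed-point work: clamp a value to a signed n-bit range, and right-shift with round-to-nearest for non-negative inputs. A function-style sketch of those two macros, assuming 1 <= n <= 31 and an arithmetic right shift for negative values (the same assumption the macros make):

/*
 * Function equivalents of the PHY_SAT()/PHY_SHIFT_ROUND() macros
 * referenced above.  phy_sat() clamps x to the signed n-bit range;
 * phy_shift_round() adds half before shifting for non-negative x
 * (round to nearest) and uses a plain arithmetic shift otherwise.
 */
#include <stdio.h>

static int phy_sat(int x, int n)
{
        int max = (1 << (n - 1)) - 1;
        int min = -(1 << (n - 1));

        return x > max ? max : (x < min ? min : x);
}

static int phy_shift_round(int x, int n)
{
        return x >= 0 ? (x + (1 << (n - 1))) >> n : x >> n;
}

int main(void)
{
        printf("%d %d\n", phy_sat(300, 8), phy_sat(-300, 8));             /* 127 -128 */
        printf("%d %d\n", phy_shift_round(7, 2), phy_shift_round(-7, 2)); /* 2 -2 */
        return 0;
}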
\ n 952 drivers/net/wireless/intersil/hostap/hostap_wlan.h do { if ((n) & DEBUG_MASK) printk(KERN_DEBUG args); } while (0) n 953 drivers/net/wireless/intersil/hostap/hostap_wlan.h #define PDEBUG2(n, args...) \ n 954 drivers/net/wireless/intersil/hostap/hostap_wlan.h do { if ((n) & DEBUG_MASK) printk(args); } while (0) n 958 drivers/net/wireless/intersil/hostap/hostap_wlan.h #define PDEBUG(n, args...) n 959 drivers/net/wireless/intersil/hostap/hostap_wlan.h #define PDEBUG2(n, args...) n 464 drivers/net/wireless/intersil/orinoco/hermes.h #define HERMES_BYTES_TO_RECLEN(n) ((((n) + 1) / 2) + 1) n 465 drivers/net/wireless/intersil/orinoco/hermes.h #define HERMES_RECLEN_TO_BYTES(n) (((n) - 1) * 2) n 172 drivers/net/wireless/intersil/orinoco/orinoco.h #define DEBUG(n, args...) do { \ n 173 drivers/net/wireless/intersil/orinoco/orinoco.h if (orinoco_debug > (n)) \ n 177 drivers/net/wireless/intersil/orinoco/orinoco.h #define DEBUG(n, args...) do { } while (0) n 245 drivers/net/wireless/intersil/p54/lmac.h #define IS_QOS_QUEUE(n) (n >= P54_QUEUE_DATA) n 1756 drivers/net/wireless/intersil/prism54/isl_ioctl.c enum oid_num_t n = dwrq->flags; n 1758 drivers/net/wireless/intersil/prism54/isl_ioctl.c rvalue = mgt_get_request(netdev_priv(ndev), n, 0, NULL, &r); n 1759 drivers/net/wireless/intersil/prism54/isl_ioctl.c dwrq->length = mgt_response_to_str(n, &r, extra); n 1760 drivers/net/wireless/intersil/prism54/isl_ioctl.c if ((isl_oid[n].flags & OID_FLAG_TYPE) != OID_TYPE_U32) n 2033 drivers/net/wireless/intersil/prism54/isl_ioctl.c int n = snprintf(dest, IW_CUSTOM_MAX, n 2040 drivers/net/wireless/intersil/prism54/isl_ioctl.c WARN_ON(n >= IW_CUSTOM_MAX); n 2041 drivers/net/wireless/intersil/prism54/isl_ioctl.c *length = n; n 2067 drivers/net/wireless/intersil/prism54/isl_ioctl.c int n = strlen(str); n 2072 drivers/net/wireless/intersil/prism54/isl_ioctl.c BUG_ON(n >= IW_CUSTOM_MAX); n 2074 drivers/net/wireless/intersil/prism54/isl_ioctl.c wrqu.data.length = n; n 2209 drivers/net/wireless/intersil/prism54/isl_ioctl.c struct islpci_bss_wpa_ie *bss, *n; n 2211 drivers/net/wireless/intersil/prism54/isl_ioctl.c list_for_each_entry_safe(bss, n, &priv->bss_wpa_list, list) { n 2474 drivers/net/wireless/intersil/prism54/isl_ioctl.c enum oid_num_t n = mgt_oidtonum(frame->header->oid); n 2476 drivers/net/wireless/intersil/prism54/isl_ioctl.c if (n != OID_NUM_LAST) n 2477 drivers/net/wireless/intersil/prism54/isl_ioctl.c prism54_process_trap_helper(netdev_priv(ndev), n, frame->data); n 2772 drivers/net/wireless/intersil/prism54/isl_ioctl.c #define IWPRIV_SET_U32(n,x) { n, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "s_"x } n 2773 drivers/net/wireless/intersil/prism54/isl_ioctl.c #define IWPRIV_SET_SSID(n,x) { n, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 1, 0, "s_"x } n 2774 drivers/net/wireless/intersil/prism54/isl_ioctl.c #define IWPRIV_SET_ADDR(n,x) { n, IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "s_"x } n 2775 drivers/net/wireless/intersil/prism54/isl_ioctl.c #define IWPRIV_GET(n,x) { n, 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | PRIV_STR_SIZE, "g_"x } n 2777 drivers/net/wireless/intersil/prism54/isl_ioctl.c #define IWPRIV_U32(n,x) IWPRIV_SET_U32(n,x), IWPRIV_GET(n,x) n 2778 drivers/net/wireless/intersil/prism54/isl_ioctl.c #define IWPRIV_SSID(n,x) IWPRIV_SET_SSID(n,x), IWPRIV_GET(n,x) n 2779 drivers/net/wireless/intersil/prism54/isl_ioctl.c #define IWPRIV_ADDR(n,x) IWPRIV_SET_ADDR(n,x), IWPRIV_GET(n,x) n 406 drivers/net/wireless/intersil/prism54/oid_mgt.c mgt_set_request(islpci_private *priv, enum oid_num_t 
n, int extra, void *data) n 415 drivers/net/wireless/intersil/prism54/oid_mgt.c BUG_ON(n >= OID_NUM_LAST); n 416 drivers/net/wireless/intersil/prism54/oid_mgt.c BUG_ON(extra > isl_oid[n].range); n 422 drivers/net/wireless/intersil/prism54/oid_mgt.c dlen = isl_oid[n].size; n 423 drivers/net/wireless/intersil/prism54/oid_mgt.c cache = priv->mib[n]; n 425 drivers/net/wireless/intersil/prism54/oid_mgt.c oid = isl_oid[n].oid + extra; n 431 drivers/net/wireless/intersil/prism54/oid_mgt.c mgt_cpu_to_le(isl_oid[n].flags & OID_FLAG_TYPE, _data); n 461 drivers/net/wireless/intersil/prism54/oid_mgt.c mgt_le_to_cpu(isl_oid[n].flags & OID_FLAG_TYPE, data); n 468 drivers/net/wireless/intersil/prism54/oid_mgt.c mgt_set_varlen(islpci_private *priv, enum oid_num_t n, void *data, int extra_len) n 476 drivers/net/wireless/intersil/prism54/oid_mgt.c BUG_ON(n >= OID_NUM_LAST); n 478 drivers/net/wireless/intersil/prism54/oid_mgt.c dlen = isl_oid[n].size; n 479 drivers/net/wireless/intersil/prism54/oid_mgt.c oid = isl_oid[n].oid; n 481 drivers/net/wireless/intersil/prism54/oid_mgt.c mgt_cpu_to_le(isl_oid[n].flags & OID_FLAG_TYPE, data); n 497 drivers/net/wireless/intersil/prism54/oid_mgt.c mgt_le_to_cpu(isl_oid[n].flags & OID_FLAG_TYPE, data); n 503 drivers/net/wireless/intersil/prism54/oid_mgt.c mgt_get_request(islpci_private *priv, enum oid_num_t n, int extra, void *data, n 515 drivers/net/wireless/intersil/prism54/oid_mgt.c BUG_ON(n >= OID_NUM_LAST); n 516 drivers/net/wireless/intersil/prism54/oid_mgt.c BUG_ON(extra > isl_oid[n].range); n 524 drivers/net/wireless/intersil/prism54/oid_mgt.c dlen = isl_oid[n].size; n 525 drivers/net/wireless/intersil/prism54/oid_mgt.c cache = priv->mib[n]; n 527 drivers/net/wireless/intersil/prism54/oid_mgt.c oid = isl_oid[n].oid + extra; n 550 drivers/net/wireless/intersil/prism54/oid_mgt.c if ((isl_oid[n].flags & OID_FLAG_TYPE) == OID_TYPE_U32) n 559 drivers/net/wireless/intersil/prism54/oid_mgt.c mgt_le_to_cpu(isl_oid[n].flags & OID_FLAG_TYPE, n 569 drivers/net/wireless/intersil/prism54/oid_mgt.c if (reslen > isl_oid[n].size) n 573 drivers/net/wireless/intersil/prism54/oid_mgt.c oid, reslen, isl_oid[n].size); n 580 drivers/net/wireless/intersil/prism54/oid_mgt.c mgt_commit_list(islpci_private *priv, enum oid_num_t *l, int n) n 585 drivers/net/wireless/intersil/prism54/oid_mgt.c for (i = 0; i < n; i++) { n 615 drivers/net/wireless/intersil/prism54/oid_mgt.c mgt_set(islpci_private *priv, enum oid_num_t n, void *data) n 617 drivers/net/wireless/intersil/prism54/oid_mgt.c BUG_ON(n >= OID_NUM_LAST); n 618 drivers/net/wireless/intersil/prism54/oid_mgt.c BUG_ON(priv->mib[n] == NULL); n 620 drivers/net/wireless/intersil/prism54/oid_mgt.c memcpy(priv->mib[n], data, isl_oid[n].size); n 621 drivers/net/wireless/intersil/prism54/oid_mgt.c mgt_cpu_to_le(isl_oid[n].flags & OID_FLAG_TYPE, priv->mib[n]); n 625 drivers/net/wireless/intersil/prism54/oid_mgt.c mgt_get(islpci_private *priv, enum oid_num_t n, void *res) n 627 drivers/net/wireless/intersil/prism54/oid_mgt.c BUG_ON(n >= OID_NUM_LAST); n 628 drivers/net/wireless/intersil/prism54/oid_mgt.c BUG_ON(priv->mib[n] == NULL); n 631 drivers/net/wireless/intersil/prism54/oid_mgt.c memcpy(res, priv->mib[n], isl_oid[n].size); n 632 drivers/net/wireless/intersil/prism54/oid_mgt.c mgt_le_to_cpu(isl_oid[n].flags & OID_FLAG_TYPE, res); n 779 drivers/net/wireless/intersil/prism54/oid_mgt.c mgt_response_to_str(enum oid_num_t n, union oid_res_t *r, char *str) n 781 drivers/net/wireless/intersil/prism54/oid_mgt.c switch (isl_oid[n].flags & 
OID_FLAG_TYPE) { n 878 drivers/net/wireless/intersil/prism54/oid_mgt.c for (i = 0; i < isl_oid[n].size; i++) n 304 drivers/net/wireless/marvell/libertas/cfg.c int n = 0; n 320 drivers/net/wireless/marvell/libertas/cfg.c tlv = add_ie_rates(tlv, rates_eid, &n); n 324 drivers/net/wireless/marvell/libertas/cfg.c tlv = add_ie_rates(tlv, ext_rates_eid, &n); n 332 drivers/net/wireless/marvell/libertas/cfg.c n = 4; n 336 drivers/net/wireless/marvell/libertas/cfg.c rate_tlv->header.len = cpu_to_le16(n); n 337 drivers/net/wireless/marvell/libertas/cfg.c return sizeof(rate_tlv->header) + n; n 777 drivers/net/wireless/marvell/libertas/debugfs.c #define item_size(n) (FIELD_SIZEOF(struct lbs_private, n)) n 778 drivers/net/wireless/marvell/libertas/debugfs.c #define item_addr(n) (offsetof(struct lbs_private, n)) n 39 drivers/net/wireless/marvell/mwifiex/util.h #define item_size(n) (FIELD_SIZEOF(struct mwifiex_debug_info, n)) n 40 drivers/net/wireless/marvell/mwifiex/util.h #define item_addr(n) (offsetof(struct mwifiex_debug_info, n)) n 43 drivers/net/wireless/marvell/mwifiex/util.h #define adapter_item_size(n) (FIELD_SIZEOF(struct mwifiex_adapter, n)) n 44 drivers/net/wireless/marvell/mwifiex/util.h #define adapter_item_addr(n) (offsetof(struct mwifiex_adapter, n)) n 289 drivers/net/wireless/mediatek/mt76/dma.c int len, n = 0, ret = -ENOMEM; n 312 drivers/net/wireless/mediatek/mt76/dma.c tx_info.buf[n].addr = t->dma_addr; n 313 drivers/net/wireless/mediatek/mt76/dma.c tx_info.buf[n++].len = dev->drv->txwi_size; n 314 drivers/net/wireless/mediatek/mt76/dma.c tx_info.buf[n].addr = addr; n 315 drivers/net/wireless/mediatek/mt76/dma.c tx_info.buf[n++].len = len; n 318 drivers/net/wireless/mediatek/mt76/dma.c if (n == ARRAY_SIZE(tx_info.buf)) n 326 drivers/net/wireless/mediatek/mt76/dma.c tx_info.buf[n].addr = addr; n 327 drivers/net/wireless/mediatek/mt76/dma.c tx_info.buf[n++].len = iter->len; n 329 drivers/net/wireless/mediatek/mt76/dma.c tx_info.nbuf = n; n 348 drivers/net/wireless/mediatek/mt76/dma.c for (n--; n > 0; n--) n 349 drivers/net/wireless/mediatek/mt76/dma.c dma_unmap_single(dev->dev, tx_info.buf[n].addr, n 350 drivers/net/wireless/mediatek/mt76/dma.c tx_info.buf[n].len, DMA_TO_DEVICE); n 350 drivers/net/wireless/mediatek/mt76/mt76.h #define MT_VEND_ADDR(type, n) (MT_VEND_TYPE_##type | (n)) n 104 drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c int n = ARRAY_SIZE(cal_free_bytes); n 111 drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c n -= 4; n 113 drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c for (i = 0; i < n; i++) { n 401 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c #define EEP_VAL(n) ((u8 *)dev->mt76.eeprom.data)[n] n 64 drivers/net/wireless/mediatek/mt76/mt7603/regs.h #define MT_TXTIME_THRESH(n) (MT_TXTIME_THRESH_BASE + ((n) * 4)) n 67 drivers/net/wireless/mediatek/mt76/mt7603/regs.h #define MT_PAGE_COUNT(n) (MT_PAGE_COUNT_BASE + ((n) * 4)) n 79 drivers/net/wireless/mediatek/mt76/mt7603/regs.h #define MT_GROUP_THRESH(n) (MT_GROUP_THRESH_BASE + ((n) * 4)) n 137 drivers/net/wireless/mediatek/mt76/mt7603/regs.h #define MT_AGC(n) (MT_AGC_BASE + ((n) * 4)) n 140 drivers/net/wireless/mediatek/mt76/mt7603/regs.h #define MT_AGC1(n) (MT_AGC1_BASE + ((n) * 4)) n 146 drivers/net/wireless/mediatek/mt76/mt7603/regs.h #define MT_RXTD(n) (MT_RXTD_BASE + ((n) * 4)) n 156 drivers/net/wireless/mediatek/mt76/mt7603/regs.h #define MT_WF_PHY_CR_TSSI(phy, n) (MT_WF_PHY_CR_TSSI_BASE + \ n 158 drivers/net/wireless/mediatek/mt76/mt7603/regs.h ((n) * 4)) n 161 
drivers/net/wireless/mediatek/mt76/mt7603/regs.h #define MT_PHYCTRL(n) (MT_PHYCTRL_BASE + ((n) * 4)) n 288 drivers/net/wireless/mediatek/mt76/mt7603/regs.h #define MT_ARB_SCR_BCNQ_OPMODE_SHIFT(n) ((n) * 2) n 310 drivers/net/wireless/mediatek/mt76/mt7603/regs.h #define MT_WF_ARB_BCN_START_BSSn(n) BIT(0 + (n)) n 317 drivers/net/wireless/mediatek/mt76/mt7603/regs.h #define MT_WF_ARB_BCN_START_BSS0n(n) BIT((n) ? 16 + ((n) - 1) : 0) n 320 drivers/net/wireless/mediatek/mt76/mt7603/regs.h #define MT_WF_ARB_BCN_FLUSH_BSSn(n) BIT(0 + (n)) n 321 drivers/net/wireless/mediatek/mt76/mt7603/regs.h #define MT_WF_ARB_BCN_FLUSH_BSS0n(n) BIT((n) ? 16 + ((n) - 1) : 0) n 324 drivers/net/wireless/mediatek/mt76/mt7603/regs.h #define MT_WF_ARB_CAB_START_BSSn(n) BIT(0 + (n)) n 325 drivers/net/wireless/mediatek/mt76/mt7603/regs.h #define MT_WF_ARB_CAB_START_BSS0n(n) BIT((n) ? 16 + ((n) - 1) : 0) n 328 drivers/net/wireless/mediatek/mt76/mt7603/regs.h #define MT_WF_ARB_CAB_FLUSH_BSSn(n) BIT(0 + (n)) n 329 drivers/net/wireless/mediatek/mt76/mt7603/regs.h #define MT_WF_ARB_CAB_FLUSH_BSS0n(n) BIT((n) ? 16 + ((n) - 1) : 0) n 331 drivers/net/wireless/mediatek/mt76/mt7603/regs.h #define MT_WF_ARB_CAB_COUNT(n) MT_WF_ARB(0x128 + (n) * 4) n 334 drivers/net/wireless/mediatek/mt76/mt7603/regs.h #define MT_WF_ARB_CAB_COUNT_B0_REG(n) MT_WF_ARB_CAB_COUNT(((n) > 12 ? 2 : \ n 335 drivers/net/wireless/mediatek/mt76/mt7603/regs.h ((n) > 4 ? 1 : 0))) n 336 drivers/net/wireless/mediatek/mt76/mt7603/regs.h #define MT_WF_ARB_CAB_COUNT_B0_SHIFT(n) (((n) > 12 ? (n) - 12 : \ n 337 drivers/net/wireless/mediatek/mt76/mt7603/regs.h ((n) > 4 ? (n) - 4 : \ n 338 drivers/net/wireless/mediatek/mt76/mt7603/regs.h (n) ? (n) + 3 : 0)) * 4) n 466 drivers/net/wireless/mediatek/mt76/mt7603/regs.h #define MT_WTBL_OFF(n) (MT_WTBL_OFF_BASE + (n)) n 481 drivers/net/wireless/mediatek/mt76/mt7603/regs.h #define MT_LPON(n) (MT_LPON_BASE + (n)) n 505 drivers/net/wireless/mediatek/mt76/mt7603/regs.h #define MT_LPON_SBTOR(n) MT_LPON(0x0a0) n 510 drivers/net/wireless/mediatek/mt76/mt7603/regs.h #define MT_INT_WAKEUP(n) (MT_INT_WAKEUP_BASE + (n)) n 512 drivers/net/wireless/mediatek/mt76/mt7603/regs.h #define MT_HW_INT_STATUS(n) MT_INT_WAKEUP(0x3c + (n) * 8) n 513 drivers/net/wireless/mediatek/mt76/mt7603/regs.h #define MT_HW_INT_MASK(n) MT_INT_WAKEUP(0x40 + (n) * 8) n 190 drivers/net/wireless/mediatek/mt76/mt7615/regs.h #define MT_WTBL_OFF(n) (MT_WTBL_OFF_BASE + (n)) n 254 drivers/net/wireless/mediatek/mt76/mt7615/regs.h #define MT_MIB_MB_SDR0(n) MT_WF_MIB(0x100 + ((n) << 4)) n 258 drivers/net/wireless/mediatek/mt76/mt7615/regs.h #define MT_MIB_SDR16(n) MT_WF_MIB(0x48 + ((n) << 9)) n 170 drivers/net/wireless/mediatek/mt76/mt76x0/phy.c int n) n 172 drivers/net/wireless/mediatek/mt76/mt76x0/phy.c while (n-- > 0) { n 144 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c const struct mt76_reg_pair *data, int n) n 152 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c if (!n) n 155 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c cnt = min(max_vals_per_cmd, n); n 168 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_WRITE, cnt == n); n 173 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c return mt76x02u_mcu_wr_rp(dev, base, data + cnt, n - cnt); n 178 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c struct mt76_reg_pair *data, int n) n 186 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c if (!n) n 189 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c cnt = min(max_vals_per_cmd, n); n 190 
drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c if (cnt != n) n 206 drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c usb->mcu.rp_len = n; n 202 drivers/net/wireless/mediatek/mt76/usb.c const struct mt76_reg_pair *data, int n) n 205 drivers/net/wireless/mediatek/mt76/usb.c return dev->mcu_ops->mcu_wr_rp(dev, base, data, n); n 207 drivers/net/wireless/mediatek/mt76/usb.c return mt76u_req_wr_rp(dev, base, data, n); n 229 drivers/net/wireless/mediatek/mt76/usb.c struct mt76_reg_pair *data, int n) n 232 drivers/net/wireless/mediatek/mt76/usb.c return dev->mcu_ops->mcu_rd_rp(dev, base, data, n); n 234 drivers/net/wireless/mediatek/mt76/usb.c return mt76u_req_rd_rp(dev, base, data, n); n 266 drivers/net/wireless/mediatek/mt7601u/initvals_phy.h size_t n; n 317 drivers/net/wireless/mediatek/mt7601u/mac.c u32 sum, n; n 327 drivers/net/wireless/mediatek/mt7601u/mac.c n = 0; n 341 drivers/net/wireless/mediatek/mt7601u/mac.c n += (val >> 16) + (val & 0xffff); n 347 drivers/net/wireless/mediatek/mt7601u/mac.c atomic_set(&dev->avg_ampdu_len, n ? DIV_ROUND_CLOSEST(sum, n) : 1); n 209 drivers/net/wireless/mediatek/mt7601u/mcu.c const struct mt76_reg_pair *data, int n) n 215 drivers/net/wireless/mediatek/mt7601u/mcu.c if (!n) n 218 drivers/net/wireless/mediatek/mt7601u/mcu.c cnt = min(max_vals_per_cmd, n); n 230 drivers/net/wireless/mediatek/mt7601u/mcu.c ret = mt7601u_mcu_msg_send(dev, skb, CMD_RANDOM_WRITE, cnt == n); n 234 drivers/net/wireless/mediatek/mt7601u/mcu.c return mt7601u_write_reg_pairs(dev, base, data + cnt, n - cnt); n 238 drivers/net/wireless/mediatek/mt7601u/mcu.c const u32 *data, int n) n 244 drivers/net/wireless/mediatek/mt7601u/mcu.c if (!n) n 247 drivers/net/wireless/mediatek/mt7601u/mcu.c cnt = min(max_regs_per_cmd, n); n 258 drivers/net/wireless/mediatek/mt7601u/mcu.c ret = mt7601u_mcu_msg_send(dev, skb, CMD_BURST_WRITE, cnt == n); n 263 drivers/net/wireless/mediatek/mt7601u/mcu.c data + cnt, n - cnt); n 337 drivers/net/wireless/mediatek/mt7601u/mcu.c int n, ret; n 342 drivers/net/wireless/mediatek/mt7601u/mcu.c n = min(MCU_FW_URB_MAX_PAYLOAD, len); n 343 drivers/net/wireless/mediatek/mt7601u/mcu.c ret = __mt7601u_dma_fw(dev, dma_buf, data, n, dst_addr); n 350 drivers/net/wireless/mediatek/mt7601u/mcu.c return mt7601u_dma_fw(dev, dma_buf, data + n, len - n, dst_addr + n); n 329 drivers/net/wireless/mediatek/mt7601u/mt7601u.h const u32 *data, int n); n 298 drivers/net/wireless/mediatek/mt7601u/phy.c return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, t->regs, t->n); n 314 drivers/net/wireless/mediatek/mt7601u/phy.c t[2].regs, t[2].n); n 319 drivers/net/wireless/mediatek/mt7601u/phy.c t[dev->bw].regs, t[dev->bw].n); n 44 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c #define QTNF_BD_PARAM_OFFSET(n) offsetof(struct qtnf_extra_bd_params, param##n) n 1580 drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c static int up, dn, m, n, wait_cnt; n 1671 drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c n = 3; n 1681 drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c up, dn, m, n, wait_cnt); n 1692 drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c if (up >= n) { n 1694 drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c n = 3; n 1717 drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c n = 3 * m; n 1734 drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c n = 3*m; n 1422 drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c static s32 up, dn, m, n, wait_count; n 1451 
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c n = 3; n 1469 drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c n = 3; n 1491 drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c if (up >= n) { n 1496 drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c n = 3; n 1528 drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c n = 3 * m; n 1553 drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c n = 3 * m; n 1784 drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c static s32 up, dn, m, n, wait_count; n 1893 drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c n = 3; n 1908 drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c up, dn, m, n, wait_count); n 1919 drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c if (up >= n) { n 1924 drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c n = 3; n 1955 drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c n = 3 * m; n 1980 drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c n = 3 * m; n 1702 drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c static long up, dn, m, n, wait_count; n 1798 drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c n = 3; n 1808 drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c (int)up, (int)dn, (int)m, (int)n, (int)wait_count); n 1820 drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c if (up >= n) { n 1825 drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c n = 3; n 1854 drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c n = 3 * m; n 1876 drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c n = 3 * m; n 99 drivers/net/wireless/realtek/rtlwifi/debug.c int i, n; n 102 drivers/net/wireless/realtek/rtlwifi/debug.c for (n = 0; n <= max; ) { n 103 drivers/net/wireless/realtek/rtlwifi/debug.c seq_printf(m, "\n%8.8x ", n + page); n 104 drivers/net/wireless/realtek/rtlwifi/debug.c for (i = 0; i < 4 && n <= max; i++, n += 4) n 106 drivers/net/wireless/realtek/rtlwifi/debug.c rtl_read_dword(rtlpriv, (page | n))); n 141 drivers/net/wireless/realtek/rtlwifi/debug.c int i, n; n 144 drivers/net/wireless/realtek/rtlwifi/debug.c for (n = 0; n <= max; ) { n 145 drivers/net/wireless/realtek/rtlwifi/debug.c seq_printf(m, "\n%8.8x ", n + page); n 146 drivers/net/wireless/realtek/rtlwifi/debug.c for (i = 0; i < 4 && n <= max; i++, n += 4) n 148 drivers/net/wireless/realtek/rtlwifi/debug.c rtl_get_bbreg(hw, (page | n), 0xffffffff)); n 183 drivers/net/wireless/realtek/rtlwifi/debug.c int i, n; n 191 drivers/net/wireless/realtek/rtlwifi/debug.c for (n = 0; n <= max; ) { n 192 drivers/net/wireless/realtek/rtlwifi/debug.c seq_printf(m, "\n%8.8x ", n); n 193 drivers/net/wireless/realtek/rtlwifi/debug.c for (i = 0; i < 4 && n <= max; n += 1, i++) n 195 drivers/net/wireless/realtek/rtlwifi/debug.c rtl_get_rfreg(hw, rfpath, n, 0xffffffff)); n 3022 drivers/net/wireless/realtek/rtlwifi/wifi.h #define byte(x, n) ((x >> (8 * n)) & 0xff) n 888 drivers/net/wireless/realtek/rtw88/coex.c u8 n, type; n 924 drivers/net/wireless/realtek/rtw88/coex.c n = type - 100; n 925 drivers/net/wireless/realtek/rtw88/coex.c if (n < chip->tdma_nsant_num) n 927 drivers/net/wireless/realtek/rtw88/coex.c chip->tdma_nsant[n].para[0], n 928 drivers/net/wireless/realtek/rtw88/coex.c chip->tdma_nsant[n].para[1], n 929 drivers/net/wireless/realtek/rtw88/coex.c chip->tdma_nsant[n].para[2], n 930 drivers/net/wireless/realtek/rtw88/coex.c 
chip->tdma_nsant[n].para[3], n 931 drivers/net/wireless/realtek/rtw88/coex.c chip->tdma_nsant[n].para[4]); n 406 drivers/net/wireless/realtek/rtw88/debug.c int i, n; n 410 drivers/net/wireless/realtek/rtw88/debug.c for (n = 0; n <= max; ) { n 411 drivers/net/wireless/realtek/rtw88/debug.c seq_printf(m, "\n%8.8x ", n + page); n 412 drivers/net/wireless/realtek/rtw88/debug.c for (i = 0; i < 4 && n <= max; i++, n += 4) n 414 drivers/net/wireless/realtek/rtw88/debug.c rtw_read32(rtwdev, (page | n))); n 426 drivers/net/wireless/realtek/rtw88/debug.c int i, n; n 430 drivers/net/wireless/realtek/rtw88/debug.c for (n = 0; n <= max; ) { n 431 drivers/net/wireless/realtek/rtw88/debug.c seq_printf(m, "\n%8.8x ", n + page); n 432 drivers/net/wireless/realtek/rtw88/debug.c for (i = 0; i < 4 && n <= max; i++, n += 4) n 434 drivers/net/wireless/realtek/rtw88/debug.c rtw_read32(rtwdev, (page | n))); n 203 drivers/net/wireless/realtek/rtw88/tx.c u8 *n; n 212 drivers/net/wireless/realtek/rtw88/tx.c n = (u8 *)IEEE80211_SKB_CB(cur)->status.status_driver_data; n 213 drivers/net/wireless/realtek/rtw88/tx.c if (*n == sn) { n 3114 drivers/net/wireless/rndis_wlan.c int len, retval, i, n; n 3123 drivers/net/wireless/rndis_wlan.c n = le32_to_cpu(networks_supported.num_items); n 3124 drivers/net/wireless/rndis_wlan.c if (n > 8) n 3125 drivers/net/wireless/rndis_wlan.c n = 8; n 3126 drivers/net/wireless/rndis_wlan.c for (i = 0; i < n; i++) { n 726 drivers/net/wireless/ti/wl18xx/main.c wl18xx_clk_table[clk_freq].n, wl18xx_clk_table[clk_freq].m, n 732 drivers/net/wireless/ti/wl18xx/main.c wl18xx_clk_table_coex[clk_freq].n); n 748 drivers/net/wireless/ti/wl18xx/main.c wl18xx_clk_table[clk_freq].n); n 165 drivers/net/wireless/ti/wl18xx/wl18xx.h u32 n; n 64 drivers/net/wireless/wl3501_cs.c #define WL3501_NOPLOOP(n) { int x = 0; while (x++ < n) slow_down_io(); } n 633 drivers/net/wireless/zydas/zd1211rw/zd_usb.c unsigned int l, k, n; n 638 drivers/net/wireless/zydas/zd1211rw/zd_usb.c n = l+k; n 639 drivers/net/wireless/zydas/zd1211rw/zd_usb.c if (n > length) n 644 drivers/net/wireless/zydas/zd1211rw/zd_usb.c l = (n+3) & ~3; n 415 drivers/net/xen-netback/hash.c unsigned int j, n; n 417 drivers/net/xen-netback/hash.c n = 8; n 418 drivers/net/xen-netback/hash.c if (i + n >= XEN_NETBK_MAX_HASH_KEY_SIZE) n 419 drivers/net/xen-netback/hash.c n = XEN_NETBK_MAX_HASH_KEY_SIZE - i; n 421 drivers/net/xen-netback/hash.c seq_printf(m, "[%2u - %2u]: ", i, i + n - 1); n 423 drivers/net/xen-netback/hash.c for (j = 0; j < n; j++, i++) n 435 drivers/net/xen-netback/hash.c unsigned int j, n; n 437 drivers/net/xen-netback/hash.c n = 8; n 438 drivers/net/xen-netback/hash.c if (i + n >= vif->hash.size) n 439 drivers/net/xen-netback/hash.c n = vif->hash.size - i; n 441 drivers/net/xen-netback/hash.c seq_printf(m, "[%4u - %4u]: ", i, i + n - 1); n 443 drivers/net/xen-netback/hash.c for (j = 0; j < n; j++, i++) n 356 drivers/nfc/nfcsim.c int n; n 364 drivers/nfc/nfcsim.c n = snprintf(devname, sizeof(devname), "nfc%d", idx); n 365 drivers/nfc/nfcsim.c if (n >= sizeof(devname)) { n 2663 drivers/nfc/pn533/pn533.c struct pn533_cmd *cmd, *n; n 2675 drivers/nfc/pn533/pn533.c list_for_each_entry_safe(cmd, n, &priv->cmd_queue, queue) { n 71 drivers/nfc/s3fwrn5/nci.c void s3fwrn5_nci_get_prop_ops(struct nci_driver_ops **ops, size_t *n) n 74 drivers/nfc/s3fwrn5/nci.c *n = ARRAY_SIZE(s3fwrn5_nci_prop_ops); n 75 drivers/nfc/s3fwrn5/nci.h void s3fwrn5_nci_get_prop_ops(struct nci_driver_ops **ops, size_t *n); n 257 drivers/nfc/trf7970a.c #define 
TRF7970A_MODULATOR_DEPTH(n) ((n) & 0x7) n 267 drivers/nfc/trf7970a.c #define TRF7970A_MODULATOR_CLK(n) (((n) & 0x3) << 4) n 368 drivers/ntb/test/ntb_tool.c int n; n 382 drivers/ntb/test/ntb_tool.c n = sscanf(buf, "%c %lli", &cmd, &bits); n 386 drivers/ntb/test/ntb_tool.c if (n != 2) { n 935 drivers/ntb/test/ntb_tool.c int ret, n; n 944 drivers/ntb/test/ntb_tool.c n = sscanf(buf, "%lli:%zi", &addr, &wsize); n 945 drivers/ntb/test/ntb_tool.c if (n != 2) n 205 drivers/nvdimm/blk.c resource_size_t offset, void *iobuf, size_t n, int rw, n 212 drivers/nvdimm/blk.c dev_offset = to_dev_offset(nsblk, offset, n); n 214 drivers/nvdimm/blk.c if (unlikely(offset + n > nsblk->size)) { n 222 drivers/nvdimm/blk.c return ndbr->do_io(ndbr, dev_offset, iobuf, n, rw); n 38 drivers/nvdimm/btt.c void *buf, size_t n, unsigned long flags) n 45 drivers/nvdimm/btt.c return nvdimm_read_bytes(ndns, offset, buf, n, flags); n 49 drivers/nvdimm/btt.c void *buf, size_t n, unsigned long flags) n 56 drivers/nvdimm/btt.c return nvdimm_write_bytes(ndns, offset, buf, n, flags); n 690 drivers/nvdimm/bus.c int n) n 428 drivers/nvdimm/dimm_devs.c static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n) n 773 drivers/nvdimm/dimm_devs.c resource_size_t n) n 782 drivers/nvdimm/dimm_devs.c res = __request_region(&ndd->dpa, start, n, name, 0); n 57 drivers/nvdimm/label.c u32 tmp_nslot, n; n 60 drivers/nvdimm/label.c n = __sizeof_namespace_index(tmp_nslot) / NSINDEX_ALIGN; n 62 drivers/nvdimm/label.c return __nvdimm_num_label_slots(ndd, NSINDEX_ALIGN * n); n 565 drivers/nvdimm/label.c struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n) n 581 drivers/nvdimm/label.c if (n-- == 0) n 864 drivers/nvdimm/label.c static bool is_old_resource(struct resource *res, struct resource **list, int n) n 870 drivers/nvdimm/label.c for (i = 0; i < n; i++) n 134 drivers/nvdimm/label.h struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n); n 448 drivers/nvdimm/namespace_devs.c resource_size_t n) n 454 drivers/nvdimm/namespace_devs.c while (n) { n 466 drivers/nvdimm/namespace_devs.c if (n >= resource_size(res)) { n 467 drivers/nvdimm/namespace_devs.c n -= resource_size(res); n 479 drivers/nvdimm/namespace_devs.c new_start = res->start + n; n 483 drivers/nvdimm/namespace_devs.c rc = adjust_resource(res, new_start, resource_size(res) - n); n 505 drivers/nvdimm/namespace_devs.c struct nd_label_id *label_id, resource_size_t n) n 513 drivers/nvdimm/namespace_devs.c rc = scan_free(nd_region, nd_mapping, label_id, n); n 523 drivers/nvdimm/namespace_devs.c resource_size_t n) n 533 drivers/nvdimm/namespace_devs.c first_dpa = nd_mapping->start + nd_mapping->size - n; n 538 drivers/nvdimm/namespace_devs.c res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n); n 543 drivers/nvdimm/namespace_devs.c return rc ? 
n : 0; n 566 drivers/nvdimm/namespace_devs.c resource_size_t n, struct resource *valid) n 593 drivers/nvdimm/namespace_devs.c if (resource_size(valid) < n) n 616 drivers/nvdimm/namespace_devs.c resource_size_t n) n 622 drivers/nvdimm/namespace_devs.c const resource_size_t to_allocate = n; n 682 drivers/nvdimm/namespace_devs.c allocate = min(available, n); n 713 drivers/nvdimm/namespace_devs.c return n; n 740 drivers/nvdimm/namespace_devs.c return n; n 742 drivers/nvdimm/namespace_devs.c n -= allocate; n 743 drivers/nvdimm/namespace_devs.c if (n) { n 761 drivers/nvdimm/namespace_devs.c if ((is_pmem || !ndd->dpa.child) && n == to_allocate) n 762 drivers/nvdimm/namespace_devs.c return init_dpa_allocation(label_id, nd_region, nd_mapping, n); n 763 drivers/nvdimm/namespace_devs.c return n; n 815 drivers/nvdimm/namespace_devs.c resource_size_t n, rem = 0; n 820 drivers/nvdimm/namespace_devs.c n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem); n 821 drivers/nvdimm/namespace_devs.c if (n == 0) n 823 drivers/nvdimm/namespace_devs.c rem = scan_allocate(nd_region, nd_mapping, &label_id, n); n 826 drivers/nvdimm/namespace_devs.c (unsigned long long) n - rem, n 827 drivers/nvdimm/namespace_devs.c (unsigned long long) n); n 872 drivers/nvdimm/namespace_devs.c struct nd_label_id *label_id, resource_size_t n) n 880 drivers/nvdimm/namespace_devs.c resource_size_t rem = n; n 907 drivers/nvdimm/namespace_devs.c (unsigned long long) n - rem, n 908 drivers/nvdimm/namespace_devs.c (unsigned long long) n); n 1644 drivers/nvdimm/namespace_devs.c struct attribute *a, int n) n 362 drivers/nvdimm/nd.h resource_size_t n); n 75 drivers/nvdimm/pfn_devs.c size_t n = len - 1; n 77 drivers/nvdimm/pfn_devs.c if (strncmp(buf, "pmem\n", n) == 0 n 78 drivers/nvdimm/pfn_devs.c || strncmp(buf, "pmem", n) == 0) { n 80 drivers/nvdimm/pfn_devs.c } else if (strncmp(buf, "ram\n", n) == 0 n 81 drivers/nvdimm/pfn_devs.c || strncmp(buf, "ram", n) == 0) n 83 drivers/nvdimm/pfn_devs.c else if (strncmp(buf, "none\n", n) == 0 n 84 drivers/nvdimm/pfn_devs.c || strncmp(buf, "none", n) == 0) n 283 drivers/nvdimm/pfn_devs.c static umode_t pfn_visible(struct kobject *kobj, struct attribute *a, int n) n 622 drivers/nvdimm/region_devs.c static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n) n 751 drivers/nvdimm/region_devs.c static ssize_t mappingN(struct device *dev, char *buf, int n) n 757 drivers/nvdimm/region_devs.c if (n >= nd_region->ndr_mappings) n 759 drivers/nvdimm/region_devs.c nd_mapping = &nd_region->mapping[n]; n 812 drivers/nvdimm/region_devs.c static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n) n 817 drivers/nvdimm/region_devs.c if (n < nd_region->ndr_mappings) n 611 drivers/nvme/host/core.c unsigned short segments = blk_rq_nr_discard_segments(req), n = 0; n 639 drivers/nvme/host/core.c if (n < segments) { n 640 drivers/nvme/host/core.c range[n].cattr = cpu_to_le32(0); n 641 drivers/nvme/host/core.c range[n].nlb = cpu_to_le32(nlb); n 642 drivers/nvme/host/core.c range[n].slba = cpu_to_le64(slba); n 644 drivers/nvme/host/core.c n++; n 647 drivers/nvme/host/core.c if (WARN_ON_ONCE(n != segments)) { n 3117 drivers/nvme/host/core.c struct attribute *a, int n) n 3271 drivers/nvme/host/core.c struct attribute *a, int n) n 499 drivers/nvme/host/multipath.c u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0; n 515 drivers/nvme/host/multipath.c unsigned nsid = le32_to_cpu(desc->nsids[n]); n 521 drivers/nvme/host/multipath.c if (++n == nr_nsids) n 135 drivers/nvme/host/pci.c int n = 0, 
ret; n 137 drivers/nvme/host/pci.c ret = kstrtoint(val, 10, &n); n 138 drivers/nvme/host/pci.c if (ret != 0 || n < 2) n 1255 drivers/nvme/host/tcp.c int ret, opt, rcv_pdu_size, n; n 1324 drivers/nvme/host/tcp.c n = 0; n 1326 drivers/nvme/host/tcp.c n = (qid - 1) % num_online_cpus(); n 1327 drivers/nvme/host/tcp.c queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false); n 22 drivers/nvmem/imx-iim.c #define IIM_BANK_BASE(n) (0x800 + 0x400 * (n)) n 588 drivers/of/fdt.c int n; n 595 drivers/of/fdt.c for (n = 0; ; n++) { n 596 drivers/of/fdt.c fdt_get_mem_rsv(initial_boot_params, n, &base, &size); n 664 drivers/parisc/lba_pci.c #define truncate_pat_collision(r,n) (0) n 315 drivers/parisc/sba_iommu.c #define RESMAP_MASK(n) (~0UL << (BITS_PER_LONG - (n))) n 64 drivers/parport/parport_cs.c #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) n 544 drivers/parport/parport_pc.c const int n = left < fifo_depth ? left : fifo_depth; n 545 drivers/parport/parport_pc.c outsb(fifo, bufp, n); n 546 drivers/parport/parport_pc.c bufp += n; n 547 drivers/parport/parport_pc.c left -= n; n 2817 drivers/parport/parport_pc.c int err, count, n, i = id->driver_data; n 2841 drivers/parport/parport_pc.c for (n = 0; n < cards[i].numports; n++) { n 2842 drivers/parport/parport_pc.c int lo = cards[i].addr[n].lo; n 2843 drivers/parport/parport_pc.c int hi = cards[i].addr[n].hi; n 578 drivers/parport/parport_serial.c int n, success = 0; n 586 drivers/parport/parport_serial.c for (n = 0; n < card->numports; n++) { n 588 drivers/parport/parport_serial.c int lo = card->addr[n].lo; n 589 drivers/parport/parport_serial.c int hi = card->addr[n].hi; n 63 drivers/pci/bus.c struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n) n 67 drivers/pci/bus.c if (n < PCI_BRIDGE_RESOURCE_NUM) n 68 drivers/pci/bus.c return bus->resource[n]; n 70 drivers/pci/bus.c n -= PCI_BRIDGE_RESOURCE_NUM; n 72 drivers/pci/bus.c if (n-- == 0) n 50 drivers/pci/controller/dwc/pci-keystone.c #define OB_OFFSET_INDEX(n) (0x200 + (8 * (n))) n 51 drivers/pci/controller/dwc/pci-keystone.c #define OB_OFFSET_HI(n) (0x204 + (8 * (n))) n 55 drivers/pci/controller/dwc/pci-keystone.c #define PCIE_LEGACY_IRQ_ENABLE_SET(n) (0x188 + (0x10 * ((n) - 1))) n 56 drivers/pci/controller/dwc/pci-keystone.c #define PCIE_LEGACY_IRQ_ENABLE_CLR(n) (0x18c + (0x10 * ((n) - 1))) n 65 drivers/pci/controller/dwc/pci-keystone.c #define MSI_IRQ_STATUS(n) (0x104 + ((n) << 4)) n 66 drivers/pci/controller/dwc/pci-keystone.c #define MSI_IRQ_ENABLE_SET(n) (0x108 + ((n) << 4)) n 67 drivers/pci/controller/dwc/pci-keystone.c #define MSI_IRQ_ENABLE_CLR(n) (0x10c + ((n) << 4)) n 70 drivers/pci/controller/dwc/pci-keystone.c #define IRQ_STATUS(n) (0x184 + ((n) << 4)) n 71 drivers/pci/controller/dwc/pci-keystone.c #define IRQ_ENABLE_SET(n) (0x188 + ((n) << 4)) n 35 drivers/pci/controller/dwc/pcie-designware.h #define PORT_LINK_MODE(n) FIELD_PREP(PORT_LINK_MODE_MASK, n) n 51 drivers/pci/controller/dwc/pcie-designware.h #define PORT_LOGIC_LINK_WIDTH(n) FIELD_PREP(PORT_LOGIC_LINK_WIDTH_MASK, n) n 33 drivers/pci/controller/pci-mvebu.c #define PCIE_BAR_LO_OFF(n) (0x0010 + ((n) << 3)) n 34 drivers/pci/controller/pci-mvebu.c #define PCIE_BAR_HI_OFF(n) (0x0014 + ((n) << 3)) n 37 drivers/pci/controller/pci-mvebu.c #define PCIE_BAR_CTRL_OFF(n) (0x1804 + (((n) - 1) * 4)) n 38 drivers/pci/controller/pci-mvebu.c #define PCIE_WIN04_CTRL_OFF(n) (0x1820 + ((n) << 4)) n 39 drivers/pci/controller/pci-mvebu.c #define PCIE_WIN04_BASE_OFF(n) (0x1824 + ((n) << 4)) n 40 
drivers/pci/controller/pci-mvebu.c #define PCIE_WIN04_REMAP_OFF(n) (0x182c + ((n) << 4)) n 365 drivers/pci/hotplug/acpiphp_glue.c unsigned char max, n; n 378 drivers/pci/hotplug/acpiphp_glue.c n = pci_bus_max_busnr(tmp); n 379 drivers/pci/hotplug/acpiphp_glue.c if (n > max) n 380 drivers/pci/hotplug/acpiphp_glue.c max = n; n 255 drivers/pci/hotplug/cpci_hotplug_pci.c int n; n 262 drivers/pci/hotplug/cpci_hotplug_pci.c n = pci_scan_slot(slot->bus, slot->devfn); n 263 drivers/pci/hotplug/cpci_hotplug_pci.c dbg("%s: pci_scan_slot returned %d", __func__, n); n 394 drivers/pci/iov.c struct attribute *a, int n) n 80 drivers/pci/pci-driver.c struct pci_dynid *dynid, *n; n 83 drivers/pci/pci-driver.c list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) { n 169 drivers/pci/pci-driver.c struct pci_dynid *dynid, *n; n 183 drivers/pci/pci-driver.c list_for_each_entry_safe(dynid, n, &pdrv->dynids.list, node) { n 80 drivers/pci/pci-label.c struct attribute *attr, int n) n 225 drivers/pci/pci-label.c struct attribute *attr, int n) n 1481 drivers/pci/pci-sysfs.c struct attribute *a, int n) n 1500 drivers/pci/pci-sysfs.c struct attribute *a, int n) n 1512 drivers/pci/pci-sysfs.c struct attribute *a, int n) n 1524 drivers/pci/pci-sysfs.c struct attribute *a, int n) n 155 drivers/pci/pci.c unsigned char max, n; n 159 drivers/pci/pci.c n = pci_bus_max_busnr(tmp); n 160 drivers/pci/pci.c if (n > max) n 161 drivers/pci/pci.c max = n; n 2070 drivers/pci/pci.c struct pci_pme_device *pme_dev, *n; n 2073 drivers/pci/pci.c list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) { n 3123 drivers/pci/pci.c struct hlist_node *n; n 3125 drivers/pci/pci.c hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next) n 635 drivers/pci/pcie/aer.c struct attribute *a, int n) n 1210 drivers/pci/pcie/aspm.c size_t n) n 1233 drivers/pci/pcie/aspm.c return n; n 1249 drivers/pci/pcie/aspm.c size_t n) n 1263 drivers/pci/pcie/aspm.c return n; n 829 drivers/pci/probe.c struct resource_entry *window, *n; n 902 drivers/pci/probe.c resource_list_for_each_entry_safe(window, n, &resources) { n 330 drivers/pci/proc.c loff_t n = *pos; n 333 drivers/pci/proc.c if (!n--) n 162 drivers/pci/search.c struct list_head *n; n 167 drivers/pci/search.c n = from ? 
from->node.next : pci_root_buses.next; n 168 drivers/pci/search.c if (n != &pci_root_buses) n 169 drivers/pci/search.c b = list_entry(n, struct pci_bus, node); n 135 drivers/pci/setup-bus.c struct list_head *n; n 159 drivers/pci/setup-bus.c n = head; n 167 drivers/pci/setup-bus.c n = &dev_res->list; n 172 drivers/pci/setup-bus.c list_add_tail(&tmp->list, n); n 48 drivers/pcmcia/cirrus.h #define PD67_MEM_PAGE(n) ((n)+5) /* PCI window bits 31:24 */ n 57 drivers/pcmcia/cirrus.h #define PD67_TIME_SETUP(n) (0x3a + 3*(n)) n 58 drivers/pcmcia/cirrus.h #define PD67_TIME_CMD(n) (0x3b + 3*(n)) n 59 drivers/pcmcia/cirrus.h #define PD67_TIME_RECOV(n) (0x3c + 3*(n)) n 357 drivers/pcmcia/cistpl.c struct list_head *l, *n; n 360 drivers/pcmcia/cistpl.c list_for_each_safe(l, n, &s->cis_cache) { n 1204 drivers/pcmcia/cistpl.c int n; n 1209 drivers/pcmcia/cistpl.c for (n = 0; n < CISTPL_MAX_DEVICES; n++) { n 1212 drivers/pcmcia/cistpl.c geo->geo[n].buswidth = p[0]; n 1213 drivers/pcmcia/cistpl.c geo->geo[n].erase_block = 1 << (p[1]-1); n 1214 drivers/pcmcia/cistpl.c geo->geo[n].read_block = 1 << (p[2]-1); n 1215 drivers/pcmcia/cistpl.c geo->geo[n].write_block = 1 << (p[3]-1); n 1216 drivers/pcmcia/cistpl.c geo->geo[n].partition = 1 << (p[4]-1); n 1217 drivers/pcmcia/cistpl.c geo->geo[n].interleave = 1 << (p[5]-1); n 1220 drivers/pcmcia/cistpl.c geo->ngeo = n; n 44 drivers/pcmcia/cs.c #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0444) n 138 drivers/pcmcia/ds.c struct pcmcia_dynid *dynid, *n; n 141 drivers/pcmcia/ds.c list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) { n 168 drivers/pcmcia/i82365.c #define ISA_LOCK(n, f) spin_lock_irqsave(&isa_lock, f) n 169 drivers/pcmcia/i82365.c #define ISA_UNLOCK(n, f) spin_unlock_irqrestore(&isa_lock, f) n 71 drivers/pcmcia/max1600.c int n = MAX1600_GPIO_0VPP; n 88 drivers/pcmcia/max1600.c n = MAX1600_GPIO_MAX; n 118 drivers/pcmcia/max1600.c return gpiod_set_array_value_cansleep(n, m->gpio, NULL, values); n 238 drivers/pcmcia/pxa2xx_base.c #define SKT_DEV_INFO_SIZE(n) \ n 239 drivers/pcmcia/pxa2xx_base.c (sizeof(struct skt_dev_info) + (n)*sizeof(struct soc_pcmcia_socket)) n 39 drivers/pcmcia/rsrc_nonstatic.c #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0444) n 170 drivers/pcmcia/sa11xx_base.c #define SKT_DEV_INFO_SIZE(n) \ n 171 drivers/pcmcia/sa11xx_base.c (sizeof(struct skt_dev_info) + (n)*sizeof(struct soc_pcmcia_socket)) n 355 drivers/pcmcia/soc_common.c int n = 0; n 358 drivers/pcmcia/soc_common.c descs[n] = skt->gpio_reset; n 359 drivers/pcmcia/soc_common.c __assign_bit(n++, values, state->flags & SS_RESET); n 362 drivers/pcmcia/soc_common.c descs[n] = skt->gpio_bus_enable; n 363 drivers/pcmcia/soc_common.c __assign_bit(n++, values, state->flags & SS_OUTPUT_ENA); n 366 drivers/pcmcia/soc_common.c if (n) n 367 drivers/pcmcia/soc_common.c gpiod_set_array_value_cansleep(n, descs, NULL, values); n 45 drivers/perf/arm-ccn.c #define CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(n) ((n) * 8) n 49 drivers/perf/arm-ccn.c #define CCN_DT_PMEVCNT(n) (0x0100 + (n) * 0x8) n 63 drivers/perf/arm-ccn.c #define CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4) n 67 drivers/perf/arm-ccn.c #define CCN_XP_DT_CONFIG__DT_CFG__SHIFT(n) ((n) * 4) n 71 drivers/perf/arm-ccn.c #define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(n) (0x2 + (n)) n 72 drivers/perf/arm-ccn.c #define CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(n) (0x4 + (n)) n 73 drivers/perf/arm-ccn.c #define CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(d, n) (0x8 + (d) * 4 + (n)) n 75 
drivers/perf/arm-ccn.c #define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(n) (0 + (n) * 8) n 77 drivers/perf/arm-ccn.c #define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(n) (1 + (n) * 8) n 79 drivers/perf/arm-ccn.c #define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(n) (2 + (n) * 8) n 81 drivers/perf/arm-ccn.c #define CCN_XP_DT_CMP_VAL_L(n) (0x0310 + (n) * 0x40) n 82 drivers/perf/arm-ccn.c #define CCN_XP_DT_CMP_VAL_H(n) (0x0318 + (n) * 0x40) n 83 drivers/perf/arm-ccn.c #define CCN_XP_DT_CMP_MASK_L(n) (0x0320 + (n) * 0x40) n 84 drivers/perf/arm-ccn.c #define CCN_XP_DT_CMP_MASK_H(n) (0x0328 + (n) * 0x40) n 87 drivers/perf/arm-ccn.c #define CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(n) (12 + (n) * 4) n 91 drivers/perf/arm-ccn.c #define CCN_XP_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 7) n 95 drivers/perf/arm-ccn.c #define CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4) n 99 drivers/perf/arm-ccn.c #define CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4) n 610 drivers/perf/arm_dsu_pmu.c int i = 0, n, cpu; n 613 drivers/perf/arm_dsu_pmu.c n = of_count_phandle_with_args(dev, "cpus", NULL); n 614 drivers/perf/arm_dsu_pmu.c if (n <= 0) n 616 drivers/perf/arm_dsu_pmu.c for (; i < n; i++) { n 57 drivers/perf/arm_smmuv3_pmu.c #define SMMU_PMCG_EVCNTR(n, stride) (SMMU_PMCG_EVCNTR0 + (n) * (stride)) n 59 drivers/perf/arm_smmuv3_pmu.c #define SMMU_PMCG_EVTYPER(n) (SMMU_PMCG_EVTYPER0 + (n) * 4) n 62 drivers/perf/arm_smmuv3_pmu.c #define SMMU_PMCG_SMR(n) (SMMU_PMCG_SMR0 + (n) * 4) n 21 drivers/phy/allwinner/phy-sun6i-mipi-dphy.c #define SUN6I_DPHY_GCTL_LANE_NUM(n) ((((n) - 1) & 3) << 4) n 28 drivers/phy/allwinner/phy-sun6i-mipi-dphy.c #define SUN6I_DPHY_TX_TIME0_HS_TRAIL(n) (((n) & 0xff) << 24) n 29 drivers/phy/allwinner/phy-sun6i-mipi-dphy.c #define SUN6I_DPHY_TX_TIME0_HS_PREPARE(n) (((n) & 0xff) << 16) n 30 drivers/phy/allwinner/phy-sun6i-mipi-dphy.c #define SUN6I_DPHY_TX_TIME0_LP_CLK_DIV(n) ((n) & 0xff) n 33 drivers/phy/allwinner/phy-sun6i-mipi-dphy.c #define SUN6I_DPHY_TX_TIME1_CLK_POST(n) (((n) & 0xff) << 24) n 34 drivers/phy/allwinner/phy-sun6i-mipi-dphy.c #define SUN6I_DPHY_TX_TIME1_CLK_PRE(n) (((n) & 0xff) << 16) n 35 drivers/phy/allwinner/phy-sun6i-mipi-dphy.c #define SUN6I_DPHY_TX_TIME1_CLK_ZERO(n) (((n) & 0xff) << 8) n 36 drivers/phy/allwinner/phy-sun6i-mipi-dphy.c #define SUN6I_DPHY_TX_TIME1_CLK_PREPARE(n) ((n) & 0xff) n 39 drivers/phy/allwinner/phy-sun6i-mipi-dphy.c #define SUN6I_DPHY_TX_TIME2_CLK_TRAIL(n) ((n) & 0xff) n 44 drivers/phy/allwinner/phy-sun6i-mipi-dphy.c #define SUN6I_DPHY_TX_TIME4_HS_TX_ANA1(n) (((n) & 0xff) << 8) n 45 drivers/phy/allwinner/phy-sun6i-mipi-dphy.c #define SUN6I_DPHY_TX_TIME4_HS_TX_ANA0(n) ((n) & 0xff) n 50 drivers/phy/allwinner/phy-sun6i-mipi-dphy.c #define SUN6I_DPHY_ANA0_REG_DMPD(n) (((n) & 0xf) << 24) n 51 drivers/phy/allwinner/phy-sun6i-mipi-dphy.c #define SUN6I_DPHY_ANA0_REG_SLV(n) (((n) & 7) << 12) n 52 drivers/phy/allwinner/phy-sun6i-mipi-dphy.c #define SUN6I_DPHY_ANA0_REG_DEN(n) (((n) & 0xf) << 8) n 56 drivers/phy/allwinner/phy-sun6i-mipi-dphy.c #define SUN6I_DPHY_ANA1_REG_CSMPS(n) (((n) & 3) << 28) n 57 drivers/phy/allwinner/phy-sun6i-mipi-dphy.c #define SUN6I_DPHY_ANA1_REG_SVTT(n) (((n) & 0xf) << 24) n 60 drivers/phy/allwinner/phy-sun6i-mipi-dphy.c #define SUN6I_DPHY_ANA2_EN_P2S_CPU(n) (((n) & 0xf) << 24) n 66 drivers/phy/allwinner/phy-sun6i-mipi-dphy.c #define SUN6I_DPHY_ANA3_EN_VTTD(n) (((n) & 0xf) << 28) n 76 drivers/phy/allwinner/phy-sun6i-mipi-dphy.c #define SUN6I_DPHY_ANA4_REG_DMPLVD(n) (((n) & 0xf) << 20) n 77 drivers/phy/allwinner/phy-sun6i-mipi-dphy.c #define 
SUN6I_DPHY_ANA4_REG_CKDV(n) (((n) & 0x1f) << 12) n 78 drivers/phy/allwinner/phy-sun6i-mipi-dphy.c #define SUN6I_DPHY_ANA4_REG_TMSC(n) (((n) & 3) << 10) n 79 drivers/phy/allwinner/phy-sun6i-mipi-dphy.c #define SUN6I_DPHY_ANA4_REG_TMSD(n) (((n) & 3) << 8) n 80 drivers/phy/allwinner/phy-sun6i-mipi-dphy.c #define SUN6I_DPHY_ANA4_REG_TXDNSC(n) (((n) & 3) << 6) n 81 drivers/phy/allwinner/phy-sun6i-mipi-dphy.c #define SUN6I_DPHY_ANA4_REG_TXDNSD(n) (((n) & 3) << 4) n 82 drivers/phy/allwinner/phy-sun6i-mipi-dphy.c #define SUN6I_DPHY_ANA4_REG_TXPUSC(n) (((n) & 3) << 2) n 83 drivers/phy/allwinner/phy-sun6i-mipi-dphy.c #define SUN6I_DPHY_ANA4_REG_TXPUSD(n) ((n) & 3) n 132 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c u32 n[] = {0, 1}; n 140 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c n[i] += (n[i ^ 1] * whole); n 142 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c if ((n[i] > max_n) || (d[i] > max_d)) { n 150 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c *pnum = n[i]; n 162 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c u32 n; n 228 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c n = 0; n 232 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c n = tmp; n 234 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c cfg->m_prg_hs_prepare = n; n 247 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c n = (144 * (dphy_opts->hs_clk_rate / 1000000) - 47500) / 10000; n 248 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c cfg->m_prg_hs_zero = n < 1 ? 1 : n; n 251 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c n = (34 * (dphy_opts->hs_clk_rate / 1000000) - 2500) / 1000; n 252 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c cfg->mc_prg_hs_zero = n < 1 ? 1 : n; n 255 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c n = (103 * (dphy_opts->hs_clk_rate / 1000000) + 10000) / 10000; n 256 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c if (n > 15) n 257 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c n = 15; n 258 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c if (n < 1) n 259 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c n = 1; n 260 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c cfg->m_prg_hs_trail = n; n 261 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c cfg->mc_prg_hs_trail = n; n 37 drivers/phy/marvell/phy-armada38x-comphy.c unsigned int n; n 87 drivers/phy/marvell/phy-armada38x-comphy.c "comphy%u: timed out waiting for status\n", lane->n); n 153 drivers/phy/marvell/phy-armada38x-comphy.c val = (val >> (4 * lane->n)) & 0xf; n 155 drivers/phy/marvell/phy-armada38x-comphy.c if (!gbe_mux[lane->n][lane->port] || n 156 drivers/phy/marvell/phy-armada38x-comphy.c val != gbe_mux[lane->n][lane->port]) { n 158 drivers/phy/marvell/phy-armada38x-comphy.c "comphy%u: not configured for GBE\n", lane->n); n 210 drivers/phy/marvell/phy-armada38x-comphy.c priv->lane[val].n = val; n 125 drivers/phy/marvell/phy-mvebu-a3700-comphy.c int i, n = ARRAY_SIZE(mvebu_a3700_comphy_modes); n 131 drivers/phy/marvell/phy-mvebu-a3700-comphy.c for (i = 0; i < n; i++) { n 139 drivers/phy/marvell/phy-mvebu-a3700-comphy.c if (i == n) n 20 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_SERDES_CFG0(n) (0x0 + (n) * 0x1000) n 22 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_SERDES_CFG0_GEN_RX(n) ((n) << 3) n 23 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_SERDES_CFG0_GEN_TX(n) ((n) << 7) n 28 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_SERDES_CFG1(n) (0x4 + (n) * 0x1000) n 33 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_SERDES_CFG2(n) (0x8 + (n) * 0x1000) n 35 
drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_SERDES_STATUS0(n) (0x18 + (n) * 0x1000) n 39 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_PWRPLL_CTRL(n) (0x804 + (n) * 0x1000) n 40 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_PWRPLL_CTRL_RFREQ(n) ((n) << 0) n 41 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_PWRPLL_PHY_MODE(n) ((n) << 5) n 42 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_IMP_CAL(n) (0x80c + (n) * 0x1000) n 43 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_IMP_CAL_TX_EXT(n) ((n) << 10) n 45 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_DFE_RES(n) (0x81c + (n) * 0x1000) n 47 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_COEF(n) (0x828 + (n) * 0x1000) n 50 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_GEN1_S0(n) (0x834 + (n) * 0x1000) n 51 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_GEN1_S0_TX_AMP(n) ((n) << 1) n 52 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_GEN1_S0_TX_EMPH(n) ((n) << 7) n 53 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_GEN1_S1(n) (0x838 + (n) * 0x1000) n 54 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_GEN1_S1_RX_MUL_PI(n) ((n) << 0) n 55 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_GEN1_S1_RX_MUL_PF(n) ((n) << 3) n 56 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_GEN1_S1_RX_MUL_FI(n) ((n) << 6) n 57 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_GEN1_S1_RX_MUL_FF(n) ((n) << 8) n 59 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_GEN1_S1_RX_DIV(n) ((n) << 11) n 60 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_GEN1_S2(n) (0x8f4 + (n) * 0x1000) n 61 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_GEN1_S2_TX_EMPH(n) ((n) << 0) n 63 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_LOOPBACK(n) (0x88c + (n) * 0x1000) n 64 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_LOOPBACK_DBUS_WIDTH(n) ((n) << 1) n 65 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_VDD_CAL0(n) (0x908 + (n) * 0x1000) n 67 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_EXT_SELV(n) (0x914 + (n) * 0x1000) n 68 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_EXT_SELV_RX_SAMPL(n) ((n) << 5) n 69 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_MISC_CTRL0(n) (0x93c + (n) * 0x1000) n 72 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_RX_CTRL1(n) (0x940 + (n) * 0x1000) n 75 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_SPEED_DIV(n) (0x954 + (n) * 0x1000) n 77 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_SP_CALIB(n) (0x96c + (n) * 0x1000) n 78 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_SP_CALIB_SAMPLER(n) ((n) << 8) n 80 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_TX_SLEW_RATE(n) (0x974 + (n) * 0x1000) n 81 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_TX_SLEW_RATE_EMPH(n) ((n) << 5) n 82 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_TX_SLEW_RATE_SLC(n) ((n) << 10) n 83 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_DTL_CTRL(n) (0x984 + (n) * 0x1000) n 85 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_FRAME_DETECT0(n) (0xa14 + (n) * 0x1000) n 86 
drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_FRAME_DETECT0_PATN(n) ((n) << 7) n 87 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_FRAME_DETECT3(n) (0xa20 + (n) * 0x1000) n 89 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_DME(n) (0xa28 + (n) * 0x1000) n 91 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_TRAINING0(n) (0xa68 + (n) * 0x1000) n 93 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_TRAINING5(n) (0xaa4 + (n) * 0x1000) n 94 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_TRAINING5_RX_TIMER(n) ((n) << 0) n 95 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_TX_TRAIN_PRESET(n) (0xb1c + (n) * 0x1000) n 98 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_GEN1_S3(n) (0xc40 + (n) * 0x1000) n 100 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_GEN1_S4(n) (0xc44 + (n) * 0x1000) n 101 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_GEN1_S4_DFE_RES(n) ((n) << 8) n 102 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_TX_PRESET(n) (0xc68 + (n) * 0x1000) n 103 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_TX_PRESET_INDEX(n) ((n) << 0) n 104 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_GEN1_S5(n) (0xd38 + (n) * 0x1000) n 105 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_GEN1_S5_ICP(n) ((n) << 0) n 108 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_CONF1(n) (0x1000 + (n) * 0x28) n 111 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_CONF6(n) (0x1014 + (n) * 0x28) n 114 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_SELECTOR_PHY(n) ((n) * 0x4) n 116 drivers/phy/marvell/phy-mvebu-cp110-comphy.c #define MVEBU_COMPHY_PIPE_SELECTOR_PIPE(n) ((n) * 0x4) n 285 drivers/phy/marvell/phy-mvebu-cp110-comphy.c int i, n = ARRAY_SIZE(mvebu_comphy_cp110_modes); n 294 drivers/phy/marvell/phy-mvebu-cp110-comphy.c for (i = 0; i < n; i++) { n 303 drivers/phy/marvell/phy-mvebu-cp110-comphy.c if (i == n) n 14 drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.h #define TX_OFF(n, x) (0x400 + (0x400 * n) + x) n 15 drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.h #define RX_OFF(n, x) (0x600 + (0x400 * n) + x) n 15 drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.h #define TX_OFF(n, x) (0x400 + (0x400 * n) + x) n 16 drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.h #define RX_OFF(n, x) (0x600 + (0x400 * n) + x) n 49 drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.h #define QSERDES_TX_EMP_POST1_LVL(n) TX_OFF(n, 0x08) n 50 drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.h #define QSERDES_TX_DRV_LVL(n) TX_OFF(n, 0x0C) n 51 drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.h #define QSERDES_TX_LANE_MODE(n) TX_OFF(n, 0x54) n 54 drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.h #define QSERDES_RX_CDR_CONTROL1(n) RX_OFF(n, 0x0) n 55 drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.h #define QSERDES_RX_CDR_CONTROL_HALF(n) RX_OFF(n, 0x8) n 56 drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.h #define QSERDES_RX_RX_EQ_GAIN1_LSB(n) RX_OFF(n, 0xA8) n 57 drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.h #define QSERDES_RX_RX_EQ_GAIN1_MSB(n) RX_OFF(n, 0xAC) n 58 drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.h #define QSERDES_RX_RX_EQ_GAIN2_LSB(n) RX_OFF(n, 0xB0) n 59 drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.h #define QSERDES_RX_RX_EQ_GAIN2_MSB(n) RX_OFF(n, 0xB4) n 60 drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.h #define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(n) RX_OFF(n, 0xBC) n 61 
drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.h #define QSERDES_RX_CDR_CONTROL_QUARTER(n) RX_OFF(n, 0xC) n 62 drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.h #define QSERDES_RX_SIGDET_CNTRL(n) RX_OFF(n, 0x100) n 388 drivers/phy/renesas/phy-rcar-gen2.c int error, n; n 402 drivers/phy/renesas/phy-rcar-gen2.c for (n = 0; n < PHYS_PER_CHANNEL; n++) { n 403 drivers/phy/renesas/phy-rcar-gen2.c struct rcar_gen2_phy *phy = &channel->phys[n]; n 406 drivers/phy/renesas/phy-rcar-gen2.c phy->number = n; n 407 drivers/phy/renesas/phy-rcar-gen2.c phy->select_value = data->select_value[channel_num][n]; n 147 drivers/phy/rockchip/phy-rockchip-typec.c #define XCVR_PSM_RCTRL(n) ((0x4001 | ((n) << 9)) << 2) n 148 drivers/phy/rockchip/phy-rockchip-typec.c #define XCVR_PSM_CAL_TMR(n) ((0x4002 | ((n) << 9)) << 2) n 149 drivers/phy/rockchip/phy-rockchip-typec.c #define XCVR_PSM_A0IN_TMR(n) ((0x4003 | ((n) << 9)) << 2) n 150 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_TXCC_CAL_SCLR_MULT(n) ((0x4047 | ((n) << 9)) << 2) n 151 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_TXCC_CPOST_MULT_00(n) ((0x404c | ((n) << 9)) << 2) n 152 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_TXCC_CPOST_MULT_01(n) ((0x404d | ((n) << 9)) << 2) n 153 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_TXCC_CPOST_MULT_10(n) ((0x404e | ((n) << 9)) << 2) n 154 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_TXCC_CPOST_MULT_11(n) ((0x404f | ((n) << 9)) << 2) n 155 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_TXCC_MGNFS_MULT_000(n) ((0x4050 | ((n) << 9)) << 2) n 156 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_TXCC_MGNFS_MULT_001(n) ((0x4051 | ((n) << 9)) << 2) n 157 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_TXCC_MGNFS_MULT_010(n) ((0x4052 | ((n) << 9)) << 2) n 158 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_TXCC_MGNFS_MULT_011(n) ((0x4053 | ((n) << 9)) << 2) n 159 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_TXCC_MGNFS_MULT_100(n) ((0x4054 | ((n) << 9)) << 2) n 160 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_TXCC_MGNFS_MULT_101(n) ((0x4055 | ((n) << 9)) << 2) n 161 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_TXCC_MGNFS_MULT_110(n) ((0x4056 | ((n) << 9)) << 2) n 162 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_TXCC_MGNFS_MULT_111(n) ((0x4057 | ((n) << 9)) << 2) n 163 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_TXCC_MGNLS_MULT_000(n) ((0x4058 | ((n) << 9)) << 2) n 164 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_TXCC_MGNLS_MULT_001(n) ((0x4059 | ((n) << 9)) << 2) n 165 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_TXCC_MGNLS_MULT_010(n) ((0x405a | ((n) << 9)) << 2) n 166 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_TXCC_MGNLS_MULT_011(n) ((0x405b | ((n) << 9)) << 2) n 167 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_TXCC_MGNLS_MULT_100(n) ((0x405c | ((n) << 9)) << 2) n 168 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_TXCC_MGNLS_MULT_101(n) ((0x405d | ((n) << 9)) << 2) n 169 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_TXCC_MGNLS_MULT_110(n) ((0x405e | ((n) << 9)) << 2) n 170 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_TXCC_MGNLS_MULT_111(n) ((0x405f | ((n) << 9)) << 2) n 172 drivers/phy/rockchip/phy-rockchip-typec.c #define XCVR_DIAG_PLLDRC_CTRL(n) ((0x40e0 | ((n) << 9)) << 2) n 173 drivers/phy/rockchip/phy-rockchip-typec.c #define XCVR_DIAG_BIDI_CTRL(n) ((0x40e8 | ((n) << 9)) << 2) n 174 drivers/phy/rockchip/phy-rockchip-typec.c #define XCVR_DIAG_LANE_FCM_EN_MGN(n) ((0x40f2 | ((n) << 9)) 
<< 2) n 175 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_PSC_A0(n) ((0x4100 | ((n) << 9)) << 2) n 176 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_PSC_A1(n) ((0x4101 | ((n) << 9)) << 2) n 177 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_PSC_A2(n) ((0x4102 | ((n) << 9)) << 2) n 178 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_PSC_A3(n) ((0x4103 | ((n) << 9)) << 2) n 179 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_RCVDET_CTRL(n) ((0x4120 | ((n) << 9)) << 2) n 180 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_RCVDET_EN_TMR(n) ((0x4122 | ((n) << 9)) << 2) n 181 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_RCVDET_ST_TMR(n) ((0x4123 | ((n) << 9)) << 2) n 182 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_DIAG_TX_DRV(n) ((0x41e1 | ((n) << 9)) << 2) n 246 drivers/phy/rockchip/phy-rockchip-typec.c #define RX_PSC_A0(n) ((0x8000 | ((n) << 9)) << 2) n 247 drivers/phy/rockchip/phy-rockchip-typec.c #define RX_PSC_A1(n) ((0x8001 | ((n) << 9)) << 2) n 248 drivers/phy/rockchip/phy-rockchip-typec.c #define RX_PSC_A2(n) ((0x8002 | ((n) << 9)) << 2) n 249 drivers/phy/rockchip/phy-rockchip-typec.c #define RX_PSC_A3(n) ((0x8003 | ((n) << 9)) << 2) n 250 drivers/phy/rockchip/phy-rockchip-typec.c #define RX_PSC_CAL(n) ((0x8006 | ((n) << 9)) << 2) n 251 drivers/phy/rockchip/phy-rockchip-typec.c #define RX_PSC_RDY(n) ((0x8007 | ((n) << 9)) << 2) n 259 drivers/phy/rockchip/phy-rockchip-typec.c #define RX_SIGDET_HL_FILT_TMR(n) ((0x8090 | ((n) << 9)) << 2) n 284 drivers/phy/rockchip/phy-rockchip-typec.c #define RX_REE_CTRL_DATA_MASK(n) ((0x81bb | ((n) << 9)) << 2) n 285 drivers/phy/rockchip/phy-rockchip-typec.c #define RX_DIAG_SIGDET_TUNE(n) ((0x81dc | ((n) << 9)) << 2) n 302 drivers/phy/rockchip/phy-rockchip-typec.c #define PHY_PMA_ISO_XCVR_CTRL(n) ((0xcc11 | ((n) << 6)) << 2) n 303 drivers/phy/rockchip/phy-rockchip-typec.c #define PHY_PMA_ISO_LINK_MODE(n) ((0xcc12 | ((n) << 6)) << 2) n 304 drivers/phy/rockchip/phy-rockchip-typec.c #define PHY_PMA_ISO_PWRST_CTRL(n) ((0xcc13 | ((n) << 6)) << 2) n 305 drivers/phy/rockchip/phy-rockchip-typec.c #define PHY_PMA_ISO_TX_DATA_LO(n) ((0xcc14 | ((n) << 6)) << 2) n 306 drivers/phy/rockchip/phy-rockchip-typec.c #define PHY_PMA_ISO_TX_DATA_HI(n) ((0xcc15 | ((n) << 6)) << 2) n 307 drivers/phy/rockchip/phy-rockchip-typec.c #define PHY_PMA_ISO_RX_DATA_LO(n) ((0xcc16 | ((n) << 6)) << 2) n 308 drivers/phy/rockchip/phy-rockchip-typec.c #define PHY_PMA_ISO_RX_DATA_HI(n) ((0xcc17 | ((n) << 6)) << 2) n 309 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_BIST_CTRL(n) ((0x4140 | ((n) << 9)) << 2) n 310 drivers/phy/rockchip/phy-rockchip-typec.c #define TX_BIST_UDDWR(n) ((0x4141 | ((n) << 9)) << 2) n 131 drivers/phy/ti/phy-ti-pipe3.c u8 n; n 415 drivers/phy/ti/phy-ti-pipe3.c val |= dpll_params->n << PLL_REGN_SHIFT; n 150 drivers/pinctrl/bcm/pinctrl-cygnus-mux.c #define CYGNUS_PIN_DESC(p, n, i, o, s) \ n 153 drivers/pinctrl/bcm/pinctrl-cygnus-mux.c .name = n, \ n 170 drivers/pinctrl/bcm/pinctrl-ns2-mux.c #define NS2_PIN_DESC(p, n, b, o, s, i, pu, d) \ n 173 drivers/pinctrl/bcm/pinctrl-ns2-mux.c .name = n, \ n 141 drivers/pinctrl/bcm/pinctrl-nsp-mux.c #define NSP_PIN_DESC(p, n, g) \ n 144 drivers/pinctrl/bcm/pinctrl-nsp-mux.c .name = n, \ n 2144 drivers/pinctrl/core.c struct pinctrl_gpio_range *range, *n; n 2166 drivers/pinctrl/core.c list_for_each_entry_safe(range, n, &pctldev->gpio_ranges, node) n 263 drivers/pinctrl/freescale/pinctrl-mxs.c int n; n 266 drivers/pinctrl/freescale/pinctrl-mxs.c for (n = 0; n < num_configs; n++) 
{ n 267 drivers/pinctrl/freescale/pinctrl-mxs.c config = configs[n]; n 101 drivers/pinctrl/intel/pinctrl-baytrail.c #define COMMUNITY(p, n, map) \ n 104 drivers/pinctrl/intel/pinctrl-baytrail.c .npins = (n), \ n 181 drivers/pinctrl/intel/pinctrl-cherryview.c #define PIN_GROUP_WITH_ALT(n, p, m, i) \ n 183 drivers/pinctrl/intel/pinctrl-cherryview.c .name = (n), \ n 190 drivers/pinctrl/intel/pinctrl-cherryview.c #define PIN_GROUP_WITH_OVERRIDE(n, p, m, i, o) \ n 192 drivers/pinctrl/intel/pinctrl-cherryview.c .name = (n), \ n 23 drivers/pinctrl/intel/pinctrl-denverton.c #define DNV_GPP(n, s, e) \ n 25 drivers/pinctrl/intel/pinctrl-denverton.c .reg_num = (n), \ n 132 drivers/pinctrl/intel/pinctrl-intel.h #define PIN_GROUP(n, p, m) \ n 134 drivers/pinctrl/intel/pinctrl-intel.h .name = (n), \ n 143 drivers/pinctrl/intel/pinctrl-intel.h #define FUNCTION(n, g) \ n 145 drivers/pinctrl/intel/pinctrl-intel.h .name = (n), \ n 24 drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h #define BANK_PMX(n, f, l, r, o) \ n 26 drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h .name = n, \ n 140 drivers/pinctrl/meson/pinctrl-meson.h #define BANK_DS(n, f, l, fi, li, per, peb, pr, pb, dr, db, or, ob, ir, ib, \ n 143 drivers/pinctrl/meson/pinctrl-meson.h .name = n, \ n 158 drivers/pinctrl/meson/pinctrl-meson.h #define BANK(n, f, l, fi, li, per, peb, pr, pb, dr, db, or, ob, ir, ib) \ n 159 drivers/pinctrl/meson/pinctrl-meson.h BANK_DS(n, f, l, fi, li, per, peb, pr, pb, dr, db, or, ob, ir, ib, 0, 0) n 869 drivers/pinctrl/mvebu/pinctrl-armada-37xx.c int n, num = 0, funcsize = info->data->nr_pins; n 871 drivers/pinctrl/mvebu/pinctrl-armada-37xx.c for (n = 0; n < info->ngroups; n++) { n 872 drivers/pinctrl/mvebu/pinctrl-armada-37xx.c struct armada_37xx_pin_group *grp = &info->groups[n]; n 921 drivers/pinctrl/mvebu/pinctrl-armada-37xx.c int n; n 923 drivers/pinctrl/mvebu/pinctrl-armada-37xx.c for (n = 0; n < info->nfuncs; n++) { n 924 drivers/pinctrl/mvebu/pinctrl-armada-37xx.c const char *name = funcs[n].name; n 928 drivers/pinctrl/mvebu/pinctrl-armada-37xx.c funcs[n].groups = devm_kcalloc(info->dev, n 929 drivers/pinctrl/mvebu/pinctrl-armada-37xx.c funcs[n].ngroups, n 930 drivers/pinctrl/mvebu/pinctrl-armada-37xx.c sizeof(*(funcs[n].groups)), n 932 drivers/pinctrl/mvebu/pinctrl-armada-37xx.c if (!funcs[n].groups) n 935 drivers/pinctrl/mvebu/pinctrl-armada-37xx.c groups = funcs[n].groups; n 85 drivers/pinctrl/mvebu/pinctrl-mvebu.c unsigned n; n 86 drivers/pinctrl/mvebu/pinctrl-mvebu.c for (n = 0; n < pctl->num_groups; n++) { n 87 drivers/pinctrl/mvebu/pinctrl-mvebu.c if (pid >= pctl->groups[n].pins[0] && n 88 drivers/pinctrl/mvebu/pinctrl-mvebu.c pid < pctl->groups[n].pins[0] + n 89 drivers/pinctrl/mvebu/pinctrl-mvebu.c pctl->groups[n].npins) n 90 drivers/pinctrl/mvebu/pinctrl-mvebu.c return &pctl->groups[n]; n 98 drivers/pinctrl/mvebu/pinctrl-mvebu.c unsigned n; n 99 drivers/pinctrl/mvebu/pinctrl-mvebu.c for (n = 0; n < pctl->num_groups; n++) { n 100 drivers/pinctrl/mvebu/pinctrl-mvebu.c if (strcmp(name, pctl->groups[n].name) == 0) n 101 drivers/pinctrl/mvebu/pinctrl-mvebu.c return &pctl->groups[n]; n 110 drivers/pinctrl/mvebu/pinctrl-mvebu.c unsigned n; n 111 drivers/pinctrl/mvebu/pinctrl-mvebu.c for (n = 0; n < grp->num_settings; n++) { n 112 drivers/pinctrl/mvebu/pinctrl-mvebu.c if (config == grp->settings[n].val) { n 114 drivers/pinctrl/mvebu/pinctrl-mvebu.c grp->settings[n].variant)) n 115 drivers/pinctrl/mvebu/pinctrl-mvebu.c return &grp->settings[n]; n 125 drivers/pinctrl/mvebu/pinctrl-mvebu.c unsigned n; n 126 
drivers/pinctrl/mvebu/pinctrl-mvebu.c for (n = 0; n < grp->num_settings; n++) { n 127 drivers/pinctrl/mvebu/pinctrl-mvebu.c if (strcmp(name, grp->settings[n].name) == 0) { n 129 drivers/pinctrl/mvebu/pinctrl-mvebu.c grp->settings[n].variant)) n 130 drivers/pinctrl/mvebu/pinctrl-mvebu.c return &grp->settings[n]; n 139 drivers/pinctrl/mvebu/pinctrl-mvebu.c unsigned n; n 140 drivers/pinctrl/mvebu/pinctrl-mvebu.c for (n = 0; n < grp->num_settings; n++) { n 141 drivers/pinctrl/mvebu/pinctrl-mvebu.c if (grp->settings[n].flags & n 144 drivers/pinctrl/mvebu/pinctrl-mvebu.c grp->settings[n].variant)) n 145 drivers/pinctrl/mvebu/pinctrl-mvebu.c return &grp->settings[n]; n 154 drivers/pinctrl/mvebu/pinctrl-mvebu.c unsigned n; n 155 drivers/pinctrl/mvebu/pinctrl-mvebu.c for (n = 0; n < pctl->num_functions; n++) { n 156 drivers/pinctrl/mvebu/pinctrl-mvebu.c if (strcmp(name, pctl->functions[n].name) == 0) n 157 drivers/pinctrl/mvebu/pinctrl-mvebu.c return &pctl->functions[n]; n 201 drivers/pinctrl/mvebu/pinctrl-mvebu.c unsigned n; n 226 drivers/pinctrl/mvebu/pinctrl-mvebu.c for (n = 0; n < grp->num_settings; n++) { n 227 drivers/pinctrl/mvebu/pinctrl-mvebu.c if (curr == &grp->settings[n]) n 232 drivers/pinctrl/mvebu/pinctrl-mvebu.c !(pctl->variant & grp->settings[n].variant)) n 235 drivers/pinctrl/mvebu/pinctrl-mvebu.c seq_printf(s, " %s", grp->settings[n].name); n 236 drivers/pinctrl/mvebu/pinctrl-mvebu.c if (grp->settings[n].subname) n 237 drivers/pinctrl/mvebu/pinctrl-mvebu.c seq_printf(s, "(%s)", grp->settings[n].subname); n 238 drivers/pinctrl/mvebu/pinctrl-mvebu.c if (grp->settings[n].flags & n 241 drivers/pinctrl/mvebu/pinctrl-mvebu.c if (grp->settings[n].flags & MVEBU_SETTING_GPI) n 243 drivers/pinctrl/mvebu/pinctrl-mvebu.c if (grp->settings[n].flags & MVEBU_SETTING_GPO) n 404 drivers/pinctrl/mvebu/pinctrl-mvebu.c int ret, nmaps, n; n 427 drivers/pinctrl/mvebu/pinctrl-mvebu.c n = 0; n 443 drivers/pinctrl/mvebu/pinctrl-mvebu.c (*map)[n].type = PIN_MAP_TYPE_MUX_GROUP; n 444 drivers/pinctrl/mvebu/pinctrl-mvebu.c (*map)[n].data.mux.group = group; n 445 drivers/pinctrl/mvebu/pinctrl-mvebu.c (*map)[n].data.mux.function = function; n 446 drivers/pinctrl/mvebu/pinctrl-mvebu.c n++; n 496 drivers/pinctrl/mvebu/pinctrl-mvebu.c int n, s; n 506 drivers/pinctrl/mvebu/pinctrl-mvebu.c for (n = 0; n < pctl->num_groups; n++) { n 507 drivers/pinctrl/mvebu/pinctrl-mvebu.c struct mvebu_pinctrl_group *grp = &pctl->groups[n]; n 533 drivers/pinctrl/mvebu/pinctrl-mvebu.c for (n = 0; n < pctl->num_groups; n++) { n 534 drivers/pinctrl/mvebu/pinctrl-mvebu.c struct mvebu_pinctrl_group *grp = &pctl->groups[n]; n 573 drivers/pinctrl/mvebu/pinctrl-mvebu.c unsigned gid, n, k; n 602 drivers/pinctrl/mvebu/pinctrl-mvebu.c for (n = 0; n < soc->ncontrols; n++) { n 603 drivers/pinctrl/mvebu/pinctrl-mvebu.c const struct mvebu_mpp_ctrl *ctrl = &soc->controls[n]; n 630 drivers/pinctrl/mvebu/pinctrl-mvebu.c for (n = 0; n < pctl->desc.npins; n++) n 631 drivers/pinctrl/mvebu/pinctrl-mvebu.c pdesc[n].number = n; n 647 drivers/pinctrl/mvebu/pinctrl-mvebu.c for (n = 0; n < soc->ncontrols; n++) { n 648 drivers/pinctrl/mvebu/pinctrl-mvebu.c const struct mvebu_mpp_ctrl *ctrl = &soc->controls[n]; n 650 drivers/pinctrl/mvebu/pinctrl-mvebu.c &soc->control_data[n] : NULL; n 686 drivers/pinctrl/mvebu/pinctrl-mvebu.c for (n = 0; n < soc->nmodes; n++) { n 687 drivers/pinctrl/mvebu/pinctrl-mvebu.c struct mvebu_mpp_mode *mode = &soc->modes[n]; n 745 drivers/pinctrl/mvebu/pinctrl-mvebu.c for (n = 0; n < soc->ngpioranges; n++) n 746 
drivers/pinctrl/mvebu/pinctrl-mvebu.c pinctrl_add_gpio_range(pctl->pctldev, &soc->gpioranges[n]); n 270 drivers/pinctrl/pinctrl-at91-pio4.c int n, bank = -1; n 273 drivers/pinctrl/pinctrl-at91-pio4.c for (n = 0; n < atmel_pioctrl->nbanks; n++) { n 274 drivers/pinctrl/pinctrl-at91-pio4.c if (atmel_pioctrl->irqs[n] == irq) { n 275 drivers/pinctrl/pinctrl-at91-pio4.c bank = n; n 296 drivers/pinctrl/pinctrl-at91-pio4.c for_each_set_bit(n, &isr, BITS_PER_LONG) n 299 drivers/pinctrl/pinctrl-at91-pio4.c bank * ATMEL_PIO_NPINS_PER_BANK + n)); n 1692 drivers/pinctrl/pinctrl-at91.c int n; n 1710 drivers/pinctrl/pinctrl-at91.c for_each_set_bit(n, &isr, BITS_PER_LONG) { n 1712 drivers/pinctrl/pinctrl-at91.c gpio_chip->irq.domain, n)); n 61 drivers/pinctrl/pinctrl-lpc18xx.c #define LPC18XX_SCU_PINTSEL_VAL(val, n) \ n 62 drivers/pinctrl/pinctrl-lpc18xx.c ((val) << (((n) % LPC18XX_SCU_IRQ_PER_PINTSEL) * 8)) n 441 drivers/pinctrl/pinctrl-lpc18xx.c #define LPC18XX_PIN(pname, n) { \ n 442 drivers/pinctrl/pinctrl-lpc18xx.c .number = n, \ n 184 drivers/pinctrl/pinctrl-ocelot.c #define OCELOT_PIN(n) { \ n 185 drivers/pinctrl/pinctrl-ocelot.c .number = n, \ n 186 drivers/pinctrl/pinctrl-ocelot.c .name = "GPIO_"#n, \ n 187 drivers/pinctrl/pinctrl-ocelot.c .drv_data = &ocelot_pin_##n \ n 288 drivers/pinctrl/pinctrl-ocelot.c #define JAGUAR2_PIN(n) { \ n 289 drivers/pinctrl/pinctrl-ocelot.c .number = n, \ n 290 drivers/pinctrl/pinctrl-ocelot.c .name = "GPIO_"#n, \ n 291 drivers/pinctrl/pinctrl-ocelot.c .drv_data = &jaguar2_pin_##n \ n 36 drivers/pinctrl/pinctrl-st.c #define REG_PIO_PC(n) (0x20 + (n) * 0x10) n 38 drivers/pinctrl/pinctrl-st.c #define REG_PIO_SET_PC(n) (0x24 + (n) * 0x10) n 40 drivers/pinctrl/pinctrl-st.c #define REG_PIO_CLR_PC(n) (0x28 + (n) * 0x10) n 1383 drivers/pinctrl/pinctrl-st.c int n, val, ecfg; n 1399 drivers/pinctrl/pinctrl-st.c for_each_set_bit(n, &active_irqs, BITS_PER_LONG) { n 1401 drivers/pinctrl/pinctrl-st.c ecfg = ST_IRQ_EDGE_CONF(bank_edge_mask, n); n 1405 drivers/pinctrl/pinctrl-st.c val = st_gpio_get(&bank->gpio_chip, n); n 1407 drivers/pinctrl/pinctrl-st.c writel(BIT(n), n 1416 drivers/pinctrl/pinctrl-st.c generic_handle_irq(irq_find_mapping(bank->gpio_chip.irq.domain, n)); n 1438 drivers/pinctrl/pinctrl-st.c int n; n 1444 drivers/pinctrl/pinctrl-st.c for_each_set_bit(n, &status, info->nbanks) n 1445 drivers/pinctrl/pinctrl-st.c __gpio_irq_handler(&info->banks[n]); n 542 drivers/pinctrl/pinctrl-stmfx.c unsigned long n, status; n 554 drivers/pinctrl/pinctrl-stmfx.c for_each_set_bit(n, &status, gc->ngpio) { n 555 drivers/pinctrl/pinctrl-stmfx.c handle_nested_irq(irq_find_mapping(gc->irq.domain, n)); n 556 drivers/pinctrl/pinctrl-stmfx.c stmfx_pinctrl_irq_toggle_trigger(pctl, n); n 490 drivers/pinctrl/pinctrl-sx150x.c unsigned int n = d->hwirq; n 492 drivers/pinctrl/pinctrl-sx150x.c pctl->irq.masked |= BIT(n); n 499 drivers/pinctrl/pinctrl-sx150x.c unsigned int n = d->hwirq; n 501 drivers/pinctrl/pinctrl-sx150x.c pctl->irq.masked &= ~BIT(n); n 511 drivers/pinctrl/pinctrl-sx150x.c const unsigned int n = line * 2; n 513 drivers/pinctrl/pinctrl-sx150x.c SX150X_IRQ_TYPE_EDGE_FALLING) << n); n 516 drivers/pinctrl/pinctrl-sx150x.c pctl->irq.sense |= sense << n; n 523 drivers/pinctrl/pinctrl-sx150x.c unsigned int n, val = 0; n 528 drivers/pinctrl/pinctrl-sx150x.c n = d->hwirq; n 535 drivers/pinctrl/pinctrl-sx150x.c sx150x_irq_set_sense(pctl, n, val); n 542 drivers/pinctrl/pinctrl-sx150x.c unsigned long n, status; n 555 drivers/pinctrl/pinctrl-sx150x.c for_each_set_bit(n, &status, 
pctl->data->ngpios) n 556 drivers/pinctrl/pinctrl-sx150x.c handle_nested_irq(irq_find_mapping(pctl->gpio.irq.domain, n)); n 1000 drivers/pinctrl/pinctrl-sx150x.c int ret, n; n 1035 drivers/pinctrl/pinctrl-sx150x.c for (n = width, val = 0, idx = reg; n > 0; n -= 8, idx++) { n 1053 drivers/pinctrl/pinctrl-sx150x.c int ret, n; n 1060 drivers/pinctrl/pinctrl-sx150x.c n = (width - 1) & ~7; n 1062 drivers/pinctrl/pinctrl-sx150x.c const u8 byte = (val >> n) & 0xff; n 1069 drivers/pinctrl/pinctrl-sx150x.c n -= 8; n 1070 drivers/pinctrl/pinctrl-sx150x.c } while (n >= 0); n 521 drivers/pinctrl/pinctrl-tb10x.c static const char *tb10x_get_group_name(struct pinctrl_dev *pctl, unsigned n) n 524 drivers/pinctrl/pinctrl-tb10x.c return state->pingroups[n].name; n 527 drivers/pinctrl/pinctrl-tb10x.c static int tb10x_get_group_pins(struct pinctrl_dev *pctl, unsigned n, n 533 drivers/pinctrl/pinctrl-tb10x.c *pins = state->pingroups[n].pins; n 534 drivers/pinctrl/pinctrl-tb10x.c *num_pins = state->pingroups[n].pincnt; n 583 drivers/pinctrl/pinctrl-tb10x.c unsigned n) n 586 drivers/pinctrl/pinctrl-tb10x.c return state->pinfuncs[n].name; n 590 drivers/pinctrl/pinctrl-tb10x.c unsigned n, const char * const **groups, n 595 drivers/pinctrl/pinctrl-tb10x.c *groups = &state->pinfuncs[n].group; n 44 drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c #define SSBI_REG_ADDR_GPIO(n) (SSBI_REG_ADDR_GPIO_BASE + n) n 27 drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c #define SSBI_REG_ADDR_MPP(n) (SSBI_REG_ADDR_MPP_BASE + n) n 80 drivers/pinctrl/samsung/pinctrl-samsung.h #define PIN_GROUP(n, p, f) \ n 82 drivers/pinctrl/samsung/pinctrl-samsung.h .name = n, \ n 88 drivers/pinctrl/samsung/pinctrl-samsung.h #define PMX_FUNC(n, g) \ n 90 drivers/pinctrl/samsung/pinctrl-samsung.h .name = n, \ n 258 drivers/pinctrl/sh-pfc/core.c u32 n; n 266 drivers/pinctrl/sh-pfc/core.c for (n = 0; n < ncomb; n++) { n 267 drivers/pinctrl/sh-pfc/core.c if (config_reg->enum_ids[pos + n] == enum_id) { n 270 drivers/pinctrl/sh-pfc/core.c *valuep = n; n 642 drivers/pinctrl/sh-pfc/core.c unsigned int i, n = 0; n 646 drivers/pinctrl/sh-pfc/core.c do_reg(pfc, pfc->info->cfg_regs[i].reg, n++); n 650 drivers/pinctrl/sh-pfc/core.c do_reg(pfc, pfc->info->drive_regs[i].reg, n++); n 654 drivers/pinctrl/sh-pfc/core.c do_reg(pfc, pfc->info->bias_regs[i].puen, n++); n 656 drivers/pinctrl/sh-pfc/core.c do_reg(pfc, pfc->info->bias_regs[i].pud, n++); n 661 drivers/pinctrl/sh-pfc/core.c do_reg(pfc, pfc->info->ioctrl_regs[i].reg, n++); n 663 drivers/pinctrl/sh-pfc/core.c return n; n 668 drivers/pinctrl/sh-pfc/core.c unsigned int n; n 674 drivers/pinctrl/sh-pfc/core.c n = sh_pfc_walk_regs(pfc, sh_pfc_nop_reg); n 675 drivers/pinctrl/sh-pfc/core.c if (!n) n 678 drivers/pinctrl/sh-pfc/core.c pfc->saved_regs = devm_kmalloc_array(pfc->dev, n, n 684 drivers/pinctrl/sh-pfc/core.c dev_dbg(pfc->dev, "Allocated space to save %u regs\n", n); n 716 drivers/pinctrl/sh-pfc/core.c static bool __init is0s(const u16 *enum_ids, unsigned int n) n 720 drivers/pinctrl/sh-pfc/core.c for (i = 0; i < n; i++) n 733 drivers/pinctrl/sh-pfc/core.c unsigned int i, n, rw, fw; n 740 drivers/pinctrl/sh-pfc/core.c for (i = 0, n = 0, rw = 0; (fw = cfg_reg->var_field_width[i]); i++) { n 741 drivers/pinctrl/sh-pfc/core.c if (fw > 3 && is0s(&cfg_reg->enum_ids[n], 1 << fw)) { n 746 drivers/pinctrl/sh-pfc/core.c n += 1 << fw; n 756 drivers/pinctrl/sh-pfc/core.c if (n != cfg_reg->nr_enum_ids) { n 758 drivers/pinctrl/sh-pfc/core.c drvname, cfg_reg->reg, cfg_reg->nr_enum_ids, n); n 43 drivers/pinctrl/sh-pfc/sh_pfc.h #define 
SH_PFC_PIN_GROUP_ALIAS(alias, n) \ n 46 drivers/pinctrl/sh-pfc/sh_pfc.h .pins = n##_pins, \ n 47 drivers/pinctrl/sh-pfc/sh_pfc.h .mux = n##_mux, \ n 48 drivers/pinctrl/sh-pfc/sh_pfc.h .nr_pins = ARRAY_SIZE(n##_pins) + \ n 49 drivers/pinctrl/sh-pfc/sh_pfc.h BUILD_BUG_ON_ZERO(sizeof(n##_pins) != sizeof(n##_mux)), \ n 51 drivers/pinctrl/sh-pfc/sh_pfc.h #define SH_PFC_PIN_GROUP(n) SH_PFC_PIN_GROUP_ALIAS(n, n) n 66 drivers/pinctrl/sh-pfc/sh_pfc.h #define VIN_DATA_PIN_GROUP(n, s, ...) \ n 68 drivers/pinctrl/sh-pfc/sh_pfc.h .name = #n#s#__VA_ARGS__, \ n 69 drivers/pinctrl/sh-pfc/sh_pfc.h .pins = n##__VA_ARGS__##_pins.data##s, \ n 70 drivers/pinctrl/sh-pfc/sh_pfc.h .mux = n##__VA_ARGS__##_mux.data##s, \ n 71 drivers/pinctrl/sh-pfc/sh_pfc.h .nr_pins = ARRAY_SIZE(n##__VA_ARGS__##_pins.data##s), \ n 97 drivers/pinctrl/sh-pfc/sh_pfc.h #define SH_PFC_FUNCTION(n) \ n 99 drivers/pinctrl/sh-pfc/sh_pfc.h .name = #n, \ n 100 drivers/pinctrl/sh-pfc/sh_pfc.h .groups = n##_groups, \ n 101 drivers/pinctrl/sh-pfc/sh_pfc.h .nr_groups = ARRAY_SIZE(n##_groups), \ n 120 drivers/pinctrl/sh-pfc/sh_pfc.h #define SET_NR_ENUM_IDS(n) .nr_enum_ids = n, n 122 drivers/pinctrl/sh-pfc/sh_pfc.h #define SET_NR_ENUM_IDS(n) n 276 drivers/pinctrl/sirf/pinctrl-atlas7.c #define GROUP(n, p) \ n 278 drivers/pinctrl/sirf/pinctrl-atlas7.c .name = n, \ n 290 drivers/pinctrl/sirf/pinctrl-atlas7.c #define FUNCTION(n, g, m) \ n 292 drivers/pinctrl/sirf/pinctrl-atlas7.c .name = n, \ n 750 drivers/pinctrl/sirf/pinctrl-sirf.c int i, n; n 754 drivers/pinctrl/sirf/pinctrl-sirf.c for_each_set_bit(n, p + i, BITS_PER_LONG) { n 755 drivers/pinctrl/sirf/pinctrl-sirf.c u32 offset = SIRFSOC_GPIO_CTRL(i, n); n 767 drivers/pinctrl/sirf/pinctrl-sirf.c int i, n; n 771 drivers/pinctrl/sirf/pinctrl-sirf.c for_each_set_bit(n, p + i, BITS_PER_LONG) { n 772 drivers/pinctrl/sirf/pinctrl-sirf.c u32 offset = SIRFSOC_GPIO_CTRL(i, n); n 82 drivers/pinctrl/sirf/pinctrl-sirf.h #define SIRFSOC_PIN_GROUP(n, p) \ n 84 drivers/pinctrl/sirf/pinctrl-sirf.h .name = n, \ n 96 drivers/pinctrl/sirf/pinctrl-sirf.h #define SIRFSOC_PMX_FUNCTION(n, g, m) \ n 98 drivers/pinctrl/sirf/pinctrl-sirf.h .name = n, \ n 149 drivers/platform/chrome/cros_ec_spi.c static int receive_n_bytes(struct cros_ec_device *ec_dev, u8 *buf, int n) n 156 drivers/platform/chrome/cros_ec_spi.c BUG_ON(buf - ec_dev->din + n > ec_dev->din_size); n 161 drivers/platform/chrome/cros_ec_spi.c trans.len = n; n 328 drivers/platform/chrome/cros_ec_sysfs.c struct attribute *a, int n) n 47 drivers/platform/x86/hdaps.c #define KEYBD_ISSET(n) (!! (n & KEYBD_MASK)) /* keyboard used? */ n 48 drivers/platform/x86/hdaps.c #define MOUSE_ISSET(n) (!! (n & MOUSE_MASK)) /* mouse used? 
*/ n 3600 drivers/platform/x86/sony-laptop.c unsigned int n = iterations; \ n 3601 drivers/platform/x86/sony-laptop.c while (--n && (command)) \ n 3603 drivers/platform/x86/sony-laptop.c if (!n) \ n 2423 drivers/platform/x86/thinkpad_acpi.c static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m) n 2429 drivers/platform/x86/thinkpad_acpi.c n->thinkpad_toggle = !!(d & TP_NVRAM_MASK_HKT_THINKPAD); n 2430 drivers/platform/x86/thinkpad_acpi.c n->zoom_toggle = !!(d & TP_NVRAM_MASK_HKT_ZOOM); n 2431 drivers/platform/x86/thinkpad_acpi.c n->display_toggle = !!(d & TP_NVRAM_MASK_HKT_DISPLAY); n 2432 drivers/platform/x86/thinkpad_acpi.c n->hibernate_toggle = !!(d & TP_NVRAM_MASK_HKT_HIBERNATE); n 2436 drivers/platform/x86/thinkpad_acpi.c n->thinklight_toggle = !!(d & TP_NVRAM_MASK_THINKLIGHT); n 2440 drivers/platform/x86/thinkpad_acpi.c n->displayexp_toggle = n 2445 drivers/platform/x86/thinkpad_acpi.c n->brightness_level = (d & TP_NVRAM_MASK_LEVEL_BRIGHTNESS) n 2447 drivers/platform/x86/thinkpad_acpi.c n->brightness_toggle = n 2452 drivers/platform/x86/thinkpad_acpi.c n->volume_level = (d & TP_NVRAM_MASK_LEVEL_VOLUME) n 2454 drivers/platform/x86/thinkpad_acpi.c n->mute = !!(d & TP_NVRAM_MASK_MUTE); n 2455 drivers/platform/x86/thinkpad_acpi.c n->volume_toggle = !!(d & TP_NVRAM_MASK_HKT_VOLUME); n 6348 drivers/platform/x86/thinkpad_acpi.c int n; n 6350 drivers/platform/x86/thinkpad_acpi.c n = 8; n 6357 drivers/platform/x86/thinkpad_acpi.c n = 16; n 6359 drivers/platform/x86/thinkpad_acpi.c for (i = 0 ; i < n; i++) { n 6365 drivers/platform/x86/thinkpad_acpi.c return n; n 6370 drivers/platform/x86/thinkpad_acpi.c int n, i; n 6373 drivers/platform/x86/thinkpad_acpi.c n = thermal_get_sensors(&t); n 6374 drivers/platform/x86/thinkpad_acpi.c if (n <= 0) n 6379 drivers/platform/x86/thinkpad_acpi.c for (i = 0; i < n; i++) { n 6579 drivers/platform/x86/thinkpad_acpi.c int n, i; n 6582 drivers/platform/x86/thinkpad_acpi.c n = thermal_get_sensors(&t); n 6583 drivers/platform/x86/thinkpad_acpi.c if (unlikely(n < 0)) n 6584 drivers/platform/x86/thinkpad_acpi.c return n; n 6588 drivers/platform/x86/thinkpad_acpi.c if (n > 0) { n 6589 drivers/platform/x86/thinkpad_acpi.c for (i = 0; i < (n - 1); i++) n 7355 drivers/platform/x86/thinkpad_acpi.c u8 s, n; n 7364 drivers/platform/x86/thinkpad_acpi.c n = (mute) ? 
s | TP_EC_AUDIO_MUTESW_MSK : n 7367 drivers/platform/x86/thinkpad_acpi.c if (n != s) { n 7368 drivers/platform/x86/thinkpad_acpi.c rc = volume_set_status_ec(n); n 7400 drivers/platform/x86/thinkpad_acpi.c u8 s, n; n 7412 drivers/platform/x86/thinkpad_acpi.c n = (s & ~TP_EC_AUDIO_LVL_MSK) | vol; n 7414 drivers/platform/x86/thinkpad_acpi.c if (n != s) { n 7415 drivers/platform/x86/thinkpad_acpi.c rc = volume_set_status_ec(n); n 106 drivers/pnp/quirks.c unsigned int prev_option_flags = ~0, n = 0; n 117 drivers/pnp/quirks.c n = 0; n 122 drivers/pnp/quirks.c n++; n 124 drivers/pnp/quirks.c if (n == 3 && port->min == port->max) { n 141 drivers/power/supply/bd70528-charger.c const char *n; n 151 drivers/power/supply/bd70528-charger.c { .n = "bd70528-bat-ov-res", .h = BD_IRQ_HND(BAT_OV_RES) }, n 152 drivers/power/supply/bd70528-charger.c { .n = "bd70528-bat-ov-det", .h = BD_IRQ_HND(BAT_OV_DET) }, n 153 drivers/power/supply/bd70528-charger.c { .n = "bd70528-bat-dead", .h = BD_IRQ_HND(DBAT_DET) }, n 154 drivers/power/supply/bd70528-charger.c { .n = "bd70528-bat-warmed", .h = BD_IRQ_HND(COLD_RES) }, n 155 drivers/power/supply/bd70528-charger.c { .n = "bd70528-bat-cold", .h = BD_IRQ_HND(COLD_DET) }, n 156 drivers/power/supply/bd70528-charger.c { .n = "bd70528-bat-cooled", .h = BD_IRQ_HND(HOT_RES) }, n 157 drivers/power/supply/bd70528-charger.c { .n = "bd70528-bat-hot", .h = BD_IRQ_HND(HOT_DET) }, n 158 drivers/power/supply/bd70528-charger.c { .n = "bd70528-chg-tshd", .h = BD_IRQ_HND(CHG_TSD) }, n 159 drivers/power/supply/bd70528-charger.c { .n = "bd70528-bat-removed", .h = BD_IRQ_HND(BAT_RMV) }, n 160 drivers/power/supply/bd70528-charger.c { .n = "bd70528-bat-detected", .h = BD_IRQ_HND(BAT_DET) }, n 161 drivers/power/supply/bd70528-charger.c { .n = "bd70528-dcin2-ov-res", .h = BD_IRQ_HND(DCIN2_OV_RES) }, n 162 drivers/power/supply/bd70528-charger.c { .n = "bd70528-dcin2-ov-det", .h = BD_IRQ_HND(DCIN2_OV_DET) }, n 163 drivers/power/supply/bd70528-charger.c { .n = "bd70528-dcin2-removed", .h = BD_IRQ_HND(DCIN2_RMV) }, n 164 drivers/power/supply/bd70528-charger.c { .n = "bd70528-dcin2-detected", .h = BD_IRQ_HND(DCIN2_DET) }, n 165 drivers/power/supply/bd70528-charger.c { .n = "bd70528-dcin1-removed", .h = BD_IRQ_HND(DCIN1_RMV) }, n 166 drivers/power/supply/bd70528-charger.c { .n = "bd70528-dcin1-detected", .h = BD_IRQ_HND(DCIN1_DET) }, n 170 drivers/power/supply/bd70528-charger.c irq = platform_get_irq_byname(pdev, bd70528_chg_irqs[i].n); n 173 drivers/power/supply/bd70528-charger.c bd70528_chg_irqs[i].n, irq); n 179 drivers/power/supply/bd70528-charger.c bd70528_chg_irqs[i].n, n 688 drivers/power/supply/twl4030_charger.c const char *buf, size_t n) n 709 drivers/power/supply/twl4030_charger.c return (status == 0) ? 
n : status; n 13 drivers/powercap/powercap_sys.c #define to_powercap_zone(n) container_of(n, struct powercap_zone, dev) n 14 drivers/powercap/powercap_sys.c #define to_powercap_control_type(n) \ n 15 drivers/powercap/powercap_sys.c container_of(n, struct powercap_control_type, dev) n 604 drivers/ps3/ps3-vuart.c struct list_buffer *lb, *n; n 627 drivers/ps3/ps3-vuart.c list_for_each_entry_safe(lb, n, &priv->rx_list.head, link) { n 723 drivers/ps3/ps3-vuart.c struct list_buffer *lb, *n; n 730 drivers/ps3/ps3-vuart.c list_for_each_entry_safe(lb, n, &priv->tx_list.head, link) { n 79 drivers/ps3/ps3stor_lib.c unsigned long n; n 113 drivers/ps3/ps3stor_lib.c n = hweight_long(dev->accessible_regions); n 114 drivers/ps3/ps3stor_lib.c if (n > 1) n 118 drivers/ps3/ps3stor_lib.c __func__, __LINE__, n); n 169 drivers/ptp/ptp_sysfs.c struct attribute *attr, int n) n 26 drivers/pwm/pwm-clps711x.c static void clps711x_pwm_update_val(struct clps711x_chip *priv, u32 n, u32 v) n 29 drivers/pwm/pwm-clps711x.c u32 shift = (n + 1) * 4; n 34 drivers/pwm/pwm-imx-tpm.c #define PWM_IMX_TPM_CnSC(n) (0x20 + (n) * 0x8) n 35 drivers/pwm/pwm-imx-tpm.c #define PWM_IMX_TPM_CnV(n) (0x24 + (n) * 0x8) n 260 drivers/rapidio/rio-sysfs.c struct attribute *attr, int n) n 842 drivers/rapidio/rio.c struct list_head *n; n 846 drivers/rapidio/rio.c n = from ? from->global_list.next : rio_devices.next; n 848 drivers/rapidio/rio.c while (n && (n != &rio_devices)) { n 849 drivers/rapidio/rio.c rdev = rio_dev_g(n); n 852 drivers/rapidio/rio.c n = n->next; n 1435 drivers/rapidio/rio.c struct list_head *n; n 1440 drivers/rapidio/rio.c n = from ? from->global_list.next : rio_devices.next; n 1442 drivers/rapidio/rio.c while (n && (n != &rio_devices)) { n 1443 drivers/rapidio/rio.c rdev = rio_dev_g(n); n 1449 drivers/rapidio/rio.c n = n->next; n 2147 drivers/rapidio/rio.c int n = 0; n 2164 drivers/rapidio/rio.c n++; n 2168 drivers/rapidio/rio.c if (!n) n 2185 drivers/rapidio/rio.c work = kcalloc(n, sizeof *work, GFP_KERNEL); n 2191 drivers/rapidio/rio.c n = 0; n 2195 drivers/rapidio/rio.c work[n].mport = port; n 2196 drivers/rapidio/rio.c INIT_WORK(&work[n].work, disc_work_handler); n 2197 drivers/rapidio/rio.c queue_work(rio_wq, &work[n].work); n 2198 drivers/rapidio/rio.c n++; n 24 drivers/rapidio/switches/idt_gen2.c #define IDT_PORT_ERR_REPORT_EN(n) (0x031044 + (n)*0x40) n 27 drivers/rapidio/switches/idt_gen2.c #define IDT_PORT_ISERR_REPORT_EN(n) (0x03104C + (n)*0x40) n 31 drivers/rapidio/switches/idt_gen2.c #define IDT_LANE_ERR_REPORT_EN(n) (0x038010 + (n)*0x100) n 63 drivers/rapidio/switches/idt_gen2.c #define IDT_LANE_CTRL(n) (0xff8000 + (n)*0x100) n 69 drivers/rapidio/switches/idt_gen2.c #define IDT_PORT_OPS(n) (0xf40004 + (n)*0x100) n 76 drivers/rapidio/switches/idt_gen2.c #define IDT_PORT_ISERR_DET(n) (0xf40008 + (n)*0x100) n 32 drivers/rapidio/switches/idt_gen3.c #define RIO_BC_L2_Gn_ENTRYx_CSR(n, x) (0x31000 + (n)*0x400 + (x)*0x4) n 33 drivers/rapidio/switches/idt_gen3.c #define RIO_SPx_L2_Gn_ENTRYy_CSR(x, n, y) \ n 34 drivers/rapidio/switches/idt_gen3.c (0x51000 + (x)*0x2000 + (n)*0x400 + (y)*0x4) n 26 drivers/rapidio/switches/tsi568.c #define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n) n 27 drivers/rapidio/switches/tsi568.c #define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n) n 29 drivers/rapidio/switches/tsi568.c #define TSI568_SP_MODE(n) (0x11004 + 0x100*n) n 26 drivers/rapidio/switches/tsi57x.c #define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n) n 27 drivers/rapidio/switches/tsi57x.c #define SPP_ROUTE_CFG_PORT(n) (0x11074 + 
0x100*n) n 29 drivers/rapidio/switches/tsi57x.c #define TSI578_SP_MODE(n) (0x11004 + n*0x100) n 34 drivers/rapidio/switches/tsi57x.c #define TSI578_SP_CTL_INDEP(n) (0x13004 + n*0x100) n 35 drivers/rapidio/switches/tsi57x.c #define TSI578_SP_LUT_PEINF(n) (0x13010 + n*0x100) n 36 drivers/rapidio/switches/tsi57x.c #define TSI578_SP_CS_TX(n) (0x13014 + n*0x100) n 37 drivers/rapidio/switches/tsi57x.c #define TSI578_SP_INT_STATUS(n) (0x13018 + n*0x100) n 96 drivers/ras/cec.c unsigned int n; /* number of elements in the array */ n 146 drivers/ras/cec.c for (i = 0; i < ca->n; i++) { n 188 drivers/ras/cec.c int min = 0, max = ca->n - 1; n 227 drivers/ras/cec.c if (!ca->n) { n 237 drivers/ras/cec.c if (ca->n - (idx + 1)) n 240 drivers/ras/cec.c (ca->n - (idx + 1)) * sizeof(u64)); n 242 drivers/ras/cec.c ca->n--; n 250 drivers/ras/cec.c for (i = 0; i < ca->n; i++) { n 273 drivers/ras/cec.c if (!ca->n) n 289 drivers/ras/cec.c for (i = 0; i < ca->n; i++) { n 301 drivers/ras/cec.c pr_info("Sanity check dump:\n{ n: %d\n", ca->n); n 302 drivers/ras/cec.c for (i = 0; i < ca->n; i++) { n 330 drivers/ras/cec.c if (ca->n == MAX_ELEMS) n 340 drivers/ras/cec.c (ca->n - to) * sizeof(u64)); n 343 drivers/ras/cec.c ca->n++; n 445 drivers/ras/cec.c seq_printf(m, "{ n: %d\n", ca->n); n 446 drivers/ras/cec.c for (i = 0; i < ca->n; i++) { n 97 drivers/regulator/bcm590xx-regulator.c #define BCM590XX_REG_IS_LDO(n) (n < BCM590XX_REG_CSR) n 98 drivers/regulator/bcm590xx-regulator.c #define BCM590XX_REG_IS_GPLDO(n) \ n 99 drivers/regulator/bcm590xx-regulator.c ((n > BCM590XX_REG_VSR) && (n < BCM590XX_REG_VBUS)) n 100 drivers/regulator/bcm590xx-regulator.c #define BCM590XX_REG_IS_VBUS(n) (n == BCM590XX_REG_VBUS) n 1511 drivers/regulator/core.c struct regulator_map *node, *n; n 1513 drivers/regulator/core.c list_for_each_entry_safe(node, n, ®ulator_map_list, list) { n 2247 drivers/regulator/core.c struct regulator_enable_gpio *pin, *n; n 2253 drivers/regulator/core.c list_for_each_entry_safe(pin, n, ®ulator_ena_gpio_list, list) { n 919 drivers/regulator/da9062-regulator.c int irq, n, ret; n 945 drivers/regulator/da9062-regulator.c n = 0; n 946 drivers/regulator/da9062-regulator.c while (n < regulators->n_regulators) { n 948 drivers/regulator/da9062-regulator.c regl = ®ulators->regulator[n]; n 950 drivers/regulator/da9062-regulator.c regl->info = &rinfo[n]; n 1006 drivers/regulator/da9062-regulator.c n++; n 669 drivers/regulator/da9063-regulator.c int i, n, num; n 699 drivers/regulator/da9063-regulator.c n = 0; n 704 drivers/regulator/da9063-regulator.c rdata = &pdata->regulator_data[n]; n 708 drivers/regulator/da9063-regulator.c n++; n 725 drivers/regulator/da9063-regulator.c int id, irq, n, n_regulators, ret, val; n 775 drivers/regulator/da9063-regulator.c n = 0; n 777 drivers/regulator/da9063-regulator.c while (n < regulators->n_regulators) { n 809 drivers/regulator/da9063-regulator.c regl = ®ulators->regulator[n]; n 861 drivers/regulator/da9063-regulator.c n++; n 257 drivers/regulator/da9211-regulator.c int i, num, n; n 279 drivers/regulator/da9211-regulator.c n = 0; n 284 drivers/regulator/da9211-regulator.c pdata->init_data[n] = da9211_matches[i].init_data; n 285 drivers/regulator/da9211-regulator.c pdata->reg_node[n] = da9211_matches[i].of_node; n 286 drivers/regulator/da9211-regulator.c pdata->gpiod_ren[n] = devm_gpiod_get_from_of_node(dev, n 292 drivers/regulator/da9211-regulator.c if (IS_ERR(pdata->gpiod_ren[n])) n 293 drivers/regulator/da9211-regulator.c pdata->gpiod_ren[n] = NULL; n 294 
drivers/regulator/da9211-regulator.c n++; n 1479 drivers/regulator/qcom_spmi-regulator.c unsigned int n; n 1483 drivers/regulator/qcom_spmi-regulator.c n = 0; n 1485 drivers/regulator/qcom_spmi-regulator.c n = range->set_point_max_uV - range->set_point_min_uV; n 1486 drivers/regulator/qcom_spmi-regulator.c n = (n / range->step_uV) + 1; n 1488 drivers/regulator/qcom_spmi-regulator.c range->n_voltages = n; n 1489 drivers/regulator/qcom_spmi-regulator.c points->n_voltages += n; n 128 drivers/remoteproc/remoteproc_virtio.c struct virtqueue *vq, *n; n 131 drivers/remoteproc/remoteproc_virtio.c list_for_each_entry_safe(vq, n, &vdev->vqs, list) { n 95 drivers/rtc/rtc-cmos.c #define is_valid_irq(n) ((n) > 0) n 51 drivers/rtc/rtc-isl12022.c uint8_t *data, size_t n) n 63 drivers/rtc/rtc-isl12022.c .len = n, n 85 drivers/rtc/sysfs.c const char *buf, size_t n) n 100 drivers/rtc/sysfs.c return n; n 149 drivers/rtc/sysfs.c const char *buf, size_t n) n 210 drivers/rtc/sysfs.c return (retval < 0) ? retval : n; n 229 drivers/rtc/sysfs.c const char *buf, size_t n) n 238 drivers/rtc/sysfs.c return (retval < 0) ? retval : n; n 277 drivers/rtc/sysfs.c struct attribute *attr, int n) n 1911 drivers/s390/block/dasd.c struct list_head *l, *n; n 1920 drivers/s390/block/dasd.c list_for_each_safe(l, n, &device->ccw_queue) { n 1936 drivers/s390/block/dasd.c struct list_head *l, *n; n 1940 drivers/s390/block/dasd.c list_for_each_safe(l, n, &device->ccw_queue) { n 1990 drivers/s390/block/dasd.c struct list_head *l, *n; n 1994 drivers/s390/block/dasd.c list_for_each_safe(l, n, final_queue) { n 2138 drivers/s390/block/dasd.c struct dasd_ccw_req *cqr, *n; n 2145 drivers/s390/block/dasd.c list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { n 2175 drivers/s390/block/dasd.c list_for_each_entry_safe(cqr, n, &flush_queue, devlist) n 2453 drivers/s390/block/dasd.c struct dasd_ccw_req *cqr, *n; n 2458 drivers/s390/block/dasd.c list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { n 2499 drivers/s390/block/dasd.c list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { n 2817 drivers/s390/block/dasd.c struct list_head *l, *n; n 2825 drivers/s390/block/dasd.c list_for_each_safe(l, n, &block->ccw_queue) { n 2936 drivers/s390/block/dasd.c struct list_head *l, *n; n 2948 drivers/s390/block/dasd.c list_for_each_safe(l, n, &final_queue) { n 2998 drivers/s390/block/dasd.c struct dasd_ccw_req *cqr, *n; n 3007 drivers/s390/block/dasd.c list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { n 3028 drivers/s390/block/dasd.c list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) { n 3963 drivers/s390/block/dasd.c struct dasd_ccw_req *cqr, *n; n 3970 drivers/s390/block/dasd.c list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { n 3988 drivers/s390/block/dasd.c list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) { n 479 drivers/s390/block/dasd_devmap.c struct dasd_devmap *devmap, *n; n 484 drivers/s390/block/dasd_devmap.c list_for_each_entry_safe(devmap, n, &dasd_hashlists[i], list) { n 2904 drivers/s390/block/dasd_eckd.c struct dasd_ccw_req *cqr, *n; n 2973 drivers/s390/block/dasd_eckd.c list_for_each_entry_safe(cqr, n, &format_queue, blocklist) { n 3751 drivers/s390/block/dasd_eckd.c struct dasd_ccw_req *cqr, *n; n 3798 drivers/s390/block/dasd_eckd.c list_for_each_entry_safe(cqr, n, &ras_queue, blocklist) { n 6303 drivers/s390/block/dasd_eckd.c struct dasd_device *dev, *n; n 6309 drivers/s390/block/dasd_eckd.c list_for_each_entry_safe(dev, n, &private->lcu->active_devices, n 6319 
drivers/s390/block/dasd_eckd.c list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices, n 6331 drivers/s390/block/dasd_eckd.c list_for_each_entry_safe(dev, n, &pavgroup->baselist, n 6341 drivers/s390/block/dasd_eckd.c list_for_each_entry_safe(dev, n, &pavgroup->aliaslist, n 6364 drivers/s390/block/dasd_eckd.c struct dasd_device *dev, *n; n 6372 drivers/s390/block/dasd_eckd.c list_for_each_entry_safe(dev, n, n 6382 drivers/s390/block/dasd_eckd.c list_for_each_entry_safe(dev, n, n 6396 drivers/s390/block/dasd_eckd.c list_for_each_entry_safe(dev, n, n 6406 drivers/s390/block/dasd_eckd.c list_for_each_entry_safe(dev, n, n 6459 drivers/s390/block/dasd_eckd.c struct dasd_device *dev, *n; n 6463 drivers/s390/block/dasd_eckd.c list_for_each_entry_safe(dev, n, &private->lcu->active_devices, n 6468 drivers/s390/block/dasd_eckd.c list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices, n 6477 drivers/s390/block/dasd_eckd.c list_for_each_entry_safe(dev, n, &pavgroup->baselist, n 6482 drivers/s390/block/dasd_eckd.c list_for_each_entry_safe(dev, n, &pavgroup->aliaslist, n 150 drivers/s390/block/dasd_ioctl.c struct dasd_ccw_req *cqr, *n; n 162 drivers/s390/block/dasd_ioctl.c list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { n 145 drivers/s390/char/con3270.c struct string *s, *n; n 152 drivers/s390/char/con3270.c list_for_each_entry_safe(s, n, &cp->update, update) n 171 drivers/s390/char/con3270.c struct string *s, *n; n 176 drivers/s390/char/con3270.c list_for_each_entry_safe(s, n, &cp->lines, list) { n 215 drivers/s390/char/con3270.c struct string *s, *n; n 264 drivers/s390/char/con3270.c list_for_each_entry_safe(s, n, &cp->update, update) { n 969 drivers/s390/char/tape_34xx.c struct list_head * n; n 975 drivers/s390/char/tape_34xx.c list_for_each_safe(l, n, sbid_list) { n 642 drivers/s390/char/tape_core.c struct list_head * l, *n; n 644 drivers/s390/char/tape_core.c list_for_each_safe(l, n, &device->req_queue) { n 806 drivers/s390/char/tape_core.c struct list_head *l, *n; n 815 drivers/s390/char/tape_core.c list_for_each_safe(l, n, &device->req_queue) { n 44 drivers/s390/char/tape_proc.c unsigned long n; n 46 drivers/s390/char/tape_proc.c n = (unsigned long) v - 1; n 47 drivers/s390/char/tape_proc.c if (!n) { n 51 drivers/s390/char/tape_proc.c device = tape_find_device(n); n 55 drivers/s390/char/tape_proc.c seq_printf(m, "%d\t", (int) n); n 247 drivers/s390/char/tty3270.c struct string *s, *n; n 254 drivers/s390/char/tty3270.c list_for_each_entry_safe(s, n, &tp->update, update) n 278 drivers/s390/char/tty3270.c struct string *s, *n; n 283 drivers/s390/char/tty3270.c list_for_each_entry_safe(s, n, &tp->lines, list) { n 329 drivers/s390/char/tty3270.c struct string *s, *n; n 335 drivers/s390/char/tty3270.c list_for_each_entry_safe(s, n, &tp->lines, list) { n 371 drivers/s390/char/tty3270.c struct string *s, *n; n 419 drivers/s390/char/tty3270.c list_for_each_entry_safe(s, n, &tp->update, update) { n 1114 drivers/s390/char/tty3270.c struct string *s, *n; n 1153 drivers/s390/char/tty3270.c n = tty3270_alloc_string(tp, flen); n 1154 drivers/s390/char/tty3270.c list_add(&n->list, &s->list); n 1159 drivers/s390/char/tty3270.c s = n; n 1259 drivers/s390/char/tty3270.c tty3270_insert_characters(struct tty3270 *tp, int n) n 1271 drivers/s390/char/tty3270.c if (n > tp->view.cols - tp->cx) n 1272 drivers/s390/char/tty3270.c n = tp->view.cols - tp->cx; n 1273 drivers/s390/char/tty3270.c k = min_t(int, line->len - tp->cx, tp->view.cols - tp->cx - n); n 1275 
drivers/s390/char/tty3270.c line->cells[tp->cx + n + k] = line->cells[tp->cx + k]; n 1276 drivers/s390/char/tty3270.c line->len += n; n 1279 drivers/s390/char/tty3270.c while (n-- > 0) { n 1280 drivers/s390/char/tty3270.c line->cells[tp->cx + n].character = tp->view.ascebc[' ']; n 1281 drivers/s390/char/tty3270.c line->cells[tp->cx + n].highlight = tp->highlight; n 1282 drivers/s390/char/tty3270.c line->cells[tp->cx + n].f_color = tp->f_color; n 1290 drivers/s390/char/tty3270.c tty3270_delete_characters(struct tty3270 *tp, int n) n 1298 drivers/s390/char/tty3270.c if (line->len - tp->cx <= n) { n 1302 drivers/s390/char/tty3270.c for (i = tp->cx; i + n < line->len; i++) n 1303 drivers/s390/char/tty3270.c line->cells[i] = line->cells[i + n]; n 1304 drivers/s390/char/tty3270.c line->len -= n; n 1311 drivers/s390/char/tty3270.c tty3270_erase_characters(struct tty3270 *tp, int n) n 1317 drivers/s390/char/tty3270.c while (line->len > tp->cx && n-- > 0) { n 1323 drivers/s390/char/tty3270.c tp->cx += n; n 1362 drivers/s390/cio/chsc.c brinfo_area->n = (cnc != 0); n 236 drivers/s390/cio/chsc.h u32 n:1; n 122 drivers/s390/cio/cio.h #define to_subchannel(n) container_of(n, struct subchannel, dev) n 99 drivers/s390/cio/css.h #define to_cssdriver(n) container_of(n, struct css_driver, drv) n 20 drivers/s390/cio/eadm_sch.h #define get_eadm_private(n) ((struct eadm_private *)dev_get_drvdata(&n->dev)) n 21 drivers/s390/cio/eadm_sch.h #define set_eadm_private(n, p) (dev_set_drvdata(&n->dev, p)) n 28 drivers/s390/cio/io_sch.h #define to_io_private(n) ((struct io_subchannel_private *) \ n 29 drivers/s390/cio/io_sch.h dev_get_drvdata(&(n)->dev)) n 30 drivers/s390/cio/io_sch.h #define set_io_private(n, p) (dev_set_drvdata(&(n)->dev, p)) n 376 drivers/s390/cio/qdio_setup.c int n; n 378 drivers/s390/cio/qdio_setup.c for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; ++n) { n 379 drivers/s390/cio/qdio_setup.c struct qaob *aob = q->u.out.aobs[n]; n 382 drivers/s390/cio/qdio_setup.c q->u.out.aobs[n] = NULL; n 20 drivers/s390/cio/scm.c #define to_scm_dev(n) container_of(n, struct scm_device, dev) n 194 drivers/s390/cio/vfio_ccw_cp.c unsigned long n) n 201 drivers/s390/cio/vfio_ccw_cp.c ret = pfn_array_alloc(&pa, iova, n); n 211 drivers/s390/cio/vfio_ccw_cp.c l = n; n 221 drivers/s390/cio/vfio_ccw_cp.c memcpy(to + (n - l), (void *)from, m); n 905 drivers/s390/crypto/ap_bus.c int i, n, b; n 918 drivers/s390/crypto/ap_bus.c for (n = 0; n < 4; n++) n 919 drivers/s390/crypto/ap_bus.c if (b & (0x08 >> n)) n 920 drivers/s390/crypto/ap_bus.c set_bit_inv(i + n, bitmap); n 934 drivers/s390/crypto/vfio_ap_ops.c int n; n 942 drivers/s390/crypto/vfio_ap_ops.c n = sprintf(bufpos, "%04lx\n", id); n 943 drivers/s390/crypto/vfio_ap_ops.c bufpos += n; n 944 drivers/s390/crypto/vfio_ap_ops.c nchars += n; n 965 drivers/s390/crypto/vfio_ap_ops.c int n; n 976 drivers/s390/crypto/vfio_ap_ops.c n = sprintf(bufpos, "%02lx.%04lx\n", apid, n 978 drivers/s390/crypto/vfio_ap_ops.c bufpos += n; n 979 drivers/s390/crypto/vfio_ap_ops.c nchars += n; n 984 drivers/s390/crypto/vfio_ap_ops.c n = sprintf(bufpos, "%02lx.\n", apid); n 985 drivers/s390/crypto/vfio_ap_ops.c bufpos += n; n 986 drivers/s390/crypto/vfio_ap_ops.c nchars += n; n 990 drivers/s390/crypto/vfio_ap_ops.c n = sprintf(bufpos, ".%04lx\n", apqi); n 991 drivers/s390/crypto/vfio_ap_ops.c bufpos += n; n 992 drivers/s390/crypto/vfio_ap_ops.c nchars += n; n 918 drivers/s390/crypto/zcrypt_ccamisc.c int rc, n; n 991 drivers/s390/crypto/zcrypt_ccamisc.c n = complete ? 
0 : (clr_key_bit_size + 7) / 8; n 992 drivers/s390/crypto/zcrypt_ccamisc.c preq_vud_block->len = sizeof(struct vud_block) + n; n 996 drivers/s390/crypto/zcrypt_ccamisc.c preq_vud_block->tlv2.len = sizeof(preq_vud_block->tlv2) + n; n 999 drivers/s390/crypto/zcrypt_ccamisc.c memcpy(preq_vud_block->tlv2.clr_key, clr_key_value, n); n 1005 drivers/s390/crypto/zcrypt_ccamisc.c n = *key_token_size; n 1006 drivers/s390/crypto/zcrypt_ccamisc.c preq_key_block->len = sizeof(struct key_block) + n; n 1007 drivers/s390/crypto/zcrypt_ccamisc.c preq_key_block->tlv1.len = sizeof(preq_key_block->tlv1) + n; n 1680 drivers/s390/crypto/zcrypt_ccamisc.c int i, n, card, dom, curmatch, oldmatch, rc = 0; n 1696 drivers/s390/crypto/zcrypt_ccamisc.c n = 0; n 1735 drivers/s390/crypto/zcrypt_ccamisc.c if (*apqns && n < *nr_apqns) n 1736 drivers/s390/crypto/zcrypt_ccamisc.c (*apqns)[n] = (((u16)card) << 16) | ((u16) dom); n 1737 drivers/s390/crypto/zcrypt_ccamisc.c n++; n 1743 drivers/s390/crypto/zcrypt_ccamisc.c if (!n) { n 1747 drivers/s390/crypto/zcrypt_ccamisc.c *nr_apqns = n; n 1749 drivers/s390/crypto/zcrypt_ccamisc.c *apqns = kmalloc_array(n, sizeof(u32), GFP_KERNEL); n 109 drivers/s390/crypto/zcrypt_cex4.c int n = 0; n 122 drivers/s390/crypto/zcrypt_cex4.c n = snprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n", n 125 drivers/s390/crypto/zcrypt_cex4.c n = snprintf(buf, PAGE_SIZE, "AES NEW: - -\n"); n 128 drivers/s390/crypto/zcrypt_cex4.c n += snprintf(buf + n, PAGE_SIZE - n, "AES CUR: %s 0x%016llx\n", n 131 drivers/s390/crypto/zcrypt_cex4.c n += snprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n"); n 134 drivers/s390/crypto/zcrypt_cex4.c n += snprintf(buf + n, PAGE_SIZE - n, "AES OLD: %s 0x%016llx\n", n 137 drivers/s390/crypto/zcrypt_cex4.c n += snprintf(buf + n, PAGE_SIZE - n, "AES OLD: - -\n"); n 139 drivers/s390/crypto/zcrypt_cex4.c return n; n 70 drivers/s390/net/ctcm_dbug.h static inline const char *strtail(const char *s, int n) n 73 drivers/s390/net/ctcm_dbug.h return (l > n) ? s + (l - n) : s; n 794 drivers/s390/net/lcs.c struct list_head *l, *n; n 799 drivers/s390/net/lcs.c list_for_each_safe(l, n, &card->lancmd_waiters) { n 541 drivers/s390/net/qeth_core.h #define qeth_napi_to_out_queue(n) container_of(n, struct qeth_qdio_out_q, napi) n 373 drivers/s390/net/qeth_core_main.c enum iucv_tx_notify n; n 377 drivers/s390/net/qeth_core_main.c n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK; n 383 drivers/s390/net/qeth_core_main.c n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE : n 387 drivers/s390/net/qeth_core_main.c n = delayed ? 
TX_NOTIFY_DELAYED_GENERALERROR : n 392 drivers/s390/net/qeth_core_main.c return n; n 1874 drivers/s390/net/qeth_l3_main.c struct neighbour *n = NULL; n 1877 drivers/s390/net/qeth_l3_main.c n = dst_neigh_lookup_skb(dst, skb); n 1879 drivers/s390/net/qeth_l3_main.c if (n) { n 1880 drivers/s390/net/qeth_l3_main.c int cast_type = n->type; n 1882 drivers/s390/net/qeth_l3_main.c neigh_release(n); n 2096 drivers/s390/net/qeth_l3_main.c static int qeth_l3_neigh_setup_noarp(struct neighbour *n) n 2098 drivers/s390/net/qeth_l3_main.c n->nud_state = NUD_NOARP; n 2099 drivers/s390/net/qeth_l3_main.c memcpy(n->ha, "FAKELL", 6); n 2100 drivers/s390/net/qeth_l3_main.c n->output = n->ops->connected_output; n 100 drivers/s390/net/smsgiucv_app.c struct smsg_app_event *p, *n; n 111 drivers/s390/net/smsgiucv_app.c list_for_each_entry_safe(p, n, &event_queue, list) { n 495 drivers/s390/virtio/virtio_ccw.c struct virtqueue *vq, *n; n 505 drivers/s390/virtio/virtio_ccw.c list_for_each_entry_safe(vq, n, &vdev->vqs, list) n 92 drivers/sbus/char/openprom.c int n, bufsize; n 103 drivers/sbus/char/openprom.c n = bufsize = 0; n 104 drivers/sbus/char/openprom.c while ((n < 2) && (bufsize < OPROMMAXPARAM)) { n 110 drivers/sbus/char/openprom.c n++; n 113 drivers/sbus/char/openprom.c if (!n) { n 363 drivers/sbus/char/openprom.c static struct device_node *get_node(phandle n, DATA *data) n 365 drivers/sbus/char/openprom.c struct device_node *dp = of_find_node_by_phandle(n); n 202 drivers/scsi/NCR5380.c unsigned long n = hostdata->poll_loops; n 211 drivers/scsi/NCR5380.c } while (n--); n 1144 drivers/scsi/aic7xxx/aic79xx_osm.c int i, n; n 1176 drivers/scsi/aic7xxx/aic79xx_osm.c n = 0; n 1183 drivers/scsi/aic7xxx/aic79xx_osm.c n = strlen(options[i].name); n 1184 drivers/scsi/aic7xxx/aic79xx_osm.c if (strncmp(options[i].name, p, n) == 0) n 1190 drivers/scsi/aic7xxx/aic79xx_osm.c if (strncmp(p, "global_tag_depth", n) == 0) { n 1191 drivers/scsi/aic7xxx/aic79xx_osm.c ahd_linux_setup_tag_info_global(p + n); n 1192 drivers/scsi/aic7xxx/aic79xx_osm.c } else if (strncmp(p, "tag_info", n) == 0) { n 1193 drivers/scsi/aic7xxx/aic79xx_osm.c s = ahd_parse_brace_option("tag_info", p + n, end, n 1195 drivers/scsi/aic7xxx/aic79xx_osm.c } else if (strncmp(p, "slewrate", n) == 0) { n 1197 drivers/scsi/aic7xxx/aic79xx_osm.c p + n, end, 1, ahd_linux_setup_iocell_info, n 1199 drivers/scsi/aic7xxx/aic79xx_osm.c } else if (strncmp(p, "precomp", n) == 0) { n 1201 drivers/scsi/aic7xxx/aic79xx_osm.c p + n, end, 1, ahd_linux_setup_iocell_info, n 1203 drivers/scsi/aic7xxx/aic79xx_osm.c } else if (strncmp(p, "amplitude", n) == 0) { n 1205 drivers/scsi/aic7xxx/aic79xx_osm.c p + n, end, 1, ahd_linux_setup_iocell_info, n 1207 drivers/scsi/aic7xxx/aic79xx_osm.c } else if (p[n] == ':') { n 1208 drivers/scsi/aic7xxx/aic79xx_osm.c *(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0); n 1209 drivers/scsi/aic7xxx/aic79xx_osm.c } else if (!strncmp(p, "verbose", n)) { n 1022 drivers/scsi/aic7xxx/aic7xxx_osm.c int i, n; n 1051 drivers/scsi/aic7xxx/aic7xxx_osm.c n = 0; n 1058 drivers/scsi/aic7xxx/aic7xxx_osm.c n = strlen(options[i].name); n 1059 drivers/scsi/aic7xxx/aic7xxx_osm.c if (strncmp(options[i].name, p, n) == 0) n 1065 drivers/scsi/aic7xxx/aic7xxx_osm.c if (strncmp(p, "global_tag_depth", n) == 0) { n 1066 drivers/scsi/aic7xxx/aic7xxx_osm.c ahc_linux_setup_tag_info_global(p + n); n 1067 drivers/scsi/aic7xxx/aic7xxx_osm.c } else if (strncmp(p, "tag_info", n) == 0) { n 1068 drivers/scsi/aic7xxx/aic7xxx_osm.c s = ahc_parse_brace_option("tag_info", p + n, end, 
n 1070 drivers/scsi/aic7xxx/aic7xxx_osm.c } else if (p[n] == ':') { n 1071 drivers/scsi/aic7xxx/aic7xxx_osm.c *(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0); n 1072 drivers/scsi/aic7xxx/aic7xxx_osm.c } else if (strncmp(p, "verbose", n) == 0) { n 346 drivers/scsi/aic94xx/aic94xx_hwi.h struct list_head *n, *pos; n 349 drivers/scsi/aic94xx/aic94xx_hwi.h list_for_each_safe(pos, n, &list) { n 846 drivers/scsi/aic94xx/aic94xx_init.c struct list_head *n, *pos; n 856 drivers/scsi/aic94xx/aic94xx_init.c list_for_each_safe(pos, n, &pending) { n 970 drivers/scsi/atp870u.c unsigned long n; n 1065 drivers/scsi/atp870u.c for (n = 0; n < 0x30000; n++) n 1068 drivers/scsi/atp870u.c if (n < 0x30000) n 1069 drivers/scsi/atp870u.c for (n = 0; n < 0x30000; n++) n 1308 drivers/scsi/atp870u.c unsigned int n; n 1324 drivers/scsi/atp870u.c n = 0x3f09; n 1325 drivers/scsi/atp870u.c while (n < 0x4000) { n 1327 drivers/scsi/atp870u.c atp_writew_base(atpdev, 0x34, n); n 1328 drivers/scsi/atp870u.c n += 0x0002; n 1336 drivers/scsi/atp870u.c atp_writew_base(atpdev, 0x34, n); n 1337 drivers/scsi/atp870u.c n += 0x0002; n 1342 drivers/scsi/atp870u.c atp_writew_base(atpdev, 0x34, n); n 1343 drivers/scsi/atp870u.c n += 0x0002; n 1348 drivers/scsi/atp870u.c atp_writew_base(atpdev, 0x34, n); n 1349 drivers/scsi/atp870u.c n += 0x0002; n 1354 drivers/scsi/atp870u.c n += 0x0018; n 1360 drivers/scsi/atp870u.c n = 1 << k; n 1362 drivers/scsi/atp870u.c atpdev->ultra_map[0] |= n; n 1365 drivers/scsi/atp870u.c atpdev->async[0] |= n; n 1393 drivers/scsi/atp870u.c unsigned int n; n 1407 drivers/scsi/atp870u.c n = 0x1f80; n 1408 drivers/scsi/atp870u.c while (n < 0x2000) { n 1409 drivers/scsi/atp870u.c atp_writew_base(atpdev, 0x3c, n); n 1415 drivers/scsi/atp870u.c atp_writew_base(atpdev, 0x3c, n++); n 1419 drivers/scsi/atp870u.c atp_writew_base(atpdev, 0x3c, n++); n 1422 drivers/scsi/atp870u.c n += 8; n 1431 drivers/scsi/atp870u.c n = 1 << k; n 1433 drivers/scsi/atp870u.c atpdev->ultra_map[c] |= n; n 1436 drivers/scsi/atp870u.c atpdev->async[c] |= n; n 1714 drivers/scsi/atp870u.c unsigned char i, j, k, rmb, n; n 1846 drivers/scsi/atp870u.c n = mbuf[7]; n 2112 drivers/scsi/atp870u.c if ((dev->id[c][i].devtype == 0x00) || (dev->id[c][i].devtype == 0x07) || ((dev->id[c][i].devtype == 0x05) && ((n & 0x10) != 0))) { n 72 drivers/scsi/bfa/bfa_fcpim.h bfa_ioim_get_index(u32 n) { n 74 drivers/scsi/bfa/bfa_fcpim.h if (n >= (1UL)<<22) n 76 drivers/scsi/bfa/bfa_fcpim.h n >>= 8; n 77 drivers/scsi/bfa/bfa_fcpim.h if (n >= (1UL)<<16) { n 78 drivers/scsi/bfa/bfa_fcpim.h n >>= 16; n 81 drivers/scsi/bfa/bfa_fcpim.h if (n >= 1 << 8) { n 82 drivers/scsi/bfa/bfa_fcpim.h n >>= 8; n 85 drivers/scsi/bfa/bfa_fcpim.h if (n >= 1 << 4) { n 86 drivers/scsi/bfa/bfa_fcpim.h n >>= 4; n 89 drivers/scsi/bfa/bfa_fcpim.h if (n >= 1 << 2) { n 90 drivers/scsi/bfa/bfa_fcpim.h n >>= 2; n 93 drivers/scsi/bfa/bfa_fcpim.h if (n >= 1 << 1) n 96 drivers/scsi/bfa/bfa_fcpim.h return (n == 0) ? 
(0) : pos; n 6976 drivers/scsi/bfa/bfa_ioc.c u32 n = FLASH_BLOCKING_OP_MAX; n 6979 drivers/scsi/bfa/bfa_ioc.c if (--n <= 0) n 6996 drivers/scsi/bfa/bfa_ioc.c u32 n; n 7009 drivers/scsi/bfa/bfa_ioc.c n = s / fifo_sz; n 7010 drivers/scsi/bfa/bfa_ioc.c l = (n + 1) * fifo_sz - s; n 7021 drivers/scsi/bfa/bfa_ioc.c n = BFA_FLASH_BLOCKING_OP_MAX; n 7023 drivers/scsi/bfa/bfa_ioc.c if (--n <= 0) { n 528 drivers/scsi/csiostor/csio_hw.c uint32_t n, const uint8_t *data) n 534 drivers/scsi/csiostor/csio_hw.c if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE) n 547 drivers/scsi/csiostor/csio_hw.c for (left = n; left; left -= c) { n 567 drivers/scsi/csiostor/csio_hw.c if (memcmp(data - n, (uint8_t *)buf + offset, n)) { n 3823 drivers/scsi/csiostor/csio_hw.c int n, ret = 0; n 3854 drivers/scsi/csiostor/csio_hw.c for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) { n 3856 drivers/scsi/csiostor/csio_hw.c fl_sg->flbufs[n].vaddr, n 3857 drivers/scsi/csiostor/csio_hw.c fl_sg->flbufs[n].len); n 3858 drivers/scsi/csiostor/csio_hw.c off += fl_sg->flbufs[n].len; n 115 drivers/scsi/csiostor/csio_init.h struct list_head *reqlist, int n) n 120 drivers/scsi/csiostor/csio_init.h csio_put_scsi_ioreq_list(scsim, reqlist, n); n 127 drivers/scsi/csiostor/csio_init.h struct list_head *reqlist, int n) n 132 drivers/scsi/csiostor/csio_init.h csio_put_scsi_ddp_list(scsim, reqlist, n); n 499 drivers/scsi/csiostor/csio_isr.c int i, j, k, n, min, cnt; n 540 drivers/scsi/csiostor/csio_isr.c n = (j % info->max_cpus) + k; n 541 drivers/scsi/csiostor/csio_isr.c hw->sqset[i][j].intr_idx = n; n 2318 drivers/scsi/csiostor/csio_scsi.c int n = 0; n 2334 drivers/scsi/csiostor/csio_scsi.c for (n = 0; n < num_buf; n++) { n 215 drivers/scsi/csiostor/csio_scsi.h int n) n 218 drivers/scsi/csiostor/csio_scsi.h scm->stats.n_free_ioreq += n; n 245 drivers/scsi/csiostor/csio_scsi.h int n) n 248 drivers/scsi/csiostor/csio_scsi.h scm->stats.n_free_ddp += n; n 123 drivers/scsi/csiostor/csio_wr.c int n = flq->credits; n 125 drivers/scsi/csiostor/csio_wr.c while (n--) { n 130 drivers/scsi/csiostor/csio_wr.c csio_err(hw, "Could only fill %d buffers!\n", n + 1); n 151 drivers/scsi/csiostor/csio_wr.c csio_wr_update_fl(struct csio_hw *hw, struct csio_q *flq, uint16_t n) n 154 drivers/scsi/csiostor/csio_wr.c flq->inc_idx += n; n 155 drivers/scsi/csiostor/csio_wr.c flq->pidx += n; n 1053 drivers/scsi/csiostor/csio_wr.c int n; n 1073 drivers/scsi/csiostor/csio_wr.c for (n = 0, fbuf = flb.flbufs; ; n++, fbuf++) { n 558 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c static inline unsigned int sgl_len(unsigned int n) n 560 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c n--; n 561 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c return (3 * n) / 2 + (n & 1) + 2; n 1691 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c struct neighbour *n = NULL; n 1716 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c n = dst_neigh_lookup(csk->dst, daddr); n 1718 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c if (!n) { n 1723 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c if (!(n->nud_state & NUD_VALID)) n 1724 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c neigh_event_send(n, NULL); n 1739 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, priority); n 1741 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0); n 1828 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c neigh_release(n); n 1839 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c if (n) n 1840 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c neigh_release(n); n 609 drivers/scsi/cxgbi/libcxgbi.c struct neighbour *n; n 626 drivers/scsi/cxgbi/libcxgbi.c n = dst_neigh_lookup(dst, 
&daddr->sin_addr.s_addr); n 627 drivers/scsi/cxgbi/libcxgbi.c if (!n) { n 631 drivers/scsi/cxgbi/libcxgbi.c ndev = n->dev; n 649 drivers/scsi/cxgbi/libcxgbi.c n->dev->name, ndev->name, mtu); n 688 drivers/scsi/cxgbi/libcxgbi.c neigh_release(n); n 693 drivers/scsi/cxgbi/libcxgbi.c neigh_release(n); n 725 drivers/scsi/cxgbi/libcxgbi.c struct neighbour *n; n 744 drivers/scsi/cxgbi/libcxgbi.c n = dst_neigh_lookup(dst, &daddr6->sin6_addr); n 746 drivers/scsi/cxgbi/libcxgbi.c if (!n) { n 753 drivers/scsi/cxgbi/libcxgbi.c ndev = n->dev; n 813 drivers/scsi/cxgbi/libcxgbi.c neigh_release(n); n 817 drivers/scsi/cxgbi/libcxgbi.c if (n) n 818 drivers/scsi/cxgbi/libcxgbi.c neigh_release(n); n 71 drivers/scsi/cxgbi/libcxgbi.h #define cxgbi_align_pdu_size(n) do { n = (n) & (~511); } while (0) n 396 drivers/scsi/cxgbi/libcxgbi.h int n = 0; n 400 drivers/scsi/cxgbi/libcxgbi.h n += skb->csum; n 403 drivers/scsi/cxgbi/libcxgbi.h return n; n 48 drivers/scsi/esas2r/esas2r_flash.c #define esas2r_nvramcalc_cksum(n) \ n 49 drivers/scsi/esas2r/esas2r_flash.c (esas2r_calc_byte_cksum((u8 *)(n), sizeof(struct esas2r_sas_nvram), \ n 51 drivers/scsi/esas2r/esas2r_flash.c #define esas2r_nvramcalc_xor_cksum(n) \ n 52 drivers/scsi/esas2r/esas2r_flash.c (esas2r_calc_byte_xor_cksum((u8 *)(n), \ n 1261 drivers/scsi/esas2r/esas2r_flash.c struct esas2r_sas_nvram *n = nvram; n 1272 drivers/scsi/esas2r/esas2r_flash.c if (n == NULL) n 1273 drivers/scsi/esas2r/esas2r_flash.c n = a->nvram; n 1276 drivers/scsi/esas2r/esas2r_flash.c if (n->version > SASNVR_VERSION) { n 1281 drivers/scsi/esas2r/esas2r_flash.c memcpy(&sas_address_bytes[0], n->sas_addr, 8); n 1292 drivers/scsi/esas2r/esas2r_flash.c if (n->spin_up_delay > SASNVR_SPINUP_MAX) n 1293 drivers/scsi/esas2r/esas2r_flash.c n->spin_up_delay = SASNVR_SPINUP_MAX; n 1295 drivers/scsi/esas2r/esas2r_flash.c n->version = SASNVR_VERSION; n 1296 drivers/scsi/esas2r/esas2r_flash.c n->checksum = n->checksum - esas2r_nvramcalc_cksum(n); n 1297 drivers/scsi/esas2r/esas2r_flash.c memcpy(a->nvram, n, sizeof(struct esas2r_sas_nvram)); n 1300 drivers/scsi/esas2r/esas2r_flash.c n = a->nvram; n 1306 drivers/scsi/esas2r/esas2r_flash.c esas2r_nvramcalc_xor_cksum(n), n 1316 drivers/scsi/esas2r/esas2r_flash.c a->uncached_phys + (u64)((u8 *)n - a->uncached)); n 1322 drivers/scsi/esas2r/esas2r_flash.c + (u64)((u8 *)n - a->uncached)); n 1332 drivers/scsi/esas2r/esas2r_flash.c struct esas2r_sas_nvram *n = a->nvram; n 1335 drivers/scsi/esas2r/esas2r_flash.c if (n->signature[0] != 'E' n 1336 drivers/scsi/esas2r/esas2r_flash.c || n->signature[1] != 'S' n 1337 drivers/scsi/esas2r/esas2r_flash.c || n->signature[2] != 'A' n 1338 drivers/scsi/esas2r/esas2r_flash.c || n->signature[3] != 'S') { n 1340 drivers/scsi/esas2r/esas2r_flash.c } else if (esas2r_nvramcalc_cksum(n)) { n 1342 drivers/scsi/esas2r/esas2r_flash.c } else if (n->version > SASNVR_VERSION) { n 1364 drivers/scsi/esas2r/esas2r_flash.c struct esas2r_sas_nvram *n = a->nvram; n 1368 drivers/scsi/esas2r/esas2r_flash.c *n = default_sas_nvram; n 1369 drivers/scsi/esas2r/esas2r_flash.c n->sas_addr[3] |= 0x0F; n 1370 drivers/scsi/esas2r/esas2r_flash.c n->sas_addr[4] = HIBYTE(LOWORD(time)); n 1371 drivers/scsi/esas2r/esas2r_flash.c n->sas_addr[5] = LOBYTE(LOWORD(time)); n 1372 drivers/scsi/esas2r/esas2r_flash.c n->sas_addr[6] = a->pcid->bus->number; n 1373 drivers/scsi/esas2r/esas2r_flash.c n->sas_addr[7] = a->pcid->devfn; n 2874 drivers/scsi/esp_scsi.c unsigned int n = ESP_FIFO_SIZE; n 2879 drivers/scsi/esp_scsi.c if (n > esp_count) n 2880 
drivers/scsi/esp_scsi.c n = esp_count; n 2881 drivers/scsi/esp_scsi.c writesb(esp->fifo_reg, src, n); n 2882 drivers/scsi/esp_scsi.c src += n; n 2883 drivers/scsi/esp_scsi.c esp_count -= n; n 2902 drivers/scsi/esp_scsi.c n = ESP_FIFO_SIZE - n 2905 drivers/scsi/esp_scsi.c if (n > esp_count) n 2906 drivers/scsi/esp_scsi.c n = esp_count; n 2907 drivers/scsi/esp_scsi.c writesb(esp->fifo_reg, src, n); n 2908 drivers/scsi/esp_scsi.c src += n; n 2909 drivers/scsi/esp_scsi.c esp_count -= n; n 237 drivers/scsi/fnic/fnic_isr.c unsigned int n = ARRAY_SIZE(fnic->rq); n 250 drivers/scsi/fnic/fnic_isr.c if (fnic->rq_count >= n && n 253 drivers/scsi/fnic/fnic_isr.c fnic->cq_count >= n + m + o) { n 254 drivers/scsi/fnic/fnic_isr.c int vecs = n + m + o + 1; n 258 drivers/scsi/fnic/fnic_isr.c fnic->rq_count = n; n 262 drivers/scsi/fnic/fnic_isr.c fnic->cq_count = n + m + o; n 1272 drivers/scsi/hpsa.c int n = h->ndevices; n 1277 drivers/scsi/hpsa.c if (n >= HPSA_MAX_DEVICES) { n 1309 drivers/scsi/hpsa.c for (i = 0; i < n; i++) { n 1331 drivers/scsi/hpsa.c h->dev[n] = device; n 10026 drivers/scsi/ipr.c int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1; n 10029 drivers/scsi/ipr.c snprintf(ioa_cfg->vectors_info[vec_idx].desc, n, n 85 drivers/scsi/ips.h #define MDELAY(n) \ n 87 drivers/scsi/ips.h mdelay(n); \ n 616 drivers/scsi/libsas/sas_ata.c struct domain_device *dev, *n; n 627 drivers/scsi/libsas/sas_ata.c list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) { n 644 drivers/scsi/libsas/sas_ata.c struct domain_device *dev, *n; n 646 drivers/scsi/libsas/sas_ata.c list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) { n 784 drivers/scsi/libsas/sas_ata.c struct scsi_cmnd *cmd, *n; n 791 drivers/scsi/libsas/sas_ata.c list_for_each_entry_safe(cmd, n, work_q, eh_entry) { n 209 drivers/scsi/libsas/sas_discover.c struct domain_device *dev, *n; n 220 drivers/scsi/libsas/sas_discover.c list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) { n 348 drivers/scsi/libsas/sas_discover.c struct domain_device *dev, *n; n 350 drivers/scsi/libsas/sas_discover.c list_for_each_entry_safe(dev, n, &port->destroy_list, disco_list_node) { n 388 drivers/scsi/libsas/sas_discover.c struct domain_device *dev, *n; n 390 drivers/scsi/libsas/sas_discover.c list_for_each_entry_safe_reverse(dev, n, &port->dev_list, dev_list_node) { n 396 drivers/scsi/libsas/sas_discover.c list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) n 1865 drivers/scsi/libsas/sas_expander.c struct domain_device *child, *n; n 1867 drivers/scsi/libsas/sas_expander.c list_for_each_entry_safe(child, n, &ex->children, siblings) { n 1882 drivers/scsi/libsas/sas_expander.c struct domain_device *child, *n, *found = NULL; n 1884 drivers/scsi/libsas/sas_expander.c list_for_each_entry_safe(child, n, n 234 drivers/scsi/libsas/sas_scsi_host.c struct scsi_cmnd *cmd, *n; n 236 drivers/scsi/libsas/sas_scsi_host.c list_for_each_entry_safe(cmd, n, error_q, eh_entry) { n 246 drivers/scsi/libsas/sas_scsi_host.c struct scsi_cmnd *cmd, *n; n 248 drivers/scsi/libsas/sas_scsi_host.c list_for_each_entry_safe(cmd, n, error_q, eh_entry) { n 259 drivers/scsi/libsas/sas_scsi_host.c struct scsi_cmnd *cmd, *n; n 261 drivers/scsi/libsas/sas_scsi_host.c list_for_each_entry_safe(cmd, n, error_q, eh_entry) { n 558 drivers/scsi/libsas/sas_scsi_host.c struct scsi_cmnd *cmd, *n; n 567 drivers/scsi/libsas/sas_scsi_host.c list_for_each_entry_safe(cmd, n, work_q, eh_entry) { n 584 drivers/scsi/libsas/sas_scsi_host.c 
list_for_each_entry_safe(cmd, n, work_q, eh_entry) { n 684 drivers/scsi/libsas/sas_scsi_host.c list_for_each_entry_safe(cmd, n, work_q, eh_entry) n 1463 drivers/scsi/lpfc/lpfc_ct.c int n; n 1470 drivers/scsi/lpfc/lpfc_ct.c n = scnprintf(symbol, size, "%d", vport->phba->brd_no); n 1471 drivers/scsi/lpfc/lpfc_ct.c return n; n 11119 drivers/scsi/lpfc/lpfc_init.c unsigned int n; n 11131 drivers/scsi/lpfc/lpfc_init.c n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ); n 11132 drivers/scsi/lpfc/lpfc_init.c if (n == cpu) n 226 drivers/scsi/mac_esp.c unsigned int n; n 233 drivers/scsi/mac_esp.c n = (esp_read8(ESP_TCMED) << 8) + esp_read8(ESP_TCLOW); n 234 drivers/scsi/mac_esp.c addr = start_addr + esp_count - n; n 235 drivers/scsi/mac_esp.c esp_count = n; n 131 drivers/scsi/mac_scsi.c : "+a" (addr), "+r" (n), "+r" (result) : "a" (io)) n 158 drivers/scsi/mac_scsi.c : "+a" (addr), "+r" (n), "+r" (result) : "a" (io)) n 209 drivers/scsi/mac_scsi.c : "+a" (addr), "+r" (n), "+r" (result) : "a" (io)) n 213 drivers/scsi/mac_scsi.c static inline int mac_pdma_recv(void __iomem *io, unsigned char *start, int n) n 218 drivers/scsi/mac_scsi.c if (n >= 1) { n 223 drivers/scsi/mac_scsi.c if (n >= 1 && ((unsigned long)addr & 1)) { n 228 drivers/scsi/mac_scsi.c while (n >= 32) n 230 drivers/scsi/mac_scsi.c while (n >= 2) n 234 drivers/scsi/mac_scsi.c if (n == 1) n 240 drivers/scsi/mac_scsi.c static inline int mac_pdma_send(unsigned char *start, void __iomem *io, int n) n 245 drivers/scsi/mac_scsi.c if (n >= 1) { n 250 drivers/scsi/mac_scsi.c if (n >= 1 && ((unsigned long)addr & 1)) { n 255 drivers/scsi/mac_scsi.c while (n >= 32) n 257 drivers/scsi/mac_scsi.c while (n >= 2) n 261 drivers/scsi/mac_scsi.c if (n == 1) n 1649 drivers/scsi/megaraid/megaraid_sas_base.c int i,n; n 1689 drivers/scsi/megaraid/megaraid_sas_base.c for (n = 0; n < sgcount; n++) { n 1692 drivers/scsi/megaraid/megaraid_sas_base.c le32_to_cpu(mfi_sgl->sge64[n].length), n 1693 drivers/scsi/megaraid/megaraid_sas_base.c le64_to_cpu(mfi_sgl->sge64[n].phys_addr)); n 1696 drivers/scsi/megaraid/megaraid_sas_base.c le32_to_cpu(mfi_sgl->sge32[n].length), n 1697 drivers/scsi/megaraid/megaraid_sas_base.c le32_to_cpu(mfi_sgl->sge32[n].phys_addr)); n 827 drivers/scsi/mesh.c int i, n; n 829 drivers/scsi/mesh.c n = mr->fifo_count; n 830 drivers/scsi/mesh.c if (n != 0) { n 832 drivers/scsi/mesh.c ms->n_msgin = i + n; n 833 drivers/scsi/mesh.c for (; n > 0; --n) n 840 drivers/scsi/mesh.c int b, n; n 842 drivers/scsi/mesh.c n = 1; n 847 drivers/scsi/mesh.c n = ms->n_msgin < 2? 2: ms->msgin[1] + 2; n 850 drivers/scsi/mesh.c n = 2; n 853 drivers/scsi/mesh.c return n; n 1448 drivers/scsi/mesh.c int seq, n, t; n 1462 drivers/scsi/mesh.c n = msgin_length(ms); n 1463 drivers/scsi/mesh.c if (ms->n_msgin < n) { n 1464 drivers/scsi/mesh.c out_8(&mr->count_lo, n - ms->n_msgin); n 91 drivers/scsi/mvsas/mv_sas.c unsigned long i = 0, j = 0, n = 0, num = 0; n 107 drivers/scsi/mvsas/mv_sas.c phyno[n] = (j >= mvi->chip->n_phy) ? 
n 110 drivers/scsi/mvsas/mv_sas.c n++; n 53 drivers/scsi/mvsas/mv_sas.h #define bit(n) ((u64)1 << n) n 330 drivers/scsi/ncr53c8xx.c #define __m_calloc(mp, s, n) __m_calloc2(mp, s, n, MEMO_WARN) n 501 drivers/scsi/ncr53c8xx.c #define _m_calloc_dma(np, s, n) __m_calloc_dma(np->dev, s, n) n 502 drivers/scsi/ncr53c8xx.c #define _m_free_dma(np, p, s, n) __m_free_dma(np->dev, p, s, n) n 503 drivers/scsi/ncr53c8xx.c #define m_calloc_dma(s, n) _m_calloc_dma(np, s, n) n 504 drivers/scsi/ncr53c8xx.c #define m_free_dma(p, s, n) _m_free_dma(np, p, s, n) n 1059 drivers/scsi/ncr53c8xx.h #define SCR_COPY(n) (0xc0000000 | SCR_NO_FLUSH | (n)) n 1060 drivers/scsi/ncr53c8xx.h #define SCR_COPY_F(n) (0xc0000000 | (n)) n 1160 drivers/scsi/ncr53c8xx.h #define SCR_LOAD_R(reg, how, n) \ n 1161 drivers/scsi/ncr53c8xx.h (0xe1000000 | how | (SCR_REG_OFS2(REG(reg))) | (n)) n 1163 drivers/scsi/ncr53c8xx.h #define SCR_STORE_R(reg, how, n) \ n 1164 drivers/scsi/ncr53c8xx.h (0xe0000000 | how | (SCR_REG_OFS2(REG(reg))) | (n)) n 1166 drivers/scsi/ncr53c8xx.h #define SCR_LOAD_ABS(reg, n) SCR_LOAD_R(reg, SCR_NO_FLUSH2, n) n 1167 drivers/scsi/ncr53c8xx.h #define SCR_LOAD_REL(reg, n) SCR_LOAD_R(reg, SCR_NO_FLUSH2|SCR_DSA_REL2, n) n 1168 drivers/scsi/ncr53c8xx.h #define SCR_LOAD_ABS_F(reg, n) SCR_LOAD_R(reg, 0, n) n 1169 drivers/scsi/ncr53c8xx.h #define SCR_LOAD_REL_F(reg, n) SCR_LOAD_R(reg, SCR_DSA_REL2, n) n 1171 drivers/scsi/ncr53c8xx.h #define SCR_STORE_ABS(reg, n) SCR_STORE_R(reg, SCR_NO_FLUSH2, n) n 1172 drivers/scsi/ncr53c8xx.h #define SCR_STORE_REL(reg, n) SCR_STORE_R(reg, SCR_NO_FLUSH2|SCR_DSA_REL2,n) n 1173 drivers/scsi/ncr53c8xx.h #define SCR_STORE_ABS_F(reg, n) SCR_STORE_R(reg, 0, n) n 1174 drivers/scsi/ncr53c8xx.h #define SCR_STORE_REL_F(reg, n) SCR_STORE_R(reg, SCR_DSA_REL2, n) n 39 drivers/scsi/pmcraid.h #define PMC_BIT8(n) (1 << (7-n)) n 40 drivers/scsi/pmcraid.h #define PMC_BIT16(n) (1 << (15-n)) n 41 drivers/scsi/pmcraid.h #define PMC_BIT32(n) (1 << (31-n)) n 1054 drivers/scsi/pmcraid.h #define DRV_IOCTL(n, size) \ n 1055 drivers/scsi/pmcraid.h _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size)) n 1057 drivers/scsi/pmcraid.h #define FMW_IOCTL(n, size) \ n 1058 drivers/scsi/pmcraid.h _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL, (n), (size)) n 685 drivers/scsi/qla2xxx/qla_init.c u16 i, n, found = 0, loop_id; n 722 drivers/scsi/qla2xxx/qla_init.c n = ea->data[0] / sizeof(struct get_name_list_extended); n 726 drivers/scsi/qla2xxx/qla_init.c __func__, __LINE__, fcport->port_name, n, n 730 drivers/scsi/qla2xxx/qla_init.c for (i = 0; i < n; i++) { n 893 drivers/scsi/qla2xxx/qla_init.c for (i = 0; i < n; i++) { n 970 drivers/scsi/qla2xxx/qla_init.c u16 i, n = 0, loop_id; n 992 drivers/scsi/qla2xxx/qla_init.c n = sp->u.iocb_cmd.u.mbx.in_mb[1] / n 997 drivers/scsi/qla2xxx/qla_init.c for (i = 0; i < n; i++) { n 1031 drivers/scsi/qla2xxx/qla_init.c for (i = 0; i < n; i++) { n 2862 drivers/scsi/qla2xxx/qla_init.c ulong n; n 2873 drivers/scsi/qla2xxx/qla_init.c n = timeout_msec / delta_msec; n 2874 drivers/scsi/qla2xxx/qla_init.c while (n--) { n 2893 drivers/scsi/qla2xxx/qla_init.c n = timeout_msec / delta_msec; n 2894 drivers/scsi/qla2xxx/qla_init.c while (n--) { n 7375 drivers/scsi/qla2xxx/qla_init.c uint n = sizeof(*image_status) / sizeof(*p); n 7378 drivers/scsi/qla2xxx/qla_init.c for ( ; n--; p++) n 15 drivers/scsi/qla2xxx/qla_nx.c #define MASK(n) ((1ULL<<(n))-1) n 1148 drivers/scsi/qla2xxx/qla_nx.c unsigned offset, n; n 1216 drivers/scsi/qla2xxx/qla_nx.c if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n 
!= 0xcafecafeUL || n 1217 drivers/scsi/qla2xxx/qla_nx.c qla82xx_rom_fast_read(ha, 4, &n) != 0) { n 1219 drivers/scsi/qla2xxx/qla_nx.c "Error Reading crb_init area: n: %08x.\n", n); n 1226 drivers/scsi/qla2xxx/qla_nx.c offset = n & 0xffffU; n 1227 drivers/scsi/qla2xxx/qla_nx.c n = (n >> 16) & 0xffffU; n 1230 drivers/scsi/qla2xxx/qla_nx.c if (n >= 1024) { n 1232 drivers/scsi/qla2xxx/qla_nx.c "Card flash not initialized:n=0x%x.\n", n); n 1237 drivers/scsi/qla2xxx/qla_nx.c "%d CRB init values found in ROM.\n", n); n 1239 drivers/scsi/qla2xxx/qla_nx.c buf = kmalloc_array(n, sizeof(struct crb_addr_pair), GFP_KERNEL); n 1246 drivers/scsi/qla2xxx/qla_nx.c for (i = 0; i < n; i++) { n 1257 drivers/scsi/qla2xxx/qla_nx.c for (i = 0; i < n; i++) { n 1174 drivers/scsi/qla2xxx/qla_target.c struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb; n 1177 drivers/scsi/qla2xxx/qla_target.c loop_id = le16_to_cpu(n->u.isp24.nport_handle); n 179 drivers/scsi/qla4xxx/ql4_def.h #define SET_BITVAL(o, n, v) { \ n 181 drivers/scsi/qla4xxx/ql4_def.h n |= v; \ n 183 drivers/scsi/qla4xxx/ql4_def.h n &= ~v; \ n 18 drivers/scsi/qla4xxx/ql4_nx.c #define MASK(n) DMA_BIT_MASK(n) n 988 drivers/scsi/qla4xxx/ql4_nx.c unsigned offset, n; n 1056 drivers/scsi/qla4xxx/ql4_nx.c if (qla4_82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL || n 1057 drivers/scsi/qla4xxx/ql4_nx.c qla4_82xx_rom_fast_read(ha, 4, &n) != 0) { n 1059 drivers/scsi/qla4xxx/ql4_nx.c "[ERROR] Reading crb_init area: n: %08x\n", n); n 1066 drivers/scsi/qla4xxx/ql4_nx.c offset = n & 0xffffU; n 1067 drivers/scsi/qla4xxx/ql4_nx.c n = (n >> 16) & 0xffffU; n 1070 drivers/scsi/qla4xxx/ql4_nx.c if (n >= 1024) { n 1073 drivers/scsi/qla4xxx/ql4_nx.c DRIVER_NAME, __func__, n); n 1078 drivers/scsi/qla4xxx/ql4_nx.c "%s: %d CRB init values found in ROM.\n", DRIVER_NAME, n); n 1080 drivers/scsi/qla4xxx/ql4_nx.c buf = kmalloc_array(n, sizeof(struct crb_addr_pair), GFP_KERNEL); n 1087 drivers/scsi/qla4xxx/ql4_nx.c for (i = 0; i < n; i++) { n 1099 drivers/scsi/qla4xxx/ql4_nx.c for (i = 0; i < n; i++) { n 903 drivers/scsi/qlogicpti.c int i, n; n 917 drivers/scsi/qlogicpti.c n = sg_count; n 918 drivers/scsi/qlogicpti.c if (n > 4) n 919 drivers/scsi/qlogicpti.c n = 4; n 920 drivers/scsi/qlogicpti.c for_each_sg(sg, s, n, i) { n 941 drivers/scsi/qlogicpti.c n = sg_count; n 942 drivers/scsi/qlogicpti.c if (n > 7) n 943 drivers/scsi/qlogicpti.c n = 7; n 944 drivers/scsi/qlogicpti.c for_each_sg(sg, s, n, i) { n 948 drivers/scsi/qlogicpti.c sg_count -= n; n 1028 drivers/scsi/scsi_debug.c int act_len, n; n 1042 drivers/scsi/scsi_debug.c n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len); n 1043 drivers/scsi/scsi_debug.c scsi_set_resid(scp, min(scsi_get_resid(scp), n)); n 1394 drivers/scsi/scsi_debug.c int alloc_len, n, ret; n 1430 drivers/scsi/scsi_debug.c n = 4; n 1431 drivers/scsi/scsi_debug.c arr[n++] = 0x0; /* this page */ n 1432 drivers/scsi/scsi_debug.c arr[n++] = 0x80; /* unit serial number */ n 1433 drivers/scsi/scsi_debug.c arr[n++] = 0x83; /* device identification */ n 1434 drivers/scsi/scsi_debug.c arr[n++] = 0x84; /* software interface ident. 
*/ n 1435 drivers/scsi/scsi_debug.c arr[n++] = 0x85; /* management network addresses */ n 1436 drivers/scsi/scsi_debug.c arr[n++] = 0x86; /* extended inquiry */ n 1437 drivers/scsi/scsi_debug.c arr[n++] = 0x87; /* mode page policy */ n 1438 drivers/scsi/scsi_debug.c arr[n++] = 0x88; /* SCSI ports */ n 1440 drivers/scsi/scsi_debug.c arr[n++] = 0x89; /* ATA information */ n 1441 drivers/scsi/scsi_debug.c arr[n++] = 0xb0; /* Block limits */ n 1442 drivers/scsi/scsi_debug.c arr[n++] = 0xb1; /* Block characteristics */ n 1443 drivers/scsi/scsi_debug.c arr[n++] = 0xb2; /* Logical Block Prov */ n 1445 drivers/scsi/scsi_debug.c arr[3] = n - 4; /* number of supported VPD pages */ n 1484 drivers/scsi/scsi_debug.c n = inquiry_vpd_89(&arr[4]); n 1485 drivers/scsi/scsi_debug.c put_unaligned_be16(n, arr + 2); n 1525 drivers/scsi/scsi_debug.c n = 62; n 1527 drivers/scsi/scsi_debug.c put_unaligned_be16(0x600, arr + n); n 1528 drivers/scsi/scsi_debug.c n += 2; n 1530 drivers/scsi/scsi_debug.c put_unaligned_be16(0x525, arr + n); n 1531 drivers/scsi/scsi_debug.c n += 2; n 1533 drivers/scsi/scsi_debug.c put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */ n 1701 drivers/scsi/scsi_debug.c int n, ret, alen, rlen; n 1724 drivers/scsi/scsi_debug.c n = 4; n 1726 drivers/scsi/scsi_debug.c arr[n++] = host_no % 3; /* Asymm access state */ n 1727 drivers/scsi/scsi_debug.c arr[n++] = 0x0F; /* claim: all states are supported */ n 1729 drivers/scsi/scsi_debug.c arr[n++] = 0x0; /* Active/Optimized path */ n 1730 drivers/scsi/scsi_debug.c arr[n++] = 0x01; /* only support active/optimized paths */ n 1732 drivers/scsi/scsi_debug.c put_unaligned_be16(port_group_a, arr + n); n 1733 drivers/scsi/scsi_debug.c n += 2; n 1734 drivers/scsi/scsi_debug.c arr[n++] = 0; /* Reserved */ n 1735 drivers/scsi/scsi_debug.c arr[n++] = 0; /* Status code */ n 1736 drivers/scsi/scsi_debug.c arr[n++] = 0; /* Vendor unique */ n 1737 drivers/scsi/scsi_debug.c arr[n++] = 0x1; /* One port per group */ n 1738 drivers/scsi/scsi_debug.c arr[n++] = 0; /* Reserved */ n 1739 drivers/scsi/scsi_debug.c arr[n++] = 0; /* Reserved */ n 1740 drivers/scsi/scsi_debug.c put_unaligned_be16(port_a, arr + n); n 1741 drivers/scsi/scsi_debug.c n += 2; n 1742 drivers/scsi/scsi_debug.c arr[n++] = 3; /* Port unavailable */ n 1743 drivers/scsi/scsi_debug.c arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */ n 1744 drivers/scsi/scsi_debug.c put_unaligned_be16(port_group_b, arr + n); n 1745 drivers/scsi/scsi_debug.c n += 2; n 1746 drivers/scsi/scsi_debug.c arr[n++] = 0; /* Reserved */ n 1747 drivers/scsi/scsi_debug.c arr[n++] = 0; /* Status code */ n 1748 drivers/scsi/scsi_debug.c arr[n++] = 0; /* Vendor unique */ n 1749 drivers/scsi/scsi_debug.c arr[n++] = 0x1; /* One port per group */ n 1750 drivers/scsi/scsi_debug.c arr[n++] = 0; /* Reserved */ n 1751 drivers/scsi/scsi_debug.c arr[n++] = 0; /* Reserved */ n 1752 drivers/scsi/scsi_debug.c put_unaligned_be16(port_b, arr + n); n 1753 drivers/scsi/scsi_debug.c n += 2; n 1755 drivers/scsi/scsi_debug.c rlen = n - 4; n 1764 drivers/scsi/scsi_debug.c rlen = min(alen,n); n 2383 drivers/scsi/scsi_debug.c int ppc, sp, pcode, subpcode, alloc_len, len, n; n 2401 drivers/scsi/scsi_debug.c n = 4; n 2402 drivers/scsi/scsi_debug.c arr[n++] = 0x0; /* this page */ n 2403 drivers/scsi/scsi_debug.c arr[n++] = 0xd; /* Temperature */ n 2404 drivers/scsi/scsi_debug.c arr[n++] = 0x2f; /* Informational exceptions */ n 2405 drivers/scsi/scsi_debug.c arr[3] = n - 4; n 2422 drivers/scsi/scsi_debug.c n = 4; n 2423 
drivers/scsi/scsi_debug.c arr[n++] = 0x0; n 2424 drivers/scsi/scsi_debug.c arr[n++] = 0x0; /* 0,0 page */ n 2425 drivers/scsi/scsi_debug.c arr[n++] = 0x0; n 2426 drivers/scsi/scsi_debug.c arr[n++] = 0xff; /* this page */ n 2427 drivers/scsi/scsi_debug.c arr[n++] = 0xd; n 2428 drivers/scsi/scsi_debug.c arr[n++] = 0x0; /* Temperature */ n 2429 drivers/scsi/scsi_debug.c arr[n++] = 0x2f; n 2430 drivers/scsi/scsi_debug.c arr[n++] = 0x0; /* Informational exceptions */ n 2431 drivers/scsi/scsi_debug.c arr[3] = n - 4; n 2434 drivers/scsi/scsi_debug.c n = 4; n 2435 drivers/scsi/scsi_debug.c arr[n++] = 0xd; n 2436 drivers/scsi/scsi_debug.c arr[n++] = 0x0; /* Temperature */ n 2437 drivers/scsi/scsi_debug.c arr[3] = n - 4; n 2440 drivers/scsi/scsi_debug.c n = 4; n 2441 drivers/scsi/scsi_debug.c arr[n++] = 0x2f; n 2442 drivers/scsi/scsi_debug.c arr[n++] = 0x0; /* Informational exceptions */ n 2443 drivers/scsi/scsi_debug.c arr[3] = n - 4; n 2795 drivers/scsi/scsi_debug.c int i, j, n; n 2801 drivers/scsi/scsi_debug.c for (j = 0, n = 0; j < 16; j++) { n 2805 drivers/scsi/scsi_debug.c n += scnprintf(b + n, sizeof(b) - n, n 2808 drivers/scsi/scsi_debug.c n += scnprintf(b + n, sizeof(b) - n, n 3618 drivers/scsi/scsi_debug.c int k, j, n, res; n 3681 drivers/scsi/scsi_debug.c n = j * sz_lun; n 3682 drivers/scsi/scsi_debug.c res = p_fill_from_dev_buffer(scp, arr, n, off_rsp); n 3685 drivers/scsi/scsi_debug.c off_rsp += n; n 4734 drivers/scsi/scsi_debug.c int n; n 4736 drivers/scsi/scsi_debug.c if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { n 4737 drivers/scsi/scsi_debug.c sdebug_ptype = n; n 4751 drivers/scsi/scsi_debug.c int n; n 4753 drivers/scsi/scsi_debug.c if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { n 4754 drivers/scsi/scsi_debug.c sdebug_dsense = n; n 4768 drivers/scsi/scsi_debug.c int n; n 4770 drivers/scsi/scsi_debug.c if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { n 4771 drivers/scsi/scsi_debug.c n = (n > 0); n 4773 drivers/scsi/scsi_debug.c if (sdebug_fake_rw != n) { n 4774 drivers/scsi/scsi_debug.c if ((0 == n) && (NULL == fake_storep)) { n 4785 drivers/scsi/scsi_debug.c sdebug_fake_rw = n; n 4800 drivers/scsi/scsi_debug.c int n; n 4802 drivers/scsi/scsi_debug.c if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { n 4803 drivers/scsi/scsi_debug.c sdebug_no_lun_0 = n; n 4817 drivers/scsi/scsi_debug.c int n; n 4819 drivers/scsi/scsi_debug.c if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { n 4820 drivers/scsi/scsi_debug.c sdebug_num_tgts = n; n 4869 drivers/scsi/scsi_debug.c int n; n 4872 drivers/scsi/scsi_debug.c if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { n 4873 drivers/scsi/scsi_debug.c if (n > 256) { n 4877 drivers/scsi/scsi_debug.c changed = (sdebug_max_luns != n); n 4878 drivers/scsi/scsi_debug.c sdebug_max_luns = n; n 4910 drivers/scsi/scsi_debug.c int j, n, k, a; n 4913 drivers/scsi/scsi_debug.c if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) && n 4914 drivers/scsi/scsi_debug.c (n <= SDEBUG_CANQUEUE)) { n 4923 drivers/scsi/scsi_debug.c sdebug_max_queue = n; n 4926 drivers/scsi/scsi_debug.c else if (k >= n) n 4956 drivers/scsi/scsi_debug.c int n; n 4959 drivers/scsi/scsi_debug.c if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { n 4960 drivers/scsi/scsi_debug.c changed = (sdebug_virtual_gb != n); n 4961 drivers/scsi/scsi_debug.c sdebug_virtual_gb = n; n 5019 drivers/scsi/scsi_debug.c int n; n 5021 drivers/scsi/scsi_debug.c if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 
0)) { n 5022 drivers/scsi/scsi_debug.c sdebug_vpd_use_hostno = n; n 5036 drivers/scsi/scsi_debug.c int n; n 5038 drivers/scsi/scsi_debug.c if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) { n 5039 drivers/scsi/scsi_debug.c if (n > 0) n 5111 drivers/scsi/scsi_debug.c int n; n 5113 drivers/scsi/scsi_debug.c if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { n 5114 drivers/scsi/scsi_debug.c sdebug_removable = (n > 0); n 5129 drivers/scsi/scsi_debug.c int n; n 5131 drivers/scsi/scsi_debug.c if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { n 5132 drivers/scsi/scsi_debug.c sdebug_host_lock = (n > 0); n 5146 drivers/scsi/scsi_debug.c int n; n 5148 drivers/scsi/scsi_debug.c if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { n 5149 drivers/scsi/scsi_debug.c sdebug_strict = (n > 0); n 5169 drivers/scsi/scsi_debug.c int ret, n; n 5171 drivers/scsi/scsi_debug.c ret = kstrtoint(buf, 0, &n); n 5174 drivers/scsi/scsi_debug.c sdebug_cdb_len = n; n 5613 drivers/scsi/scsi_debug.c int n, len, sb; n 5620 drivers/scsi/scsi_debug.c for (k = 0, n = 0; k < len && n < sb; ++k) n 5621 drivers/scsi/scsi_debug.c n += scnprintf(b + n, sb - n, "%02x ", n 386 drivers/scsi/scsi_proc.c loff_t n = *pos; n 389 drivers/scsi/scsi_proc.c if (!n--) n 159 drivers/scsi/snic/snic_isr.c unsigned int n = ARRAY_SIZE(snic->wq); n 161 drivers/scsi/snic/snic_isr.c unsigned int vecs = n + m + 1; n 170 drivers/scsi/snic/snic_isr.c if (snic->wq_count < n || snic->cq_count < n + m) n 176 drivers/scsi/snic/snic_isr.c snic->wq_count = n; n 177 drivers/scsi/snic/snic_isr.c snic->cq_count = n + m; n 868 drivers/scsi/sr.c int rc, n; n 910 drivers/scsi/sr.c n = data.header_length + data.block_descriptor_length; n 911 drivers/scsi/sr.c cd->cdi.speed = ((buffer[n + 8] << 8) + buffer[n + 9]) / 176; n 913 drivers/scsi/sr.c cd->readcd_cdda = buffer[n + 5] & 0x01; n 917 drivers/scsi/sr.c ((buffer[n + 14] << 8) + buffer[n + 15]) / 176, n 919 drivers/scsi/sr.c buffer[n + 3] & 0x01 ? "writer " : "", /* CD Writer */ n 920 drivers/scsi/sr.c buffer[n + 3] & 0x20 ? "dvd-ram " : "", n 921 drivers/scsi/sr.c buffer[n + 2] & 0x02 ? "cd/rw " : "", /* can read rewriteable */ n 922 drivers/scsi/sr.c buffer[n + 4] & 0x20 ? "xa/form2 " : "", /* can read xa/from2 */ n 923 drivers/scsi/sr.c buffer[n + 5] & 0x01 ? 
"cdda " : "", /* can read audio data */ n 924 drivers/scsi/sr.c loadmech[buffer[n + 6] >> 5]); n 925 drivers/scsi/sr.c if ((buffer[n + 6] >> 5) == 0) n 928 drivers/scsi/sr.c if ((buffer[n + 2] & 0x8) == 0) n 931 drivers/scsi/sr.c if ((buffer[n + 3] & 0x20) == 0) n 934 drivers/scsi/sr.c if ((buffer[n + 3] & 0x10) == 0) n 937 drivers/scsi/sr.c if ((buffer[n + 3] & 0x2) == 0) n 940 drivers/scsi/sr.c if ((buffer[n + 3] & 0x1) == 0) n 943 drivers/scsi/sr.c if ((buffer[n + 6] & 0x8) == 0) n 947 drivers/scsi/sr.c if ((buffer[n + 6] >> 5) == mechtype_individual_changer || n 948 drivers/scsi/sr.c (buffer[n + 6] >> 5) == mechtype_cartridge_changer) n 177 drivers/scsi/st.c #define TAPE_MINOR(d, m, n) (((d & ~(255 >> (ST_NBR_MODE_BITS + 1))) << (ST_NBR_MODE_BITS + 1)) | \ n 178 drivers/scsi/st.c (d & (255 >> (ST_NBR_MODE_BITS + 1))) | (m << ST_MODE_SHIFT) | ((n != 0) << 7) ) n 540 drivers/scsi/sym53c8xx_2/sym_defs.h #define SCR_COPY(n) (0xc0000000 | SCR_NO_FLUSH | (n)) n 541 drivers/scsi/sym53c8xx_2/sym_defs.h #define SCR_COPY_F(n) (0xc0000000 | (n)) n 643 drivers/scsi/sym53c8xx_2/sym_defs.h #define SCR_LOAD_R(reg, how, n) \ n 644 drivers/scsi/sym53c8xx_2/sym_defs.h (0xe1000000 | how | (SCR_REG_OFS2(REG(reg))) | (n)) n 646 drivers/scsi/sym53c8xx_2/sym_defs.h #define SCR_STORE_R(reg, how, n) \ n 647 drivers/scsi/sym53c8xx_2/sym_defs.h (0xe0000000 | how | (SCR_REG_OFS2(REG(reg))) | (n)) n 649 drivers/scsi/sym53c8xx_2/sym_defs.h #define SCR_LOAD_ABS(reg, n) SCR_LOAD_R(reg, SCR_NO_FLUSH2, n) n 650 drivers/scsi/sym53c8xx_2/sym_defs.h #define SCR_LOAD_REL(reg, n) SCR_LOAD_R(reg, SCR_NO_FLUSH2|SCR_DSA_REL2, n) n 651 drivers/scsi/sym53c8xx_2/sym_defs.h #define SCR_LOAD_ABS_F(reg, n) SCR_LOAD_R(reg, 0, n) n 652 drivers/scsi/sym53c8xx_2/sym_defs.h #define SCR_LOAD_REL_F(reg, n) SCR_LOAD_R(reg, SCR_DSA_REL2, n) n 654 drivers/scsi/sym53c8xx_2/sym_defs.h #define SCR_STORE_ABS(reg, n) SCR_STORE_R(reg, SCR_NO_FLUSH2, n) n 655 drivers/scsi/sym53c8xx_2/sym_defs.h #define SCR_STORE_REL(reg, n) SCR_STORE_R(reg, SCR_NO_FLUSH2|SCR_DSA_REL2,n) n 656 drivers/scsi/sym53c8xx_2/sym_defs.h #define SCR_STORE_ABS_F(reg, n) SCR_STORE_R(reg, 0, n) n 657 drivers/scsi/sym53c8xx_2/sym_defs.h #define SCR_STORE_REL_F(reg, n) SCR_STORE_R(reg, SCR_DSA_REL2, n) n 53 drivers/scsi/sym53c8xx_2/sym_hipd.c static void sym_printl_hex(u_char *p, int n) n 55 drivers/scsi/sym53c8xx_2/sym_hipd.c while (n-- > 0) n 1584 drivers/scsi/sym53c8xx_2/sym_hipd.c int i, n; n 1587 drivers/scsi/sym53c8xx_2/sym_hipd.c n = 0; n 1603 drivers/scsi/sym53c8xx_2/sym_hipd.c ++n; n 1611 drivers/scsi/sym53c8xx_2/sym_hipd.c return n; n 3685 drivers/scsi/sym53c8xx_2/sym_hipd.c int n; n 3689 drivers/scsi/sym53c8xx_2/sym_hipd.c n = dp_ofs + (tmp & 0xffffff); n 3690 drivers/scsi/sym53c8xx_2/sym_hipd.c if (n > 0) { n 3694 drivers/scsi/sym53c8xx_2/sym_hipd.c dp_ofs = n; n 1179 drivers/scsi/sym53c8xx_2/sym_hipd.h #define _sym_calloc_dma(np, l, n) __sym_calloc_dma(np->bus_dmat, l, n) n 1180 drivers/scsi/sym53c8xx_2/sym_hipd.h #define _sym_mfree_dma(np, p, l, n) \ n 1181 drivers/scsi/sym53c8xx_2/sym_hipd.h __sym_mfree_dma(np->bus_dmat, _uvptv_(p), l, n) n 1182 drivers/scsi/sym53c8xx_2/sym_hipd.h #define sym_calloc_dma(l, n) _sym_calloc_dma(np, l, n) n 1183 drivers/scsi/sym53c8xx_2/sym_hipd.h #define sym_mfree_dma(p, l, n) _sym_mfree_dma(np, p, l, n) n 161 drivers/scsi/sym53c8xx_2/sym_malloc.c #define __sym_calloc(mp, s, n) __sym_calloc2(mp, s, n, SYM_MEM_WARN) n 157 drivers/scsi/sym53c8xx_2/sym_misc.h #define sym_set_bit(p, n) (((u32 *)(p))[(n)>>5] |= (1<<((n)&0x1f))) n 158 
drivers/scsi/sym53c8xx_2/sym_misc.h #define sym_clr_bit(p, n) (((u32 *)(p))[(n)>>5] &= ~(1<<((n)&0x1f))) n 159 drivers/scsi/sym53c8xx_2/sym_misc.h #define sym_is_bit(p, n) (((u32 *)(p))[(n)>>5] & (1<<((n)&0x1f))) n 17 drivers/scsi/ufs/ufshcd-dwc.c const struct ufshcd_dme_attr_val *v, int n) n 22 drivers/scsi/ufs/ufshcd-dwc.c for (attr_node = 0; attr_node < n; attr_node++) { n 22 drivers/scsi/ufs/ufshcd-dwc.h const struct ufshcd_dme_attr_val *v, int n); n 33 drivers/scsi/vmw_pvscsi.h #define MASK(n) ((1 << (n)) - 1) /* make an n-bit mask */ n 113 drivers/sh/intc/handle.c unsigned int fn, n, mode, bit; n 138 drivers/sh/intc/handle.c n = *fld_idx + 1; n 140 drivers/sh/intc/handle.c BUG_ON(n * pr->field_width > pr->reg_width); n 142 drivers/sh/intc/handle.c bit = pr->reg_width - (n * pr->field_width); n 99 drivers/slimbus/qcom-ctrl.c int n; n 137 drivers/slimbus/qcom-ctrl.c if ((ctrl->rx.tail + 1) % ctrl->rx.n == ctrl->rx.head) { n 143 drivers/slimbus/qcom-ctrl.c ctrl->rx.tail = (ctrl->rx.tail + 1) % ctrl->rx.n; n 157 drivers/slimbus/qcom-ctrl.c ctrl->tx.head = (ctrl->tx.head + 1) % ctrl->tx.n; n 310 drivers/slimbus/qcom-ctrl.c if (((ctrl->tx.head + 1) % ctrl->tx.n) == ctrl->tx.tail) { n 317 drivers/slimbus/qcom-ctrl.c ctrl->tx.tail = (ctrl->tx.tail + 1) % ctrl->tx.n; n 433 drivers/slimbus/qcom-ctrl.c ctrl->rx.head = (ctrl->rx.head + 1) % ctrl->rx.n; n 537 drivers/slimbus/qcom-ctrl.c ctrl->tx.n = QCOM_TX_MSGS; n 539 drivers/slimbus/qcom-ctrl.c ctrl->rx.n = QCOM_RX_MSGS; n 578 drivers/slimbus/qcom-ctrl.c ctrl->tx.base = devm_kcalloc(&pdev->dev, ctrl->tx.n, ctrl->tx.sl_sz, n 585 drivers/slimbus/qcom-ctrl.c ctrl->rx.base = devm_kcalloc(&pdev->dev,ctrl->rx.n, ctrl->rx.sl_sz, n 48 drivers/soc/fsl/dpio/qbman-portal.c #define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6)) n 49 drivers/soc/fsl/dpio/qbman-portal.c #define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6)) n 50 drivers/soc/fsl/dpio/qbman-portal.c #define QBMAN_CENA_SWP_RCR(n) (0x400 + ((u32)(n) << 6)) n 56 drivers/soc/fsl/dpio/qbman-portal.c #define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((u32)(n) << 6)) n 57 drivers/soc/fsl/dpio/qbman-portal.c #define QBMAN_CENA_SWP_RCR_MEM(n) (0x1400 + ((u32)(n) << 6)) n 44 drivers/soc/fsl/qbman/bman.c #define BM_REG_SCN(n) (0x3400 + ((n) << 6)) n 64 drivers/soc/fsl/qbman/bman.c #define BM_REG_SCN(n) (0x0200 + ((n) << 2)) n 41 drivers/soc/fsl/qbman/bman_ccsr.c #define REG_EDATA(n) (0x0a10 + ((n) * 0x04)) n 42 drivers/soc/fsl/qbman/bman_ccsr.c #define REG_SBEC(n) (0x0a80 + ((n) * 0x04)) n 2382 drivers/soc/fsl/qbman/qman.c #define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0) n 41 drivers/soc/fsl/qbman/qman_ccsr.c #define REG_QCSP_LIO_CFG(n) (0x0000 + ((n) * 0x10)) n 42 drivers/soc/fsl/qbman/qman_ccsr.c #define REG_QCSP_IO_CFG(n) (0x0004 + ((n) * 0x10)) n 43 drivers/soc/fsl/qbman/qman_ccsr.c #define REG_QCSP_DD_CFG(n) (0x000c + ((n) * 0x10)) n 45 drivers/soc/fsl/qbman/qman_ccsr.c #define REG_DCP_CFG(n) (0x0300 + ((n) * 0x10)) n 46 drivers/soc/fsl/qbman/qman_ccsr.c #define REG_DCP_DD_CFG(n) (0x0304 + ((n) * 0x10)) n 47 drivers/soc/fsl/qbman/qman_ccsr.c #define REG_DCP_DLM_AVG(n) (0x030c + ((n) * 0x10)) n 55 drivers/soc/fsl/qbman/qman_ccsr.c #define REG_WQ_CS_CFG(n) (0x0600 + ((n) * 0x04)) n 57 drivers/soc/fsl/qbman/qman_ccsr.c #define REG_WQ_SC_DD_CFG(n) (0x640 + ((n) * 0x04)) n 58 drivers/soc/fsl/qbman/qman_ccsr.c #define REG_WQ_PC_DD_CFG(n) (0x680 + ((n) * 0x04)) n 59 drivers/soc/fsl/qbman/qman_ccsr.c #define REG_WQ_DC0_DD_CFG(n) (0x6c0 + ((n) * 0x04)) n 60 drivers/soc/fsl/qbman/qman_ccsr.c 
#define REG_WQ_DC1_DD_CFG(n) (0x700 + ((n) * 0x04)) n 61 drivers/soc/fsl/qbman/qman_ccsr.c #define REG_WQ_DCn_DD_CFG(n) (0x6c0 + ((n) * 0x40)) /* n=2,3 */ n 67 drivers/soc/fsl/qbman/qman_ccsr.c #define REG_EDATA(n) (0x0a10 + ((n) * 0x04)) n 68 drivers/soc/fsl/qbman/qman_ccsr.c #define REG_SBEC(n) (0x0a80 + ((n) * 0x04)) n 70 drivers/soc/fsl/qbman/qman_ccsr.c #define REG_MCP(n) (0x0b04 + ((n) * 0x04)) n 88 drivers/soc/fsl/qbman/qman_ccsr.c #define REG_REV3_QCSP_LIO_CFG(n) (0x1000 + ((n) * 0x10)) n 89 drivers/soc/fsl/qbman/qman_ccsr.c #define REG_REV3_QCSP_IO_CFG(n) (0x1004 + ((n) * 0x10)) n 90 drivers/soc/fsl/qbman/qman_ccsr.c #define REG_REV3_QCSP_DD_CFG(n) (0x100c + ((n) * 0x10)) n 251 drivers/soc/fsl/qbman/qman_priv.h #define QM_SDQCR_SPECIFICWQ_POOL(n) ((n) << 4) n 252 drivers/soc/fsl/qbman/qman_priv.h #define QM_SDQCR_SPECIFICWQ_WQ(n) (n) n 256 drivers/soc/fsl/qbman/qman_priv.h #define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK) n 266 drivers/soc/fsl/qbman/qman_priv.h #define QM_DQAVAIL_POOL(n) (0x8000 >> (n)) /* Pool channel, n==[1..15] */ n 97 drivers/soc/imx/gpcv2.c #define GPC_PGC_CTRL(n) (0x800 + (n) * 0x40) n 98 drivers/soc/imx/gpcv2.c #define GPC_PGC_SR(n) (GPC_PGC_CTRL(n) + 0xc) n 41 drivers/soc/qcom/llcc-slice.c #define MAX_CAP_TO_BYTES(n) (n * SZ_1K) n 42 drivers/soc/qcom/llcc-slice.c #define LLCC_TRP_ACT_CTRLn(n) (n * SZ_4K) n 43 drivers/soc/qcom/llcc-slice.c #define LLCC_TRP_STATUSn(n) (4 + n * SZ_4K) n 44 drivers/soc/qcom/llcc-slice.c #define LLCC_TRP_ATTR0_CFGn(n) (0x21000 + SZ_8 * n) n 45 drivers/soc/qcom/llcc-slice.c #define LLCC_TRP_ATTR1_CFGn(n) (0x21004 + SZ_8 * n) n 529 drivers/soc/qcom/rpmh-rsc.c u32 n; n 533 drivers/soc/qcom/rpmh-rsc.c int i, ret, n, st = 0; n 559 drivers/soc/qcom/rpmh-rsc.c n = of_property_count_u32_elems(dn, "qcom,tcs-config"); n 560 drivers/soc/qcom/rpmh-rsc.c if (n != 2 * TCS_TYPE_NR) n 572 drivers/soc/qcom/rpmh-rsc.c i * 2 + 1, &tcs_cfg[i].n); n 575 drivers/soc/qcom/rpmh-rsc.c if (tcs_cfg[i].n > MAX_TCS_PER_TYPE) n 585 drivers/soc/qcom/rpmh-rsc.c tcs->num_tcs = tcs_cfg[i].n; n 205 drivers/soc/qcom/rpmh.c const struct tcs_cmd *cmd, u32 n) n 207 drivers/soc/qcom/rpmh.c if (!cmd || !n || n > MAX_RPMH_PAYLOAD) n 210 drivers/soc/qcom/rpmh.c memcpy(req->cmd, cmd, n * sizeof(*cmd)); n 214 drivers/soc/qcom/rpmh.c req->msg.num_cmds = n; n 231 drivers/soc/qcom/rpmh.c const struct tcs_cmd *cmd, u32 n) n 241 drivers/soc/qcom/rpmh.c ret = __fill_rpmh_msg(rpm_msg, state, cmd, n); n 262 drivers/soc/qcom/rpmh.c const struct tcs_cmd *cmd, u32 n) n 268 drivers/soc/qcom/rpmh.c if (!cmd || !n || n > MAX_RPMH_PAYLOAD) n 271 drivers/soc/qcom/rpmh.c memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd)); n 272 drivers/soc/qcom/rpmh.c rpm_msg.msg.num_cmds = n; n 347 drivers/soc/qcom/rpmh.c const struct tcs_cmd *cmd, u32 *n) n 358 drivers/soc/qcom/rpmh.c if (!cmd || !n) n 361 drivers/soc/qcom/rpmh.c while (n[count] > 0) n 379 drivers/soc/qcom/rpmh.c __fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]); n 380 drivers/soc/qcom/rpmh.c cmd += n[i]; n 44 drivers/soc/qcom/trace-rpmh.h TP_PROTO(struct rsc_drv *d, int m, int n, u32 h, n 47 drivers/soc/qcom/trace-rpmh.h TP_ARGS(d, m, n, h, c), n 52 drivers/soc/qcom/trace-rpmh.h __field(int, n) n 62 drivers/soc/qcom/trace-rpmh.h __entry->n = n; n 70 drivers/soc/qcom/trace-rpmh.h __get_str(name), __entry->m, __entry->n, __entry->hdr, n 141 drivers/soundwire/cadence_master.c #define CDNS_DPN_B0_CONFIG(n) (0x100 + CDNS_DP_SIZE * (n)) n 142 drivers/soundwire/cadence_master.c #define CDNS_DPN_B0_CH_EN(n) (0x104 + CDNS_DP_SIZE * (n)) n 143 
drivers/soundwire/cadence_master.c #define CDNS_DPN_B0_SAMPLE_CTRL(n) (0x108 + CDNS_DP_SIZE * (n)) n 144 drivers/soundwire/cadence_master.c #define CDNS_DPN_B0_OFFSET_CTRL(n) (0x10C + CDNS_DP_SIZE * (n)) n 145 drivers/soundwire/cadence_master.c #define CDNS_DPN_B0_HCTRL(n) (0x110 + CDNS_DP_SIZE * (n)) n 146 drivers/soundwire/cadence_master.c #define CDNS_DPN_B0_ASYNC_CTRL(n) (0x114 + CDNS_DP_SIZE * (n)) n 148 drivers/soundwire/cadence_master.c #define CDNS_DPN_B1_CONFIG(n) (0x118 + CDNS_DP_SIZE * (n)) n 149 drivers/soundwire/cadence_master.c #define CDNS_DPN_B1_CH_EN(n) (0x11C + CDNS_DP_SIZE * (n)) n 150 drivers/soundwire/cadence_master.c #define CDNS_DPN_B1_SAMPLE_CTRL(n) (0x120 + CDNS_DP_SIZE * (n)) n 151 drivers/soundwire/cadence_master.c #define CDNS_DPN_B1_OFFSET_CTRL(n) (0x124 + CDNS_DP_SIZE * (n)) n 152 drivers/soundwire/cadence_master.c #define CDNS_DPN_B1_HCTRL(n) (0x128 + CDNS_DP_SIZE * (n)) n 153 drivers/soundwire/cadence_master.c #define CDNS_DPN_B1_ASYNC_CTRL(n) (0x12C + CDNS_DP_SIZE * (n)) n 176 drivers/soundwire/cadence_master.c #define CDNS_PDI_CONFIG(n) (0x1100 + (n) * 16) n 68 drivers/spi/atmel-quadspi.c #define QSPI_MR_NBBITS(n) ((((n) - 8) << 8) & QSPI_MR_NBBITS_MASK) n 70 drivers/spi/atmel-quadspi.c #define QSPI_MR_DLYBCT(n) (((n) << 16) & QSPI_MR_DLYBCT_MASK) n 72 drivers/spi/atmel-quadspi.c #define QSPI_MR_DLYCS(n) (((n) << 24) & QSPI_MR_DLYCS_MASK) n 90 drivers/spi/atmel-quadspi.c #define QSPI_SCR_SCBR(n) (((n) << 8) & QSPI_SCR_SCBR_MASK) n 92 drivers/spi/atmel-quadspi.c #define QSPI_SCR_DLYBS(n) (((n) << 16) & QSPI_SCR_DLYBS_MASK) n 123 drivers/spi/atmel-quadspi.c #define QSPI_IFR_NBDUM(n) (((n) << 16) & QSPI_IFR_NBDUM_MASK) n 38 drivers/spi/spi-ath79.c #define AR71XX_SPI_IOC_CS(n) BIT(16 + (n)) n 65 drivers/spi/spi-axi-spi-engine.c #define SPI_ENGINE_CMD_TRANSFER(flags, n) \ n 66 drivers/spi/spi-axi-spi-engine.c SPI_ENGINE_CMD(SPI_ENGINE_INST_TRANSFER, (flags), (n)) n 151 drivers/spi/spi-axi-spi-engine.c unsigned int n = min(len, 256U); n 160 drivers/spi/spi-axi-spi-engine.c SPI_ENGINE_CMD_TRANSFER(flags, n - 1)); n 161 drivers/spi/spi-axi-spi-engine.c len -= n; n 176 drivers/spi/spi-axi-spi-engine.c unsigned int n = min(t, 256U); n 178 drivers/spi/spi-axi-spi-engine.c spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_SLEEP(n - 1)); n 179 drivers/spi/spi-axi-spi-engine.c t -= n; n 290 drivers/spi/spi-axi-spi-engine.c unsigned int n, m, i; n 293 drivers/spi/spi-axi-spi-engine.c n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM); n 294 drivers/spi/spi-axi-spi-engine.c while (n && spi_engine->cmd_length) { n 295 drivers/spi/spi-axi-spi-engine.c m = min(n, spi_engine->cmd_length); n 301 drivers/spi/spi-axi-spi-engine.c n -= m; n 310 drivers/spi/spi-axi-spi-engine.c unsigned int n, m, i; n 313 drivers/spi/spi-axi-spi-engine.c n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM); n 314 drivers/spi/spi-axi-spi-engine.c while (n && spi_engine->tx_length) { n 315 drivers/spi/spi-axi-spi-engine.c m = min(n, spi_engine->tx_length); n 321 drivers/spi/spi-axi-spi-engine.c n -= m; n 332 drivers/spi/spi-axi-spi-engine.c unsigned int n, m, i; n 335 drivers/spi/spi-axi-spi-engine.c n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL); n 336 drivers/spi/spi-axi-spi-engine.c while (n && spi_engine->rx_length) { n 337 drivers/spi/spi-axi-spi-engine.c m = min(n, spi_engine->rx_length); n 343 drivers/spi/spi-axi-spi-engine.c n -= m; n 156 drivers/spi/spi-coldfire-qspi.c unsigned i, n, offset = 0; n 158 drivers/spi/spi-coldfire-qspi.c n = 
min(count, 16u); n 161 drivers/spi/spi-coldfire-qspi.c for (i = 0; i < n; ++i) n 166 drivers/spi/spi-coldfire-qspi.c for (i = 0; i < n; ++i) n 172 drivers/spi/spi-coldfire-qspi.c count -= n; n 188 drivers/spi/spi-coldfire-qspi.c n = min(count, 8u); n 192 drivers/spi/spi-coldfire-qspi.c for (i = 0; i < n; ++i) n 195 drivers/spi/spi-coldfire-qspi.c qwr = (offset ? 0x808 : 0) + ((n - 1) << 8); n 197 drivers/spi/spi-coldfire-qspi.c count -= n; n 209 drivers/spi/spi-coldfire-qspi.c mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8); n 215 drivers/spi/spi-coldfire-qspi.c for (i = 0; i < n; ++i) n 223 drivers/spi/spi-coldfire-qspi.c unsigned i, n, offset = 0; n 225 drivers/spi/spi-coldfire-qspi.c n = min(count, 16u); n 228 drivers/spi/spi-coldfire-qspi.c for (i = 0; i < n; ++i) n 233 drivers/spi/spi-coldfire-qspi.c for (i = 0; i < n; ++i) n 239 drivers/spi/spi-coldfire-qspi.c count -= n; n 255 drivers/spi/spi-coldfire-qspi.c n = min(count, 8u); n 259 drivers/spi/spi-coldfire-qspi.c for (i = 0; i < n; ++i) n 262 drivers/spi/spi-coldfire-qspi.c qwr = (offset ? 0x808 : 0x000) + ((n - 1) << 8); n 264 drivers/spi/spi-coldfire-qspi.c count -= n; n 276 drivers/spi/spi-coldfire-qspi.c mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8); n 282 drivers/spi/spi-coldfire-qspi.c for (i = 0; i < n; ++i) n 31 drivers/spi/spi-efm32.c #define REG_FRAME_DATABITS(n) ((n) - 3) n 68 drivers/spi/spi-efm32.c #define REG_ROUTE_LOCATION(n) MASK_VAL(REG_ROUTE_LOCATION__MASK, (n)) n 168 drivers/spi/spi-meson-spifc.c int n; n 171 drivers/spi/spi-meson-spifc.c n = max_t(int, parent / speed - 1, 1); n 174 drivers/spi/spi-meson-spifc.c speed, n); n 176 drivers/spi/spi-meson-spifc.c value = (n << CLOCK_DIV_SHIFT) & CLOCK_DIV_MASK; n 177 drivers/spi/spi-meson-spifc.c value |= (n << CLOCK_CNT_LOW_SHIFT) & CLOCK_CNT_LOW_MASK; n 178 drivers/spi/spi-meson-spifc.c value |= (((n + 1) / 2 - 1) << CLOCK_CNT_HIGH_SHIFT) & n 394 drivers/spi/spi-rspi.c unsigned int n; n 396 drivers/spi/spi-rspi.c n = min(len, QSPI_BUFFER_SIZE); n 408 drivers/spi/spi-rspi.c return n; n 413 drivers/spi/spi-rspi.c unsigned int n; n 415 drivers/spi/spi-rspi.c n = min(len, QSPI_BUFFER_SIZE); n 426 drivers/spi/spi-rspi.c return n; n 429 drivers/spi/spi-rspi.c #define set_config_register(spi, n) spi->ops->set_config_register(spi, n) n 494 drivers/spi/spi-rspi.c unsigned int n) n 496 drivers/spi/spi-rspi.c while (n-- > 0) { n 736 drivers/spi/spi-rspi.c unsigned int i, n; n 740 drivers/spi/spi-rspi.c n = qspi_set_send_trigger(rspi, len); n 747 drivers/spi/spi-rspi.c for (i = 0; i < n; i++) n 755 drivers/spi/spi-rspi.c for (i = 0; i < n; i++) n 758 drivers/spi/spi-rspi.c len -= n; n 782 drivers/spi/spi-rspi.c unsigned int n = xfer->len; n 792 drivers/spi/spi-rspi.c while (n > 0) { n 793 drivers/spi/spi-rspi.c len = qspi_set_send_trigger(rspi, n); n 802 drivers/spi/spi-rspi.c n -= len; n 814 drivers/spi/spi-rspi.c unsigned int n = xfer->len; n 824 drivers/spi/spi-rspi.c while (n > 0) { n 825 drivers/spi/spi-rspi.c len = qspi_set_receive_trigger(rspi, n); n 834 drivers/spi/spi-rspi.c n -= len; n 920 drivers/spi/spi-sh-msiof.c int n; n 1026 drivers/spi/spi-sh-msiof.c n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo, tx_buf, rx_buf, n 1028 drivers/spi/spi-sh-msiof.c if (n < 0) n 1029 drivers/spi/spi-sh-msiof.c return n; n 1032 drivers/spi/spi-sh-msiof.c tx_buf += n * bytes_per_word; n 1034 drivers/spi/spi-sh-msiof.c rx_buf += n * bytes_per_word; n 1035 drivers/spi/spi-sh-msiof.c words -= n; n 26 drivers/spi/spi-synquacer.c #define SYNQUACER_HSSPI_REG_PCC(n) (SYNQUACER_HSSPI_REG_PCC0 + (n) * 4) n 46 
drivers/spi/spi-tegra114.c #define SPI_CS_POL_INACTIVE(n) (1 << (22 + (n))) n 75 drivers/spi/spi-ti-qspi.c #define QSPI_SPI_SETUP_REG(n) ((0x54 + 4 * n)) n 90 drivers/spi/spi-ti-qspi.c #define QSPI_EN_CS(n) (n << 28) n 91 drivers/spi/spi-ti-qspi.c #define QSPI_WLEN(n) ((n - 1) << 19) n 98 drivers/spi/spi-ti-qspi.c #define QSPI_FLEN(n) ((n - 1) << 0) n 108 drivers/spi/spi-ti-qspi.c #define QSPI_DD(m, n) (m << (3 + n * 8)) n 109 drivers/spi/spi-ti-qspi.c #define QSPI_CKPHA(n) (1 << (2 + n * 8)) n 110 drivers/spi/spi-ti-qspi.c #define QSPI_CSPOL(n) (1 << (1 + n * 8)) n 111 drivers/spi/spi-ti-qspi.c #define QSPI_CKPOL(n) (1 << (n * 8)) n 117 drivers/spi/spi-ti-qspi.c #define MEM_CS_EN(n) ((n + 1) << 8) n 190 drivers/spi/spi-txx9.c int n = DIV_ROUND_UP(c->baseclk, speed_hz) - 1; n 192 drivers/spi/spi-txx9.c n = clamp(n, SPI_MIN_DIVIDER, SPI_MAX_DIVIDER); n 196 drivers/spi/spi-txx9.c txx9spi_wr(c, (n << 8) | bits_per_word, TXx9_SPCR1); n 740 drivers/spi/spi.c int spi_register_board_info(struct spi_board_info const *info, unsigned n) n 745 drivers/spi/spi.c if (!n) n 748 drivers/spi/spi.c bi = kcalloc(n, sizeof(*bi), GFP_KERNEL); n 752 drivers/spi/spi.c for (i = 0; i < n; i++, bi++, info++) { n 205 drivers/spi/spidev.c unsigned n, total, tx_total, rx_total; n 223 drivers/spi/spidev.c for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers; n 224 drivers/spi/spidev.c n; n 225 drivers/spi/spidev.c n--, k_tmp++, u_tmp++) { n 294 drivers/spi/spidev.c for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) { n 495 drivers/spi/spidev.c unsigned n_ioc, n; n 524 drivers/spi/spidev.c for (n = 0; n < n_ioc; n++) { n 525 drivers/spi/spidev.c ioc[n].rx_buf = (uintptr_t) compat_ptr(ioc[n].rx_buf); n 526 drivers/spi/spidev.c ioc[n].tx_buf = (uintptr_t) compat_ptr(ioc[n].tx_buf); n 194 drivers/spmi/spmi-pmic-arb.c u16 n); n 195 drivers/spmi/spmi-pmic-arb.c void __iomem *(*acc_enable)(struct spmi_pmic_arb *pmic_arb, u16 n); n 196 drivers/spmi/spmi-pmic-arb.c void __iomem *(*irq_status)(struct spmi_pmic_arb *pmic_arb, u16 n); n 197 drivers/spmi/spmi-pmic-arb.c void __iomem *(*irq_clear)(struct spmi_pmic_arb *pmic_arb, u16 n); n 198 drivers/spmi/spmi-pmic-arb.c u32 (*apid_map_offset)(u16 n); n 1010 drivers/spmi/spmi-pmic-arb.c pmic_arb_owner_acc_status_v1(struct spmi_pmic_arb *pmic_arb, u8 m, u16 n) n 1012 drivers/spmi/spmi-pmic-arb.c return pmic_arb->intr + 0x20 * m + 0x4 * n; n 1016 drivers/spmi/spmi-pmic-arb.c pmic_arb_owner_acc_status_v2(struct spmi_pmic_arb *pmic_arb, u8 m, u16 n) n 1018 drivers/spmi/spmi-pmic-arb.c return pmic_arb->intr + 0x100000 + 0x1000 * m + 0x4 * n; n 1022 drivers/spmi/spmi-pmic-arb.c pmic_arb_owner_acc_status_v3(struct spmi_pmic_arb *pmic_arb, u8 m, u16 n) n 1024 drivers/spmi/spmi-pmic-arb.c return pmic_arb->intr + 0x200000 + 0x1000 * m + 0x4 * n; n 1028 drivers/spmi/spmi-pmic-arb.c pmic_arb_owner_acc_status_v5(struct spmi_pmic_arb *pmic_arb, u8 m, u16 n) n 1030 drivers/spmi/spmi-pmic-arb.c return pmic_arb->intr + 0x10000 * m + 0x4 * n; n 1034 drivers/spmi/spmi-pmic-arb.c pmic_arb_acc_enable_v1(struct spmi_pmic_arb *pmic_arb, u16 n) n 1036 drivers/spmi/spmi-pmic-arb.c return pmic_arb->intr + 0x200 + 0x4 * n; n 1040 drivers/spmi/spmi-pmic-arb.c pmic_arb_acc_enable_v2(struct spmi_pmic_arb *pmic_arb, u16 n) n 1042 drivers/spmi/spmi-pmic-arb.c return pmic_arb->intr + 0x1000 * n; n 1046 drivers/spmi/spmi-pmic-arb.c pmic_arb_acc_enable_v5(struct spmi_pmic_arb *pmic_arb, u16 n) n 1048 drivers/spmi/spmi-pmic-arb.c return pmic_arb->wr_base + 0x100 + 0x10000 * n; n 1052 drivers/spmi/spmi-pmic-arb.c 
pmic_arb_irq_status_v1(struct spmi_pmic_arb *pmic_arb, u16 n) n 1054 drivers/spmi/spmi-pmic-arb.c return pmic_arb->intr + 0x600 + 0x4 * n; n 1058 drivers/spmi/spmi-pmic-arb.c pmic_arb_irq_status_v2(struct spmi_pmic_arb *pmic_arb, u16 n) n 1060 drivers/spmi/spmi-pmic-arb.c return pmic_arb->intr + 0x4 + 0x1000 * n; n 1064 drivers/spmi/spmi-pmic-arb.c pmic_arb_irq_status_v5(struct spmi_pmic_arb *pmic_arb, u16 n) n 1066 drivers/spmi/spmi-pmic-arb.c return pmic_arb->wr_base + 0x104 + 0x10000 * n; n 1070 drivers/spmi/spmi-pmic-arb.c pmic_arb_irq_clear_v1(struct spmi_pmic_arb *pmic_arb, u16 n) n 1072 drivers/spmi/spmi-pmic-arb.c return pmic_arb->intr + 0xA00 + 0x4 * n; n 1076 drivers/spmi/spmi-pmic-arb.c pmic_arb_irq_clear_v2(struct spmi_pmic_arb *pmic_arb, u16 n) n 1078 drivers/spmi/spmi-pmic-arb.c return pmic_arb->intr + 0x8 + 0x1000 * n; n 1082 drivers/spmi/spmi-pmic-arb.c pmic_arb_irq_clear_v5(struct spmi_pmic_arb *pmic_arb, u16 n) n 1084 drivers/spmi/spmi-pmic-arb.c return pmic_arb->wr_base + 0x108 + 0x10000 * n; n 1087 drivers/spmi/spmi-pmic-arb.c static u32 pmic_arb_apid_map_offset_v2(u16 n) n 1089 drivers/spmi/spmi-pmic-arb.c return 0x800 + 0x4 * n; n 1092 drivers/spmi/spmi-pmic-arb.c static u32 pmic_arb_apid_map_offset_v5(u16 n) n 1094 drivers/spmi/spmi-pmic-arb.c return 0x900 + 0x4 * n; n 392 drivers/ssb/driver_chipcommon.c u32 *plltype, u32 *n, u32 *m) n 394 drivers/ssb/driver_chipcommon.c *n = chipco_read32(cc, SSB_CHIPCO_CLOCK_N); n 415 drivers/ssb/driver_chipcommon.c u32 *plltype, u32 *n, u32 *m) n 417 drivers/ssb/driver_chipcommon.c *n = chipco_read32(cc, SSB_CHIPCO_CLOCK_N); n 609 drivers/ssb/driver_chipcommon.c u32 i, n; n 675 drivers/ssb/driver_chipcommon.c n = (cc->capabilities & SSB_CHIPCO_CAP_NRUART); n 676 drivers/ssb/driver_chipcommon.c for (i = 0; i < n; i++) { n 108 drivers/ssb/driver_extif.c u32 *pll_type, u32 *n, u32 *m) n 111 drivers/ssb/driver_extif.c *n = extif_read32(extif, SSB_EXTIF_CLOCK_N); n 268 drivers/ssb/driver_mipscore.c u32 pll_type, n, m, rate = 0; n 274 drivers/ssb/driver_mipscore.c ssb_extif_get_clockcontrol(&bus->extif, &pll_type, &n, &m); n 276 drivers/ssb/driver_mipscore.c ssb_chipco_get_clockcpu(&bus->chipco, &pll_type, &n, &m); n 283 drivers/ssb/driver_mipscore.c rate = ssb_calc_clock_rate(pll_type, n, m); n 558 drivers/ssb/main.c struct ssb_bus *bus, *n; n 562 drivers/ssb/main.c list_for_each_entry_safe(bus, n, &attach_queue, list) { n 842 drivers/ssb/main.c u32 ssb_calc_clock_rate(u32 plltype, u32 n, u32 m) n 846 drivers/ssb/main.c n1 = (n & SSB_CHIPCO_CLK_N1); n 847 drivers/ssb/main.c n2 = ((n & SSB_CHIPCO_CLK_N2) >> SSB_CHIPCO_CLK_N2_SHIFT); n 160 drivers/ssb/ssb_private.h extern u32 ssb_calc_clock_rate(u32 plltype, u32 n, u32 m); n 22 drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c #define WZRD_CLK_CFG_REG(n) (0x200 + 4 * (n)) n 493 drivers/staging/comedi/comedi.h unsigned int n; n 1009 drivers/staging/comedi/comedi.h #define _TERM_N(base, n, x) ((base) + ((x) & ((n) - 1))) n 68 drivers/staging/comedi/comedi_compat32.c unsigned int n; n 326 drivers/staging/comedi/comedi_compat32.c err |= __get_user(temp.uint, &insn32->n); n 327 drivers/staging/comedi/comedi_compat32.c err |= __put_user(temp.uint, &insn->n); n 347 drivers/staging/comedi/comedi_compat32.c unsigned int n_insns, n; n 377 drivers/staging/comedi/comedi_compat32.c for (n = 0; n < n_insns; n++) { n 378 drivers/staging/comedi/comedi_compat32.c rc = get_compat_insn(&s->insn[n], &insn32[n]); n 1185 drivers/staging/comedi/comedi_fops.c if (insn->n < 1) n 1193 
drivers/staging/comedi/comedi_fops.c if (insn->n == 1) n 1210 drivers/staging/comedi/comedi_fops.c if (insn->n == 2) n 1222 drivers/staging/comedi/comedi_fops.c if (insn->n == 3) n 1228 drivers/staging/comedi/comedi_fops.c if (insn->n == 5) n 1232 drivers/staging/comedi/comedi_fops.c if (insn->n == 6) n 1236 drivers/staging/comedi/comedi_fops.c if (insn->n >= 4) n 1247 drivers/staging/comedi/comedi_fops.c pr_warn("Assuming n=%i is correct\n", insn->n); n 1256 drivers/staging/comedi/comedi_fops.c if (insn->n < 1) n 1263 drivers/staging/comedi/comedi_fops.c if (insn->n == 3) n 1272 drivers/staging/comedi/comedi_fops.c if (insn->n >= 2) n 1321 drivers/staging/comedi/comedi_fops.c if (insn->n != 2) { n 1335 drivers/staging/comedi/comedi_fops.c if (insn->n != 1 || data[0] >= 100000) { n 1343 drivers/staging/comedi/comedi_fops.c if (insn->n != 1) { n 1379 drivers/staging/comedi/comedi_fops.c data[1] = (insn->n - 2) / 2; n 1444 drivers/staging/comedi/comedi_fops.c for (i = 0; i < insn->n; ++i) { n 1462 drivers/staging/comedi/comedi_fops.c if (insn->n != 2) { n 1552 drivers/staging/comedi/comedi_fops.c if (insns[i].n > MAX_SAMPLES) { n 1558 drivers/staging/comedi/comedi_fops.c max_n_data_required = max(max_n_data_required, insns[i].n); n 1572 drivers/staging/comedi/comedi_fops.c insns[i].n * sizeof(unsigned int))) { n 1584 drivers/staging/comedi/comedi_fops.c insns[i].n * sizeof(unsigned int))) { n 1630 drivers/staging/comedi/comedi_fops.c n_data = max(n_data, insn.n); n 1633 drivers/staging/comedi/comedi_fops.c if (insn.n > MAX_SAMPLES) { n 1634 drivers/staging/comedi/comedi_fops.c insn.n = MAX_SAMPLES; n 1647 drivers/staging/comedi/comedi_fops.c insn.n * sizeof(unsigned int))) { n 1658 drivers/staging/comedi/comedi_fops.c insn.n * sizeof(unsigned int))) { n 1663 drivers/staging/comedi/comedi_fops.c ret = insn.n; n 2447 drivers/staging/comedi/comedi_fops.c unsigned int n, m; n 2501 drivers/staging/comedi/comedi_fops.c n = min_t(size_t, m, nbytes); n 2503 drivers/staging/comedi/comedi_fops.c if (n == 0) { n 2523 drivers/staging/comedi/comedi_fops.c n1 = min(n, async->prealloc_bufsz - wp); n 2524 drivers/staging/comedi/comedi_fops.c n2 = n - n1; n 2531 drivers/staging/comedi/comedi_fops.c n -= m; n 2534 drivers/staging/comedi/comedi_fops.c comedi_buf_write_free(s, n); n 2536 drivers/staging/comedi/comedi_fops.c count += n; n 2537 drivers/staging/comedi/comedi_fops.c nbytes -= n; n 2539 drivers/staging/comedi/comedi_fops.c buf += n; n 2583 drivers/staging/comedi/comedi_fops.c unsigned int n, m; n 2623 drivers/staging/comedi/comedi_fops.c n = min_t(size_t, m, nbytes); n 2625 drivers/staging/comedi/comedi_fops.c if (n == 0) { n 2657 drivers/staging/comedi/comedi_fops.c n1 = min(n, async->prealloc_bufsz - rp); n 2658 drivers/staging/comedi/comedi_fops.c n2 = n - n1; n 2665 drivers/staging/comedi/comedi_fops.c n -= m; n 2669 drivers/staging/comedi/comedi_fops.c comedi_buf_read_alloc(s, n); n 2670 drivers/staging/comedi/comedi_fops.c comedi_buf_read_free(s, n); n 2672 drivers/staging/comedi/comedi_fops.c count += n; n 2673 drivers/staging/comedi/comedi_fops.c nbytes -= n; n 2675 drivers/staging/comedi/comedi_fops.c buf += n; n 599 drivers/staging/comedi/comedidev.h int n, n 972 drivers/staging/comedi/comedidev.h unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s, unsigned int n); n 973 drivers/staging/comedi/comedidev.h unsigned int comedi_buf_write_free(struct comedi_subdevice *s, unsigned int n); n 976 drivers/staging/comedi/comedidev.h unsigned int comedi_buf_read_alloc(struct 
comedi_subdevice *s, unsigned int n); n 977 drivers/staging/comedi/comedidev.h unsigned int comedi_buf_read_free(struct comedi_subdevice *s, unsigned int n); n 264 drivers/staging/comedi/drivers.c for (i = 0; i < insn->n; i++) n 267 drivers/staging/comedi/drivers.c return insn->n; n 358 drivers/staging/comedi/drivers.c return insn->n; n 622 drivers/staging/comedi/drivers.c _insn.n = 2; n 168 drivers/staging/comedi/drivers/addi_apci_1032.c return insn->n; n 286 drivers/staging/comedi/drivers/addi_apci_1032.c return insn->n; n 525 drivers/staging/comedi/drivers/addi_apci_1500.c return insn->n; n 550 drivers/staging/comedi/drivers/addi_apci_1500.c return insn->n; n 565 drivers/staging/comedi/drivers/addi_apci_1500.c return insn->n; n 696 drivers/staging/comedi/drivers/addi_apci_1500.c return insn->n; n 712 drivers/staging/comedi/drivers/addi_apci_1500.c if (insn->n) n 715 drivers/staging/comedi/drivers/addi_apci_1500.c return insn->n; n 732 drivers/staging/comedi/drivers/addi_apci_1500.c for (i = 0; i < insn->n; i++) { n 741 drivers/staging/comedi/drivers/addi_apci_1500.c return insn->n; n 74 drivers/staging/comedi/drivers/addi_apci_1516.c return insn->n; n 89 drivers/staging/comedi/drivers/addi_apci_1516.c return insn->n; n 270 drivers/staging/comedi/drivers/addi_apci_1564.c return insn->n; n 285 drivers/staging/comedi/drivers/addi_apci_1564.c return insn->n; n 295 drivers/staging/comedi/drivers/addi_apci_1564.c return insn->n; n 397 drivers/staging/comedi/drivers/addi_apci_1564.c return insn->n; n 529 drivers/staging/comedi/drivers/addi_apci_1564.c return insn->n; n 540 drivers/staging/comedi/drivers/addi_apci_1564.c if (insn->n) { n 541 drivers/staging/comedi/drivers/addi_apci_1564.c unsigned int val = data[insn->n - 1]; n 546 drivers/staging/comedi/drivers/addi_apci_1564.c return insn->n; n 558 drivers/staging/comedi/drivers/addi_apci_1564.c for (i = 0; i < insn->n; i++) n 561 drivers/staging/comedi/drivers/addi_apci_1564.c return insn->n; n 611 drivers/staging/comedi/drivers/addi_apci_1564.c return insn->n; n 624 drivers/staging/comedi/drivers/addi_apci_1564.c if (insn->n) { n 625 drivers/staging/comedi/drivers/addi_apci_1564.c unsigned int val = data[insn->n - 1]; n 630 drivers/staging/comedi/drivers/addi_apci_1564.c return insn->n; n 644 drivers/staging/comedi/drivers/addi_apci_1564.c for (i = 0; i < insn->n; i++) n 647 drivers/staging/comedi/drivers/addi_apci_1564.c return insn->n; n 72 drivers/staging/comedi/drivers/addi_apci_16xx.c return insn->n; n 85 drivers/staging/comedi/drivers/addi_apci_16xx.c return insn->n; n 55 drivers/staging/comedi/drivers/addi_apci_2032.c return insn->n; n 64 drivers/staging/comedi/drivers/addi_apci_2032.c return insn->n; n 130 drivers/staging/comedi/drivers/addi_apci_2032.c unsigned int n; n 134 drivers/staging/comedi/drivers/addi_apci_2032.c for (n = 0; n < cmd->chanlist_len; n++) n 135 drivers/staging/comedi/drivers/addi_apci_2032.c enabled_isns |= 1 << CR_CHAN(cmd->chanlist[n]); n 35 drivers/staging/comedi/drivers/addi_apci_2200.c return insn->n; n 50 drivers/staging/comedi/drivers/addi_apci_2200.c return insn->n; n 733 drivers/staging/comedi/drivers/addi_apci_3120.c for (i = 0; i < insn->n; i++) { n 744 drivers/staging/comedi/drivers/addi_apci_3120.c return insn->n; n 768 drivers/staging/comedi/drivers/addi_apci_3120.c for (i = 0; i < insn->n; i++) { n 782 drivers/staging/comedi/drivers/addi_apci_3120.c return insn->n; n 795 drivers/staging/comedi/drivers/addi_apci_3120.c return insn->n; n 813 drivers/staging/comedi/drivers/addi_apci_3120.c return 
insn->n; n 890 drivers/staging/comedi/drivers/addi_apci_3120.c return insn->n; n 900 drivers/staging/comedi/drivers/addi_apci_3120.c for (i = 0; i < insn->n; i++) n 903 drivers/staging/comedi/drivers/addi_apci_3120.c return insn->n; n 131 drivers/staging/comedi/drivers/addi_apci_3501.c for (i = 0; i < insn->n; i++) { n 152 drivers/staging/comedi/drivers/addi_apci_3501.c return insn->n; n 162 drivers/staging/comedi/drivers/addi_apci_3501.c return insn->n; n 177 drivers/staging/comedi/drivers/addi_apci_3501.c return insn->n; n 264 drivers/staging/comedi/drivers/addi_apci_3501.c if (insn->n) { n 267 drivers/staging/comedi/drivers/addi_apci_3501.c for (i = 0; i < insn->n; i++) n 271 drivers/staging/comedi/drivers/addi_apci_3501.c return insn->n; n 444 drivers/staging/comedi/drivers/addi_apci_3xxx.c for (i = 0; i < insn->n; i++) { n 457 drivers/staging/comedi/drivers/addi_apci_3xxx.c return insn->n; n 619 drivers/staging/comedi/drivers/addi_apci_3xxx.c for (i = 0; i < insn->n; i++) { n 636 drivers/staging/comedi/drivers/addi_apci_3xxx.c return insn->n; n 646 drivers/staging/comedi/drivers/addi_apci_3xxx.c return insn->n; n 661 drivers/staging/comedi/drivers/addi_apci_3xxx.c return insn->n; n 694 drivers/staging/comedi/drivers/addi_apci_3xxx.c return insn->n; n 722 drivers/staging/comedi/drivers/addi_apci_3xxx.c return insn->n; n 59 drivers/staging/comedi/drivers/addi_watchdog.c return insn->n; n 70 drivers/staging/comedi/drivers/addi_watchdog.c for (i = 0; i < insn->n; i++) n 73 drivers/staging/comedi/drivers/addi_watchdog.c return insn->n; n 90 drivers/staging/comedi/drivers/addi_watchdog.c for (i = 0; i < insn->n; i++) { n 95 drivers/staging/comedi/drivers/addi_watchdog.c return insn->n; n 64 drivers/staging/comedi/drivers/adl_pci6208.c for (i = 0; i < insn->n; i++) { n 79 drivers/staging/comedi/drivers/adl_pci6208.c return insn->n; n 94 drivers/staging/comedi/drivers/adl_pci6208.c return insn->n; n 107 drivers/staging/comedi/drivers/adl_pci6208.c return insn->n; n 131 drivers/staging/comedi/drivers/adl_pci7x3x.c return insn->n; n 143 drivers/staging/comedi/drivers/adl_pci7x3x.c return insn->n; n 40 drivers/staging/comedi/drivers/adl_pci8164.c for (i = 0; i < insn->n; i++) n 43 drivers/staging/comedi/drivers/adl_pci8164.c return insn->n; n 55 drivers/staging/comedi/drivers/adl_pci8164.c for (i = 0; i < insn->n; i++) n 58 drivers/staging/comedi/drivers/adl_pci8164.c return insn->n; n 552 drivers/staging/comedi/drivers/adl_pci9111.c for (i = 0; i < insn->n; i++) { n 578 drivers/staging/comedi/drivers/adl_pci9111.c for (i = 0; i < insn->n; i++) { n 584 drivers/staging/comedi/drivers/adl_pci9111.c return insn->n; n 594 drivers/staging/comedi/drivers/adl_pci9111.c return insn->n; n 607 drivers/staging/comedi/drivers/adl_pci9111.c return insn->n; n 1343 drivers/staging/comedi/drivers/adl_pci9118.c for (i = 0; i < insn->n; i++) { n 1357 drivers/staging/comedi/drivers/adl_pci9118.c return insn->n; n 1369 drivers/staging/comedi/drivers/adl_pci9118.c for (i = 0; i < insn->n; i++) { n 1375 drivers/staging/comedi/drivers/adl_pci9118.c return insn->n; n 1390 drivers/staging/comedi/drivers/adl_pci9118.c return insn->n; n 1409 drivers/staging/comedi/drivers/adl_pci9118.c return insn->n; n 128 drivers/staging/comedi/drivers/adq12b.c for (i = 0; i < insn->n; i++) { n 139 drivers/staging/comedi/drivers/adq12b.c return insn->n; n 149 drivers/staging/comedi/drivers/adq12b.c return insn->n; n 174 drivers/staging/comedi/drivers/adq12b.c return insn->n; n 347 drivers/staging/comedi/drivers/adv_pci1710.c for (i = 0; 
i < insn->n; i++) { n 371 drivers/staging/comedi/drivers/adv_pci1710.c return ret ? ret : insn->n; n 650 drivers/staging/comedi/drivers/adv_pci1710.c for (i = 0; i < insn->n; i++) { n 657 drivers/staging/comedi/drivers/adv_pci1710.c return insn->n; n 667 drivers/staging/comedi/drivers/adv_pci1710.c return insn->n; n 680 drivers/staging/comedi/drivers/adv_pci1710.c return insn->n; n 717 drivers/staging/comedi/drivers/adv_pci1710.c return insn->n; n 87 drivers/staging/comedi/drivers/adv_pci1720.c for (i = 0; i < insn->n; i++) { n 99 drivers/staging/comedi/drivers/adv_pci1720.c return insn->n; n 109 drivers/staging/comedi/drivers/adv_pci1720.c return insn->n; n 86 drivers/staging/comedi/drivers/adv_pci1723.c for (i = 0; i < insn->n; i++) { n 93 drivers/staging/comedi/drivers/adv_pci1723.c return insn->n; n 116 drivers/staging/comedi/drivers/adv_pci1723.c return insn->n; n 129 drivers/staging/comedi/drivers/adv_pci1723.c return insn->n; n 102 drivers/staging/comedi/drivers/adv_pci1724.c for (i = 0; i < insn->n; ++i) { n 115 drivers/staging/comedi/drivers/adv_pci1724.c return insn->n; n 156 drivers/staging/comedi/drivers/adv_pci1760.c return insn->n; n 174 drivers/staging/comedi/drivers/adv_pci1760.c return insn->n; n 296 drivers/staging/comedi/drivers/adv_pci1760.c return insn->n; n 224 drivers/staging/comedi/drivers/adv_pci_dio.c return insn->n; n 239 drivers/staging/comedi/drivers/adv_pci_dio.c return insn->n; n 262 drivers/staging/comedi/drivers/adv_pci_dio.c return insn->n; n 281 drivers/staging/comedi/drivers/adv_pci_dio.c return insn->n; n 134 drivers/staging/comedi/drivers/aio_aio12_8.c for (i = 0; i < insn->n; i++) { n 152 drivers/staging/comedi/drivers/aio_aio12_8.c return insn->n; n 167 drivers/staging/comedi/drivers/aio_aio12_8.c for (i = 0; i < insn->n; i++) { n 173 drivers/staging/comedi/drivers/aio_aio12_8.c return insn->n; n 196 drivers/staging/comedi/drivers/aio_aio12_8.c return insn->n; n 152 drivers/staging/comedi/drivers/aio_iiro_16.c return insn->n; n 162 drivers/staging/comedi/drivers/aio_iiro_16.c return insn->n; n 181 drivers/staging/comedi/drivers/amplc_dio200_common.c return insn->n; n 202 drivers/staging/comedi/drivers/amplc_dio200_common.c unsigned int n; n 208 drivers/staging/comedi/drivers/amplc_dio200_common.c for (n = 0; n < cmd->chanlist_len; n++) n 209 drivers/staging/comedi/drivers/amplc_dio200_common.c isn_bits |= (1U << CR_CHAN(cmd->chanlist[n])); n 245 drivers/staging/comedi/drivers/amplc_dio200_common.c unsigned int n, ch; n 248 drivers/staging/comedi/drivers/amplc_dio200_common.c for (n = 0; n < cmd->chanlist_len; n++) { n 249 drivers/staging/comedi/drivers/amplc_dio200_common.c ch = CR_CHAN(cmd->chanlist[n]); n 251 drivers/staging/comedi/drivers/amplc_dio200_common.c val |= (1U << n); n 536 drivers/staging/comedi/drivers/amplc_dio200_common.c return insn->n; n 645 drivers/staging/comedi/drivers/amplc_dio200_common.c return insn->n; n 672 drivers/staging/comedi/drivers/amplc_dio200_common.c return insn->n; n 703 drivers/staging/comedi/drivers/amplc_dio200_common.c unsigned int n; n 705 drivers/staging/comedi/drivers/amplc_dio200_common.c for (n = 0; n < insn->n; n++) n 706 drivers/staging/comedi/drivers/amplc_dio200_common.c data[n] = dio200_read32(dev, DIO200_TS_COUNT); n 707 drivers/staging/comedi/drivers/amplc_dio200_common.c return n; n 766 drivers/staging/comedi/drivers/amplc_dio200_common.c return ret < 0 ? 
ret : insn->n; n 780 drivers/staging/comedi/drivers/amplc_dio200_common.c unsigned int n; n 787 drivers/staging/comedi/drivers/amplc_dio200_common.c for (n = 0; n < dev->n_subdevices; n++) { n 788 drivers/staging/comedi/drivers/amplc_dio200_common.c s = &dev->subdevices[n]; n 789 drivers/staging/comedi/drivers/amplc_dio200_common.c switch (board->sdtype[n]) { n 793 drivers/staging/comedi/drivers/amplc_dio200_common.c board->sdinfo[n]); n 800 drivers/staging/comedi/drivers/amplc_dio200_common.c board->sdinfo[n]); n 809 drivers/staging/comedi/drivers/amplc_dio200_common.c board->sdinfo[n]); n 64 drivers/staging/comedi/drivers/amplc_pc236_common.c return insn->n; n 56 drivers/staging/comedi/drivers/amplc_pc263.c return insn->n; n 428 drivers/staging/comedi/drivers/amplc_pci224.c for (i = 0; i < insn->n; i++) { n 434 drivers/staging/comedi/drivers/amplc_pci224.c return insn->n; n 513 drivers/staging/comedi/drivers/amplc_pci224.c unsigned int i, n; n 554 drivers/staging/comedi/drivers/amplc_pci224.c for (n = 0; n < num_scans; n++) { n 727 drivers/staging/comedi/drivers/amplc_pci230.c unsigned int n; n 801 drivers/staging/comedi/drivers/amplc_pci230.c for (n = 0; n < insn->n; n++) { n 815 drivers/staging/comedi/drivers/amplc_pci230.c data[n] = pci230_ai_read(dev); n 819 drivers/staging/comedi/drivers/amplc_pci230.c return n; n 840 drivers/staging/comedi/drivers/amplc_pci230.c for (i = 0; i < insn->n; i++) { n 846 drivers/staging/comedi/drivers/amplc_pci230.c return insn->n; n 1101 drivers/staging/comedi/drivers/amplc_pci230.c unsigned int i, n; n 1143 drivers/staging/comedi/drivers/amplc_pci230.c for (n = 0; n < num_scans; n++) { n 46 drivers/staging/comedi/drivers/amplc_pci263.c return insn->n; n 158 drivers/staging/comedi/drivers/c6xdigio.c for (i = 0; i < insn->n; i++) { n 172 drivers/staging/comedi/drivers/c6xdigio.c return insn->n; n 186 drivers/staging/comedi/drivers/c6xdigio.c for (i = 0; i < insn->n; i++) n 189 drivers/staging/comedi/drivers/c6xdigio.c return insn->n; n 201 drivers/staging/comedi/drivers/c6xdigio.c for (i = 0; i < insn->n; i++) { n 208 drivers/staging/comedi/drivers/c6xdigio.c return insn->n; n 180 drivers/staging/comedi/drivers/cb_das16_cs.c for (i = 0; i < insn->n; i++) { n 205 drivers/staging/comedi/drivers/cb_das16_cs.c for (i = 0; i < insn->n; i++) { n 241 drivers/staging/comedi/drivers/cb_das16_cs.c return insn->n; n 254 drivers/staging/comedi/drivers/cb_das16_cs.c return insn->n; n 286 drivers/staging/comedi/drivers/cb_das16_cs.c return insn->n; n 323 drivers/staging/comedi/drivers/cb_das16_cs.c return insn->n; n 335 drivers/staging/comedi/drivers/cb_pcidas.c int n; n 360 drivers/staging/comedi/drivers/cb_pcidas.c for (n = 0; n < insn->n; n++) { n 370 drivers/staging/comedi/drivers/cb_pcidas.c data[n] = inw(devpriv->pcibar2 + PCIDAS_AI_DATA_REG); n 374 drivers/staging/comedi/drivers/cb_pcidas.c return n; n 399 drivers/staging/comedi/drivers/cb_pcidas.c return insn->n; n 423 drivers/staging/comedi/drivers/cb_pcidas.c for (i = 0; i < insn->n; i++) { n 430 drivers/staging/comedi/drivers/cb_pcidas.c return insn->n; n 458 drivers/staging/comedi/drivers/cb_pcidas.c for (i = 0; i < insn->n; i++) { n 465 drivers/staging/comedi/drivers/cb_pcidas.c return insn->n; n 492 drivers/staging/comedi/drivers/cb_pcidas.c for (i = 0; i < insn->n; i++) { n 517 drivers/staging/comedi/drivers/cb_pcidas.c return insn->n; n 566 drivers/staging/comedi/drivers/cb_pcidas.c if (insn->n) { n 567 drivers/staging/comedi/drivers/cb_pcidas.c unsigned int val = data[insn->n - 1]; n 577 
drivers/staging/comedi/drivers/cb_pcidas.c return insn->n; n 603 drivers/staging/comedi/drivers/cb_pcidas.c if (insn->n) { n 604 drivers/staging/comedi/drivers/cb_pcidas.c unsigned int val = data[insn->n - 1]; n 612 drivers/staging/comedi/drivers/cb_pcidas.c return insn->n; n 636 drivers/staging/comedi/drivers/cb_pcidas.c if (insn->n) { n 637 drivers/staging/comedi/drivers/cb_pcidas.c unsigned int val = data[insn->n - 1]; n 645 drivers/staging/comedi/drivers/cb_pcidas.c return insn->n; n 1757 drivers/staging/comedi/drivers/cb_pcidas64.c unsigned int bits = 0, n; n 1857 drivers/staging/comedi/drivers/cb_pcidas64.c for (n = 0; n < insn->n; n++) { n 1871 drivers/staging/comedi/drivers/cb_pcidas64.c data[n] = readl(dev->mmio + ADC_FIFO_REG) & 0xffff; n 1873 drivers/staging/comedi/drivers/cb_pcidas64.c data[n] = readw(devpriv->main_iobase + PIPE1_READ_REG); n 1876 drivers/staging/comedi/drivers/cb_pcidas64.c return n; n 3113 drivers/staging/comedi/drivers/cb_pcidas64.c for (i = 0; i < insn->n; i++) { n 3130 drivers/staging/comedi/drivers/cb_pcidas64.c return insn->n; n 3450 drivers/staging/comedi/drivers/cb_pcidas64.c return insn->n; n 3463 drivers/staging/comedi/drivers/cb_pcidas64.c return insn->n; n 3479 drivers/staging/comedi/drivers/cb_pcidas64.c return insn->n; n 3492 drivers/staging/comedi/drivers/cb_pcidas64.c return insn->n; n 3639 drivers/staging/comedi/drivers/cb_pcidas64.c if (insn->n) { n 3640 drivers/staging/comedi/drivers/cb_pcidas64.c unsigned int val = data[insn->n - 1]; n 3648 drivers/staging/comedi/drivers/cb_pcidas64.c return insn->n; n 3692 drivers/staging/comedi/drivers/cb_pcidas64.c if (insn->n) { n 3693 drivers/staging/comedi/drivers/cb_pcidas64.c unsigned int val = data[insn->n - 1]; n 3701 drivers/staging/comedi/drivers/cb_pcidas64.c return insn->n; n 3774 drivers/staging/comedi/drivers/cb_pcidas64.c if (insn->n) { n 3777 drivers/staging/comedi/drivers/cb_pcidas64.c for (i = 0; i < insn->n; i++) n 3781 drivers/staging/comedi/drivers/cb_pcidas64.c return insn->n; n 72 drivers/staging/comedi/drivers/cb_pcidda.c #define DESELECT_CALDAC_BIT(n) (0x4 << (n)) n 321 drivers/staging/comedi/drivers/cb_pcidda.c for (i = 0; i < insn->n; i++) n 324 drivers/staging/comedi/drivers/cb_pcidda.c return insn->n; n 179 drivers/staging/comedi/drivers/cb_pcimdas.c int n; n 203 drivers/staging/comedi/drivers/cb_pcimdas.c for (n = 0; n < insn->n; n++) { n 213 drivers/staging/comedi/drivers/cb_pcimdas.c data[n] = inw(devpriv->daqio + PCIMDAS_AI_REG); n 217 drivers/staging/comedi/drivers/cb_pcimdas.c return n; n 230 drivers/staging/comedi/drivers/cb_pcimdas.c for (i = 0; i < insn->n; i++) { n 236 drivers/staging/comedi/drivers/cb_pcimdas.c return insn->n; n 251 drivers/staging/comedi/drivers/cb_pcimdas.c return insn->n; n 266 drivers/staging/comedi/drivers/cb_pcimdas.c return insn->n; n 305 drivers/staging/comedi/drivers/cb_pcimdas.c return insn->n; n 94 drivers/staging/comedi/drivers/cb_pcimdda.c for (i = 0; i < insn->n; i++) { n 111 drivers/staging/comedi/drivers/cb_pcimdda.c return insn->n; n 491 drivers/staging/comedi/drivers/comedi_8254.c for (i = 0; i < insn->n; i++) n 494 drivers/staging/comedi/drivers/comedi_8254.c return insn->n; n 508 drivers/staging/comedi/drivers/comedi_8254.c if (insn->n) n 509 drivers/staging/comedi/drivers/comedi_8254.c comedi_8254_write(i8254, chan, data[insn->n - 1]); n 511 drivers/staging/comedi/drivers/comedi_8254.c return insn->n; n 552 drivers/staging/comedi/drivers/comedi_8254.c return insn->n; n 91 drivers/staging/comedi/drivers/comedi_8255.c return 
insn->n; n 139 drivers/staging/comedi/drivers/comedi_8255.c return insn->n; n 121 drivers/staging/comedi/drivers/comedi_bond.c return insn->n; n 165 drivers/staging/comedi/drivers/comedi_bond.c ret = insn->n; n 82 drivers/staging/comedi/drivers/comedi_parport.c return insn->n; n 104 drivers/staging/comedi/drivers/comedi_parport.c return insn->n; n 114 drivers/staging/comedi/drivers/comedi_parport.c return insn->n; n 133 drivers/staging/comedi/drivers/comedi_parport.c return insn->n; n 142 drivers/staging/comedi/drivers/comedi_parport.c return insn->n; n 423 drivers/staging/comedi/drivers/comedi_test.c for (i = 0; i < insn->n; i++) n 426 drivers/staging/comedi/drivers/comedi_test.c return insn->n; n 623 drivers/staging/comedi/drivers/comedi_test.c for (i = 0; i < insn->n; i++) n 626 drivers/staging/comedi/drivers/comedi_test.c return insn->n; n 40 drivers/staging/comedi/drivers/contec_pci_dio.c return insn->n; n 49 drivers/staging/comedi/drivers/contec_pci_dio.c return insn->n; n 78 drivers/staging/comedi/drivers/dac02.c for (i = 0; i < insn->n; i++) { n 99 drivers/staging/comedi/drivers/dac02.c return insn->n; n 348 drivers/staging/comedi/drivers/daqboard2000.c for (i = 0; i < insn->n; i++) { n 402 drivers/staging/comedi/drivers/daqboard2000.c for (i = 0; i < insn->n; i++) { n 415 drivers/staging/comedi/drivers/daqboard2000.c return insn->n; n 174 drivers/staging/comedi/drivers/das08.c int n; n 202 drivers/staging/comedi/drivers/das08.c for (n = 0; n < insn->n; n++) { n 218 drivers/staging/comedi/drivers/das08.c data[n] = (lsb >> 4) | (msb << 4); n 220 drivers/staging/comedi/drivers/das08.c data[n] = (msb << 8) + lsb; n 239 drivers/staging/comedi/drivers/das08.c data[n] = BIT(15) + magnitude; n 241 drivers/staging/comedi/drivers/das08.c data[n] = BIT(15) - magnitude; n 248 drivers/staging/comedi/drivers/das08.c return n; n 258 drivers/staging/comedi/drivers/das08.c return insn->n; n 278 drivers/staging/comedi/drivers/das08.c return insn->n; n 288 drivers/staging/comedi/drivers/das08.c return insn->n; n 300 drivers/staging/comedi/drivers/das08.c return insn->n; n 334 drivers/staging/comedi/drivers/das08.c for (i = 0; i < insn->n; i++) { n 340 drivers/staging/comedi/drivers/das08.c return insn->n; n 836 drivers/staging/comedi/drivers/das16.c for (i = 0; i < insn->n; i++) { n 853 drivers/staging/comedi/drivers/das16.c return insn->n; n 864 drivers/staging/comedi/drivers/das16.c for (i = 0; i < insn->n; i++) { n 875 drivers/staging/comedi/drivers/das16.c return insn->n; n 885 drivers/staging/comedi/drivers/das16.c return insn->n; n 898 drivers/staging/comedi/drivers/das16.c return insn->n; n 332 drivers/staging/comedi/drivers/das16m1.c for (i = 0; i < insn->n; i++) { n 348 drivers/staging/comedi/drivers/das16m1.c return insn->n; n 358 drivers/staging/comedi/drivers/das16m1.c return insn->n; n 371 drivers/staging/comedi/drivers/das16m1.c return insn->n; n 948 drivers/staging/comedi/drivers/das1800.c int n; n 966 drivers/staging/comedi/drivers/das1800.c for (n = 0; n < insn->n; n++) { n 977 drivers/staging/comedi/drivers/das1800.c data[n] = dpnt; n 981 drivers/staging/comedi/drivers/das1800.c return ret ? 
ret : insn->n; n 997 drivers/staging/comedi/drivers/das1800.c for (i = 0; i < insn->n; i++) { n 1018 drivers/staging/comedi/drivers/das1800.c return insn->n; n 1029 drivers/staging/comedi/drivers/das1800.c return insn->n; n 1042 drivers/staging/comedi/drivers/das1800.c return insn->n; n 400 drivers/staging/comedi/drivers/das6402.c for (i = 0; i < insn->n; i++) { n 413 drivers/staging/comedi/drivers/das6402.c return insn->n; n 448 drivers/staging/comedi/drivers/das6402.c for (i = 0; i < insn->n; i++) { n 473 drivers/staging/comedi/drivers/das6402.c return insn->n; n 499 drivers/staging/comedi/drivers/das6402.c return insn->n; n 512 drivers/staging/comedi/drivers/das6402.c return insn->n; n 549 drivers/staging/comedi/drivers/das800.c for (i = 0; i < insn->n; i++) { n 563 drivers/staging/comedi/drivers/das800.c return insn->n; n 573 drivers/staging/comedi/drivers/das800.c return insn->n; n 595 drivers/staging/comedi/drivers/das800.c return insn->n; n 219 drivers/staging/comedi/drivers/dmm32at.c for (i = 0; i < insn->n; i++) { n 230 drivers/staging/comedi/drivers/dmm32at.c return insn->n; n 459 drivers/staging/comedi/drivers/dmm32at.c for (i = 0; i < insn->n; i++) { n 479 drivers/staging/comedi/drivers/dmm32at.c return insn->n; n 454 drivers/staging/comedi/drivers/dt2801.c for (i = 0; i < insn->n; i++) { n 505 drivers/staging/comedi/drivers/dt2801.c return insn->n; n 522 drivers/staging/comedi/drivers/dt2801.c return insn->n; n 481 drivers/staging/comedi/drivers/dt2811.c for (i = 0; i < insn->n; i++) { n 492 drivers/staging/comedi/drivers/dt2811.c return insn->n; n 504 drivers/staging/comedi/drivers/dt2811.c for (i = 0; i < insn->n; i++) { n 512 drivers/staging/comedi/drivers/dt2811.c return insn->n; n 522 drivers/staging/comedi/drivers/dt2811.c return insn->n; n 535 drivers/staging/comedi/drivers/dt2811.c return insn->n; n 72 drivers/staging/comedi/drivers/dt2814.c int n, hi, lo; n 76 drivers/staging/comedi/drivers/dt2814.c for (n = 0; n < insn->n; n++) { n 88 drivers/staging/comedi/drivers/dt2814.c data[n] = (hi << 4) | (lo >> 4); n 91 drivers/staging/comedi/drivers/dt2814.c return n; n 79 drivers/staging/comedi/drivers/dt2815.c for (i = 0; i < insn->n; i++) n 94 drivers/staging/comedi/drivers/dt2815.c for (i = 0; i < insn->n; i++) { n 67 drivers/staging/comedi/drivers/dt2817.c return insn->n; n 98 drivers/staging/comedi/drivers/dt2817.c return insn->n; n 316 drivers/staging/comedi/drivers/dt282x.c static int dt282x_prep_ai_dma(struct comedi_device *dev, int dma_index, int n) n 325 drivers/staging/comedi/drivers/dt282x.c if (n == 0) n 326 drivers/staging/comedi/drivers/dt282x.c n = desc->maxsize; n 327 drivers/staging/comedi/drivers/dt282x.c if (n > devpriv->ntrig * 2) n 328 drivers/staging/comedi/drivers/dt282x.c n = devpriv->ntrig * 2; n 329 drivers/staging/comedi/drivers/dt282x.c devpriv->ntrig -= n / 2; n 331 drivers/staging/comedi/drivers/dt282x.c desc->size = n; n 336 drivers/staging/comedi/drivers/dt282x.c return n; n 339 drivers/staging/comedi/drivers/dt282x.c static int dt282x_prep_ao_dma(struct comedi_device *dev, int dma_index, int n) n 345 drivers/staging/comedi/drivers/dt282x.c desc->size = n; n 350 drivers/staging/comedi/drivers/dt282x.c return n; n 566 drivers/staging/comedi/drivers/dt282x.c static void dt282x_load_changain(struct comedi_device *dev, int n, n 572 drivers/staging/comedi/drivers/dt282x.c outw(DT2821_CHANCSR_LLE | DT2821_CHANCSR_NUMB(n), n 574 drivers/staging/comedi/drivers/dt282x.c for (i = 0; i < n; i++) { n 583 drivers/staging/comedi/drivers/dt282x.c 
outw(DT2821_CHANCSR_NUMB(n), dev->iobase + DT2821_CHANCSR_REG); n 638 drivers/staging/comedi/drivers/dt282x.c for (i = 0; i < insn->n; i++) { n 806 drivers/staging/comedi/drivers/dt282x.c for (i = 0; i < insn->n; i++) { n 822 drivers/staging/comedi/drivers/dt282x.c return insn->n; n 971 drivers/staging/comedi/drivers/dt282x.c return insn->n; n 1001 drivers/staging/comedi/drivers/dt282x.c return insn->n; n 518 drivers/staging/comedi/drivers/dt3000.c for (i = 0; i < insn->n; i++) n 533 drivers/staging/comedi/drivers/dt3000.c for (i = 0; i < insn->n; i++) { n 539 drivers/staging/comedi/drivers/dt3000.c return insn->n; n 574 drivers/staging/comedi/drivers/dt3000.c return insn->n; n 587 drivers/staging/comedi/drivers/dt3000.c return insn->n; n 598 drivers/staging/comedi/drivers/dt3000.c for (i = 0; i < insn->n; i++) { n 556 drivers/staging/comedi/drivers/dt9812.c return insn->n; n 569 drivers/staging/comedi/drivers/dt9812.c return insn->n; n 582 drivers/staging/comedi/drivers/dt9812.c for (i = 0; i < insn->n; i++) { n 589 drivers/staging/comedi/drivers/dt9812.c return insn->n; n 615 drivers/staging/comedi/drivers/dt9812.c for (i = 0; i < insn->n; i++) { n 626 drivers/staging/comedi/drivers/dt9812.c return insn->n; n 68 drivers/staging/comedi/drivers/dyna_pci10xx.c int n; n 79 drivers/staging/comedi/drivers/dyna_pci10xx.c for (n = 0; n < insn->n; n++) { n 93 drivers/staging/comedi/drivers/dyna_pci10xx.c data[n] = d; n 98 drivers/staging/comedi/drivers/dyna_pci10xx.c return ret ? ret : n; n 108 drivers/staging/comedi/drivers/dyna_pci10xx.c int n; n 111 drivers/staging/comedi/drivers/dyna_pci10xx.c for (n = 0; n < insn->n; n++) { n 114 drivers/staging/comedi/drivers/dyna_pci10xx.c outw_p(data[n], dev->iobase); n 118 drivers/staging/comedi/drivers/dyna_pci10xx.c return n; n 139 drivers/staging/comedi/drivers/dyna_pci10xx.c return insn->n; n 159 drivers/staging/comedi/drivers/dyna_pci10xx.c return insn->n; n 61 drivers/staging/comedi/drivers/fl512.c for (i = 0; i < insn->n; i++) { n 74 drivers/staging/comedi/drivers/fl512.c return insn->n; n 86 drivers/staging/comedi/drivers/fl512.c for (i = 0; i < insn->n; i++) { n 96 drivers/staging/comedi/drivers/fl512.c return insn->n; n 476 drivers/staging/comedi/drivers/gsc_hpdi.c return insn->n; n 111 drivers/staging/comedi/drivers/icp_multi.c int n; n 123 drivers/staging/comedi/drivers/icp_multi.c for (n = 0; n < insn->n; n++) { n 135 drivers/staging/comedi/drivers/icp_multi.c data[n] = (readw(dev->mmio + ICP_MULTI_AI) >> 4) & 0x0fff; n 138 drivers/staging/comedi/drivers/icp_multi.c return ret ? 
ret : n; n 169 drivers/staging/comedi/drivers/icp_multi.c for (i = 0; i < insn->n; i++) { n 187 drivers/staging/comedi/drivers/icp_multi.c return insn->n; n 197 drivers/staging/comedi/drivers/icp_multi.c return insn->n; n 210 drivers/staging/comedi/drivers/icp_multi.c return insn->n; n 152 drivers/staging/comedi/drivers/ii_pci20kc.c for (i = 0; i < insn->n; i++) { n 165 drivers/staging/comedi/drivers/ii_pci20kc.c return insn->n; n 233 drivers/staging/comedi/drivers/ii_pci20kc.c for (i = 0; i < insn->n; i++) { n 250 drivers/staging/comedi/drivers/ii_pci20kc.c return insn->n; n 341 drivers/staging/comedi/drivers/ii_pci20kc.c return insn->n; n 372 drivers/staging/comedi/drivers/ii_pci20kc.c return insn->n; n 286 drivers/staging/comedi/drivers/jr3_pci.c for (i = 0; i < insn->n; i++) n 289 drivers/staging/comedi/drivers/jr3_pci.c return insn->n; n 50 drivers/staging/comedi/drivers/ke_counter.c for (i = 0; i < insn->n; i++) { n 60 drivers/staging/comedi/drivers/ke_counter.c return insn->n; n 72 drivers/staging/comedi/drivers/ke_counter.c for (i = 0; i < insn->n; i++) { n 84 drivers/staging/comedi/drivers/ke_counter.c return insn->n; n 145 drivers/staging/comedi/drivers/ke_counter.c return insn->n; n 158 drivers/staging/comedi/drivers/ke_counter.c return insn->n; n 507 drivers/staging/comedi/drivers/me4000.c for (i = 0; i < insn->n; i++) { n 523 drivers/staging/comedi/drivers/me4000.c return ret ? ret : insn->n; n 1038 drivers/staging/comedi/drivers/me4000.c return insn->n; n 1092 drivers/staging/comedi/drivers/me4000.c return insn->n; n 179 drivers/staging/comedi/drivers/me_daq.c return insn->n; n 212 drivers/staging/comedi/drivers/me_daq.c return insn->n; n 272 drivers/staging/comedi/drivers/me_daq.c for (i = 0; i < insn->n; i++) { n 292 drivers/staging/comedi/drivers/me_daq.c return ret ? 
ret : insn->n; n 326 drivers/staging/comedi/drivers/me_daq.c for (i = 0; i < insn->n; i++) { n 336 drivers/staging/comedi/drivers/me_daq.c return insn->n; n 91 drivers/staging/comedi/drivers/mf6x4.c return insn->n; n 104 drivers/staging/comedi/drivers/mf6x4.c return insn->n; n 134 drivers/staging/comedi/drivers/mf6x4.c for (i = 0; i < insn->n; i++) { n 151 drivers/staging/comedi/drivers/mf6x4.c return insn->n; n 170 drivers/staging/comedi/drivers/mf6x4.c for (i = 0; i < insn->n; i++) { n 176 drivers/staging/comedi/drivers/mf6x4.c return insn->n; n 215 drivers/staging/comedi/drivers/mpc624.c for (i = 0; i < insn->n; i++) { n 232 drivers/staging/comedi/drivers/mpc624.c return insn->n; n 111 drivers/staging/comedi/drivers/multiq3.c for (i = 0; i < insn->n; i++) { n 128 drivers/staging/comedi/drivers/multiq3.c return insn->n; n 140 drivers/staging/comedi/drivers/multiq3.c for (i = 0; i < insn->n; i++) { n 149 drivers/staging/comedi/drivers/multiq3.c return insn->n; n 158 drivers/staging/comedi/drivers/multiq3.c return insn->n; n 171 drivers/staging/comedi/drivers/multiq3.c return insn->n; n 183 drivers/staging/comedi/drivers/multiq3.c for (i = 0; i < insn->n; i++) { n 217 drivers/staging/comedi/drivers/multiq3.c return insn->n; n 248 drivers/staging/comedi/drivers/multiq3.c return insn->n; n 141 drivers/staging/comedi/drivers/ni_6527.c return insn->n; n 157 drivers/staging/comedi/drivers/ni_6527.c return insn->n; n 184 drivers/staging/comedi/drivers/ni_6527.c return insn->n; n 268 drivers/staging/comedi/drivers/ni_6527.c return insn->n; n 316 drivers/staging/comedi/drivers/ni_6527.c if (insn->n != 3) n 354 drivers/staging/comedi/drivers/ni_6527.c return insn->n; n 411 drivers/staging/comedi/drivers/ni_65xx.c return insn->n; n 467 drivers/staging/comedi/drivers/ni_65xx.c return insn->n; n 556 drivers/staging/comedi/drivers/ni_65xx.c return insn->n; n 567 drivers/staging/comedi/drivers/ni_65xx.c if (insn->n != 3) n 602 drivers/staging/comedi/drivers/ni_65xx.c return insn->n; n 558 drivers/staging/comedi/drivers/ni_660x.c return insn->n; n 733 drivers/staging/comedi/drivers/ni_660x.c return insn->n; n 91 drivers/staging/comedi/drivers/ni_670x.c for (i = 0; i < insn->n; i++) { n 101 drivers/staging/comedi/drivers/ni_670x.c return insn->n; n 114 drivers/staging/comedi/drivers/ni_670x.c return insn->n; n 130 drivers/staging/comedi/drivers/ni_670x.c return insn->n; n 582 drivers/staging/comedi/drivers/ni_at_a2150.c unsigned int n; n 613 drivers/staging/comedi/drivers/ni_at_a2150.c for (n = 0; n < 36; n++) { n 622 drivers/staging/comedi/drivers/ni_at_a2150.c for (n = 0; n < insn->n; n++) { n 627 drivers/staging/comedi/drivers/ni_at_a2150.c data[n] = inw(dev->iobase + FIFO_DATA_REG); n 628 drivers/staging/comedi/drivers/ni_at_a2150.c data[n] ^= 0x8000; n 634 drivers/staging/comedi/drivers/ni_at_a2150.c return n; n 138 drivers/staging/comedi/drivers/ni_at_ao.c for (i = 0; i < insn->n; i++) { n 150 drivers/staging/comedi/drivers/ni_at_ao.c return insn->n; n 163 drivers/staging/comedi/drivers/ni_at_ao.c return insn->n; n 196 drivers/staging/comedi/drivers/ni_at_ao.c return insn->n; n 239 drivers/staging/comedi/drivers/ni_at_ao.c if (insn->n) { n 240 drivers/staging/comedi/drivers/ni_at_ao.c unsigned int val = data[insn->n - 1]; n 262 drivers/staging/comedi/drivers/ni_at_ao.c return insn->n; n 489 drivers/staging/comedi/drivers/ni_atmio16d.c for (i = 0; i < insn->n; i++) { n 524 drivers/staging/comedi/drivers/ni_atmio16d.c for (i = 0; i < insn->n; i++) { n 535 drivers/staging/comedi/drivers/ni_atmio16d.c 
return insn->n; n 548 drivers/staging/comedi/drivers/ni_atmio16d.c return insn->n; n 577 drivers/staging/comedi/drivers/ni_atmio16d.c return insn->n; n 93 drivers/staging/comedi/drivers/ni_daq_700.c return insn->n; n 110 drivers/staging/comedi/drivers/ni_daq_700.c return insn->n; n 135 drivers/staging/comedi/drivers/ni_daq_700.c int n; n 158 drivers/staging/comedi/drivers/ni_daq_700.c for (n = 0; n < insn->n; n++) { n 179 drivers/staging/comedi/drivers/ni_daq_700.c data[n] = d; n 181 drivers/staging/comedi/drivers/ni_daq_700.c return n; n 258 drivers/staging/comedi/drivers/ni_labpc_common.c for (i = 0; i < insn->n; i++) { n 269 drivers/staging/comedi/drivers/ni_labpc_common.c return insn->n; n 937 drivers/staging/comedi/drivers/ni_labpc_common.c for (i = 0; i < insn->n; i++) n 940 drivers/staging/comedi/drivers/ni_labpc_common.c return insn->n; n 1132 drivers/staging/comedi/drivers/ni_labpc_common.c if (insn->n > 0) { n 1133 drivers/staging/comedi/drivers/ni_labpc_common.c unsigned int val = data[insn->n - 1]; n 1141 drivers/staging/comedi/drivers/ni_labpc_common.c return insn->n; n 1174 drivers/staging/comedi/drivers/ni_labpc_common.c if (insn->n > 0) { n 1175 drivers/staging/comedi/drivers/ni_labpc_common.c unsigned int val = data[insn->n - 1]; n 1185 drivers/staging/comedi/drivers/ni_labpc_common.c return insn->n; n 949 drivers/staging/comedi/drivers/ni_mio_common.c struct comedi_subdevice *s, int n) n 956 drivers/staging/comedi/drivers/ni_mio_common.c for (i = 0; i < n; i++) { n 1040 drivers/staging/comedi/drivers/ni_mio_common.c struct comedi_subdevice *s, int n) n 1049 drivers/staging/comedi/drivers/ni_mio_common.c for (i = 0; i < n / 2; i++) { n 1058 drivers/staging/comedi/drivers/ni_mio_common.c if (n % 2) { n 1068 drivers/staging/comedi/drivers/ni_mio_common.c for (i = 0; i < n / 2; i++) { n 1076 drivers/staging/comedi/drivers/ni_mio_common.c if (n % 2) { n 1085 drivers/staging/comedi/drivers/ni_mio_common.c if (n > ARRAY_SIZE(devpriv->ai_fifo_buffer)) { n 1091 drivers/staging/comedi/drivers/ni_mio_common.c for (i = 0; i < n; i++) { n 1095 drivers/staging/comedi/drivers/ni_mio_common.c comedi_buf_write_samples(s, devpriv->ai_fifo_buffer, n); n 1103 drivers/staging/comedi/drivers/ni_mio_common.c int n; n 1105 drivers/staging/comedi/drivers/ni_mio_common.c n = board->ai_fifo_depth / 2; n 1107 drivers/staging/comedi/drivers/ni_mio_common.c ni_ai_fifo_read(dev, s, n); n 1829 drivers/staging/comedi/drivers/ni_mio_common.c int i, n; n 1839 drivers/staging/comedi/drivers/ni_mio_common.c for (n = 0; n < num_adc_stages_611x; n++) { n 1844 drivers/staging/comedi/drivers/ni_mio_common.c for (n = 0; n < insn->n; n++) { n 1870 drivers/staging/comedi/drivers/ni_mio_common.c data[n] = d & 0xffff; n 1873 drivers/staging/comedi/drivers/ni_mio_common.c for (n = 0; n < insn->n; n++) { n 1898 drivers/staging/comedi/drivers/ni_mio_common.c data[n] = (((d >> 16) & 0xFFFF) + signbits) & 0xFFFF; n 1901 drivers/staging/comedi/drivers/ni_mio_common.c for (n = 0; n < insn->n; n++) { n 1916 drivers/staging/comedi/drivers/ni_mio_common.c data[n] = d; n 1920 drivers/staging/comedi/drivers/ni_mio_common.c data[n] = d & 0xffff; n 1924 drivers/staging/comedi/drivers/ni_mio_common.c return insn->n; n 2468 drivers/staging/comedi/drivers/ni_mio_common.c if (insn->n < 1) n 2700 drivers/staging/comedi/drivers/ni_mio_common.c for (i = 0; i < insn->n; i++) { n 2731 drivers/staging/comedi/drivers/ni_mio_common.c return insn->n; n 3464 drivers/staging/comedi/drivers/ni_mio_common.c return insn->n; n 3487 
drivers/staging/comedi/drivers/ni_mio_common.c return insn->n; n 3513 drivers/staging/comedi/drivers/ni_mio_common.c return insn->n; n 3526 drivers/staging/comedi/drivers/ni_mio_common.c return insn->n; n 3853 drivers/staging/comedi/drivers/ni_mio_common.c int err = insn->n; n 3856 drivers/staging/comedi/drivers/ni_mio_common.c if (insn->n != 2) n 3931 drivers/staging/comedi/drivers/ni_mio_common.c return insn->n; n 4088 drivers/staging/comedi/drivers/ni_mio_common.c for (i = 0; i < insn->n; i++) n 4091 drivers/staging/comedi/drivers/ni_mio_common.c return insn->n; n 4101 drivers/staging/comedi/drivers/ni_mio_common.c if (insn->n) { n 4102 drivers/staging/comedi/drivers/ni_mio_common.c unsigned int val = data[insn->n - 1]; n 4114 drivers/staging/comedi/drivers/ni_mio_common.c return insn->n; n 4150 drivers/staging/comedi/drivers/ni_mio_common.c return insn->n; n 4402 drivers/staging/comedi/drivers/ni_mio_common.c if (insn->n) { n 4405 drivers/staging/comedi/drivers/ni_mio_common.c data[insn->n - 1]); n 4408 drivers/staging/comedi/drivers/ni_mio_common.c return insn->n; n 4419 drivers/staging/comedi/drivers/ni_mio_common.c for (i = 0; i < insn->n; i++) n 4422 drivers/staging/comedi/drivers/ni_mio_common.c return insn->n; n 4518 drivers/staging/comedi/drivers/ni_mio_common.c if (insn->n) { n 4520 drivers/staging/comedi/drivers/ni_mio_common.c for (i = 0; i < insn->n; i++) n 4523 drivers/staging/comedi/drivers/ni_mio_common.c return insn->n; n 4534 drivers/staging/comedi/drivers/ni_mio_common.c for (i = 0; i < insn->n; i++) n 4537 drivers/staging/comedi/drivers/ni_mio_common.c return insn->n; n 4690 drivers/staging/comedi/drivers/ni_mio_common.c if (insn->n < 1) n 4731 drivers/staging/comedi/drivers/ni_mio_common.c return insn->n; n 4812 drivers/staging/comedi/drivers/ni_mio_common.c int n, retval; n 4829 drivers/staging/comedi/drivers/ni_mio_common.c for (n = 0; n < insn->n; n++) { n 4833 drivers/staging/comedi/drivers/ni_mio_common.c data[n] = sample; n 4835 drivers/staging/comedi/drivers/ni_mio_common.c return insn->n; n 5244 drivers/staging/comedi/drivers/ni_mio_common.c return insn->n; n 489 drivers/staging/comedi/drivers/ni_pcidio.c return insn->n; n 502 drivers/staging/comedi/drivers/ni_pcidio.c return insn->n; n 852 drivers/staging/comedi/drivers/ni_pcidio.c int n; n 858 drivers/staging/comedi/drivers/ni_pcidio.c for (n = 2; n >= 0; n--) { n 860 drivers/staging/comedi/drivers/ni_pcidio.c fw_file[n], n 861 drivers/staging/comedi/drivers/ni_pcidio.c pci_6534_load_fpga, n); n 862 drivers/staging/comedi/drivers/ni_pcidio.c if (ret == 0 && n == 2) n 272 drivers/staging/comedi/drivers/ni_routes.c unsigned int n; n 284 drivers/staging/comedi/drivers/ni_routes.c for (n = 0; n < valid_routes->n_route_sets; ++n) { n 285 drivers/staging/comedi/drivers/ni_routes.c struct ni_route_set *rs = &valid_routes->routes[n]; n 293 drivers/staging/comedi/drivers/ni_routes.c sort(valid_routes->routes[n].src, valid_routes->routes[n].n_src, n 1500 drivers/staging/comedi/drivers/ni_tio.c return ret ? 
ret : insn->n; n 1644 drivers/staging/comedi/drivers/ni_tio.c for (i = 0; i < insn->n; i++) { n 1659 drivers/staging/comedi/drivers/ni_tio.c return insn->n; n 1686 drivers/staging/comedi/drivers/ni_tio.c if (insn->n < 1) n 1688 drivers/staging/comedi/drivers/ni_tio.c load_val = data[insn->n - 1]; n 1719 drivers/staging/comedi/drivers/ni_tio.c return insn->n; n 352 drivers/staging/comedi/drivers/ni_usb6501.c return insn->n; n 386 drivers/staging/comedi/drivers/ni_usb6501.c return insn->n; n 414 drivers/staging/comedi/drivers/ni_usb6501.c return ret ? ret : insn->n; n 426 drivers/staging/comedi/drivers/ni_usb6501.c for (i = 0; i < insn->n; i++) { n 433 drivers/staging/comedi/drivers/ni_usb6501.c return insn->n; n 443 drivers/staging/comedi/drivers/ni_usb6501.c if (insn->n) { n 444 drivers/staging/comedi/drivers/ni_usb6501.c u32 val = data[insn->n - 1]; n 451 drivers/staging/comedi/drivers/ni_usb6501.c return insn->n; n 260 drivers/staging/comedi/drivers/pcl711.c for (i = 0; i < insn->n; i++) { n 270 drivers/staging/comedi/drivers/pcl711.c return insn->n; n 373 drivers/staging/comedi/drivers/pcl711.c for (i = 0; i < insn->n; i++) { n 379 drivers/staging/comedi/drivers/pcl711.c return insn->n; n 394 drivers/staging/comedi/drivers/pcl711.c return insn->n; n 414 drivers/staging/comedi/drivers/pcl711.c return insn->n; n 154 drivers/staging/comedi/drivers/pcl726.c return insn->n; n 241 drivers/staging/comedi/drivers/pcl726.c for (i = 0; i < insn->n; i++) { n 255 drivers/staging/comedi/drivers/pcl726.c return insn->n; n 276 drivers/staging/comedi/drivers/pcl726.c return insn->n; n 305 drivers/staging/comedi/drivers/pcl726.c return insn->n; n 234 drivers/staging/comedi/drivers/pcl730.c return insn->n; n 261 drivers/staging/comedi/drivers/pcl730.c return insn->n; n 923 drivers/staging/comedi/drivers/pcl812.c for (i = 0; i < insn->n; i++) { n 937 drivers/staging/comedi/drivers/pcl812.c return ret ? ret : insn->n; n 949 drivers/staging/comedi/drivers/pcl812.c for (i = 0; i < insn->n; i++) { n 956 drivers/staging/comedi/drivers/pcl812.c return insn->n; n 967 drivers/staging/comedi/drivers/pcl812.c return insn->n; n 982 drivers/staging/comedi/drivers/pcl812.c return insn->n; n 517 drivers/staging/comedi/drivers/pcl816.c for (i = 0; i < insn->n; i++) { n 530 drivers/staging/comedi/drivers/pcl816.c return ret ? ret : insn->n; n 541 drivers/staging/comedi/drivers/pcl816.c return insn->n; n 556 drivers/staging/comedi/drivers/pcl816.c return insn->n; n 815 drivers/staging/comedi/drivers/pcl818.c for (i = 0; i < insn->n; i++) { n 827 drivers/staging/comedi/drivers/pcl818.c return ret ? 
ret : insn->n; n 839 drivers/staging/comedi/drivers/pcl818.c for (i = 0; i < insn->n; i++) { n 848 drivers/staging/comedi/drivers/pcl818.c return insn->n; n 859 drivers/staging/comedi/drivers/pcl818.c return insn->n; n 874 drivers/staging/comedi/drivers/pcl818.c return insn->n; n 185 drivers/staging/comedi/drivers/pcm3724.c return insn->n; n 78 drivers/staging/comedi/drivers/pcmad.c for (i = 0; i < insn->n; i++) { n 100 drivers/staging/comedi/drivers/pcmad.c return insn->n; n 69 drivers/staging/comedi/drivers/pcmda12.c for (i = 0; i < insn->n; ++i) { n 83 drivers/staging/comedi/drivers/pcmda12.c return insn->n; n 274 drivers/staging/comedi/drivers/pcmmio.c return insn->n; n 293 drivers/staging/comedi/drivers/pcmmio.c return insn->n; n 580 drivers/staging/comedi/drivers/pcmmio.c for (i = 0; i < insn->n; i++) { n 597 drivers/staging/comedi/drivers/pcmmio.c return insn->n; n 646 drivers/staging/comedi/drivers/pcmmio.c for (i = 0; i < insn->n; i++) { n 662 drivers/staging/comedi/drivers/pcmmio.c return insn->n; n 244 drivers/staging/comedi/drivers/pcmuio.c return insn->n; n 263 drivers/staging/comedi/drivers/pcmuio.c return insn->n; n 146 drivers/staging/comedi/drivers/plx9080.h #define PLX_BIGEND_DMA(n) ((n) ? PLX_BIGEND_DMA1 : PLX_BIGEND_DMA0) n 300 drivers/staging/comedi/drivers/plx9080.h #define PLX_REG_MBOX(n) (0x0040 + (n) * 4) n 311 drivers/staging/comedi/drivers/plx9080.h #define PLX_REG_ALT_MBOX(n) ((n) < 2 ? 0x0078 + (n) * 4 : PLX_REG_MBOX(n)) n 357 drivers/staging/comedi/drivers/plx9080.h #define PLX_INTCSR_DMAIEN(n) ((n) ? PLX_INTCSR_DMA1IEN : PLX_INTCSR_DMA0IEN) n 365 drivers/staging/comedi/drivers/plx9080.h #define PLX_INTCSR_DMAIA(n) ((n) ? PLX_INTCSR_DMA1IA : PLX_INTCSR_DMA0IA) n 375 drivers/staging/comedi/drivers/plx9080.h #define PLX_INTCSR_ABNOTDMA(n) ((n) ? PLX_INTCSR_ABNOTDMA1 \ n 388 drivers/staging/comedi/drivers/plx9080.h #define PLX_INTCSR_MBIA(n) BIT(28 + (n)) n 456 drivers/staging/comedi/drivers/plx9080.h #define PLX_REG_DMAMODE(n) ((n) ? PLX_REG_DMAMODE1 : PLX_REG_DMAMODE0) n 496 drivers/staging/comedi/drivers/plx9080.h #define PLX_REG_DMAPADR(n) ((n) ? PLX_REG_DMAPADR1 : PLX_REG_DMAPADR0) n 501 drivers/staging/comedi/drivers/plx9080.h #define PLX_REG_DMALADR(n) ((n) ? PLX_REG_DMALADR1 : PLX_REG_DMALADR0) n 506 drivers/staging/comedi/drivers/plx9080.h #define PLX_REG_DMASIZ(n) ((n) ? PLX_REG_DMASIZ1 : PLX_REG_DMASIZ0) n 511 drivers/staging/comedi/drivers/plx9080.h #define PLX_REG_DMADPR(n) ((n) ? PLX_REG_DMADPR1 : PLX_REG_DMADPR0) n 527 drivers/staging/comedi/drivers/plx9080.h #define PLX_REG_DMACSR(n) ((n) ? PLX_REG_DMACSR1 : PLX_REG_DMACSR0) n 325 drivers/staging/comedi/drivers/quatech_daqp_cs.c for (i = 0; i < insn->n; i++) { n 344 drivers/staging/comedi/drivers/quatech_daqp_cs.c return ret ? 
ret : insn->n; n 646 drivers/staging/comedi/drivers/quatech_daqp_cs.c for (i = 0; i < insn->n; i++) { n 662 drivers/staging/comedi/drivers/quatech_daqp_cs.c return insn->n; n 677 drivers/staging/comedi/drivers/quatech_daqp_cs.c return insn->n; n 695 drivers/staging/comedi/drivers/quatech_daqp_cs.c return insn->n; n 539 drivers/staging/comedi/drivers/rtd520.c int n; n 551 drivers/staging/comedi/drivers/rtd520.c for (n = 0; n < insn->n; n++) { n 568 drivers/staging/comedi/drivers/rtd520.c data[n] = d & s->maxdata; n 572 drivers/staging/comedi/drivers/rtd520.c return n; n 1023 drivers/staging/comedi/drivers/rtd520.c for (i = 0; i < insn->n; ++i) { n 1045 drivers/staging/comedi/drivers/rtd520.c return insn->n; n 1058 drivers/staging/comedi/drivers/rtd520.c return insn->n; n 1085 drivers/staging/comedi/drivers/rtd520.c return insn->n; n 1160 drivers/staging/comedi/drivers/rtd520.c return insn->n; n 173 drivers/staging/comedi/drivers/rti800.c if (insn->n > 0) { n 182 drivers/staging/comedi/drivers/rti800.c for (i = 0; i < insn->n; i++) { n 200 drivers/staging/comedi/drivers/rti800.c return insn->n; n 214 drivers/staging/comedi/drivers/rti800.c for (i = 0; i < insn->n; i++) { n 226 drivers/staging/comedi/drivers/rti800.c return insn->n; n 235 drivers/staging/comedi/drivers/rti800.c return insn->n; n 250 drivers/staging/comedi/drivers/rti800.c return insn->n; n 52 drivers/staging/comedi/drivers/rti802.c for (i = 0; i < insn->n; i++) { n 65 drivers/staging/comedi/drivers/rti802.c return insn->n; n 203 drivers/staging/comedi/drivers/s526.c for (i = 0; i < insn->n; i++) n 206 drivers/staging/comedi/drivers/s526.c return insn->n; n 377 drivers/staging/comedi/drivers/s526.c return insn->n; n 413 drivers/staging/comedi/drivers/s526.c return insn->n; n 455 drivers/staging/comedi/drivers/s526.c for (i = 0; i < insn->n; i++) { n 469 drivers/staging/comedi/drivers/s526.c return insn->n; n 486 drivers/staging/comedi/drivers/s526.c for (i = 0; i < insn->n; i++) { n 498 drivers/staging/comedi/drivers/s526.c return insn->n; n 511 drivers/staging/comedi/drivers/s526.c return insn->n; n 547 drivers/staging/comedi/drivers/s526.c return insn->n; n 1283 drivers/staging/comedi/drivers/s626.c u16 n; n 1431 drivers/staging/comedi/drivers/s626.c for (n = 0; n < (2 * S626_RPSCLK_PER_US); n++) n 1496 drivers/staging/comedi/drivers/s626.c int n; n 1513 drivers/staging/comedi/drivers/s626.c for (n = 0; n < insn->n; n++) { n 1539 drivers/staging/comedi/drivers/s626.c if (n != 0) { n 1541 drivers/staging/comedi/drivers/s626.c data[n - 1] = s626_ai_reg_to_uint(tmp); n 1579 drivers/staging/comedi/drivers/s626.c if (n != 0) { n 1581 drivers/staging/comedi/drivers/s626.c data[n - 1] = s626_ai_reg_to_uint(tmp); n 1584 drivers/staging/comedi/drivers/s626.c return n; n 1589 drivers/staging/comedi/drivers/s626.c int n; n 1591 drivers/staging/comedi/drivers/s626.c for (n = 0; n < cmd->chanlist_len; n++) { n 1592 drivers/staging/comedi/drivers/s626.c if (CR_RANGE(cmd->chanlist[n]) == 0) n 1593 drivers/staging/comedi/drivers/s626.c ppl[n] = CR_CHAN(cmd->chanlist[n]) | S626_RANGE_5V; n 1595 drivers/staging/comedi/drivers/s626.c ppl[n] = CR_CHAN(cmd->chanlist[n]) | S626_RANGE_10V; n 1597 drivers/staging/comedi/drivers/s626.c if (n != 0) n 1598 drivers/staging/comedi/drivers/s626.c ppl[n - 1] |= S626_EOPL; n 1600 drivers/staging/comedi/drivers/s626.c return n; n 1929 drivers/staging/comedi/drivers/s626.c for (i = 0; i < insn->n; i++) { n 1942 drivers/staging/comedi/drivers/s626.c return insn->n; n 1985 drivers/staging/comedi/drivers/s626.c 
return insn->n; n 2002 drivers/staging/comedi/drivers/s626.c return insn->n; n 2045 drivers/staging/comedi/drivers/s626.c return insn->n; n 2057 drivers/staging/comedi/drivers/s626.c for (i = 0; i < insn->n; i++) { n 2069 drivers/staging/comedi/drivers/s626.c return insn->n; n 77 drivers/staging/comedi/drivers/ssv_dnp.c return insn->n; n 121 drivers/staging/comedi/drivers/ssv_dnp.c return insn->n; n 761 drivers/staging/comedi/drivers/usbdux.c for (i = 0; i < insn->n; i++) { n 778 drivers/staging/comedi/drivers/usbdux.c return ret ? ret : insn->n; n 817 drivers/staging/comedi/drivers/usbdux.c for (i = 0; i < insn->n; i++) { n 833 drivers/staging/comedi/drivers/usbdux.c return ret ? ret : insn->n; n 1015 drivers/staging/comedi/drivers/usbdux.c return insn->n; n 1050 drivers/staging/comedi/drivers/usbdux.c return ret ? ret : insn->n; n 1065 drivers/staging/comedi/drivers/usbdux.c for (i = 0; i < insn->n; i++) { n 1079 drivers/staging/comedi/drivers/usbdux.c return ret ? ret : insn->n; n 1097 drivers/staging/comedi/drivers/usbdux.c for (i = 0; i < insn->n; i++) { n 1107 drivers/staging/comedi/drivers/usbdux.c return ret ? ret : insn->n; n 1312 drivers/staging/comedi/drivers/usbdux.c if (insn->n != 1) n 1321 drivers/staging/comedi/drivers/usbdux.c return insn->n; n 770 drivers/staging/comedi/drivers/usbduxfast.c int i, j, n, actual_length; n 815 drivers/staging/comedi/drivers/usbduxfast.c for (i = 0; i < insn->n;) { n 824 drivers/staging/comedi/drivers/usbduxfast.c n = actual_length / sizeof(u16); n 825 drivers/staging/comedi/drivers/usbduxfast.c if ((n % 16) != 0) { n 830 drivers/staging/comedi/drivers/usbduxfast.c for (j = chan; (j < n) && (i < insn->n); j = j + 16) { n 838 drivers/staging/comedi/drivers/usbduxfast.c return insn->n; n 729 drivers/staging/comedi/drivers/usbduxsigma.c for (i = 0; i < insn->n; i++) { n 746 drivers/staging/comedi/drivers/usbduxsigma.c return insn->n; n 780 drivers/staging/comedi/drivers/usbduxsigma.c for (i = 0; i < insn->n; i++) { n 793 drivers/staging/comedi/drivers/usbduxsigma.c return insn->n; n 946 drivers/staging/comedi/drivers/usbduxsigma.c return insn->n; n 981 drivers/staging/comedi/drivers/usbduxsigma.c ret = insn->n; n 1155 drivers/staging/comedi/drivers/usbduxsigma.c if (insn->n != 1) n 1164 drivers/staging/comedi/drivers/usbduxsigma.c return insn->n; n 241 drivers/staging/comedi/drivers/vmk80xx.c int n; n 262 drivers/staging/comedi/drivers/vmk80xx.c for (n = 0; n < insn->n; n++) { n 267 drivers/staging/comedi/drivers/vmk80xx.c data[n] = devpriv->usb_rx_buf[reg[0]]; n 272 drivers/staging/comedi/drivers/vmk80xx.c data[n] = devpriv->usb_rx_buf[reg[0]] + 256 * n 278 drivers/staging/comedi/drivers/vmk80xx.c return n; n 290 drivers/staging/comedi/drivers/vmk80xx.c int n; n 310 drivers/staging/comedi/drivers/vmk80xx.c for (n = 0; n < insn->n; n++) { n 311 drivers/staging/comedi/drivers/vmk80xx.c devpriv->usb_tx_buf[reg] = data[n]; n 319 drivers/staging/comedi/drivers/vmk80xx.c return n; n 330 drivers/staging/comedi/drivers/vmk80xx.c int n; n 339 drivers/staging/comedi/drivers/vmk80xx.c for (n = 0; n < insn->n; n++) { n 343 drivers/staging/comedi/drivers/vmk80xx.c data[n] = devpriv->usb_rx_buf[reg + chan]; n 348 drivers/staging/comedi/drivers/vmk80xx.c return n; n 431 drivers/staging/comedi/drivers/vmk80xx.c return ret ? 
ret : insn->n; n 442 drivers/staging/comedi/drivers/vmk80xx.c int n; n 462 drivers/staging/comedi/drivers/vmk80xx.c for (n = 0; n < insn->n; n++) { n 467 drivers/staging/comedi/drivers/vmk80xx.c data[n] = devpriv->usb_rx_buf[reg[0]]; n 469 drivers/staging/comedi/drivers/vmk80xx.c data[n] = devpriv->usb_rx_buf[reg[0] * (chan + 1) + 1] n 475 drivers/staging/comedi/drivers/vmk80xx.c return n; n 512 drivers/staging/comedi/drivers/vmk80xx.c return ret ? ret : insn->n; n 525 drivers/staging/comedi/drivers/vmk80xx.c int n; n 535 drivers/staging/comedi/drivers/vmk80xx.c for (n = 0; n < insn->n; n++) { n 536 drivers/staging/comedi/drivers/vmk80xx.c debtime = data[n]; n 556 drivers/staging/comedi/drivers/vmk80xx.c return n; n 568 drivers/staging/comedi/drivers/vmk80xx.c int n; n 580 drivers/staging/comedi/drivers/vmk80xx.c for (n = 0; n < insn->n; n++) { n 584 drivers/staging/comedi/drivers/vmk80xx.c data[n] = rx_buf[reg[0]] + 4 * rx_buf[reg[1]]; n 589 drivers/staging/comedi/drivers/vmk80xx.c return n; n 601 drivers/staging/comedi/drivers/vmk80xx.c int n; n 625 drivers/staging/comedi/drivers/vmk80xx.c for (n = 0; n < insn->n; n++) { n 626 drivers/staging/comedi/drivers/vmk80xx.c tx_buf[reg[0]] = (unsigned char)(data[n] & 0x03); n 627 drivers/staging/comedi/drivers/vmk80xx.c tx_buf[reg[1]] = (unsigned char)(data[n] >> 2) & 0xff; n 635 drivers/staging/comedi/drivers/vmk80xx.c return n; n 138 drivers/staging/comedi/kcomedilib/kcomedilib_main.c insn.n = 2; n 157 drivers/staging/comedi/kcomedilib/kcomedilib_main.c insn.n = 1; n 183 drivers/staging/comedi/kcomedilib/kcomedilib_main.c insn.n = 2; n 232 drivers/staging/comedi/kcomedilib/kcomedilib_main.c int n; n 236 drivers/staging/comedi/kcomedilib/kcomedilib_main.c n = 0; n 238 drivers/staging/comedi/kcomedilib/kcomedilib_main.c n = dev->subdevices[subdevice].n_chan; n 241 drivers/staging/comedi/kcomedilib/kcomedilib_main.c return n; n 108 drivers/staging/comedi/range.c int comedi_check_chanlist(struct comedi_subdevice *s, int n, n 115 drivers/staging/comedi/range.c for (i = 0; i < n; i++) { n 141 drivers/staging/exfat/exfat_core.c sector_t s, n; n 149 drivers/staging/exfat/exfat_core.c n = p_fs->data_start_sector; n 152 drivers/staging/exfat/exfat_core.c n = s + p_fs->sectors_per_clu; n 155 drivers/staging/exfat/exfat_core.c for (; s < n; s++) { n 243 drivers/staging/exfat/exfat_nls.c int i = 0, j, n = 0; n 252 drivers/staging/exfat/exfat_nls.c n++; n 255 drivers/staging/exfat/exfat_nls.c for (; i < 8; i++, n++) { n 261 drivers/staging/exfat/exfat_nls.c *(buf + n) = *(dosname + i) + ('a' - 'A'); n 263 drivers/staging/exfat/exfat_nls.c *(buf + n) = *(dosname + i); n 266 drivers/staging/exfat/exfat_nls.c *(buf + n) = '.'; n 267 drivers/staging/exfat/exfat_nls.c n++; n 270 drivers/staging/exfat/exfat_nls.c for (i = 8; i < DOS_NAME_LENGTH; i++, n++) { n 276 drivers/staging/exfat/exfat_nls.c *(buf + n) = *(dosname + i) + ('a' - 'A'); n 278 drivers/staging/exfat/exfat_nls.c *(buf + n) = *(dosname + i); n 280 drivers/staging/exfat/exfat_nls.c *(buf + n) = '\0'; n 49 drivers/staging/fieldbus/dev_core.c const char *buf, size_t n) n 63 drivers/staging/fieldbus/dev_core.c return n; n 138 drivers/staging/fieldbus/dev_core.c int n) n 139 drivers/staging/fwserial/dma_fifo.c int dma_fifo_in(struct dma_fifo *fifo, const void *src, int n) n 148 drivers/staging/fwserial/dma_fifo.c if (n > fifo->avail) n 149 drivers/staging/fwserial/dma_fifo.c n = fifo->avail; n 150 drivers/staging/fwserial/dma_fifo.c if (n <= 0) n 154 drivers/staging/fwserial/dma_fifo.c l = min(n, 
fifo->capacity - ofs); n 156 drivers/staging/fwserial/dma_fifo.c memcpy(fifo->data, src + l, n - l); n 158 drivers/staging/fwserial/dma_fifo.c if (FAIL(fifo, addr_check(fifo->done, fifo->in, fifo->in + n) || n 159 drivers/staging/fwserial/dma_fifo.c fifo->avail < n, n 161 drivers/staging/fwserial/dma_fifo.c fifo->in, fifo->out, fifo->done, n, fifo->avail)) n 164 drivers/staging/fwserial/dma_fifo.c fifo->in += n; n 165 drivers/staging/fwserial/dma_fifo.c fifo->avail -= n; n 168 drivers/staging/fwserial/dma_fifo.c fifo->done, n, fifo->avail); n 170 drivers/staging/fwserial/dma_fifo.c return n; n 184 drivers/staging/fwserial/dma_fifo.c unsigned int len, n, ofs, l, limit; n 201 drivers/staging/fwserial/dma_fifo.c n = len; n 205 drivers/staging/fwserial/dma_fifo.c if (n > limit) { n 206 drivers/staging/fwserial/dma_fifo.c n = limit; n 208 drivers/staging/fwserial/dma_fifo.c } else if (ofs + n > fifo->guard) { n 212 drivers/staging/fwserial/dma_fifo.c fifo->out += round_up(n, fifo->align); n 217 drivers/staging/fwserial/dma_fifo.c fifo->out, fifo->done, n, len, fifo->avail); n 219 drivers/staging/fwserial/dma_fifo.c pended->len = n; n 234 drivers/staging/fwserial/dma_fifo.c return len - n; n 80 drivers/staging/fwserial/dma_fifo.h int dma_fifo_in(struct dma_fifo *fifo, const void *src, int n); n 108 drivers/staging/fwserial/fwserial.c static inline void debug_short_write(struct fwtty_port *port, int c, int n) n 112 drivers/staging/fwserial/fwserial.c if (n < c) { n 117 drivers/staging/fwserial/fwserial.c avail, c, n); n 121 drivers/staging/fwserial/fwserial.c #define debug_short_write(port, c, n) n 504 drivers/staging/fwserial/fwserial.c int n, t, c, brk = 0; n 507 drivers/staging/fwserial/fwserial.c n = (elapsed * port->cps) / HZ + 1; n 510 drivers/staging/fwserial/fwserial.c fwtty_dbg(port, "sending %d brks\n", n); n 512 drivers/staging/fwserial/fwserial.c while (n) { n 513 drivers/staging/fwserial/fwserial.c t = min(n, 16); n 516 drivers/staging/fwserial/fwserial.c n -= c; n 530 drivers/staging/fwserial/fwserial.c int c, n = len; n 534 drivers/staging/fwserial/fwserial.c fwtty_dbg(port, "%d\n", n); n 535 drivers/staging/fwserial/fwserial.c fwtty_profile_data(port->stats.reads, n); n 538 drivers/staging/fwserial/fwserial.c n = 0; n 562 drivers/staging/fwserial/fwserial.c n = 0; n 566 drivers/staging/fwserial/fwserial.c c = tty_insert_flip_string_fixed_flag(&port->port, data, TTY_NORMAL, n); n 569 drivers/staging/fwserial/fwserial.c n -= c; n 571 drivers/staging/fwserial/fwserial.c if (n) { n 588 drivers/staging/fwserial/fwserial.c port->stats.lost += n; n 705 drivers/staging/fwserial/fwserial.c int n, len; n 714 drivers/staging/fwserial/fwserial.c n = -EIO; n 719 drivers/staging/fwserial/fwserial.c n = -EALREADY; n 724 drivers/staging/fwserial/fwserial.c n = -EAGAIN; n 729 drivers/staging/fwserial/fwserial.c n = -ENOMEM; n 734 drivers/staging/fwserial/fwserial.c n = dma_fifo_out_pend(&port->tx_fifo, &txn->dma_pended); n 737 drivers/staging/fwserial/fwserial.c fwtty_dbg(port, "out: %u rem: %d\n", txn->dma_pended.len, n); n 739 drivers/staging/fwserial/fwserial.c if (n < 0) { n 741 drivers/staging/fwserial/fwserial.c if (n == -EAGAIN) { n 743 drivers/staging/fwserial/fwserial.c } else if (n == -ENODATA) { n 748 drivers/staging/fwserial/fwserial.c n); n 765 drivers/staging/fwserial/fwserial.c if (n == 0 || (!drain && n < WRITER_MINIMUM)) n 769 drivers/staging/fwserial/fwserial.c if (n >= 0 || n == -EAGAIN || n == -ENOMEM || n == -ENODATA) { n 773 drivers/staging/fwserial/fwserial.c unsigned long 
delay = (n == -ENOMEM) ? HZ : 1; n 791 drivers/staging/fwserial/fwserial.c return n; n 1096 drivers/staging/fwserial/fwserial.c int n, len; n 1102 drivers/staging/fwserial/fwserial.c n = dma_fifo_in(&port->tx_fifo, buf, c); n 1111 drivers/staging/fwserial/fwserial.c debug_short_write(port, c, n); n 1113 drivers/staging/fwserial/fwserial.c return (n < 0) ? 0 : n; n 1119 drivers/staging/fwserial/fwserial.c int n; n 1122 drivers/staging/fwserial/fwserial.c n = dma_fifo_avail(&port->tx_fifo); n 1125 drivers/staging/fwserial/fwserial.c fwtty_dbg(port, "%d\n", n); n 1127 drivers/staging/fwserial/fwserial.c return n; n 1133 drivers/staging/fwserial/fwserial.c int n; n 1136 drivers/staging/fwserial/fwserial.c n = dma_fifo_level(&port->tx_fifo); n 1139 drivers/staging/fwserial/fwserial.c fwtty_dbg(port, "%d\n", n); n 1141 drivers/staging/fwserial/fwserial.c return n; n 28 drivers/staging/fwserial/fwserial.h int n = (val) ? min(ilog2(val) + 1, DISTRIBUTION_MAX_INDEX) : 0; n 29 drivers/staging/fwserial/fwserial.h ++stat[n]; n 33 drivers/staging/fwserial/fwserial.h #define fwtty_profile_data(st, n) n 586 drivers/staging/greybus/tools/loopback_test.c int i, n, ret; n 590 drivers/staging/greybus/tools/loopback_test.c n = scandir(t->sysfs_prefix, &namelist, NULL, alphasort); n 591 drivers/staging/greybus/tools/loopback_test.c if (n < 0) { n 598 drivers/staging/greybus/tools/loopback_test.c if (n <= 2) { n 603 drivers/staging/greybus/tools/loopback_test.c for (i = 0; i < n; i++) { n 631 drivers/staging/greybus/tools/loopback_test.c for (i = 0; i < n; i++) n 347 drivers/staging/iio/adc/ad7280a.c unsigned int val, n; n 372 drivers/staging/iio/adc/ad7280a.c for (n = 0; n <= AD7280A_MAX_CHAIN; n++) { n 378 drivers/staging/iio/adc/ad7280a.c return n - 1; n 385 drivers/staging/iio/adc/ad7280a.c if (n != ad7280a_devaddr(val >> 27)) { n 566 drivers/staging/isdn/gigaset/common.c unsigned n, head, tail, bytesleft; n 580 drivers/staging/isdn/gigaset/common.c n = head - 1 - tail; n 582 drivers/staging/isdn/gigaset/common.c n = (RBUFSIZE - 1) - tail; n 584 drivers/staging/isdn/gigaset/common.c n = RBUFSIZE - tail; n 585 drivers/staging/isdn/gigaset/common.c if (!n) { n 591 drivers/staging/isdn/gigaset/common.c if (n > bytesleft) n 592 drivers/staging/isdn/gigaset/common.c n = bytesleft; n 593 drivers/staging/isdn/gigaset/common.c memcpy(inbuf->data + tail, src, n); n 594 drivers/staging/isdn/gigaset/common.c bytesleft -= n; n 595 drivers/staging/isdn/gigaset/common.c tail = (tail + n) % RBUFSIZE; n 596 drivers/staging/isdn/gigaset/common.c src += n; n 661 drivers/staging/isdn/gigaset/ser-gigaset.c unsigned tail, head, n; n 680 drivers/staging/isdn/gigaset/ser-gigaset.c n = min_t(unsigned, count, RBUFSIZE - tail); n 681 drivers/staging/isdn/gigaset/ser-gigaset.c memcpy(inbuf->data + tail, buf, n); n 682 drivers/staging/isdn/gigaset/ser-gigaset.c tail = (tail + n) % RBUFSIZE; n 683 drivers/staging/isdn/gigaset/ser-gigaset.c buf += n; n 684 drivers/staging/isdn/gigaset/ser-gigaset.c count -= n; n 689 drivers/staging/isdn/gigaset/ser-gigaset.c n = head - tail - 1; n 690 drivers/staging/isdn/gigaset/ser-gigaset.c if (count > n) { n 693 drivers/staging/isdn/gigaset/ser-gigaset.c count - n); n 694 drivers/staging/isdn/gigaset/ser-gigaset.c count = n; n 701 drivers/staging/ks7010/ks7010_sdio.c unsigned int n = 0; n 722 drivers/staging/ks7010/ks7010_sdio.c memcpy(rom_buf, fw_entry->data + n, size); n 724 drivers/staging/ks7010/ks7010_sdio.c offset = n; n 739 drivers/staging/ks7010/ks7010_sdio.c n += size; n 754 
drivers/staging/ks7010/ks7010_sdio.c unsigned int n; n 781 drivers/staging/ks7010/ks7010_sdio.c for (n = 0; n < 50; ++n) { n 790 drivers/staging/ks7010/ks7010_sdio.c if ((50) <= n) { n 1237 drivers/staging/media/allegro-dvt/allegro-core.c size_t n, size_t size) n 1244 drivers/staging/media/allegro-dvt/allegro-core.c for (i = 0; i < n; i++) { n 1294 drivers/staging/media/allegro-dvt/allegro-core.c size_t n, size_t size) n 1298 drivers/staging/media/allegro-dvt/allegro-core.c n, size); n 1302 drivers/staging/media/allegro-dvt/allegro-core.c size_t n, size_t size) n 1306 drivers/staging/media/allegro-dvt/allegro-core.c n, PAGE_ALIGN(size)); n 1310 drivers/staging/media/allegro-dvt/allegro-core.c void *dest, size_t n) n 1390 drivers/staging/media/allegro-dvt/allegro-core.c size = nal_h264_write_sps(&dev->plat_dev->dev, dest, n, sps); n 1398 drivers/staging/media/allegro-dvt/allegro-core.c void *dest, size_t n) n 1427 drivers/staging/media/allegro-dvt/allegro-core.c size = nal_h264_write_pps(&dev->plat_dev->dev, dest, n, pps); n 40 drivers/staging/media/allegro-dvt/nal-h264.c int (*rbsp_bits)(struct rbsp *rbsp, int n, unsigned int *val); n 158 drivers/staging/media/allegro-dvt/nal-h264.c static int rbsp_read_bits(struct rbsp *rbsp, int n, unsigned int *value); n 159 drivers/staging/media/allegro-dvt/nal-h264.c static int rbsp_write_bits(struct rbsp *rbsp, int n, unsigned int value); n 248 drivers/staging/media/allegro-dvt/nal-h264.c static inline int rbsp_read_bits(struct rbsp *rbsp, int n, unsigned int *value) n 254 drivers/staging/media/allegro-dvt/nal-h264.c if (n > 8 * sizeof(*value)) n 257 drivers/staging/media/allegro-dvt/nal-h264.c for (i = n; i > 0; i--) { n 270 drivers/staging/media/allegro-dvt/nal-h264.c static int rbsp_write_bits(struct rbsp *rbsp, int n, unsigned int value) n 274 drivers/staging/media/allegro-dvt/nal-h264.c if (n > 8 * sizeof(value)) n 277 drivers/staging/media/allegro-dvt/nal-h264.c while (n--) { n 278 drivers/staging/media/allegro-dvt/nal-h264.c ret = rbsp_write_bit(rbsp, (value >> n) & 1); n 365 drivers/staging/media/allegro-dvt/nal-h264.c static int __rbsp_write_bits(struct rbsp *rbsp, int n, unsigned int *value) n 367 drivers/staging/media/allegro-dvt/nal-h264.c return rbsp_write_bits(rbsp, n, *value); n 402 drivers/staging/media/allegro-dvt/nal-h264.c static inline void rbsp_bits(struct rbsp *rbsp, int n, int *value) n 406 drivers/staging/media/allegro-dvt/nal-h264.c rbsp->error = rbsp->ops->rbsp_bits(rbsp, n, value); n 750 drivers/staging/media/allegro-dvt/nal-h264.c void *dest, size_t n, struct nal_h264_sps *sps) n 760 drivers/staging/media/allegro-dvt/nal-h264.c rbsp_init(&rbsp, dest, n, &write); n 791 drivers/staging/media/allegro-dvt/nal-h264.c struct nal_h264_sps *sps, void *src, size_t n) n 801 drivers/staging/media/allegro-dvt/nal-h264.c rbsp_init(&rbsp, src, n, &read); n 841 drivers/staging/media/allegro-dvt/nal-h264.c void *dest, size_t n, struct nal_h264_pps *pps) n 851 drivers/staging/media/allegro-dvt/nal-h264.c rbsp_init(&rbsp, dest, n, &write); n 883 drivers/staging/media/allegro-dvt/nal-h264.c struct nal_h264_pps *pps, void *src, size_t n) n 890 drivers/staging/media/allegro-dvt/nal-h264.c rbsp_init(&rbsp, src, n, &read); n 925 drivers/staging/media/allegro-dvt/nal-h264.c ssize_t nal_h264_write_filler(const struct device *dev, void *dest, size_t n) n 935 drivers/staging/media/allegro-dvt/nal-h264.c rbsp_init(&rbsp, dest, n, &write); n 968 drivers/staging/media/allegro-dvt/nal-h264.c ssize_t nal_h264_read_filler(const struct device *dev, void 
*src, size_t n) n 978 drivers/staging/media/allegro-dvt/nal-h264.c rbsp_init(&rbsp, src, n, &read); n 194 drivers/staging/media/allegro-dvt/nal-h264.h void *dest, size_t n, struct nal_h264_sps *sps); n 196 drivers/staging/media/allegro-dvt/nal-h264.h struct nal_h264_sps *sps, void *src, size_t n); n 200 drivers/staging/media/allegro-dvt/nal-h264.h void *dest, size_t n, struct nal_h264_pps *pps); n 202 drivers/staging/media/allegro-dvt/nal-h264.h struct nal_h264_pps *pps, void *src, size_t n); n 205 drivers/staging/media/allegro-dvt/nal-h264.h ssize_t nal_h264_write_filler(const struct device *dev, void *dest, size_t n); n 206 drivers/staging/media/allegro-dvt/nal-h264.h ssize_t nal_h264_read_filler(const struct device *dev, void *src, size_t n); n 24 drivers/staging/media/hantro/hantro_mpeg2.c int i, n; n 30 drivers/staging/media/hantro/hantro_mpeg2.c n = zigzag[i]; n 31 drivers/staging/media/hantro/hantro_mpeg2.c qtable[n + 0] = ctrl->intra_quantiser_matrix[i]; n 32 drivers/staging/media/hantro/hantro_mpeg2.c qtable[n + 64] = ctrl->non_intra_quantiser_matrix[i]; n 33 drivers/staging/media/hantro/hantro_mpeg2.c qtable[n + 128] = ctrl->chroma_intra_quantiser_matrix[i]; n 34 drivers/staging/media/hantro/hantro_mpeg2.c qtable[n + 192] = ctrl->chroma_non_intra_quantiser_matrix[i]; n 17 drivers/staging/media/imx/imx-media-dev-common.c static inline struct imx_media_dev *notifier2dev(struct v4l2_async_notifier *n) n 19 drivers/staging/media/imx/imx-media-dev-common.c return container_of(n, struct imx_media_dev, notifier); n 15 drivers/staging/media/imx/imx-media-dev.c static inline struct imx_media_dev *notifier2dev(struct v4l2_async_notifier *n) n 17 drivers/staging/media/imx/imx-media-dev.c return container_of(n, struct imx_media_dev, notifier); n 65 drivers/staging/media/imx/imx6-mipi-csi2.c #define PHY_STOPSTATEDATA(n) BIT(PHY_STOPSTATEDATA_BIT + (n)) n 19 drivers/staging/media/ipu3/ipu3-abi.h #define IMGU_SP_PMEM_BASE(n) (0x20000 + (n) * 0x4000) n 114 drivers/staging/media/ipu3/ipu3-abi.h #define IMGU_REG_GP_IRQ(n) (IMGU_REG_BASE + (n) * 4 + 0x50c) /* n = 0..4 */ n 151 drivers/staging/media/ipu3/ipu3-abi.h #define IMGU_GP_STRMON_STAT_MOD_PORT_S2V(n) (1 << (((n) - 1) * 2 + 20)) n 154 drivers/staging/media/ipu3/ipu3-abi.h #define IMGU_GP_STRMON_STAT_ACCS_PORT_ACC(n) (1 << (((n) - 1) * 2)) n 157 drivers/staging/media/ipu3/ipu3-abi.h #define IMGU_GP_STRMON_STAT_ACCS2SP1_MON_PORT_ACC(n) (1 << (((n) - 1) * 2)) n 160 drivers/staging/media/ipu3/ipu3-abi.h #define IMGU_GP_STRMON_STAT_ACCS2SP2_MON_PORT_ACC(n) (1 << (((n) - 1) * 2)) n 165 drivers/staging/media/ipu3/ipu3-abi.h #define IMGU_REG_IRQCTRL_BASE(n) (IMGU_REG_BASE + (n) * 0x100 + 0x700) n 190 drivers/staging/media/ipu3/ipu3-abi.h #define IMGU_IRQCTRL_IRQ_GP_TIMER(n) BIT(20 + (n)) /* n=0..1 */ n 192 drivers/staging/media/ipu3/ipu3-abi.h #define IMGU_IRQCTRL_IRQ_SW_PIN(n) BIT(23 + (n)) /* n=0..4 */ n 197 drivers/staging/media/ipu3/ipu3-abi.h #define IMGU_REG_IRQCTRL_EDGE(n) (IMGU_REG_IRQCTRL_BASE(n) + 0x00) n 198 drivers/staging/media/ipu3/ipu3-abi.h #define IMGU_REG_IRQCTRL_MASK(n) (IMGU_REG_IRQCTRL_BASE(n) + 0x04) n 199 drivers/staging/media/ipu3/ipu3-abi.h #define IMGU_REG_IRQCTRL_STATUS(n) (IMGU_REG_IRQCTRL_BASE(n) + 0x08) n 200 drivers/staging/media/ipu3/ipu3-abi.h #define IMGU_REG_IRQCTRL_CLEAR(n) (IMGU_REG_IRQCTRL_BASE(n) + 0x0c) n 201 drivers/staging/media/ipu3/ipu3-abi.h #define IMGU_REG_IRQCTRL_ENABLE(n) (IMGU_REG_IRQCTRL_BASE(n) + 0x10) n 202 drivers/staging/media/ipu3/ipu3-abi.h #define IMGU_REG_IRQCTRL_EDGE_NOT_PULSE(n) 
(IMGU_REG_IRQCTRL_BASE(n) + 0x14) n 203 drivers/staging/media/ipu3/ipu3-abi.h #define IMGU_REG_IRQCTRL_STR_OUT_ENABLE(n) (IMGU_REG_IRQCTRL_BASE(n) + 0x18) n 207 drivers/staging/media/ipu3/ipu3-abi.h #define IMGU_REG_SP_DMEM_BASE(n) (IMGU_REG_BASE + (n) * 0x4000 + 0x4000) n 354 drivers/staging/media/ipu3/ipu3-abi.h #define IMGU_ABI_SP_COMM_EVENT_IRQ_MASK(n) ((n) * 4 + 0x60) n 64 drivers/staging/media/ipu3/ipu3-css-pool.c u32 n = (pool->last + 1) % IPU3_CSS_POOL_SIZE; n 66 drivers/staging/media/ipu3/ipu3-css-pool.c pool->entry[n].valid = true; n 67 drivers/staging/media/ipu3/ipu3-css-pool.c pool->last = n; n 89 drivers/staging/media/ipu3/ipu3-css-pool.c imgu_css_pool_last(struct imgu_css_pool *pool, unsigned int n) n 92 drivers/staging/media/ipu3/ipu3-css-pool.c int i = (pool->last + IPU3_CSS_POOL_SIZE - n) % IPU3_CSS_POOL_SIZE; n 94 drivers/staging/media/ipu3/ipu3-css-pool.c WARN_ON(n >= IPU3_CSS_POOL_SIZE); n 239 drivers/staging/media/meson/vdec/esparser.c struct v4l2_m2m_buffer *buf, *n; n 244 drivers/staging/media/meson/vdec/esparser.c v4l2_m2m_for_each_src_buf_safe(sess->m2m_ctx, buf, n) { n 58 drivers/staging/media/meson/vdec/vdec.c struct amvdec_buffer *tmp, *n; n 62 drivers/staging/media/meson/vdec/vdec.c list_for_each_entry_safe(tmp, n, &sess->bufs_recycle, list) { n 364 drivers/staging/media/meson/vdec/vdec.c struct amvdec_timestamp *tmp, *n; n 366 drivers/staging/media/meson/vdec/vdec.c list_for_each_entry_safe(tmp, n, &sess->timestamps, list) { n 374 drivers/staging/media/meson/vdec/vdec.c struct amvdec_buffer *tmp, *n; n 376 drivers/staging/media/meson/vdec/vdec.c list_for_each_entry_safe(tmp, n, &sess->bufs_recycle, list) { n 330 drivers/staging/media/meson/vdec/vdec_helpers.c struct amvdec_timestamp *tmp, *n; n 337 drivers/staging/media/meson/vdec/vdec_helpers.c list_for_each_entry_safe(tmp, n, &sess->timestamps, list) { n 667 drivers/staging/media/omap4iss/iss_csi2.c unsigned int n = ctx->ctxnum; n 670 drivers/staging/media/omap4iss/iss_csi2.c status = iss_reg_read(csi2->iss, csi2->regs1, CSI2_CTX_IRQSTATUS(n)); n 671 drivers/staging/media/omap4iss/iss_csi2.c iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_IRQSTATUS(n), status); n 728 drivers/staging/media/omap4iss/iss_csi2.c csi2_ctx_enable(csi2, n, 1); n 459 drivers/staging/media/soc_camera/soc_mediabus.c int n) n 463 drivers/staging/media/soc_camera/soc_mediabus.c for (i = 0; i < n; i++) n 91 drivers/staging/media/soc_camera/soc_mt9v022.c int n) n 94 drivers/staging/media/soc_camera/soc_mt9v022.c for (i = 0; i < n; i++) n 485 drivers/staging/most/dim2/hal.c u16 n; n 491 drivers/staging/most/dim2/hal.c n = buf_size / packet_length; n 493 drivers/staging/most/dim2/hal.c if (n < 2u) n 496 drivers/staging/most/dim2/hal.c return packet_length * n; n 501 drivers/staging/most/dim2/hal.c u16 n; n 508 drivers/staging/most/dim2/hal.c n = buf_size / unit; n 510 drivers/staging/most/dim2/hal.c if (n < 1u) n 513 drivers/staging/most/dim2/hal.c return unit * n; n 59 drivers/staging/most/dim2/reg.h #define DIM2_MASK(n) (~((~(u32)0) << (n))) n 20 drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c #define VX855_GPI(n) (n) n 21 drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c #define VX855_GPO(n) (NR_VX855_GPI + (n)) n 22 drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c #define VX855_GPIO(n) (NR_VX855_GPI + NR_VX855_GPO + (n)) n 224 drivers/staging/rtl8188eu/include/rtw_security.h #define ROL32(A, n) (((A) << (n)) | (((A)>>(32-(n))) & ((1UL << (n)) - 1))) n 225 drivers/staging/rtl8188eu/include/rtw_security.h #define ROR32(A, n) ROL32((A), 32-(n)) n 285 
drivers/staging/rtl8188eu/include/rtw_security.h #define S(x, n) RORc((x), (n)) n 286 drivers/staging/rtl8188eu/include/rtw_security.h #define R(x, n) (((x)&0xFFFFFFFFUL)>>(n)) n 37 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c struct list_head *ptr, *n; n 40 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c for (ptr = ieee->crypt_deinit_list.next, n = ptr->next; n 41 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c ptr != &ieee->crypt_deinit_list; ptr = n, n = ptr->next) { n 219 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c struct list_head *ptr, *n; n 224 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c for (ptr = hcrypt->algs.next, n = ptr->next; ptr != &hcrypt->algs; n 225 drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c ptr = n, n = ptr->next) { n 500 drivers/staging/rtl8192u/r8192U_core.c int i, n, max = 0xff; n 505 drivers/staging/rtl8192u/r8192U_core.c for (n = 0; n <= max;) { n 506 drivers/staging/rtl8192u/r8192U_core.c seq_printf(m, "\nD: %2x > ", n); n 508 drivers/staging/rtl8192u/r8192U_core.c for (i = 0; i < 16 && n <= max; i++, n++) { n 509 drivers/staging/rtl8192u/r8192U_core.c read_nic_byte(dev, 0x000 | n, &byte_rd); n 515 drivers/staging/rtl8192u/r8192U_core.c for (n = 0; n <= max;) { n 516 drivers/staging/rtl8192u/r8192U_core.c seq_printf(m, "\nD: %2x > ", n); n 518 drivers/staging/rtl8192u/r8192U_core.c for (i = 0; i < 16 && n <= max; i++, n++) { n 519 drivers/staging/rtl8192u/r8192U_core.c read_nic_byte(dev, 0x100 | n, &byte_rd); n 525 drivers/staging/rtl8192u/r8192U_core.c for (n = 0; n <= max;) { n 526 drivers/staging/rtl8192u/r8192U_core.c seq_printf(m, "\nD: %2x > ", n); n 528 drivers/staging/rtl8192u/r8192U_core.c for (i = 0; i < 16 && n <= max; i++, n++) { n 529 drivers/staging/rtl8192u/r8192U_core.c read_nic_byte(dev, 0x300 | n, &byte_rd); n 136 drivers/staging/rtl8712/rtl871x_ioctl_linux.c int n, i; n 143 drivers/staging/rtl8712/rtl871x_ioctl_linux.c n = sprintf(buf, "wpa_ie="); n 145 drivers/staging/rtl8712/rtl871x_ioctl_linux.c n += snprintf(buf + n, MAX_WPA_IE_LEN - n, n 147 drivers/staging/rtl8712/rtl871x_ioctl_linux.c if (n >= MAX_WPA_IE_LEN) n 163 drivers/staging/rtl8712/rtl871x_ioctl_linux.c n = sprintf(buf, "rsn_ie="); n 165 drivers/staging/rtl8712/rtl871x_ioctl_linux.c n += snprintf(buf + n, MAX_WPA_IE_LEN - n, n 167 drivers/staging/rtl8712/rtl871x_ioctl_linux.c if (n >= MAX_WPA_IE_LEN) n 188 drivers/staging/rtl8712/rtl871x_security.h #define ROL32(A, n) (((A) << (n)) | (((A)>>(32-(n))) & ((1UL << (n)) - 1))) n 189 drivers/staging/rtl8712/rtl871x_security.h #define ROR32(A, n) ROL32((A), 32 - (n)) n 1758 drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c static s32 up, dn, m, n, WaitCount; n 1785 drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c n = 3; n 1806 drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c n = 3; n 1830 drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c if (up >= n) { /* if retry count is 0 for n consecutive 2-second periods, widen the WiFi duration */ n 1832 drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c n = 3; n 1858 drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c n = 3*m; n 1874 drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c n = 3*m; n 1597 drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c static s32 up, dn, m, n, WaitCount; n 1653 drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c n = 3; n 1665 drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c up, dn, m, n, WaitCount n 1678 drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c if (up >= n) { /* if retry count is 0 for n consecutive 2-second periods, widen the WiFi duration */ n 1680 drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c n = 3; n
1702 drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c n = 3*m; n 1718 drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c n = 3*m; n 412 drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c u32 i, n; n 423 drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c n = NR_RECVBUFF * sizeof(struct recv_buf) + 4; n 424 drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c precvpriv->pallocated_recv_buf = rtw_zmalloc(n); n 478 drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c n = precvpriv->free_recv_buf_queue_cnt; n 480 drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c for (i = 0; i < n ; i++) { n 15 drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c u32 n = 0; n 29 drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c if ((++n % 60) == 0) { n 30 drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c if ((n % 300) == 0) { n 32 drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c __func__, n, pHalData->SdioTxOQTFreeSpace, agg_num); n 254 drivers/staging/rtl8723bs/hal/sdio_ops.c u32 n; n 257 drivers/staging/rtl8723bs/hal/sdio_ops.c n = cnt + shift; n 258 drivers/staging/rtl8723bs/hal/sdio_ops.c tmpbuf = rtw_malloc(n); n 262 drivers/staging/rtl8723bs/hal/sdio_ops.c err = sd_read(intfhdl, ftaddr, n, tmpbuf); n 357 drivers/staging/rtl8723bs/hal/sdio_ops.c u32 n; n 360 drivers/staging/rtl8723bs/hal/sdio_ops.c n = cnt + shift; n 361 drivers/staging/rtl8723bs/hal/sdio_ops.c tmpbuf = rtw_malloc(n); n 370 drivers/staging/rtl8723bs/hal/sdio_ops.c err = sd_write(intfhdl, ftaddr, n, tmpbuf); n 539 drivers/staging/rtl8723bs/hal/sdio_ops.c u32 n; n 550 drivers/staging/rtl8723bs/hal/sdio_ops.c n = RND4(cnt); n 551 drivers/staging/rtl8723bs/hal/sdio_ops.c tmpbuf = rtw_malloc(n); n 555 drivers/staging/rtl8723bs/hal/sdio_ops.c err = _sd_read(intfhdl, addr, n, tmpbuf); n 578 drivers/staging/rtl8723bs/hal/sdio_ops.c u32 n; n 591 drivers/staging/rtl8723bs/hal/sdio_ops.c n = RND4(cnt); n 592 drivers/staging/rtl8723bs/hal/sdio_ops.c tmpbuf = rtw_malloc(n); n 596 drivers/staging/rtl8723bs/hal/sdio_ops.c err = sd_read(intfhdl, addr, n, tmpbuf); n 273 drivers/staging/rtl8723bs/include/rtw_security.h #define ROL32(A, n) (((A) << (n)) | (((A)>>(32-(n))) & ((1UL << (n)) - 1))) n 274 drivers/staging/rtl8723bs/include/rtw_security.h #define ROR32(A, n) ROL32((A), 32-(n)) n 397 drivers/staging/rtl8723bs/include/rtw_security.h #define S(x, n) RORc((x), (n)) n 398 drivers/staging/rtl8723bs/include/rtw_security.h #define R(x, n) (((x)&0xFFFFFFFFUL)>>(n)) n 5127 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c int n = 0; /* number of args */ n 5133 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c n = priv_args[k].get_args & IW_PRIV_SIZE_MASK; n 5135 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c n = wdata.data.length; n 5146 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c for (j = 0; j < n; j++) { n 5160 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c for (j = 0; j < n; j++) { n 5174 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c memcpy(output, extra, n); n 625 drivers/staging/rts5208/rtsx_card.c u8 n = (u8)(clk - 2), min_n, max_n; n 639 drivers/staging/rts5208/rtsx_card.c if ((clk <= 2) || (n > max_n)) n 647 drivers/staging/rts5208/rtsx_card.c while ((n < min_n) && (div < max_div)) { n 648 drivers/staging/rts5208/rtsx_card.c n = (n + 2) * 2 - 2; n 651 drivers/staging/rts5208/rtsx_card.c dev_dbg(rtsx_dev(chip), "n = %d, div = %d\n", n, div); n 655 drivers/staging/rts5208/rtsx_card.c n -= 2; n 669 drivers/staging/rts5208/rtsx_card.c rtsx_add_cmd(chip, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n); n 134 drivers/staging/speakup/keyhelp.c int i, n; n 202 drivers/staging/speakup/keyhelp.c for (n = 0; 
p_keys[n]; n++) { n 203 drivers/staging/speakup/keyhelp.c val = p_keys[n]; n 204 drivers/staging/speakup/keyhelp.c if (n > 0) n 234 drivers/staging/speakup/kobjects.c int n; n 250 drivers/staging/speakup/kobjects.c for (n = 0; n <= num_keys; n++) { n 567 drivers/staging/speakup/kobjects.c rv = sprintf(buf, "%i\n", var->u.n.value); n 660 drivers/staging/speakup/kobjects.c var_data->u.n.low, var_data->u.n.high); n 670 drivers/staging/speakup/kobjects.c value = var_data->u.n.value; n 430 drivers/staging/speakup/main.c if (ch >= 0x100 || (direct && direct->u.n.value)) { n 731 drivers/staging/speakup/main.c if (ch >= 0x100 || (direct && direct->u.n.value)) { n 1275 drivers/staging/speakup/main.c {BELL_POS, .u.n = {NULL, 0, 0, 0, 0, 0, NULL} }, n 1276 drivers/staging/speakup/main.c {SPELL_DELAY, .u.n = {NULL, 0, 0, 4, 0, 0, NULL} }, n 1277 drivers/staging/speakup/main.c {ATTRIB_BLEEP, .u.n = {NULL, 1, 0, 3, 0, 0, NULL} }, n 1278 drivers/staging/speakup/main.c {BLEEPS, .u.n = {NULL, 3, 0, 3, 0, 0, NULL} }, n 1279 drivers/staging/speakup/main.c {BLEEP_TIME, .u.n = {NULL, 30, 1, 200, 0, 0, NULL} }, n 1280 drivers/staging/speakup/main.c {PUNC_LEVEL, .u.n = {NULL, 1, 0, 4, 0, 0, NULL} }, n 1281 drivers/staging/speakup/main.c {READING_PUNC, .u.n = {NULL, 1, 0, 4, 0, 0, NULL} }, n 1282 drivers/staging/speakup/main.c {CURSOR_TIME, .u.n = {NULL, 120, 50, 600, 0, 0, NULL} }, n 1286 drivers/staging/speakup/main.c {KEY_ECHO, .u.n = {NULL, 1, 0, 2, 0, 0, NULL} }, n 1438 drivers/staging/speakup/main.c jiffies + msecs_to_jiffies(cursor_timeout->u.n.value)); n 1562 drivers/staging/speakup/main.c jiffies + msecs_to_jiffies(cursor_timeout->u.n.value)); n 1863 drivers/staging/speakup/main.c var_data->u.n.value); n 2363 drivers/staging/speakup/main.c spk_vars[0].u.n.high = vc->vc_cols; n 13 drivers/staging/speakup/speakup.h #define TOGGLE_0 .u.n = {NULL, 0, 0, 1, 0, 0, NULL } n 14 drivers/staging/speakup/speakup.h #define TOGGLE_1 .u.n = {NULL, 1, 0, 1, 0, 0, NULL } n 40 drivers/staging/speakup/speakup_acntpc.c { RATE, .u.n = {"\033R%c", 9, 0, 17, 0, 0, "0123456789abcdefgh" } }, n 41 drivers/staging/speakup/speakup_acntpc.c { PITCH, .u.n = {"\033P%d", 5, 0, 9, 0, 0, NULL } }, n 42 drivers/staging/speakup/speakup_acntpc.c { VOL, .u.n = {"\033A%d", 5, 0, 9, 0, 0, NULL } }, n 43 drivers/staging/speakup/speakup_acntpc.c { TONE, .u.n = {"\033V%d", 5, 0, 9, 0, 0, NULL } }, n 44 drivers/staging/speakup/speakup_acntpc.c { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, n 180 drivers/staging/speakup/speakup_acntpc.c jiffy_delta_val = jiffy_delta->u.n.value; n 198 drivers/staging/speakup/speakup_acntpc.c full_time_val = full_time->u.n.value; n 226 drivers/staging/speakup/speakup_acntpc.c jiffy_delta_val = jiffy_delta->u.n.value; n 227 drivers/staging/speakup/speakup_acntpc.c delay_time_val = delay_time->u.n.value; n 25 drivers/staging/speakup/speakup_acntsa.c { RATE, .u.n = {"\033R%c", 9, 0, 17, 0, 0, "0123456789abcdefgh" } }, n 26 drivers/staging/speakup/speakup_acntsa.c { PITCH, .u.n = {"\033P%d", 5, 0, 9, 0, 0, NULL } }, n 27 drivers/staging/speakup/speakup_acntsa.c { VOL, .u.n = {"\033A%d", 9, 0, 9, 0, 0, NULL } }, n 28 drivers/staging/speakup/speakup_acntsa.c { TONE, .u.n = {"\033V%d", 5, 0, 9, 0, 0, NULL } }, n 29 drivers/staging/speakup/speakup_acntsa.c { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, n 30 drivers/staging/speakup/speakup_apollo.c { RATE, .u.n = {"@W%d", 6, 1, 9, 0, 0, NULL } }, n 31 drivers/staging/speakup/speakup_apollo.c { PITCH, .u.n = {"@F%x", 10, 0, 15, 0, 0, NULL } }, n 32 
drivers/staging/speakup/speakup_apollo.c { VOL, .u.n = {"@A%x", 10, 0, 15, 0, 0, NULL } }, n 33 drivers/staging/speakup/speakup_apollo.c { VOICE, .u.n = {"@V%d", 1, 1, 6, 0, 0, NULL } }, n 34 drivers/staging/speakup/speakup_apollo.c { LANG, .u.n = {"@=%d,", 1, 1, 4, 0, 0, NULL } }, n 35 drivers/staging/speakup/speakup_apollo.c { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, n 141 drivers/staging/speakup/speakup_apollo.c jiffy_delta_val = jiffy_delta->u.n.value; n 147 drivers/staging/speakup/speakup_apollo.c jiffy_delta_val = jiffy_delta->u.n.value; n 148 drivers/staging/speakup/speakup_apollo.c full_time_val = full_time->u.n.value; n 149 drivers/staging/speakup/speakup_apollo.c delay_time_val = delay_time->u.n.value; n 163 drivers/staging/speakup/speakup_apollo.c full_time_val = full_time->u.n.value; n 173 drivers/staging/speakup/speakup_apollo.c jiffy_delta_val = jiffy_delta->u.n.value; n 174 drivers/staging/speakup/speakup_apollo.c full_time_val = full_time->u.n.value; n 175 drivers/staging/speakup/speakup_apollo.c delay_time_val = delay_time->u.n.value; n 25 drivers/staging/speakup/speakup_audptr.c { RATE, .u.n = {"\x05[r%d]", 10, 0, 20, 100, -10, NULL } }, n 26 drivers/staging/speakup/speakup_audptr.c { PITCH, .u.n = {"\x05[f%d]", 80, 39, 4500, 0, 0, NULL } }, n 27 drivers/staging/speakup/speakup_audptr.c { VOL, .u.n = {"\x05[g%d]", 21, 0, 40, 0, 0, NULL } }, n 28 drivers/staging/speakup/speakup_audptr.c { TONE, .u.n = {"\x05[s%d]", 9, 0, 63, 0, 0, NULL } }, n 29 drivers/staging/speakup/speakup_audptr.c { PUNCT, .u.n = {"\x05[A%c]", 0, 0, 3, 0, 0, "nmsa" } }, n 30 drivers/staging/speakup/speakup_audptr.c { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, n 22 drivers/staging/speakup/speakup_bns.c { RATE, .u.n = {"\x05%dE", 8, 1, 16, 0, 0, NULL } }, n 23 drivers/staging/speakup/speakup_bns.c { PITCH, .u.n = {"\x05%dP", 8, 0, 16, 0, 0, NULL } }, n 24 drivers/staging/speakup/speakup_bns.c { VOL, .u.n = {"\x05%dV", 8, 0, 16, 0, 0, NULL } }, n 25 drivers/staging/speakup/speakup_bns.c { TONE, .u.n = {"\x05%dT", 8, 0, 16, 0, 0, NULL } }, n 26 drivers/staging/speakup/speakup_bns.c { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, n 44 drivers/staging/speakup/speakup_decext.c { RATE, .u.n = {"[:ra %d]", 7, 0, 9, 150, 25, NULL } }, n 45 drivers/staging/speakup/speakup_decext.c { PITCH, .u.n = {"[:dv ap %d]", 100, 0, 100, 0, 0, NULL } }, n 46 drivers/staging/speakup/speakup_decext.c { VOL, .u.n = {"[:dv gv %d]", 13, 0, 16, 0, 5, NULL } }, n 47 drivers/staging/speakup/speakup_decext.c { PUNCT, .u.n = {"[:pu %c]", 0, 0, 2, 0, 0, "nsa" } }, n 48 drivers/staging/speakup/speakup_decext.c { VOICE, .u.n = {"[:n%c]", 0, 0, 9, 0, 0, "phfdburwkv" } }, n 49 drivers/staging/speakup/speakup_decext.c { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, n 155 drivers/staging/speakup/speakup_decext.c jiffy_delta_val = jiffy_delta->u.n.value; n 174 drivers/staging/speakup/speakup_decext.c delay_time_val = delay_time->u.n.value; n 199 drivers/staging/speakup/speakup_decext.c jiffy_delta_val = jiffy_delta->u.n.value; n 200 drivers/staging/speakup/speakup_decext.c delay_time_val = delay_time->u.n.value; n 140 drivers/staging/speakup/speakup_decpc.c { RATE, .u.n = {"[:ra %d]", 9, 0, 18, 150, 25, NULL } }, n 141 drivers/staging/speakup/speakup_decpc.c { PITCH, .u.n = {"[:dv ap %d]", 80, 0, 100, 20, 0, NULL } }, n 142 drivers/staging/speakup/speakup_decpc.c { VOL, .u.n = {"[:vo se %d]", 5, 0, 9, 5, 10, NULL } }, n 143 drivers/staging/speakup/speakup_decpc.c { PUNCT, .u.n = {"[:pu %c]", 0, 0, 2, 0, 0, "nsa" } }, n 144 
drivers/staging/speakup/speakup_decpc.c { VOICE, .u.n = {"[:n%c]", 0, 0, 9, 0, 0, "phfdburwkv" } }, n 145 drivers/staging/speakup/speakup_decpc.c { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, n 373 drivers/staging/speakup/speakup_decpc.c jiffy_delta_val = jiffy_delta->u.n.value; n 392 drivers/staging/speakup/speakup_decpc.c delay_time_val = delay_time->u.n.value; n 416 drivers/staging/speakup/speakup_decpc.c jiffy_delta_val = jiffy_delta->u.n.value; n 417 drivers/staging/speakup/speakup_decpc.c delay_time_val = delay_time->u.n.value; n 46 drivers/staging/speakup/speakup_dectlk.c { RATE, .u.n = {"[:ra %d] ", 180, 75, 650, 0, 0, NULL } }, n 47 drivers/staging/speakup/speakup_dectlk.c { PITCH, .u.n = {"[:dv ap %d] ", 122, 50, 350, 0, 0, NULL } }, n 48 drivers/staging/speakup/speakup_dectlk.c { VOL, .u.n = {"[:dv g5 %d] ", 86, 60, 86, 0, 0, NULL } }, n 49 drivers/staging/speakup/speakup_dectlk.c { PUNCT, .u.n = {"[:pu %c] ", 0, 0, 2, 0, 0, "nsa" } }, n 50 drivers/staging/speakup/speakup_dectlk.c { VOICE, .u.n = {"[:n%c] ", 0, 0, 9, 0, 0, "phfdburwkv" } }, n 51 drivers/staging/speakup/speakup_dectlk.c { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, n 210 drivers/staging/speakup/speakup_dectlk.c jiffy_delta_val = jiffy_delta->u.n.value; n 241 drivers/staging/speakup/speakup_dectlk.c delay_time_val = delay_time->u.n.value; n 267 drivers/staging/speakup/speakup_dectlk.c jiffy_delta_val = jiffy_delta->u.n.value; n 268 drivers/staging/speakup/speakup_dectlk.c delay_time_val = delay_time->u.n.value; n 43 drivers/staging/speakup/speakup_dtlk.c { RATE, .u.n = {"\x01%ds", 8, 0, 9, 0, 0, NULL } }, n 44 drivers/staging/speakup/speakup_dtlk.c { PITCH, .u.n = {"\x01%dp", 50, 0, 99, 0, 0, NULL } }, n 45 drivers/staging/speakup/speakup_dtlk.c { VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } }, n 46 drivers/staging/speakup/speakup_dtlk.c { TONE, .u.n = {"\x01%dx", 1, 0, 2, 0, 0, NULL } }, n 47 drivers/staging/speakup/speakup_dtlk.c { PUNCT, .u.n = {"\x01%db", 7, 0, 15, 0, 0, NULL } }, n 48 drivers/staging/speakup/speakup_dtlk.c { VOICE, .u.n = {"\x01%do", 0, 0, 7, 0, 0, NULL } }, n 49 drivers/staging/speakup/speakup_dtlk.c { FREQUENCY, .u.n = {"\x01%df", 5, 0, 9, 0, 0, NULL } }, n 50 drivers/staging/speakup/speakup_dtlk.c { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, n 194 drivers/staging/speakup/speakup_dtlk.c jiffy_delta_val = jiffy_delta->u.n.value; n 211 drivers/staging/speakup/speakup_dtlk.c delay_time_val = delay_time->u.n.value; n 227 drivers/staging/speakup/speakup_dtlk.c delay_time_val = delay_time->u.n.value; n 228 drivers/staging/speakup/speakup_dtlk.c jiffy_delta_val = jiffy_delta->u.n.value; n 25 drivers/staging/speakup/speakup_dummy.c { RATE, .u.n = {"RATE %d\n", 8, 1, 16, 0, 0, NULL } }, n 26 drivers/staging/speakup/speakup_dummy.c { PITCH, .u.n = {"PITCH %d\n", 8, 0, 16, 0, 0, NULL } }, n 27 drivers/staging/speakup/speakup_dummy.c { VOL, .u.n = {"VOL %d\n", 8, 0, 16, 0, 0, NULL } }, n 28 drivers/staging/speakup/speakup_dummy.c { TONE, .u.n = {"TONE %d\n", 8, 0, 16, 0, 0, NULL } }, n 29 drivers/staging/speakup/speakup_dummy.c { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, n 39 drivers/staging/speakup/speakup_keypc.c { RATE, .u.n = {"\04%c ", 8, 0, 10, 81, -8, NULL } }, n 40 drivers/staging/speakup/speakup_keypc.c { PITCH, .u.n = {"[f%d]", 5, 0, 9, 40, 10, NULL } }, n 41 drivers/staging/speakup/speakup_keypc.c { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, n 181 drivers/staging/speakup/speakup_keypc.c jiffy_delta_val = jiffy_delta->u.n.value; n 199 
drivers/staging/speakup/speakup_keypc.c full_time_val = full_time->u.n.value; n 232 drivers/staging/speakup/speakup_keypc.c jiffy_delta_val = jiffy_delta->u.n.value; n 233 drivers/staging/speakup/speakup_keypc.c delay_time_val = delay_time->u.n.value; n 24 drivers/staging/speakup/speakup_ltlk.c { RATE, .u.n = {"\x01%ds", 8, 0, 9, 0, 0, NULL } }, n 25 drivers/staging/speakup/speakup_ltlk.c { PITCH, .u.n = {"\x01%dp", 50, 0, 99, 0, 0, NULL } }, n 26 drivers/staging/speakup/speakup_ltlk.c { VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } }, n 27 drivers/staging/speakup/speakup_ltlk.c { TONE, .u.n = {"\x01%dx", 1, 0, 2, 0, 0, NULL } }, n 28 drivers/staging/speakup/speakup_ltlk.c { PUNCT, .u.n = {"\x01%db", 7, 0, 15, 0, 0, NULL } }, n 29 drivers/staging/speakup/speakup_ltlk.c { VOICE, .u.n = {"\x01%do", 0, 0, 7, 0, 0, NULL } }, n 30 drivers/staging/speakup/speakup_ltlk.c { FREQUENCY, .u.n = {"\x01%df", 5, 0, 9, 0, 0, NULL } }, n 31 drivers/staging/speakup/speakup_ltlk.c { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, n 40 drivers/staging/speakup/speakup_soft.c { PAUSE, .u.n = {"\x01P" } }, n 41 drivers/staging/speakup/speakup_soft.c { RATE, .u.n = {"\x01%ds", 2, 0, 9, 0, 0, NULL } }, n 42 drivers/staging/speakup/speakup_soft.c { PITCH, .u.n = {"\x01%dp", 5, 0, 9, 0, 0, NULL } }, n 43 drivers/staging/speakup/speakup_soft.c { VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } }, n 44 drivers/staging/speakup/speakup_soft.c { TONE, .u.n = {"\x01%dx", 1, 0, 2, 0, 0, NULL } }, n 45 drivers/staging/speakup/speakup_soft.c { PUNCT, .u.n = {"\x01%db", 0, 0, 2, 0, 0, NULL } }, n 46 drivers/staging/speakup/speakup_soft.c { VOICE, .u.n = {"\x01%do", 0, 0, 7, 0, 0, NULL } }, n 47 drivers/staging/speakup/speakup_soft.c { FREQUENCY, .u.n = {"\x01%df", 5, 0, 9, 0, 0, NULL } }, n 48 drivers/staging/speakup/speakup_soft.c { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, n 161 drivers/staging/speakup/speakup_soft.c cp = cp + sprintf(cp, var->u.n.synth_fmt, n 162 drivers/staging/speakup/speakup_soft.c var->u.n.value); n 24 drivers/staging/speakup/speakup_spkout.c { RATE, .u.n = {"\x05R%d", 7, 0, 9, 0, 0, NULL } }, n 25 drivers/staging/speakup/speakup_spkout.c { PITCH, .u.n = {"\x05P%d", 3, 0, 9, 0, 0, NULL } }, n 26 drivers/staging/speakup/speakup_spkout.c { VOL, .u.n = {"\x05V%d", 9, 0, 9, 0, 0, NULL } }, n 27 drivers/staging/speakup/speakup_spkout.c { TONE, .u.n = {"\x05T%c", 8, 0, 25, 65, 0, NULL } }, n 28 drivers/staging/speakup/speakup_spkout.c { PUNCT, .u.n = {"\x05M%c", 0, 0, 3, 0, 0, "nsma" } }, n 29 drivers/staging/speakup/speakup_spkout.c { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, n 22 drivers/staging/speakup/speakup_txprt.c { RATE, .u.n = {"\x05R%d", 5, 0, 9, 0, 0, NULL } }, n 23 drivers/staging/speakup/speakup_txprt.c { PITCH, .u.n = {"\x05P%d", 5, 0, 9, 0, 0, NULL } }, n 24 drivers/staging/speakup/speakup_txprt.c { VOL, .u.n = {"\x05V%d", 5, 0, 9, 0, 0, NULL } }, n 25 drivers/staging/speakup/speakup_txprt.c { TONE, .u.n = {"\x05T%c", 12, 0, 25, 61, 0, NULL } }, n 26 drivers/staging/speakup/speakup_txprt.c { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, n 73 drivers/staging/speakup/spk_priv.h int synth_request_region(unsigned long start, unsigned long n); n 74 drivers/staging/speakup/spk_priv.h int synth_release_region(unsigned long start, unsigned long n); n 132 drivers/staging/speakup/spk_types.h struct num_var_t n; n 72 drivers/staging/speakup/synth.c jiffy_delta_val = jiffy_delta->u.n.value; n 92 drivers/staging/speakup/synth.c full_time_val = full_time->u.n.value; n 106 
drivers/staging/speakup/synth.c jiffy_delta_val = jiffy_delta->u.n.value; n 107 drivers/staging/speakup/synth.c delay_time_val = delay_time->u.n.value; n 108 drivers/staging/speakup/synth.c full_time_val = full_time->u.n.value; n 191 drivers/staging/speakup/synth.c msecs_to_jiffies(trigger_time->u.n.value)); n 327 drivers/staging/speakup/synth.c int synth_request_region(unsigned long start, unsigned long n) n 334 drivers/staging/speakup/synth.c synth_res.end = start + n - 1; n 340 drivers/staging/speakup/synth.c int synth_release_region(unsigned long start, unsigned long n) n 347 drivers/staging/speakup/synth.c { DELAY, .u.n = {NULL, 100, 100, 2000, 0, 0, NULL } }, n 348 drivers/staging/speakup/synth.c { TRIGGER, .u.n = {NULL, 20, 10, 2000, 0, 0, NULL } }, n 349 drivers/staging/speakup/synth.c { JIFFY, .u.n = {NULL, 50, 20, 200, 0, 0, NULL } }, n 350 drivers/staging/speakup/synth.c { FULL, .u.n = {NULL, 400, 200, 60000, 0, 0, NULL } }, n 403 drivers/staging/speakup/synth.c synth_time_vars[0].u.n.value = n 404 drivers/staging/speakup/synth.c synth_time_vars[0].u.n.default_val = synth->delay; n 405 drivers/staging/speakup/synth.c synth_time_vars[1].u.n.value = n 406 drivers/staging/speakup/synth.c synth_time_vars[1].u.n.default_val = synth->trigger; n 407 drivers/staging/speakup/synth.c synth_time_vars[2].u.n.value = n 408 drivers/staging/speakup/synth.c synth_time_vars[2].u.n.default_val = synth->jiffies; n 409 drivers/staging/speakup/synth.c synth_time_vars[3].u.n.value = n 410 drivers/staging/speakup/synth.c synth_time_vars[3].u.n.default_val = synth->full; n 189 drivers/staging/speakup/varhandlers.c val = var_data->u.n.value; n 192 drivers/staging/speakup/varhandlers.c if (input < var_data->u.n.low || input > var_data->u.n.high) n 194 drivers/staging/speakup/varhandlers.c var_data->u.n.default_val = input; n 197 drivers/staging/speakup/varhandlers.c val = var_data->u.n.default_val; n 210 drivers/staging/speakup/varhandlers.c if (val < var_data->u.n.low || val > var_data->u.n.high) n 213 drivers/staging/speakup/varhandlers.c var_data->u.n.value = val; n 224 drivers/staging/speakup/varhandlers.c if (var_data->u.n.multiplier != 0) n 225 drivers/staging/speakup/varhandlers.c val *= var_data->u.n.multiplier; n 226 drivers/staging/speakup/varhandlers.c val += var_data->u.n.offset; n 232 drivers/staging/speakup/varhandlers.c if (!var_data->u.n.synth_fmt) n 238 drivers/staging/speakup/varhandlers.c if (!var_data->u.n.out_str) n 239 drivers/staging/speakup/varhandlers.c sprintf(cp, var_data->u.n.synth_fmt, (int)val); n 241 drivers/staging/speakup/varhandlers.c sprintf(cp, var_data->u.n.synth_fmt, n 242 drivers/staging/speakup/varhandlers.c var_data->u.n.out_str[val]); n 99 drivers/staging/uwb/include/umc.h struct umc_dev *umc_device_create(struct device *parent, int n); n 24 drivers/staging/uwb/include/whci.h #define UWBCAPDATA(n) (8*(n)) n 33 drivers/staging/uwb/include/whci.h #define UWBCAPDATA_SIZE(n) (8 + 8*(n)) n 205 drivers/staging/uwb/rsv.c dev_dbg(dev, "backoff_win_timer total_expired=%d, n=%d\n", bow->total_expired, bow->n); n 225 drivers/staging/uwb/rsv.c bow->n = prandom_u32() & (bow->window - 1); n 226 drivers/staging/uwb/rsv.c dev_dbg(dev, "new_window=%d, n=%d\n", bow->window, bow->n); n 229 drivers/staging/uwb/rsv.c timeout_us = bow->n * UWB_SUPERFRAME_LENGTH_US; n 27 drivers/staging/uwb/umc-dev.c struct umc_dev *umc_device_create(struct device *parent, int n) n 33 drivers/staging/uwb/umc-dev.c dev_set_name(&umc->dev, "%s-%d", dev_name(parent), n); n 295 drivers/staging/uwb/uwb.h u8 
n; n 91 drivers/staging/uwb/whci.c static int whci_add_cap(struct whci_card *card, int n) n 97 drivers/staging/uwb/whci.c umc = umc_device_create(&card->pci->dev, n); n 101 drivers/staging/uwb/whci.c capdata = le_readq(card->uwbbase + UWBCAPDATA(n)); n 109 drivers/staging/uwb/whci.c umc->cap_id = n == 0 ? 0 : UWBCAPDATA_TO_CAP_ID(capdata); n 114 drivers/staging/uwb/whci.c + (n == 0 ? 0x20 : UWBCAPDATA_TO_SIZE(capdata)) - 1; n 123 drivers/staging/uwb/whci.c card->devs[n] = umc; n 131 drivers/staging/uwb/whci.c static void whci_del_cap(struct whci_card *card, int n) n 133 drivers/staging/uwb/whci.c struct umc_dev *umc = card->devs[n]; n 155 drivers/staging/uwb/whci.c int err, n_caps, n; n 194 drivers/staging/uwb/whci.c for (n = 0; n <= card->n_caps; n++) { n 195 drivers/staging/uwb/whci.c err = whci_add_cap(card, n); n 196 drivers/staging/uwb/whci.c if (err < 0 && n == 0) { n 203 drivers/staging/uwb/whci.c "#%u: %d\n", n, err); n 226 drivers/staging/uwb/whci.c int n; n 231 drivers/staging/uwb/whci.c for (n = card->n_caps; n >= 0 ; n--) n 232 drivers/staging/uwb/whci.c whci_del_cap(card, n); n 135 drivers/staging/wlan-ng/p80211hdr.h #define WLAN_GET_FC_FTYPE(n) ((((u16)(n)) & GENMASK(3, 2)) >> 2) n 136 drivers/staging/wlan-ng/p80211hdr.h #define WLAN_GET_FC_FSTYPE(n) ((((u16)(n)) & GENMASK(7, 4)) >> 4) n 137 drivers/staging/wlan-ng/p80211hdr.h #define WLAN_GET_FC_TODS(n) ((((u16)(n)) & (BIT(8))) >> 8) n 138 drivers/staging/wlan-ng/p80211hdr.h #define WLAN_GET_FC_FROMDS(n) ((((u16)(n)) & (BIT(9))) >> 9) n 139 drivers/staging/wlan-ng/p80211hdr.h #define WLAN_GET_FC_ISWEP(n) ((((u16)(n)) & (BIT(14))) >> 14) n 141 drivers/staging/wlan-ng/p80211hdr.h #define WLAN_SET_FC_FTYPE(n) (((u16)(n)) << 2) n 142 drivers/staging/wlan-ng/p80211hdr.h #define WLAN_SET_FC_FSTYPE(n) (((u16)(n)) << 4) n 143 drivers/staging/wlan-ng/p80211hdr.h #define WLAN_SET_FC_TODS(n) (((u16)(n)) << 8) n 144 drivers/staging/wlan-ng/p80211hdr.h #define WLAN_SET_FC_FROMDS(n) (((u16)(n)) << 9) n 145 drivers/staging/wlan-ng/p80211hdr.h #define WLAN_SET_FC_ISWEP(n) (((u16)(n)) << 14) n 200 drivers/staging/wlan-ng/p80211mgmt.h #define WLAN_GET_MGMT_CAP_INFO_ESS(n) ((n) & BIT(0)) n 201 drivers/staging/wlan-ng/p80211mgmt.h #define WLAN_GET_MGMT_CAP_INFO_IBSS(n) (((n) & BIT(1)) >> 1) n 202 drivers/staging/wlan-ng/p80211mgmt.h #define WLAN_GET_MGMT_CAP_INFO_CFPOLLABLE(n) (((n) & BIT(2)) >> 2) n 203 drivers/staging/wlan-ng/p80211mgmt.h #define WLAN_GET_MGMT_CAP_INFO_CFPOLLREQ(n) (((n) & BIT(3)) >> 3) n 204 drivers/staging/wlan-ng/p80211mgmt.h #define WLAN_GET_MGMT_CAP_INFO_PRIVACY(n) (((n) & BIT(4)) >> 4) n 206 drivers/staging/wlan-ng/p80211mgmt.h #define WLAN_GET_MGMT_CAP_INFO_SHORT(n) (((n) & BIT(5)) >> 5) n 207 drivers/staging/wlan-ng/p80211mgmt.h #define WLAN_GET_MGMT_CAP_INFO_PBCC(n) (((n) & BIT(6)) >> 6) n 208 drivers/staging/wlan-ng/p80211mgmt.h #define WLAN_GET_MGMT_CAP_INFO_AGILITY(n) (((n) & BIT(7)) >> 7) n 210 drivers/staging/wlan-ng/p80211mgmt.h #define WLAN_SET_MGMT_CAP_INFO_ESS(n) (n) n 211 drivers/staging/wlan-ng/p80211mgmt.h #define WLAN_SET_MGMT_CAP_INFO_IBSS(n) ((n) << 1) n 212 drivers/staging/wlan-ng/p80211mgmt.h #define WLAN_SET_MGMT_CAP_INFO_CFPOLLABLE(n) ((n) << 2) n 213 drivers/staging/wlan-ng/p80211mgmt.h #define WLAN_SET_MGMT_CAP_INFO_CFPOLLREQ(n) ((n) << 3) n 214 drivers/staging/wlan-ng/p80211mgmt.h #define WLAN_SET_MGMT_CAP_INFO_PRIVACY(n) ((n) << 4) n 216 drivers/staging/wlan-ng/p80211mgmt.h #define WLAN_SET_MGMT_CAP_INFO_SHORT(n) ((n) << 5) n 217 drivers/staging/wlan-ng/p80211mgmt.h #define 
WLAN_SET_MGMT_CAP_INFO_PBCC(n) ((n) << 6) n 218 drivers/staging/wlan-ng/p80211mgmt.h #define WLAN_SET_MGMT_CAP_INFO_AGILITY(n) ((n) << 7) n 168 drivers/staging/wlan-ng/p80211types.h #define P80211DID_MKID(s, g, i, n, t, a) (P80211DID_MKSECTION(s) | \ n 171 drivers/staging/wlan-ng/p80211types.h P80211DID_MKINDEX(n) | \ n 1131 drivers/staging/wlan-ng/prism2sta.c unsigned int i, n; n 1136 drivers/staging/wlan-ng/prism2sta.c for (i = 0, n = 0; i < HFA384x_CHINFORESULT_MAX; i++) { n 1144 drivers/staging/wlan-ng/prism2sta.c result = &inf->info.chinforesult.result[n]; n 1163 drivers/staging/wlan-ng/prism2sta.c n++; n 1167 drivers/staging/wlan-ng/prism2sta.c hw->channel_info.count = n; n 181 drivers/staging/wusbcore/crypto.c const struct aes_ccm_nonce *n, n 199 drivers/staging/wusbcore/crypto.c scratch->b0.ccm_nonce = *n; n 224 drivers/staging/wusbcore/crypto.c scratch->ax.ccm_nonce = *n; n 248 drivers/staging/wusbcore/crypto.c struct aes_ccm_nonce n = *_n; n 269 drivers/staging/wusbcore/crypto.c memcpy(&n.sfn, &sfn_le, sizeof(n.sfn)); /* n.sfn++... */ n 271 drivers/staging/wusbcore/crypto.c &n, a, b, blen); n 20 drivers/staging/wusbcore/host/whci/asl.c struct list_head *n, *p; n 24 drivers/staging/wusbcore/host/whci/asl.c n = qset->list_node.next; n 25 drivers/staging/wusbcore/host/whci/asl.c if (n == &whc->async_list) n 26 drivers/staging/wusbcore/host/whci/asl.c n = n->next; n 31 drivers/staging/wusbcore/host/whci/asl.c *next = container_of(n, struct whc_qset, list_node); n 138 drivers/staging/wusbcore/host/whci/whci-hc.h #define QH_LINK_NTDS(n) (((n) - 1) << 1) /* number of TDs in queue set */ n 303 drivers/staging/wusbcore/include/wusb.h const struct aes_ccm_nonce *n, n 307 drivers/staging/wusbcore/include/wusb.h return wusb_prf(out, out_size, key, n, a, b, blen, 64); n 311 drivers/staging/wusbcore/include/wusb.h const struct aes_ccm_nonce *n, n 315 drivers/staging/wusbcore/include/wusb.h return wusb_prf(out, out_size, key, n, a, b, blen, 128); n 319 drivers/staging/wusbcore/include/wusb.h const struct aes_ccm_nonce *n, n 323 drivers/staging/wusbcore/include/wusb.h return wusb_prf(out, out_size, key, n, a, b, blen, 256); n 329 drivers/staging/wusbcore/include/wusb.h const struct aes_ccm_nonce *n, n 333 drivers/staging/wusbcore/include/wusb.h return wusb_prf_256(keydvt_out, sizeof(*keydvt_out), key, n, &a, n 354 drivers/staging/wusbcore/include/wusb.h const struct aes_ccm_nonce *n, n 358 drivers/staging/wusbcore/include/wusb.h return wusb_prf_64(mic_out, 8, key, n, &a, n 901 drivers/target/iscsi/cxgbit/cxgbit_cm.c struct neighbour *n; n 909 drivers/target/iscsi/cxgbit/cxgbit_cm.c n = dst_neigh_lookup(dst, peer_ip); n 910 drivers/target/iscsi/cxgbit/cxgbit_cm.c if (!n) n 914 drivers/target/iscsi/cxgbit/cxgbit_cm.c if (!(n->nud_state & NUD_VALID)) n 915 drivers/target/iscsi/cxgbit/cxgbit_cm.c neigh_event_send(n, NULL); n 918 drivers/target/iscsi/cxgbit/cxgbit_cm.c if (n->dev->flags & IFF_LOOPBACK) { n 932 drivers/target/iscsi/cxgbit/cxgbit_cm.c n, ndev, 0); n 951 drivers/target/iscsi/cxgbit/cxgbit_cm.c ndev = cxgbit_get_real_dev(n->dev); n 964 drivers/target/iscsi/cxgbit/cxgbit_cm.c csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority); n 966 drivers/target/iscsi/cxgbit/cxgbit_cm.c csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0); n 992 drivers/target/iscsi/cxgbit/cxgbit_cm.c neigh_release(n); n 99 drivers/target/iscsi/cxgbit/cxgbit_target.c static inline unsigned int cxgbit_sgl_len(unsigned int n) n 101 drivers/target/iscsi/cxgbit/cxgbit_target.c n--; n 102 
drivers/target/iscsi/cxgbit/cxgbit_target.c return (3 * n) / 2 + (n & 1) + 2; n 459 drivers/tee/optee/call.c int n = 0; n 490 drivers/tee/optee/call.c pages_data->pages_list[n++] = optee_page; n 492 drivers/tee/optee/call.c if (n == PAGELIST_ENTRIES_PER_PAGE) { n 496 drivers/tee/optee/call.c n = 0; n 40 drivers/tee/optee/core.c size_t n; n 44 drivers/tee/optee/core.c for (n = 0; n < num_params; n++) { n 45 drivers/tee/optee/core.c struct tee_param *p = params + n; n 46 drivers/tee/optee/core.c const struct optee_msg_param *mp = msg_params + n; n 169 drivers/tee/optee/core.c size_t n; n 171 drivers/tee/optee/core.c for (n = 0; n < num_params; n++) { n 172 drivers/tee/optee/core.c const struct tee_param *p = params + n; n 173 drivers/tee/optee/core.c struct optee_msg_param *mp = msg_params + n; n 200 drivers/tee/optee/rpc.c size_t n; n 210 drivers/tee/optee/rpc.c for (n = 1; n < arg->num_params; n++) { n 211 drivers/tee/optee/rpc.c if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) { n 188 drivers/tee/optee/supp.c size_t n; n 197 drivers/tee/optee/supp.c for (n = 0; n < num_params; n++) n 198 drivers/tee/optee/supp.c if (tee_param_is_memref(params + n) && params[n].u.memref.shm) n 199 drivers/tee/optee/supp.c tee_shm_put(params[n].u.memref.shm); n 205 drivers/tee/optee/supp.c for (n = 0; n < num_params; n++) n 206 drivers/tee/optee/supp.c if (params[n].attr && n 207 drivers/tee/optee/supp.c params[n].attr != TEE_IOCTL_PARAM_ATTR_META) n 345 drivers/tee/optee/supp.c size_t n; n 358 drivers/tee/optee/supp.c for (n = 0; n < req->num_params; n++) { n 359 drivers/tee/optee/supp.c struct tee_param *p = req->param + n; n 364 drivers/tee/optee/supp.c p->u.value.a = param[n + num_meta].u.value.a; n 365 drivers/tee/optee/supp.c p->u.value.b = param[n + num_meta].u.value.b; n 366 drivers/tee/optee/supp.c p->u.value.c = param[n + num_meta].u.value.c; n 370 drivers/tee/optee/supp.c p->u.memref.size = param[n + num_meta].u.memref.size; n 207 drivers/tee/tee_core.c size_t n; n 209 drivers/tee/tee_core.c for (n = 0; n < num_params; n++) { n 213 drivers/tee/tee_core.c if (copy_from_user(&ip, uparams + n, sizeof(ip))) n 220 drivers/tee/tee_core.c params[n].attr = ip.attr; n 227 drivers/tee/tee_core.c params[n].u.value.a = ip.a; n 228 drivers/tee/tee_core.c params[n].u.value.b = ip.b; n 229 drivers/tee/tee_core.c params[n].u.value.c = ip.c; n 257 drivers/tee/tee_core.c params[n].u.memref.shm_offs = ip.a; n 258 drivers/tee/tee_core.c params[n].u.memref.size = ip.b; n 259 drivers/tee/tee_core.c params[n].u.memref.shm = shm; n 272 drivers/tee/tee_core.c size_t n; n 274 drivers/tee/tee_core.c for (n = 0; n < num_params; n++) { n 275 drivers/tee/tee_core.c struct tee_ioctl_param __user *up = uparams + n; n 276 drivers/tee/tee_core.c struct tee_param *p = params + n; n 301 drivers/tee/tee_core.c size_t n; n 359 drivers/tee/tee_core.c for (n = 0; n < arg.num_params; n++) n 360 drivers/tee/tee_core.c if (tee_param_is_memref(params + n) && n 361 drivers/tee/tee_core.c params[n].u.memref.shm) n 362 drivers/tee/tee_core.c tee_shm_put(params[n].u.memref.shm); n 373 drivers/tee/tee_core.c size_t n; n 421 drivers/tee/tee_core.c for (n = 0; n < arg.num_params; n++) n 422 drivers/tee/tee_core.c if (tee_param_is_memref(params + n) && n 423 drivers/tee/tee_core.c params[n].u.memref.shm) n 424 drivers/tee/tee_core.c tee_shm_put(params[n].u.memref.shm); n 464 drivers/tee/tee_core.c size_t n; n 466 drivers/tee/tee_core.c for (n = 0; n < num_params; n++) { n 468 drivers/tee/tee_core.c struct tee_param *p = params + n; n 497 
drivers/tee/tee_core.c if (copy_to_user(uparams + n, &ip, sizeof(ip))) n 558 drivers/tee/tee_core.c size_t n; n 560 drivers/tee/tee_core.c for (n = 0; n < num_params; n++) { n 561 drivers/tee/tee_core.c struct tee_param *p = params + n; n 564 drivers/tee/tee_core.c if (copy_from_user(&ip, uparams + n, sizeof(ip))) n 34 drivers/tee/tee_shm.c size_t n; n 41 drivers/tee/tee_shm.c for (n = 0; n < shm->num_pages; n++) n 42 drivers/tee/tee_shm.c put_page(shm->pages[n]); n 317 drivers/tee/tee_shm.c size_t n; n 325 drivers/tee/tee_shm.c for (n = 0; n < shm->num_pages; n++) n 326 drivers/tee/tee_shm.c put_page(shm->pages[n]); n 1574 drivers/thermal/tegra/soctherm.c int i, j, n, ret; n 1579 drivers/thermal/tegra/soctherm.c n = of_property_count_u32_elems(dev->of_node, "nvidia,thermtrips"); n 1580 drivers/thermal/tegra/soctherm.c if (n <= 0) { n 1583 drivers/thermal/tegra/soctherm.c return n; n 1586 drivers/thermal/tegra/soctherm.c n = min(max_num_prop, n); n 1592 drivers/thermal/tegra/soctherm.c tlb, n); n 1599 drivers/thermal/tegra/soctherm.c for (j = 0; j < n; j = j + 2) { n 276 drivers/thunderbolt/domain.c struct attribute *attr, int n) n 1590 drivers/thunderbolt/icm.c struct icm_notification *n = container_of(work, typeof(*n), work); n 1591 drivers/thunderbolt/icm.c struct tb *tb = n->tb; n 1602 drivers/thunderbolt/icm.c switch (n->pkg->code) { n 1604 drivers/thunderbolt/icm.c icm->device_connected(tb, n->pkg); n 1607 drivers/thunderbolt/icm.c icm->device_disconnected(tb, n->pkg); n 1610 drivers/thunderbolt/icm.c icm->xdomain_connected(tb, n->pkg); n 1613 drivers/thunderbolt/icm.c icm->xdomain_disconnected(tb, n->pkg); n 1616 drivers/thunderbolt/icm.c icm->rtd3_veto(tb, n->pkg); n 1623 drivers/thunderbolt/icm.c kfree(n->pkg); n 1624 drivers/thunderbolt/icm.c kfree(n); n 1630 drivers/thunderbolt/icm.c struct icm_notification *n; n 1632 drivers/thunderbolt/icm.c n = kmalloc(sizeof(*n), GFP_KERNEL); n 1633 drivers/thunderbolt/icm.c if (!n) n 1636 drivers/thunderbolt/icm.c INIT_WORK(&n->work, icm_handle_notification); n 1637 drivers/thunderbolt/icm.c n->pkg = kmemdup(buf, size, GFP_KERNEL); n 1638 drivers/thunderbolt/icm.c n->tb = tb; n 1640 drivers/thunderbolt/icm.c queue_work(tb->wq, &n->work); n 196 drivers/thunderbolt/nhi.c struct ring_frame *frame, *n; n 198 drivers/thunderbolt/nhi.c list_for_each_entry_safe(frame, n, &ring->queue, list) { n 1356 drivers/thunderbolt/switch.c struct attribute *attr, int n) n 250 drivers/thunderbolt/tb.c struct tb_tunnel *n; n 252 drivers/thunderbolt/tb.c list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { n 613 drivers/thunderbolt/tb.c struct tb_tunnel *n; n 616 drivers/thunderbolt/tb.c list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { n 708 drivers/thunderbolt/tb.c struct tb_tunnel *tunnel, *n; n 718 drivers/thunderbolt/tb.c list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) n 444 drivers/tty/goldfish.c static void gf_early_write(struct console *con, const char *s, unsigned int n) n 448 drivers/tty/goldfish.c uart_console_write(&dev->port, s, n, gf_early_console_putchar); n 155 drivers/tty/hvc/hvc_console.c unsigned i = 0, n = 0; n 168 drivers/tty/hvc/hvc_console.c if (b[n] == '\n' && !donecr) { n 172 drivers/tty/hvc/hvc_console.c c[i++] = b[n++]; n 478 drivers/tty/hvc/hvc_console.c int n; n 480 drivers/tty/hvc/hvc_console.c n = hp->ops->put_chars(hp->vtermno, hp->outbuf, hp->n_outbuf); n 481 drivers/tty/hvc/hvc_console.c if (n <= 0) { n 482 drivers/tty/hvc/hvc_console.c if (n == 0 || n == -EAGAIN) { n 490 
drivers/tty/hvc/hvc_console.c hp->n_outbuf -= n; n 492 drivers/tty/hvc/hvc_console.c memmove(hp->outbuf, hp->outbuf + n, hp->n_outbuf); n 496 drivers/tty/hvc/hvc_console.c return n; n 635 drivers/tty/hvc/hvc_console.c int i, n, count, poll_mask = 0; n 685 drivers/tty/hvc/hvc_console.c n = hp->ops->get_chars(hp->vtermno, buf, count); n 686 drivers/tty/hvc/hvc_console.c if (n <= 0) { n 688 drivers/tty/hvc/hvc_console.c if (n == -EPIPE) { n 692 drivers/tty/hvc/hvc_console.c } else if ( n == -EAGAIN ) { n 703 drivers/tty/hvc/hvc_console.c for (i = 0; i < n; ++i) { n 723 drivers/tty/hvc/hvc_console.c read_total += n; n 865 drivers/tty/hvc/hvc_console.c int n; n 868 drivers/tty/hvc/hvc_console.c n = hp->ops->get_chars(hp->vtermno, &ch, 1); n 870 drivers/tty/hvc/hvc_console.c if (n <= 0) n 880 drivers/tty/hvc/hvc_console.c int n; n 883 drivers/tty/hvc/hvc_console.c n = hp->ops->put_chars(hp->vtermno, &ch, 1); n 884 drivers/tty/hvc/hvc_console.c } while (n <= 0); n 53 drivers/tty/hvc/hvc_xen.c struct xencons_info *entry, *n, *ret = NULL; n 58 drivers/tty/hvc/hvc_xen.c list_for_each_entry_safe(entry, n, &xenconsoles, list) { n 1440 drivers/tty/hvc/hvcs.c static int hvcs_alloc_index_list(int n) n 1444 drivers/tty/hvc/hvcs.c hvcs_index_list = kmalloc_array(n, sizeof(hvcs_index_count), n 1448 drivers/tty/hvc/hvcs.c hvcs_index_count = n; n 832 drivers/tty/hvc/hvsi.c int n; n 837 drivers/tty/hvc/hvsi.c n = hvsi_put_chars(hp, hp->outbuf, hp->n_outbuf); n 838 drivers/tty/hvc/hvsi.c if (n > 0) { n 840 drivers/tty/hvc/hvsi.c pr_debug("%s: wrote %i chars\n", __func__, n); n 842 drivers/tty/hvc/hvsi.c } else if (n == -EIO) { n 1088 drivers/tty/hvc/hvsi.c unsigned int i = 0, n = 0; n 1101 drivers/tty/hvc/hvsi.c if (buf[n] == '\n' && !donecr) { n 1105 drivers/tty/hvc/hvsi.c c[i++] = buf[n++]; n 1394 drivers/tty/mxser.c int id, i, j, n; n 1413 drivers/tty/mxser.c n = inb(port + 2); n 1414 drivers/tty/mxser.c if (n == 'M') { n 1416 drivers/tty/mxser.c } else if ((j == 1) && (n == 1)) { n 1429 drivers/tty/mxser.c int i, n; n 1438 drivers/tty/mxser.c n = inb(port + 5); n 1439 drivers/tty/mxser.c if ((n & 0x61) == 0x60) n 1441 drivers/tty/mxser.c if ((n & 1) == 1) n 168 drivers/tty/n_tty.c size_t tail, size_t n) n 175 drivers/tty/n_tty.c if (n > size) { n 182 drivers/tty/n_tty.c n -= size; n 186 drivers/tty/n_tty.c tty_audit_add_data(tty, from, n); n 187 drivers/tty/n_tty.c uncopied = copy_to_user(to, from, n); n 188 drivers/tty/n_tty.c zero_buffer(tty, from, n - uncopied); n 227 drivers/tty/n_tty.c ssize_t n = 0; n 230 drivers/tty/n_tty.c n = ldata->commit_head - ldata->read_tail; n 232 drivers/tty/n_tty.c n = ldata->canon_head - ldata->read_tail; n 233 drivers/tty/n_tty.c return n; n 1515 drivers/tty/n_tty.c size_t n, head; n 1518 drivers/tty/n_tty.c n = min_t(size_t, count, N_TTY_BUF_SIZE - head); n 1519 drivers/tty/n_tty.c memcpy(read_buf_addr(ldata, head), cp, n); n 1520 drivers/tty/n_tty.c ldata->read_head += n; n 1521 drivers/tty/n_tty.c cp += n; n 1522 drivers/tty/n_tty.c count -= n; n 1525 drivers/tty/n_tty.c n = min_t(size_t, count, N_TTY_BUF_SIZE - head); n 1526 drivers/tty/n_tty.c memcpy(read_buf_addr(ldata, head), cp, n); n 1527 drivers/tty/n_tty.c ldata->read_head += n; n 1703 drivers/tty/n_tty.c int room, n, rcvd = 0, overflow; n 1736 drivers/tty/n_tty.c n = min(count, room); n 1737 drivers/tty/n_tty.c if (!n) n 1742 drivers/tty/n_tty.c __receive_buf(tty, cp, fp, n); n 1744 drivers/tty/n_tty.c cp += n; n 1746 drivers/tty/n_tty.c fp += n; n 1747 drivers/tty/n_tty.c count -= n; n 1748 
drivers/tty/n_tty.c rcvd += n; n 1969 drivers/tty/n_tty.c size_t n; n 1975 drivers/tty/n_tty.c n = min(head - ldata->read_tail, N_TTY_BUF_SIZE - tail); n 1976 drivers/tty/n_tty.c n = min(*nr, n); n 1977 drivers/tty/n_tty.c if (n) { n 1979 drivers/tty/n_tty.c retval = copy_to_user(*b, from, n); n 1980 drivers/tty/n_tty.c n -= retval; n 1981 drivers/tty/n_tty.c is_eof = n == 1 && *from == EOF_CHAR(tty); n 1982 drivers/tty/n_tty.c tty_audit_add_data(tty, from, n); n 1983 drivers/tty/n_tty.c zero_buffer(tty, from, n); n 1984 drivers/tty/n_tty.c smp_store_release(&ldata->read_tail, ldata->read_tail + n); n 1988 drivers/tty/n_tty.c n = 0; n 1989 drivers/tty/n_tty.c *b += n; n 1990 drivers/tty/n_tty.c *nr -= n; n 2023 drivers/tty/n_tty.c size_t n, size, more, c; n 2032 drivers/tty/n_tty.c n = min(*nr + 1, smp_load_acquire(&ldata->canon_head) - ldata->read_tail); n 2035 drivers/tty/n_tty.c size = min_t(size_t, tail + n, N_TTY_BUF_SIZE); n 2038 drivers/tty/n_tty.c __func__, *nr, tail, n, size); n 2041 drivers/tty/n_tty.c more = n - (size - tail); n 2049 drivers/tty/n_tty.c n = eol - tail; n 2050 drivers/tty/n_tty.c if (n > N_TTY_BUF_SIZE) n 2051 drivers/tty/n_tty.c n += N_TTY_BUF_SIZE; n 2052 drivers/tty/n_tty.c c = n + found; n 2056 drivers/tty/n_tty.c n = c; n 2060 drivers/tty/n_tty.c __func__, eol, found, n, c, tail, more); n 2062 drivers/tty/n_tty.c ret = tty_copy_to_user(tty, *b, tail, n); n 2065 drivers/tty/n_tty.c *b += n; n 2066 drivers/tty/n_tty.c *nr -= n; n 175 drivers/tty/serial/8250/8250_core.c struct hlist_node *n; n 183 drivers/tty/serial/8250/8250_core.c hlist_for_each(n, h) { n 184 drivers/tty/serial/8250/8250_core.c i = hlist_entry(n, struct irq_info, node); n 189 drivers/tty/serial/8250/8250_core.c if (n == NULL) { n 228 drivers/tty/serial/8250/8250_core.c struct hlist_node *n; n 235 drivers/tty/serial/8250/8250_core.c hlist_for_each(n, h) { n 236 drivers/tty/serial/8250/8250_core.c i = hlist_entry(n, struct irq_info, node); n 241 drivers/tty/serial/8250/8250_core.c BUG_ON(n == NULL); n 79 drivers/tty/serial/8250/8250_lpss.c unsigned long m, n; n 94 drivers/tty/serial/8250/8250_lpss.c rational_best_approximation(fuart, fref, w, w, &m, &n); n 98 drivers/tty/serial/8250/8250_lpss.c reg = (m << BYT_PRV_CLK_M_VAL_SHIFT) | (n << BYT_PRV_CLK_N_VAL_SHIFT); n 2423 drivers/tty/serial/amba-pl011.c static void qdf2400_e44_early_write(struct console *con, const char *s, unsigned n) n 2427 drivers/tty/serial/amba-pl011.c uart_console_write(&dev->port, s, n, qdf2400_e44_putc); n 2442 drivers/tty/serial/amba-pl011.c static void pl011_early_write(struct console *con, const char *s, unsigned n) n 2446 drivers/tty/serial/amba-pl011.c uart_console_write(&dev->port, s, n, pl011_putc); n 550 drivers/tty/serial/arc_uart.c unsigned int n) n 554 drivers/tty/serial/arc_uart.c uart_console_write(&dev->port, s, n, arc_serial_console_putchar); n 82 drivers/tty/serial/atmel_serial.h #define ATMEL_US_MAX_ITER(n) (((n) << 24) & ATMEL_US_MAX_ITER_MASK) n 773 drivers/tty/serial/bcm63xx_uart.c static void bcm_early_write(struct console *con, const char *s, unsigned n) n 777 drivers/tty/serial/bcm63xx_uart.c uart_console_write(&dev->port, s, n, bcm_console_putchar); n 369 drivers/tty/serial/clps711x.c unsigned n) n 375 drivers/tty/serial/clps711x.c uart_console_write(port, c, n, uart_clps711x_console_putchar); n 393 drivers/tty/serial/digicolor-usart.c unsigned n) n 405 drivers/tty/serial/digicolor-usart.c uart_console_write(port, c, n, digicolor_uart_console_putchar); n 39 
drivers/tty/serial/earlycon-arm-semihost.c static void smh_write(struct console *con, const char *s, unsigned n) n 42 drivers/tty/serial/earlycon-arm-semihost.c uart_console_write(&dev->port, s, n, smh_putc); n 19 drivers/tty/serial/earlycon-riscv-sbi.c const char *s, unsigned n) n 22 drivers/tty/serial/earlycon-riscv-sbi.c uart_console_write(&dev->port, s, n, sbi_putc); n 30 drivers/tty/serial/efm32-uart.c #define UARTn_FRAME_DATABITS(n) ((n) - 3) n 78 drivers/tty/serial/efm32-uart.c #define UARTn_ROUTE_LOCATION(n) (((n) << 8) & UARTn_ROUTE_LOCATION__MASK) n 792 drivers/tty/serial/fsl_linflexuart.c unsigned int n) n 796 drivers/tty/serial/fsl_linflexuart.c uart_console_write(&dev->port, s, n, linflex_earlycon_putchar); n 2303 drivers/tty/serial/fsl_lpuart.c static void lpuart_early_write(struct console *con, const char *s, unsigned n) n 2307 drivers/tty/serial/fsl_lpuart.c uart_console_write(&dev->port, s, n, lpuart_console_putchar); n 2310 drivers/tty/serial/fsl_lpuart.c static void lpuart32_early_write(struct console *con, const char *s, unsigned n) n 2314 drivers/tty/serial/fsl_lpuart.c uart_console_write(&dev->port, s, n, lpuart32_console_putchar); n 195 drivers/tty/serial/ifx6x60.c int n; n 204 drivers/tty/serial/ifx6x60.c for (n = 0; n < len; n++) { n 220 drivers/tty/serial/ifx6x60.c int n; n 230 drivers/tty/serial/ifx6x60.c for (n = 0; n < len; n++) { n 452 drivers/tty/serial/jsm/jsm_cls.c int n; n 474 drivers/tty/serial/jsm/jsm_cls.c n = 32; n 481 drivers/tty/serial/jsm/jsm_cls.c n = min(n, qlen); n 483 drivers/tty/serial/jsm/jsm_cls.c while (n > 0) { n 486 drivers/tty/serial/jsm/jsm_cls.c n--; n 280 drivers/tty/serial/jsm/jsm_neo.c int n = 0; n 344 drivers/tty/serial/jsm/jsm_neo.c n = min(((u32) total), (RQUEUESIZE - (u32) head)); n 352 drivers/tty/serial/jsm/jsm_neo.c n = min((u32) n, (u32) 12); n 365 drivers/tty/serial/jsm/jsm_neo.c memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, n); n 371 drivers/tty/serial/jsm/jsm_neo.c memset(ch->ch_equeue + head, 0, n); n 374 drivers/tty/serial/jsm/jsm_neo.c head = (head + n) & RQUEUEMASK; n 375 drivers/tty/serial/jsm/jsm_neo.c total -= n; n 376 drivers/tty/serial/jsm/jsm_neo.c qleft -= n; n 377 drivers/tty/serial/jsm/jsm_neo.c ch->ch_rxcount += n; n 478 drivers/tty/serial/jsm/jsm_neo.c int n; n 521 drivers/tty/serial/jsm/jsm_neo.c n = UART_17158_TX_FIFOSIZE - ch->ch_t_tlevel; n 529 drivers/tty/serial/jsm/jsm_neo.c n = min(n, qlen); n 531 drivers/tty/serial/jsm/jsm_neo.c while (n > 0) { n 534 drivers/tty/serial/jsm/jsm_neo.c s = min(s, n); n 542 drivers/tty/serial/jsm/jsm_neo.c n -= s; n 117 drivers/tty/serial/kgdb_nmi.c static int n; n 129 drivers/tty/serial/kgdb_nmi.c } else if (c == magic[n]) { n 130 drivers/tty/serial/kgdb_nmi.c n = (n + 1) % m; n 131 drivers/tty/serial/kgdb_nmi.c if (!n) n 135 drivers/tty/serial/kgdb_nmi.c n = 0; n 44 drivers/tty/serial/lpc32xx_hs.c #define LPC32XX_HSU_TX_LEV(n) (((n) >> 8) & 0xFF) n 45 drivers/tty/serial/lpc32xx_hs.c #define LPC32XX_HSU_RX_LEV(n) ((n) & 0xFF) n 67 drivers/tty/serial/lpc32xx_hs.c #define LPC32XX_HSU_OFFSET(n) ((n) << 9) n 300 drivers/tty/serial/men_z135_uart.c int n; n 339 drivers/tty/serial/men_z135_uart.c n = 4 - BYTES_TO_ALIGN(wptr); n 341 drivers/tty/serial/men_z135_uart.c n = txfree; n 343 drivers/tty/serial/men_z135_uart.c n = qlen; n 345 drivers/tty/serial/men_z135_uart.c if (n <= 0) n 352 drivers/tty/serial/men_z135_uart.c n = min(n, s); n 354 drivers/tty/serial/men_z135_uart.c memcpy_toio(port->membase + MEN_Z135_TX_RAM, &xmit->buf[xmit->tail], n); n 355 
drivers/tty/serial/men_z135_uart.c xmit->tail = (xmit->tail + n) & (UART_XMIT_SIZE - 1); n 357 drivers/tty/serial/men_z135_uart.c iowrite32(n & 0x3ff, port->membase + MEN_Z135_TX_CTRL); n 359 drivers/tty/serial/men_z135_uart.c port->icount.tx += n; n 495 drivers/tty/serial/mps2-uart.c static void mps2_early_write(struct console *con, const char *s, unsigned int n) n 499 drivers/tty/serial/mps2-uart.c uart_console_write(&dev->port, s, n, mps2_early_putchar); n 1684 drivers/tty/serial/msm_serial.c msm_serial_early_write(struct console *con, const char *s, unsigned n) n 1688 drivers/tty/serial/msm_serial.c __msm_console_write(&dev->port, s, n, false); n 1704 drivers/tty/serial/msm_serial.c msm_serial_early_write_dm(struct console *con, const char *s, unsigned n) n 1708 drivers/tty/serial/msm_serial.c __msm_console_write(&dev->port, s, n, true); n 620 drivers/tty/serial/mvebu-uart.c unsigned n) n 624 drivers/tty/serial/mvebu-uart.c uart_console_write(&dev->port, s, n, mvebu_uart_putc); n 228 drivers/tty/serial/omap-serial.c unsigned int n = port->uartclk / (mode * baud); n 231 drivers/tty/serial/omap-serial.c if (n == 0) n 232 drivers/tty/serial/omap-serial.c n = 1; n 234 drivers/tty/serial/omap-serial.c abs_diff = baud - (port->uartclk / (mode * n)); n 1103 drivers/tty/serial/qcom_geni_serial.c const char *s, unsigned int n) n 1107 drivers/tty/serial/qcom_geni_serial.c __qcom_geni_serial_console_write(&dev->port, s, n); n 2526 drivers/tty/serial/samsung.c static void samsung_early_write(struct console *con, const char *s, unsigned n) n 2530 drivers/tty/serial/samsung.c uart_console_write(&dev->port, s, n, samsung_early_putc); n 848 drivers/tty/serial/sccnxp.c static void sccnxp_console_write(struct console *co, const char *c, unsigned n) n 855 drivers/tty/serial/sccnxp.c uart_console_write(port, c, n, sccnxp_console_putchar); n 727 drivers/tty/serial/sifive.c unsigned int n) n 732 drivers/tty/serial/sifive.c uart_console_write(port, s, n, early_sifive_serial_putc); n 1043 drivers/tty/serial/sprd_serial.c static void sprd_early_write(struct console *con, const char *s, unsigned int n) n 1047 drivers/tty/serial/sprd_serial.c uart_console_write(&dev->port, s, n, sprd_putc); n 417 drivers/tty/serial/sunhv.c static int fill_con_write_page(const char *s, unsigned int n, n 424 drivers/tty/serial/sunhv.c while (n--) { n 439 drivers/tty/serial/sunhv.c static void sunhv_console_write_paged(struct console *con, const char *s, unsigned n) n 450 drivers/tty/serial/sunhv.c while (n > 0) { n 453 drivers/tty/serial/sunhv.c unsigned int cpy = fill_con_write_page(s, n, n 456 drivers/tty/serial/sunhv.c n -= cpy; n 494 drivers/tty/serial/sunhv.c static void sunhv_console_write_bychar(struct console *con, const char *s, unsigned n) n 505 drivers/tty/serial/sunhv.c for (i = 0; i < n; i++) { n 650 drivers/tty/serial/sunsab.c int n, m; n 663 drivers/tty/serial/sunsab.c n = (SAB_BASE_BAUD * 10) / baud; n 665 drivers/tty/serial/sunsab.c while (n >= 640) { n 666 drivers/tty/serial/sunsab.c n = n / 2; n 669 drivers/tty/serial/sunsab.c n = (n+5) / 10; n 674 drivers/tty/serial/sunsab.c if ((m == 0) && ((n & 1) == 0)) { n 675 drivers/tty/serial/sunsab.c n = n / 2; n 678 drivers/tty/serial/sunsab.c *n_ret = n - 1; n 688 drivers/tty/serial/sunsab.c int bits, n, m; n 717 drivers/tty/serial/sunsab.c calc_ebrg(baud, &n, &m); n 719 drivers/tty/serial/sunsab.c up->cached_ebrg = n | (m << 6); n 862 drivers/tty/serial/sunsab.c static void sunsab_console_write(struct console *con, const char *s, unsigned n) n 873 
drivers/tty/serial/sunsab.c uart_console_write(&up->port, s, n, sunsab_console_putchar); n 207 drivers/tty/serial/uartlite.c int stat, busy, n = 0; n 216 drivers/tty/serial/uartlite.c n++; n 220 drivers/tty/serial/uartlite.c if (n > 1) { n 572 drivers/tty/serial/uartlite.c const char *s, unsigned n) n 575 drivers/tty/serial/uartlite.c uart_console_write(&device->port, s, n, early_uartlite_putc); n 1138 drivers/tty/serial/xilinx_uartps.c unsigned n) n 1142 drivers/tty/serial/xilinx_uartps.c uart_console_write(&dev->port, s, n, cdns_uart_console_putchar); n 261 drivers/tty/tty_buffer.c struct tty_buffer *b, *n; n 273 drivers/tty/tty_buffer.c n = tty_buffer_alloc(port, size); n 274 drivers/tty/tty_buffer.c if (n != NULL) { n 275 drivers/tty/tty_buffer.c n->flags = flags; n 276 drivers/tty/tty_buffer.c buf->tail = n; n 285 drivers/tty/tty_buffer.c smp_store_release(&b->next, n); n 476 drivers/tty/tty_buffer.c int n; n 481 drivers/tty/tty_buffer.c n = port->client_ops->receive_buf(port, p, f, count); n 482 drivers/tty/tty_buffer.c if (n > 0) n 483 drivers/tty/tty_buffer.c memset(p, 0, n); n 484 drivers/tty/tty_buffer.c return n; n 571 drivers/tty/tty_io.c int closecount = 0, n; n 644 drivers/tty/tty_io.c for (n = 0; n < closecount; n++) n 477 drivers/tty/vt/consolemap.c int i, n; n 480 drivers/tty/vt/consolemap.c p1 = p->uni_pgdir[n = unicode >> 11]; n 482 drivers/tty/vt/consolemap.c p1 = p->uni_pgdir[n] = kmalloc_array(32, sizeof(u16 *), n 489 drivers/tty/vt/consolemap.c p2 = p1[n = (unicode >> 6) & 0x1f]; n 491 drivers/tty/vt/consolemap.c p2 = p1[n] = kmalloc_array(64, sizeof(u16), GFP_KERNEL); n 67 drivers/tty/vt/selection.c sel_pos(int n) n 70 drivers/tty/vt/selection.c return screen_glyph_unicode(sel_cons, n / 2); n 71 drivers/tty/vt/selection.c return inverse_translate(sel_cons, screen_glyph(sel_cons, n), n 2584 drivers/tty/vt/vt.c int c, next_c, tc, ok, n = 0, draw_x = -1; n 2629 drivers/tty/vt/vt.c n++; n 2834 drivers/tty/vt/vt.c return n; n 4667 drivers/tty/vt/vt.c u32 screen_glyph_unicode(struct vc_data *vc, int n) n 4672 drivers/tty/vt/vt.c return uniscr->lines[n / vc->vc_cols][n % vc->vc_cols]; n 4673 drivers/tty/vt/vt.c return inverse_translate(vc, screen_glyph(vc, n * 2), 1); n 218 drivers/tty/vt/vt_ioctl.c int vt_waitactive(int n) n 224 drivers/tty/vt/vt_ioctl.c if (n == fg_console + 1) { n 232 drivers/tty/vt/vt_ioctl.c } while (vw.event.newev != n); n 106 drivers/uio/uio_sercos3.c int n, int pci_bar) n 108 drivers/uio/uio_sercos3.c info->mem[n].addr = pci_resource_start(dev, pci_bar); n 109 drivers/uio/uio_sercos3.c if (!info->mem[n].addr) n 111 drivers/uio/uio_sercos3.c info->mem[n].internal_addr = ioremap(pci_resource_start(dev, pci_bar), n 113 drivers/uio/uio_sercos3.c if (!info->mem[n].internal_addr) n 115 drivers/uio/uio_sercos3.c info->mem[n].size = pci_resource_len(dev, pci_bar); n 116 drivers/uio/uio_sercos3.c info->mem[n].memtype = UIO_MEM_PHYS; n 621 drivers/usb/atm/usbatm.c struct sk_buff *skb, *n; n 624 drivers/usb/atm/usbatm.c skb_queue_walk_safe(&instance->sndqueue, skb, n) { n 938 drivers/usb/chipidea/core.c struct device_attribute *attr, const char *buf, size_t n) n 966 drivers/usb/chipidea/core.c return (ret == 0) ? 
n : ret; n 63 drivers/usb/chipidea/udc.c static inline int ep_to_bit(struct ci_hdrc *ci, int n) n 67 drivers/usb/chipidea/udc.c if (n >= ci->hw_ep_max / 2) n 68 drivers/usb/chipidea/udc.c n += fill; n 70 drivers/usb/chipidea/udc.c return n; n 101 drivers/usb/chipidea/udc.c int n = hw_ep_bit(num, dir); n 105 drivers/usb/chipidea/udc.c hw_write(ci, OP_ENDPTFLUSH, ~0, BIT(n)); n 106 drivers/usb/chipidea/udc.c while (hw_read(ci, OP_ENDPTFLUSH, BIT(n))) n 108 drivers/usb/chipidea/udc.c } while (hw_read(ci, OP_ENDPTSTAT, BIT(n))); n 186 drivers/usb/chipidea/udc.c int n = hw_ep_bit(num, dir); n 194 drivers/usb/chipidea/udc.c hw_write(ci, OP_ENDPTPRIME, ~0, BIT(n)); n 196 drivers/usb/chipidea/udc.c while (hw_read(ci, OP_ENDPTPRIME, BIT(n))) n 250 drivers/usb/chipidea/udc.c static int hw_test_and_clear_complete(struct ci_hdrc *ci, int n) n 252 drivers/usb/chipidea/udc.c n = ep_to_bit(ci, n); n 253 drivers/usb/chipidea/udc.c return hw_test_and_clear(ci, OP_ENDPTCOMPLETE, BIT(n)); n 472 drivers/usb/chipidea/udc.c int n = hw_ep_bit(hwep->num, hwep->dir); n 484 drivers/usb/chipidea/udc.c if (hw_read(ci, OP_ENDPTPRIME, BIT(n))) n 488 drivers/usb/chipidea/udc.c tmp_stat = hw_read(ci, OP_ENDPTSTAT, BIT(n)); n 562 drivers/usb/chipidea/udc.c int n = hw_ep_bit(hwep->num, hwep->dir); n 565 drivers/usb/chipidea/udc.c if (!hw_read(ci, OP_ENDPTSTAT, BIT(n))) n 16 drivers/usb/chipidea/ulpi.c #define ULPI_ADDR(n) ((n) << 16) n 17 drivers/usb/chipidea/ulpi.c #define ULPI_DATA(n) (n) n 188 drivers/usb/class/cdc-acm.c int i, n; n 191 drivers/usb/class/cdc-acm.c n = ACM_NW; n 194 drivers/usb/class/cdc-acm.c n -= acm->wb[i].use; n 196 drivers/usb/class/cdc-acm.c return n; n 271 drivers/usb/class/usbtmc.c int n; n 318 drivers/usb/class/usbtmc.c n = 0; n 334 drivers/usb/class/usbtmc.c n++; n 345 drivers/usb/class/usbtmc.c if (n >= USBTMC_MAX_READS_TO_CLEAR_BULK_IN) { n 400 drivers/usb/class/usbtmc.c int n; n 429 drivers/usb/class/usbtmc.c n = 0; n 440 drivers/usb/class/usbtmc.c n++; n 452 drivers/usb/class/usbtmc.c (n < USBTMC_MAX_READS_TO_CLEAR_BULK_IN)) n 1623 drivers/usb/class/usbtmc.c int n; n 1652 drivers/usb/class/usbtmc.c n = 0; n 1693 drivers/usb/class/usbtmc.c n++; n 1701 drivers/usb/class/usbtmc.c (n < USBTMC_MAX_READS_TO_CLEAR_BULK_IN)); n 1705 drivers/usb/class/usbtmc.c n++; n 1708 drivers/usb/class/usbtmc.c if (n >= USBTMC_MAX_READS_TO_CLEAR_BULK_IN) { n 22 drivers/usb/core/config.c static inline const char *plural(int n) n 24 drivers/usb/core/config.c return (n == 1 ? 
"" : "s"); n 31 drivers/usb/core/config.c int n = 0; n 41 drivers/usb/core/config.c ++n; n 47 drivers/usb/core/config.c *num_skipped = n; n 263 drivers/usb/core/config.c int n, i, j, retval; n 272 drivers/usb/core/config.c n = USB_DT_ENDPOINT_AUDIO_SIZE; n 274 drivers/usb/core/config.c n = USB_DT_ENDPOINT_SIZE; n 314 drivers/usb/core/config.c memcpy(&endpoint->desc, d, n); n 335 drivers/usb/core/config.c n = fls(d->bInterval*8); n 336 drivers/usb/core/config.c if (n == 0) n 337 drivers/usb/core/config.c n = 7; /* 8 ms = 2^(7-1) uframes */ n 348 drivers/usb/core/config.c n = clamp(fls(d->bInterval) + 3, i, j); n 349 drivers/usb/core/config.c i = j = n; n 357 drivers/usb/core/config.c n = clamp(fls(d->bInterval), i, j); n 358 drivers/usb/core/config.c i = j = n; n 367 drivers/usb/core/config.c n = 10; n 375 drivers/usb/core/config.c n = 7; /* 8 ms = 2^(7-1) uframes */ n 378 drivers/usb/core/config.c n = 4; /* 8 ms = 2^(4-1) frames */ n 387 drivers/usb/core/config.c d->bEndpointAddress, d->bInterval, n); n 388 drivers/usb/core/config.c endpoint->desc.bInterval = n; n 474 drivers/usb/core/config.c USB_DT_INTERFACE, &n); n 477 drivers/usb/core/config.c if (n > 0) n 479 drivers/usb/core/config.c n, plural(n), "endpoint"); n 511 drivers/usb/core/config.c int i, n; n 554 drivers/usb/core/config.c USB_DT_INTERFACE, &n); n 556 drivers/usb/core/config.c if (n > 0) n 558 drivers/usb/core/config.c n, plural(n), "interface"); n 581 drivers/usb/core/config.c n = 0; n 590 drivers/usb/core/config.c ++n; n 596 drivers/usb/core/config.c if (n != num_ep_orig) n 600 drivers/usb/core/config.c cfgno, inum, asnum, n, plural(n), num_ep_orig); n 616 drivers/usb/core/config.c int i, j, n; n 651 drivers/usb/core/config.c n = 0; n 686 drivers/usb/core/config.c n >= nintf_orig) { n 701 drivers/usb/core/config.c for (i = 0; i < n; ++i) { n 705 drivers/usb/core/config.c if (i < n) { n 708 drivers/usb/core/config.c } else if (n < USB_MAXINTERFACES) { n 709 drivers/usb/core/config.c inums[n] = inum; n 710 drivers/usb/core/config.c nalts[n] = 1; n 711 drivers/usb/core/config.c ++n; n 746 drivers/usb/core/config.c if (n != nintf) n 749 drivers/usb/core/config.c cfgno, n, plural(n), nintf_orig); n 750 drivers/usb/core/config.c else if (n == 0) n 752 drivers/usb/core/config.c config->desc.bNumInterfaces = nintf = n; n 789 drivers/usb/core/config.c USB_DT_INTERFACE, &n); n 791 drivers/usb/core/config.c if (n > 0) n 793 drivers/usb/core/config.c n, plural(n), "configuration"); n 812 drivers/usb/core/config.c for (n = 0; n < intfc->num_altsetting; ++n) { n 813 drivers/usb/core/config.c if (intfc->altsetting[n].desc. 
n 817 drivers/usb/core/config.c if (n >= intfc->num_altsetting) n 2039 drivers/usb/core/devio.c compat_uint_t n; n 2044 drivers/usb/core/devio.c if (get_user(n, &p32->ep) || put_user(n, &p->ep) || n 2045 drivers/usb/core/devio.c get_user(n, &p32->len) || put_user(n, &p->len) || n 2046 drivers/usb/core/devio.c get_user(n, &p32->timeout) || put_user(n, &p->timeout) || n 153 drivers/usb/core/driver.c struct usb_dynid *dynid, *n; n 164 drivers/usb/core/driver.c list_for_each_entry_safe(dynid, n, &usb_driver->dynids.list, node) { n 221 drivers/usb/core/driver.c struct usb_dynid *dynid, *n; n 224 drivers/usb/core/driver.c list_for_each_entry_safe(dynid, n, &usb_drv->dynids.list, node) { n 1296 drivers/usb/core/driver.c int i = 0, n = 0; n 1305 drivers/usb/core/driver.c n = udev->actconfig->desc.bNumInterfaces; n 1306 drivers/usb/core/driver.c for (i = n - 1; i >= 0; --i) { n 1352 drivers/usb/core/driver.c while (++i < n) { n 27 drivers/usb/core/generic.c static inline const char *plural(int n) n 29 drivers/usb/core/generic.c return (n == 1 ? "" : "s"); n 404 drivers/usb/core/hcd.c unsigned n, t = 2 + 2*strlen(s); n 413 drivers/usb/core/hcd.c n = len; n 414 drivers/usb/core/hcd.c while (n--) { n 416 drivers/usb/core/hcd.c if (!n--) n 1457 drivers/usb/core/hcd.c int n; n 1465 drivers/usb/core/hcd.c n = dma_map_sg( n 1470 drivers/usb/core/hcd.c if (n <= 0) n 1474 drivers/usb/core/hcd.c urb->num_mapped_sgs = n; n 1475 drivers/usb/core/hcd.c if (n != urb->num_sgs) n 1980 drivers/usb/core/hub.c int n; n 1982 drivers/usb/core/hub.c for (n = 0; n < hdev->maxchild; n++) { n 1983 drivers/usb/core/hub.c if (hub->ports[n]->port_owner == owner) n 1984 drivers/usb/core/hub.c hub->ports[n]->port_owner = NULL; n 1818 drivers/usb/core/message.c int n, nintf; n 1844 drivers/usb/core/message.c n = nintf = 0; n 1852 drivers/usb/core/message.c for (; n < nintf; ++n) { n 1853 drivers/usb/core/message.c new_interfaces[n] = kzalloc( n 1856 drivers/usb/core/message.c if (!new_interfaces[n]) { n 1859 drivers/usb/core/message.c while (--n >= 0) n 1860 drivers/usb/core/message.c kfree(new_interfaces[n]); n 850 drivers/usb/core/sysfs.c struct attribute *a, int n) n 889 drivers/usb/core/sysfs.c size_t srclen, n; n 908 drivers/usb/core/sysfs.c n = min(nleft, srclen - (size_t) off); n 909 drivers/usb/core/sysfs.c memcpy(buf, src + off, n); n 910 drivers/usb/core/sysfs.c nleft -= n; n 911 drivers/usb/core/sysfs.c buf += n; n 1234 drivers/usb/core/sysfs.c struct attribute *a, int n) n 424 drivers/usb/core/urb.c int n, len; n 450 drivers/usb/core/urb.c for (n = 0; n < urb->number_of_packets; n++) { n 451 drivers/usb/core/urb.c len = urb->iso_frame_desc[n].length; n 454 drivers/usb/core/urb.c urb->iso_frame_desc[n].status = -EXDEV; n 455 drivers/usb/core/urb.c urb->iso_frame_desc[n].actual_length = 0; n 123 drivers/usb/dwc3/core.h #define DWC3_GUSB2PHYCFG(n) (0xc200 + ((n) * 0x04)) n 124 drivers/usb/dwc3/core.h #define DWC3_GUSB2I2CCTL(n) (0xc240 + ((n) * 0x04)) n 126 drivers/usb/dwc3/core.h #define DWC3_GUSB2PHYACC(n) (0xc280 + ((n) * 0x04)) n 128 drivers/usb/dwc3/core.h #define DWC3_GUSB3PIPECTL(n) (0xc2c0 + ((n) * 0x04)) n 130 drivers/usb/dwc3/core.h #define DWC3_GTXFIFOSIZ(n) (0xc300 + ((n) * 0x04)) n 131 drivers/usb/dwc3/core.h #define DWC3_GRXFIFOSIZ(n) (0xc380 + ((n) * 0x04)) n 133 drivers/usb/dwc3/core.h #define DWC3_GEVNTADRLO(n) (0xc400 + ((n) * 0x10)) n 134 drivers/usb/dwc3/core.h #define DWC3_GEVNTADRHI(n) (0xc404 + ((n) * 0x10)) n 135 drivers/usb/dwc3/core.h #define DWC3_GEVNTSIZ(n) (0xc408 + ((n) * 0x10)) n 136 
drivers/usb/dwc3/core.h #define DWC3_GEVNTCOUNT(n) (0xc40c + ((n) * 0x10)) n 150 drivers/usb/dwc3/core.h #define DWC3_DEP_BASE(n) (0xc800 + ((n) * 0x10)) n 156 drivers/usb/dwc3/core.h #define DWC3_DEV_IMOD(n) (0xca00 + ((n) * 0x4)) n 180 drivers/usb/dwc3/core.h #define DWC3_GDBGLSPMUX_HOSTSELECT(n) ((n) & 0x3fff) n 181 drivers/usb/dwc3/core.h #define DWC3_GDBGLSPMUX_DEVSELECT(n) (((n) & 0xf) << 4) n 182 drivers/usb/dwc3/core.h #define DWC3_GDBGLSPMUX_EPSELECT(n) ((n) & 0xf) n 185 drivers/usb/dwc3/core.h #define DWC3_GDBGFIFOSPACE_NUM(n) ((n) & 0x1f) n 186 drivers/usb/dwc3/core.h #define DWC3_GDBGFIFOSPACE_TYPE(n) (((n) << 5) & 0x1e0) n 187 drivers/usb/dwc3/core.h #define DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(n) (((n) >> 16) & 0xffff) n 200 drivers/usb/dwc3/core.h #define DWC3_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 19) n 201 drivers/usb/dwc3/core.h #define DWC3_GRXTHRCFG_RXPKTCNT(n) (((n) & 0xf) << 24) n 205 drivers/usb/dwc3/core.h #define DWC31_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 16) n 206 drivers/usb/dwc3/core.h #define DWC31_GRXTHRCFG_RXPKTCNT(n) (((n) & 0x1f) << 21) n 209 drivers/usb/dwc3/core.h #define DWC31_RXTHRNUMPKT_HS_PRD(n) (((n) & 0x3) << 13) n 211 drivers/usb/dwc3/core.h #define DWC31_RXTHRNUMPKT_PRD(n) (((n) & 0x1f) << 5) n 212 drivers/usb/dwc3/core.h #define DWC31_MAXRXBURSTSIZE_PRD(n) ((n) & 0x1f) n 215 drivers/usb/dwc3/core.h #define DWC31_GTXTHRCFG_MAXTXBURSTSIZE(n) (((n) & 0x1f) << 16) n 216 drivers/usb/dwc3/core.h #define DWC31_GTXTHRCFG_TXPKTCNT(n) (((n) & 0x1f) << 21) n 219 drivers/usb/dwc3/core.h #define DWC31_TXTHRNUMPKT_HS_PRD(n) (((n) & 0x3) << 13) n 221 drivers/usb/dwc3/core.h #define DWC31_TXTHRNUMPKT_PRD(n) (((n) & 0x1f) << 5) n 222 drivers/usb/dwc3/core.h #define DWC31_MAXTXBURSTSIZE_PRD(n) ((n) & 0x1f) n 225 drivers/usb/dwc3/core.h #define DWC3_GCTL_PWRDNSCALE(n) ((n) << 19) n 233 drivers/usb/dwc3/core.h #define DWC3_GCTL_PRTCAP(n) (((n) & (3 << 12)) >> 12) n 234 drivers/usb/dwc3/core.h #define DWC3_GCTL_PRTCAPDIR(n) ((n) << 12) n 241 drivers/usb/dwc3/core.h #define DWC3_GCTL_SCALEDOWN(n) ((n) << 4) n 264 drivers/usb/dwc3/core.h #define DWC3_GSTS_CURMOD(n) ((n) & 0x3) n 274 drivers/usb/dwc3/core.h #define DWC3_GUSB2PHYCFG_PHYIF(n) (n << 3) n 276 drivers/usb/dwc3/core.h #define DWC3_GUSB2PHYCFG_USBTRDTIM(n) (n << 10) n 287 drivers/usb/dwc3/core.h #define DWC3_GUSB2PHYACC_ADDR(n) (n << 16) n 288 drivers/usb/dwc3/core.h #define DWC3_GUSB2PHYACC_EXTEND_ADDR(n) (n << 8) n 289 drivers/usb/dwc3/core.h #define DWC3_GUSB2PHYACC_DATA(n) (n & 0xff) n 297 drivers/usb/dwc3/core.h #define DWC3_GUSB3PIPECTL_DEP1P2P3(n) ((n) << 19) n 305 drivers/usb/dwc3/core.h #define DWC3_GUSB3PIPECTL_TX_DEEPH(n) ((n) << 1) n 309 drivers/usb/dwc3/core.h #define DWC31_GTXFIFOSIZ_TXFDEF(n) ((n) & 0x7fff) /* DWC_usb31 only */ n 310 drivers/usb/dwc3/core.h #define DWC3_GTXFIFOSIZ_TXFDEF(n) ((n) & 0xffff) n 311 drivers/usb/dwc3/core.h #define DWC3_GTXFIFOSIZ_TXFSTADDR(n) ((n) & 0xffff0000) n 314 drivers/usb/dwc3/core.h #define DWC31_GRXFIFOSIZ_RXFDEP(n) ((n) & 0x7fff) /* DWC_usb31 only */ n 315 drivers/usb/dwc3/core.h #define DWC3_GRXFIFOSIZ_RXFDEP(n) ((n) & 0xffff) n 319 drivers/usb/dwc3/core.h #define DWC3_GEVNTSIZ_SIZE(n) ((n) & 0xffff) n 322 drivers/usb/dwc3/core.h #define DWC3_GHWPARAMS0_MODE(n) ((n) & 0x3) n 326 drivers/usb/dwc3/core.h #define DWC3_GHWPARAMS0_MBUS_TYPE(n) (((n) >> 3) & 0x7) n 327 drivers/usb/dwc3/core.h #define DWC3_GHWPARAMS0_SBUS_TYPE(n) (((n) >> 6) & 0x3) n 328 drivers/usb/dwc3/core.h #define DWC3_GHWPARAMS0_MDWIDTH(n) (((n) >> 8) & 0xff) n 329 
drivers/usb/dwc3/core.h #define DWC3_GHWPARAMS0_SDWIDTH(n) (((n) >> 16) & 0xff) n 330 drivers/usb/dwc3/core.h #define DWC3_GHWPARAMS0_AWIDTH(n) (((n) >> 24) & 0xff) n 333 drivers/usb/dwc3/core.h #define DWC3_GHWPARAMS1_EN_PWROPT(n) (((n) & (3 << 24)) >> 24) n 337 drivers/usb/dwc3/core.h #define DWC3_GHWPARAMS1_PWROPT(n) ((n) << 24) n 342 drivers/usb/dwc3/core.h #define DWC3_GHWPARAMS3_SSPHY_IFC(n) ((n) & 3) n 346 drivers/usb/dwc3/core.h #define DWC3_GHWPARAMS3_HSPHY_IFC(n) (((n) & (3 << 2)) >> 2) n 351 drivers/usb/dwc3/core.h #define DWC3_GHWPARAMS3_FSPHY_IFC(n) (((n) & (3 << 4)) >> 4) n 356 drivers/usb/dwc3/core.h #define DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(n) (((n) & (0x0f << 13)) >> 13) n 368 drivers/usb/dwc3/core.h #define DWC3_GHWPARAMS7_RAM1_DEPTH(n) ((n) & 0xffff) n 369 drivers/usb/dwc3/core.h #define DWC3_GHWPARAMS7_RAM2_DEPTH(n) (((n) >> 16) & 0xffff) n 390 drivers/usb/dwc3/core.h #define DWC3_DCFG_NUMP(n) (((n) >> DWC3_DCFG_NUMP_SHIFT) & 0x1f) n 400 drivers/usb/dwc3/core.h #define DWC3_DCTL_HIRD_THRES(n) ((n) << 24) n 406 drivers/usb/dwc3/core.h #define DWC3_DCTL_TRGTULST(n) ((n) << 17) n 414 drivers/usb/dwc3/core.h #define DWC3_DCTL_NYET_THRES(n) (((n) & 0xf) << 20) n 428 drivers/usb/dwc3/core.h #define DWC3_DCTL_ULSTCHNGREQ(n) (((n) << 5) & DWC3_DCTL_ULSTCHNGREQ_MASK) n 466 drivers/usb/dwc3/core.h #define DWC3_DSTS_USBLNKST(n) (((n) & DWC3_DSTS_USBLNKST_MASK) >> 18) n 471 drivers/usb/dwc3/core.h #define DWC3_DSTS_SOFFN(n) (((n) & DWC3_DSTS_SOFFN_MASK) >> 3) n 495 drivers/usb/dwc3/core.h #define DWC3_DGCMD_STATUS(n) (((n) >> 12) & 0x0F) n 501 drivers/usb/dwc3/core.h #define DWC3_DGCMDPAR_FIFO_NUM(n) ((n) << 0) n 533 drivers/usb/dwc3/core.h #define DWC3_DALEPENA_EP(n) BIT(n) n 772 drivers/usb/dwc3/core.h #define DWC3_TRB_SIZE_LENGTH(n) ((n) & DWC3_TRB_SIZE_MASK) n 773 drivers/usb/dwc3/core.h #define DWC3_TRB_SIZE_PCM1(n) (((n) & 0x03) << 24) n 774 drivers/usb/dwc3/core.h #define DWC3_TRB_SIZE_TRBSTS(n) (((n) & (0x0f << 28)) >> 28) n 786 drivers/usb/dwc3/core.h #define DWC3_TRB_CTRL_TRBCTL(n) (((n) & 0x3f) << 4) n 789 drivers/usb/dwc3/core.h #define DWC3_TRB_CTRL_SID_SOFN(n) (((n) & 0xffff) << 14) n 790 drivers/usb/dwc3/core.h #define DWC3_TRB_CTRL_GET_SID_SOFN(n) (((n) & (0xffff << 14)) >> 14) n 792 drivers/usb/dwc3/core.h #define DWC3_TRBCTL_TYPE(n) ((n) & (0x3f << 4)) n 841 drivers/usb/dwc3/core.h #define DWC3_MODE(n) ((n) & 0x7) n 843 drivers/usb/dwc3/core.h #define DWC3_MDWIDTH(n) (((n) & 0xff00) >> 8) n 846 drivers/usb/dwc3/core.h #define DWC3_NUM_INT(n) (((n) & (0x3f << 15)) >> 15) n 857 drivers/usb/dwc3/core.h #define DWC3_RAM1_DEPTH(n) ((n) & 0xffff) n 1298 drivers/usb/dwc3/core.h #define DEPEVT_STATUS_CONTROL_PHASE(n) ((n) & 3) n 1307 drivers/usb/dwc3/core.h #define DEPEVT_PARAMETER_CMD(n) (((n) & (0xf << 8)) >> 8) n 36 drivers/usb/dwc3/debugfs.c #define dump_ep_register_set(n) \ n 38 drivers/usb/dwc3/debugfs.c .name = "DEPCMDPAR2("__stringify(n)")", \ n 39 drivers/usb/dwc3/debugfs.c .offset = DWC3_DEP_BASE(n) + \ n 43 drivers/usb/dwc3/debugfs.c .name = "DEPCMDPAR1("__stringify(n)")", \ n 44 drivers/usb/dwc3/debugfs.c .offset = DWC3_DEP_BASE(n) + \ n 48 drivers/usb/dwc3/debugfs.c .name = "DEPCMDPAR0("__stringify(n)")", \ n 49 drivers/usb/dwc3/debugfs.c .offset = DWC3_DEP_BASE(n) + \ n 53 drivers/usb/dwc3/debugfs.c .name = "DEPCMD("__stringify(n)")", \ n 54 drivers/usb/dwc3/debugfs.c .offset = DWC3_DEP_BASE(n) + \ n 29 drivers/usb/dwc3/dwc3-keystone.c #define USBSS_IRQ_EOI_LINE(n) BIT(n) n 60 drivers/usb/dwc3/dwc3-st.c #define SEL_OVERRIDE_VBUSVALID(n) (n << 0) n 61 
drivers/usb/dwc3/dwc3-st.c #define SEL_OVERRIDE_POWERPRESENT(n) (n << 4) n 62 drivers/usb/dwc3/dwc3-st.c #define SEL_OVERRIDE_BVALID(n) (n << 8) n 71 drivers/usb/dwc3/dwc3-st.c #define USB3_FORCE_OPMODE(n) (n << 5) n 30 drivers/usb/dwc3/gadget.c #define DWC3_ALIGN_FRAME(d, n) (((d)->frame_number + ((d)->interval * (n))) \ n 1174 drivers/usb/dwc3/gadget.c struct dwc3_request *req, *n; n 1196 drivers/usb/dwc3/gadget.c list_for_each_entry_safe(req, n, &dep->pending_list, list) { n 23 drivers/usb/dwc3/gadget.h #define DWC3_DEPCFG_INT_NUM(n) (((n) & 0x1f) << 0) n 29 drivers/usb/dwc3/gadget.h #define DWC3_DEPCFG_BINTERVAL_M1(n) (((n) & 0xff) << 16) n 31 drivers/usb/dwc3/gadget.h #define DWC3_DEPCFG_EP_NUMBER(n) (((n) & 0x1f) << 25) n 36 drivers/usb/dwc3/gadget.h #define DWC3_DEPCFG_EP_TYPE(n) (((n) & 0x3) << 1) n 37 drivers/usb/dwc3/gadget.h #define DWC3_DEPCFG_MAX_PACKET_SIZE(n) (((n) & 0x7ff) << 3) n 38 drivers/usb/dwc3/gadget.h #define DWC3_DEPCFG_FIFO_NUMBER(n) (((n) & 0x1f) << 17) n 39 drivers/usb/dwc3/gadget.h #define DWC3_DEPCFG_BURST_SIZE(n) (((n) & 0xf) << 22) n 40 drivers/usb/dwc3/gadget.h #define DWC3_DEPCFG_DATA_SEQ_NUM(n) ((n) << 26) n 49 drivers/usb/dwc3/gadget.h #define DWC3_DEPXFERCFG_NUM_XFER_RES(n) ((n) & 0xffff) n 913 drivers/usb/early/ehci-dbgp.c static void early_dbgp_write(struct console *con, const char *str, u32 n) n 939 drivers/usb/early/ehci-dbgp.c while (n > 0) { n 940 drivers/usb/early/ehci-dbgp.c for (chunk = 0; chunk < DBGP_MAX_PACKET && n > 0; n 941 drivers/usb/early/ehci-dbgp.c str++, chunk++, n--) { n 946 drivers/usb/early/ehci-dbgp.c n++; n 871 drivers/usb/early/xhci-dbc.c static void early_xdbc_write(struct console *con, const char *str, u32 n) n 880 drivers/usb/early/xhci-dbc.c while (n > 0) { n 881 drivers/usb/early/xhci-dbc.c for (chunk = 0; chunk < XDBC_MAX_PACKET && n > 0; str++, chunk++, n--) { n 887 drivers/usb/early/xhci-dbc.c n++; n 1330 drivers/usb/gadget/composite.c unsigned n; n 1334 drivers/usb/gadget/composite.c for (n = 0; n < n_strings; n++) { n 1367 drivers/usb/gadget/composite.c int usb_string_ids_n(struct usb_composite_dev *c, unsigned n) n 1370 drivers/usb/gadget/composite.c if (unlikely(n > 254 || (unsigned)next + n > 254)) n 1372 drivers/usb/gadget/composite.c c->next_string_id += n; n 1527 drivers/usb/gadget/composite.c int j, count, n, ret; n 1538 drivers/usb/gadget/composite.c n = ext_prop->data_len + n 1540 drivers/usb/gadget/composite.c if (count + n >= USB_COMP_EP0_OS_DESC_BUFSIZ) n 1542 drivers/usb/gadget/composite.c usb_ext_prop_put_size(buf, n); n 1568 drivers/usb/gadget/composite.c buf += n; n 1569 drivers/usb/gadget/composite.c count += n; n 468 drivers/usb/gadget/function/f_fs.c size_t n) n 477 drivers/usb/gadget/function/f_fs.c const size_t size = n * sizeof *events; n 488 drivers/usb/gadget/function/f_fs.c } while (++i < n); n 490 drivers/usb/gadget/function/f_fs.c ffs->ev.count -= n; n 492 drivers/usb/gadget/function/f_fs.c memmove(ffs->ev.types, ffs->ev.types + n, n 506 drivers/usb/gadget/function/f_fs.c size_t n; n 538 drivers/usb/gadget/function/f_fs.c n = len / sizeof(struct usb_functionfs_event); n 539 drivers/usb/gadget/function/f_fs.c if (unlikely(!n)) { n 557 drivers/usb/gadget/function/f_fs.c min(n, (size_t)ffs->ev.count)); n 2772 drivers/usb/gadget/function/f_fs.c unsigned n = ffs->ev.count; n 2773 drivers/usb/gadget/function/f_fs.c for (; n; --n, ++ev) n 2594 drivers/usb/gadget/function/f_mass_storage.c static void _fsg_common_free_buffers(struct fsg_buffhd *buffhds, unsigned n) n 2598 
drivers/usb/gadget/function/f_mass_storage.c while (n--) { n 2606 drivers/usb/gadget/function/f_mass_storage.c int fsg_common_set_num_buffers(struct fsg_common *common, unsigned int n) n 2611 drivers/usb/gadget/function/f_mass_storage.c buffhds = kcalloc(n, sizeof(*buffhds), GFP_KERNEL); n 2617 drivers/usb/gadget/function/f_mass_storage.c i = n; n 2630 drivers/usb/gadget/function/f_mass_storage.c common->fsg_num_buffers = n; n 2640 drivers/usb/gadget/function/f_mass_storage.c _fsg_common_free_buffers(buffhds, n); n 2655 drivers/usb/gadget/function/f_mass_storage.c static void _fsg_common_remove_luns(struct fsg_common *common, int n) n 2659 drivers/usb/gadget/function/f_mass_storage.c for (i = 0; i < n; ++i) n 120 drivers/usb/gadget/function/f_mass_storage.h int fsg_common_set_num_buffers(struct fsg_common *common, unsigned int n); n 875 drivers/usb/gadget/function/f_midi.c int status, n, jack = 1, i = 0, endpoint_descriptor_index = 0; n 936 drivers/usb/gadget/function/f_midi.c n = USB_DT_MS_HEADER_SIZE n 939 drivers/usb/gadget/function/f_midi.c ms_header_desc.wTotalLength = cpu_to_le16(n); n 944 drivers/usb/gadget/function/f_midi.c for (n = 0; n < midi->in_ports; n++) { n 945 drivers/usb/gadget/function/f_midi.c struct usb_midi_in_jack_descriptor *in_ext = &jack_in_ext_desc[n]; n 946 drivers/usb/gadget/function/f_midi.c struct usb_midi_out_jack_descriptor_1 *out_emb = &jack_out_emb_desc[n]; n 968 drivers/usb/gadget/function/f_midi.c ms_in_desc.baAssocJackID[n] = out_emb->bJackID; n 972 drivers/usb/gadget/function/f_midi.c for (n = 0; n < midi->out_ports; n++) { n 973 drivers/usb/gadget/function/f_midi.c struct usb_midi_in_jack_descriptor *in_emb = &jack_in_emb_desc[n]; n 974 drivers/usb/gadget/function/f_midi.c struct usb_midi_out_jack_descriptor_1 *out_ext = &jack_out_ext_desc[n]; n 996 drivers/usb/gadget/function/f_midi.c ms_out_desc.baAssocJackID[n] = in_emb->bJackID; n 498 drivers/usb/gadget/function/f_rndis.c u32 n; n 501 drivers/usb/gadget/function/f_rndis.c buf = rndis_get_next_response(rndis->params, &n); n 503 drivers/usb/gadget/function/f_rndis.c memcpy(req->buf, buf, n); n 507 drivers/usb/gadget/function/f_rndis.c value = n; n 1314 drivers/usb/gadget/function/f_tcm.c const char *n; n 1317 drivers/usb/gadget/function/f_tcm.c n = strstr(name, "naa."); n 1318 drivers/usb/gadget/function/f_tcm.c if (!n) n 1320 drivers/usb/gadget/function/f_tcm.c n += 4; n 1321 drivers/usb/gadget/function/f_tcm.c len = strlen(n); n 1324 drivers/usb/gadget/function/f_tcm.c return n; n 1013 drivers/usb/gadget/function/rndis.c rndis_resp_t *r, *n; n 1015 drivers/usb/gadget/function/rndis.c list_for_each_entry_safe(r, n, &params->resp_queue, list) { n 1026 drivers/usb/gadget/function/rndis.c rndis_resp_t *r, *n; n 1030 drivers/usb/gadget/function/rndis.c list_for_each_entry_safe(r, n, &params->resp_queue, list) { n 345 drivers/usb/gadget/function/u_ether.c static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n) n 350 drivers/usb/gadget/function/u_ether.c if (!n) n 354 drivers/usb/gadget/function/u_ether.c i = n; n 384 drivers/usb/gadget/function/u_ether.c static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n) n 389 drivers/usb/gadget/function/u_ether.c status = prealloc(&dev->tx_reqs, link->in_ep, n); n 392 drivers/usb/gadget/function/u_ether.c status = prealloc(&dev->rx_reqs, link->out_ep, n); n 396 drivers/usb/gadget/function/u_serial.c unsigned n; n 400 drivers/usb/gadget/function/u_serial.c n = port->n_read; n 401 drivers/usb/gadget/function/u_serial.c if (n) { n 
402 drivers/usb/gadget/function/u_serial.c packet += n; n 403 drivers/usb/gadget/function/u_serial.c size -= n; n 507 drivers/usb/gadget/function/u_serial.c int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE; n 513 drivers/usb/gadget/function/u_serial.c for (i = 0; i < n; i++) { n 114 drivers/usb/gadget/function/uvc_configfs.c struct config_group *child, *n; n 116 drivers/usb/gadget/function/uvc_configfs.c list_for_each_entry_safe(child, n, &group->default_groups, group_entry) { n 1281 drivers/usb/gadget/function/uvc_configfs.c int ret = 0, n = 0; n 1296 drivers/usb/gadget/function/uvc_configfs.c ret = __uvcg_iter_frm_intrv(page, len, __uvcg_count_frm_intrv, &n); n 1300 drivers/usb/gadget/function/uvc_configfs.c tmp = frm_intrv = kcalloc(n, sizeof(u32), GFP_KERNEL); n 1314 drivers/usb/gadget/function/uvc_configfs.c ch->frame.b_frame_interval_type = n; n 1315 drivers/usb/gadget/function/uvc_configfs.c sort(ch->dw_frame_interval, n, sizeof(*ch->dw_frame_interval), n 2023 drivers/usb/gadget/function/uvc_configfs.c static int __uvcg_cnt_strm(void *priv1, void *priv2, void *priv3, int n, n 2079 drivers/usb/gadget/function/uvc_configfs.c static int __uvcg_fill_strm(void *priv1, void *priv2, void *priv3, int n, n 2114 drivers/usb/gadget/function/uvc_configfs.c u->desc.bFormatIndex = n + 1; n 2122 drivers/usb/gadget/function/uvc_configfs.c m->desc.bFormatIndex = n + 1; n 92 drivers/usb/gadget/legacy/hid.c struct hidg_func_node *e, *n; n 113 drivers/usb/gadget/legacy/hid.c list_for_each_entry(n, &hidg_func_list, node) { n 114 drivers/usb/gadget/legacy/hid.c if (n == e) n 116 drivers/usb/gadget/legacy/hid.c usb_remove_function(c, n->f); n 117 drivers/usb/gadget/legacy/hid.c usb_put_function(n->f); n 135 drivers/usb/gadget/legacy/hid.c struct hidg_func_node *n, *m; n 145 drivers/usb/gadget/legacy/hid.c list_for_each_entry(n, &hidg_func_list, node) { n 146 drivers/usb/gadget/legacy/hid.c n->fi = usb_get_function_instance("hid"); n 147 drivers/usb/gadget/legacy/hid.c if (IS_ERR(n->fi)) { n 148 drivers/usb/gadget/legacy/hid.c status = PTR_ERR(n->fi); n 151 drivers/usb/gadget/legacy/hid.c hid_opts = container_of(n->fi, struct f_hid_opts, func_inst); n 152 drivers/usb/gadget/legacy/hid.c hid_opts->subclass = n->func->subclass; n 153 drivers/usb/gadget/legacy/hid.c hid_opts->protocol = n->func->protocol; n 154 drivers/usb/gadget/legacy/hid.c hid_opts->report_length = n->func->report_length; n 155 drivers/usb/gadget/legacy/hid.c hid_opts->report_desc_length = n->func->report_desc_length; n 156 drivers/usb/gadget/legacy/hid.c hid_opts->report_desc = n->func->report_desc; n 196 drivers/usb/gadget/legacy/hid.c if (m == n) n 205 drivers/usb/gadget/legacy/hid.c struct hidg_func_node *n; n 207 drivers/usb/gadget/legacy/hid.c list_for_each_entry(n, &hidg_func_list, node) { n 208 drivers/usb/gadget/legacy/hid.c usb_put_function(n->f); n 209 drivers/usb/gadget/legacy/hid.c usb_put_function_instance(n->fi); n 240 drivers/usb/gadget/legacy/hid.c struct hidg_func_node *e, *n; n 242 drivers/usb/gadget/legacy/hid.c list_for_each_entry_safe(e, n, &hidg_func_list, node) { n 1012 drivers/usb/gadget/legacy/inode.c unsigned i, n; n 1014 drivers/usb/gadget/legacy/inode.c n = len / sizeof (struct usb_gadgetfs_event); n 1015 drivers/usb/gadget/legacy/inode.c if (dev->ev_next < n) n 1016 drivers/usb/gadget/legacy/inode.c n = dev->ev_next; n 1019 drivers/usb/gadget/legacy/inode.c for (i = 0; i < n; i++) { n 1022 drivers/usb/gadget/legacy/inode.c n = i + 1; n 1027 drivers/usb/gadget/legacy/inode.c len = n * sizeof (struct 
usb_gadgetfs_event); n 1037 drivers/usb/gadget/legacy/inode.c if (dev->ev_next > n) { n 1038 drivers/usb/gadget/legacy/inode.c memmove(&dev->event[0], &dev->event[n], n 1040 drivers/usb/gadget/legacy/inode.c * (dev->ev_next - n)); n 1042 drivers/usb/gadget/legacy/inode.c dev->ev_next -= n; n 22 drivers/usb/gadget/u_f.h #define vla_item(groupname, type, name, n) \ n 26 drivers/usb/gadget/u_f.h size_t size = (n) * sizeof(type); \ n 31 drivers/usb/gadget/u_f.h #define vla_item_with_sz(groupname, type, name, n) \ n 32 drivers/usb/gadget/u_f.h size_t groupname##_##name##__sz = (n) * sizeof(type); \ n 88 drivers/usb/gadget/udc/aspeed-vhub/vhub.h #define VHUB_EP_IRQ(n) (1 << (n)) n 37 drivers/usb/gadget/udc/at91_udc.h #define AT91_UDP_EP(n) (1 << (n)) /* Endpoint Interrupt Status */ n 48 drivers/usb/gadget/udc/at91_udc.h #define AT91_UDP_CSR(n) (0x30+((n)*4)) /* Endpoint Control/Status Registers 0-7 */ n 69 drivers/usb/gadget/udc/at91_udc.h #define AT91_UDP_FDR(n) (0x50+((n)*4)) /* Endpoint FIFO Data Registers 0-7 */ n 325 drivers/usb/gadget/udc/atmel_usba_udc.c int n; n 333 drivers/usb/gadget/udc/atmel_usba_udc.c n = 0; n 337 drivers/usb/gadget/udc/atmel_usba_udc.c n = ARRAY_SIZE(mode_1_cfg); n 341 drivers/usb/gadget/udc/atmel_usba_udc.c n = ARRAY_SIZE(mode_2_cfg); n 345 drivers/usb/gadget/udc/atmel_usba_udc.c n = ARRAY_SIZE(mode_3_cfg); n 349 drivers/usb/gadget/udc/atmel_usba_udc.c n = ARRAY_SIZE(mode_4_cfg); n 354 drivers/usb/gadget/udc/atmel_usba_udc.c return n; n 1792 drivers/usb/gadget/udc/atmel_usba_udc.c int i, n; n 1840 drivers/usb/gadget/udc/atmel_usba_udc.c n = fifo_mode ? udc->num_ep : udc->configured_ep; n 1841 drivers/usb/gadget/udc/atmel_usba_udc.c for (i = 1; i < n; i++) { n 1077 drivers/usb/gadget/udc/bcm63xx_udc.c struct bcm63xx_req *breq, *n; n 1093 drivers/usb/gadget/udc/bcm63xx_udc.c list_for_each_entry_safe(breq, n, &bep->queue, queue) { n 89 drivers/usb/gadget/udc/bdc/bdc.h #define BDC_SRRBAL(n) (0x200 + (n * 0x10)) n 90 drivers/usb/gadget/udc/bdc/bdc.h #define BDC_SRRBAH(n) (0x204 + (n * 0x10)) n 91 drivers/usb/gadget/udc/bdc/bdc.h #define BDC_SRRINT(n) (0x208 + (n * 0x10)) n 92 drivers/usb/gadget/udc/bdc/bdc.h #define BDC_INTCTLS(n) (0x20c + (n * 0x10)) n 1463 drivers/usb/gadget/udc/core.c struct device_attribute *attr, const char *buf, size_t n) n 1470 drivers/usb/gadget/udc/core.c return n; n 1475 drivers/usb/gadget/udc/core.c struct device_attribute *attr, const char *buf, size_t n) n 1495 drivers/usb/gadget/udc/core.c return n; n 21 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_OFFSET_EPSET0(n) (0x20 + (n - 1) * 0x30) n 22 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_OFFSET_EPSET1(n) (0x24 + (n - 1) * 0x30) n 23 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_OFFSET_EPSET2(n) (0x28 + (n - 1) * 0x30) n 24 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_OFFSET_EPFFR(n) (0x2c + (n - 1) * 0x30) n 25 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_OFFSET_EPSTRID(n) (0x40 + (n - 1) * 0x30) n 54 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_OFFSET_EPPRD_W0(n) (0x520 + (n - 1) * 0x10) n 55 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_OFFSET_EPPRD_W1(n) (0x524 + (n - 1) * 0x10) n 56 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_OFFSET_EPPRD_W2(n) (0x528 + (n - 1) * 0x10) n 57 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_OFFSET_EPRD_PTR(n) (0x52C + (n - 1) * 0x10) n 60 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_OFFSET_EPPORT(n) (0x1010 + (n - 1) * 0x10) n 83 drivers/usb/gadget/udc/fusb300_udc.h #define 
FUSB300_GTM_TST_CUR_EP_ENTRY(n) ((n & 0xF) << 12) n 84 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_GTM_TST_EP_ENTRY(n) ((n & 0xF) << 8) n 85 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_GTM_TST_EP_NUM(n) ((n & 0xF) << 4) n 270 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGR0_EPn_PRD_INT(n) (1 << (n + 16)) n 287 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGR0_EPn_FIFO_INT(n) (1 << n) n 358 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGR2_EP_STR_ACCEPT_INT(n) (1 << (5 * n - 1)) n 359 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGR2_EP_STR_RESUME_INT(n) (1 << (5 * n - 2)) n 360 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGR2_EP_STR_REQ_INT(n) (1 << (5 * n - 3)) n 361 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGR2_EP_STR_NOTRDY_INT(n) (1 << (5 * n - 4)) n 362 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGR2_EP_STR_PRIME_INT(n) (1 << (5 * n - 5)) n 398 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGR3_EP_STR_ACCEPT_INT(n) (1 << (5 * (n - 6) - 1)) n 399 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGR3_EP_STR_RESUME_INT(n) (1 << (5 * (n - 6) - 2)) n 400 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGR3_EP_STR_REQ_INT(n) (1 << (5 * (n - 6) - 3)) n 401 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGR3_EP_STR_NOTRDY_INT(n) (1 << (5 * (n - 6) - 4)) n 402 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGR3_EP_STR_PRIME_INT(n) (1 << (5 * (n - 6) - 5)) n 439 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGR4_EP_STR_ACCEPT_INT(n) (1 << (5 * (n - 12) - 1)) n 440 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGR4_EP_STR_RESUME_INT(n) (1 << (5 * (n - 12) - 2)) n 441 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGR4_EP_STR_REQ_INT(n) (1 << (5 * (n - 12) - 3)) n 442 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGR4_EP_STR_NOTRDY_INT(n) (1 << (5 * (n - 12) - 4)) n 443 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGR4_EP_STR_PRIME_INT(n) (1 << (5 * (n - 12) - 5)) n 448 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGR5_EP_STL_INT(n) (1 << n) n 468 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGER0_EEPn_PRD_INT(n) (1 << (n + 16)) n 485 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGER0_EEPn_FIFO_INT(n) (1 << n) n 525 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGER2_EEP_STR_ACCEPT_INT(n) (1 << (5 * n - 1)) n 526 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGER2_EEP_STR_RESUME_INT(n) (1 << (5 * n - 2)) n 527 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGER2_EEP_STR_REQ_INT(n) (1 << (5 * n - 3)) n 528 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGER2_EEP_STR_NOTRDY_INT(n) (1 << (5 * n - 4)) n 529 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGER2_EEP_STR_PRIME_INT(n) (1 << (5 * n - 5)) n 535 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGER3_EEP_STR_ACCEPT_INT(n) (1 << (5 * (n - 6) - 1)) n 536 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGER3_EEP_STR_RESUME_INT(n) (1 << (5 * (n - 6) - 2)) n 537 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGER3_EEP_STR_REQ_INT(n) (1 << (5 * (n - 6) - 3)) n 538 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGER3_EEP_STR_NOTRDY_INT(n) (1 << (5 * (n - 6) - 4)) n 539 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGER3_EEP_STR_PRIME_INT(n) (1 << (5 * (n - 6) - 5)) n 545 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGER4_EEP_RX0_INT(n) (1 << (n + 16)) n 546 drivers/usb/gadget/udc/fusb300_udc.h #define 
FUSB300_IGER4_EEP_STR_ACCEPT_INT(n) (1 << (5 * (n - 6) - 1)) n 547 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGER4_EEP_STR_RESUME_INT(n) (1 << (5 * (n - 6) - 2)) n 548 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGER4_EEP_STR_REQ_INT(n) (1 << (5 * (n - 6) - 3)) n 549 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGER4_EEP_STR_NOTRDY_INT(n) (1 << (5 * (n - 6) - 4)) n 550 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGER4_EEP_STR_PRIME_INT(n) (1 << (5 * (n - 6) - 5)) n 569 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_EPPRDR_EP_PRD_RDY(n) (1 << n) n 592 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_IGER5_EEP_STL_INT(n) (1 << n) n 604 drivers/usb/gadget/udc/fusb300_udc.h #define FUSB300_EPPRD0_BTC(n) (n & 0xFFFFFF) n 24 drivers/usb/gadget/udc/goku_udc.h #define INT_EPxDATASET(n) (0x00020 << (n)) /* 0 < n < 4 */ n 28 drivers/usb/gadget/udc/goku_udc.h #define INT_EPnNAK(n) (0x00100 << (n)) /* 0 < n < 4 */ n 157 drivers/usb/gadget/udc/goku_udc.h #define COMMAND_EP(n) ((n) << 4) n 333 drivers/usb/gadget/udc/lpc32xx_udc.c #define DD_SETUP_PACKETLEN(n) (((n) & 0x7FF) << 5) n 334 drivers/usb/gadget/udc/lpc32xx_udc.c #define DD_SETUP_DMALENBYTES(n) (((n) & 0xFFFF) << 16) n 348 drivers/usb/gadget/udc/lpc32xx_udc.c #define DD_STATUS_MLEN(n) (((n) >> 8) & 0x3F) n 349 drivers/usb/gadget/udc/lpc32xx_udc.c #define DD_STATUS_CURDMACNT(n) (((n) >> 16) & 0xFFFF) n 1149 drivers/usb/gadget/udc/lpc32xx_udc.c int n, i, bl; n 1160 drivers/usb/gadget/udc/lpc32xx_udc.c for (n = 0; n < cbytes; n += 4) n 1167 drivers/usb/gadget/udc/lpc32xx_udc.c for (n = 0; n < bl; n++) n 1168 drivers/usb/gadget/udc/lpc32xx_udc.c data[cbytes + n] = ((tmp >> (n * 8)) & 0xFF); n 1176 drivers/usb/gadget/udc/lpc32xx_udc.c for (n = 0; n < bytes; n += 4) { n 1179 drivers/usb/gadget/udc/lpc32xx_udc.c bl = bytes - n; n 1184 drivers/usb/gadget/udc/lpc32xx_udc.c data[n + i] = (u8) ((tmp >> (i * 8)) & 0xFF); n 1193 drivers/usb/gadget/udc/lpc32xx_udc.c for (n = 0; n < cbytes; n += 4) { n 1203 drivers/usb/gadget/udc/lpc32xx_udc.c for (n = 0; n < bl; n++) n 1204 drivers/usb/gadget/udc/lpc32xx_udc.c data[cbytes + n] = ((tmp >> (n * 8)) & 0xFF); n 1250 drivers/usb/gadget/udc/lpc32xx_udc.c int n, i, bl; n 1261 drivers/usb/gadget/udc/lpc32xx_udc.c for (n = 0; n < cbytes; n += 4) n 1268 drivers/usb/gadget/udc/lpc32xx_udc.c for (n = 0; n < bl; n++) n 1269 drivers/usb/gadget/udc/lpc32xx_udc.c tmp |= data[cbytes + n] << (n * 8); n 1278 drivers/usb/gadget/udc/lpc32xx_udc.c for (n = 0; n < bytes; n += 4) { n 1279 drivers/usb/gadget/udc/lpc32xx_udc.c bl = bytes - n; n 1285 drivers/usb/gadget/udc/lpc32xx_udc.c tmp |= data[n + i] << (i * 8); n 1296 drivers/usb/gadget/udc/lpc32xx_udc.c for (n = 0; n < cbytes; n += 4) { n 1306 drivers/usb/gadget/udc/lpc32xx_udc.c for (n = 0; n < bl; n++) n 1307 drivers/usb/gadget/udc/lpc32xx_udc.c tmp |= data[cbytes + n] << (n * 8); n 79 drivers/usb/gadget/udc/omap_udc.h # define UDC_TX_DONE_IE(n) (1 << (4 * (n) - 2)) n 80 drivers/usb/gadget/udc/omap_udc.h # define UDC_RX_CNT_IE(n) (1 << (4 * (n) - 3)) n 81 drivers/usb/gadget/udc/omap_udc.h # define UDC_RX_EOT_IE(n) (1 << (4 * (n) - 4)) n 59 drivers/usb/gadget/udc/pxa27x_udc.h #define UDCICR_INT(n, intr) (((intr) & 0x03) << (((n) & 0x0F) * 2)) n 69 drivers/usb/gadget/udc/pxa27x_udc.h #define UDCISR_INT(n, intr) (((intr) & 0x03) << (((n) & 0x0F) * 2)) n 250 drivers/usb/gadget/udc/pxa27x_udc.h #define USB_EP_IN_BULK(n) USB_EP_BULK(n, "ep" #n "in-bulk", 1, \ n 252 drivers/usb/gadget/udc/pxa27x_udc.h #define USB_EP_OUT_BULK(n) 
USB_EP_BULK(n, "ep" #n "out-bulk", 0, \ n 254 drivers/usb/gadget/udc/pxa27x_udc.h #define USB_EP_IN_ISO(n) USB_EP_ISO(n, "ep" #n "in-iso", 1, \ n 256 drivers/usb/gadget/udc/pxa27x_udc.h #define USB_EP_OUT_ISO(n) USB_EP_ISO(n, "ep" #n "out-iso", 0, \ n 258 drivers/usb/gadget/udc/pxa27x_udc.h #define USB_EP_IN_INT(n) USB_EP_INT(n, "ep" #n "in-int", 1, \ n 35 drivers/usb/gadget/udc/renesas_usb3.c #define USB3_DMA_CH0_CON(n) (0x030 + ((n) - 1) * 0x10) /* n = 1 to 4 */ n 36 drivers/usb/gadget/udc/renesas_usb3.c #define USB3_DMA_CH0_PRD_ADR(n) (0x034 + ((n) - 1) * 0x10) /* n = 1 to 4 */ n 75 drivers/usb/gadget/udc/renesas_usb3.c #define AXI_INT_PRDEN_CLR_STA_SHIFT(n) (16 + (n) - 1) n 76 drivers/usb/gadget/udc/renesas_usb3.c #define AXI_INT_PRDERR_STA_SHIFT(n) (0 + (n) - 1) n 77 drivers/usb/gadget/udc/renesas_usb3.c #define AXI_INT_PRDEN_CLR_STA(n) (1 << AXI_INT_PRDEN_CLR_STA_SHIFT(n)) n 78 drivers/usb/gadget/udc/renesas_usb3.c #define AXI_INT_PRDERR_STA(n) (1 << AXI_INT_PRDERR_STA_SHIFT(n)) n 81 drivers/usb/gadget/udc/renesas_usb3.c #define DMA_INT(n) BIT(n) n 87 drivers/usb/gadget/udc/renesas_usb3.c #define DMA_COM_PIPE_NO(n) (((n) << DMA_CON_PIPE_NO_SHIFT) & \ n 103 drivers/usb/gadget/udc/renesas_usb3.c #define USB_COM_CON_DEV_ADDR(n) (((n) << USB_COM_CON_DEV_ADDR_SHIFT) & \ n 114 drivers/usb/gadget/udc/renesas_usb3.c #define USB20_CON_B2_TSTMOD(n) (((n) << USB20_CON_B2_TSTMOD_SHIFT) & \ n 155 drivers/usb/gadget/udc/renesas_usb3.c #define USB_INT_2_PIPE(n) BIT(n) n 166 drivers/usb/gadget/udc/renesas_usb3.c #define PX_CON_BYTE_EN_BYTES(n) (((n) << PX_CON_BYTE_EN_SHIFT) & \ n 205 drivers/usb/gadget/udc/renesas_usb3.c #define PN_MOD_TYPE(n) (((n) << PN_MOD_TYPE_SHIFT) & \ n 208 drivers/usb/gadget/udc/renesas_usb3.c #define PN_MOD_EPNUM(n) ((n) & PN_MOD_EPNUM_MASK) n 220 drivers/usb/gadget/udc/renesas_usb3.c #define PN_RAMMAP_MPKT(n) (((n) << PN_RAMMAP_MPKT_SHIFT) & \ n 224 drivers/usb/gadget/udc/renesas_usb3.c #define PN_RAMMAP_RAMIF(n) (((n) << PN_RAMMAP_RAMIF_SHIFT) & \ n 378 drivers/usb/gadget/udc/renesas_usb3.c #define usb3_get_ep(usb3, n) ((usb3)->usb3_ep + (n)) n 15 drivers/usb/host/ehci-sysfs.c int nports, index, n; n 24 drivers/usb/host/ehci-sysfs.c n = scnprintf(ptr, count, "%d\n", index + 1); n 25 drivers/usb/host/ehci-sysfs.c ptr += n; n 26 drivers/usb/host/ehci-sysfs.c count -= n; n 73 drivers/usb/host/ehci-sysfs.c int n; n 76 drivers/usb/host/ehci-sysfs.c n = scnprintf(buf, PAGE_SIZE, "%d\n", ehci->uframe_periodic_max); n 77 drivers/usb/host/ehci-sysfs.c return n; n 292 drivers/usb/host/ehci-timer.c struct ehci_itd *itd, *n; n 300 drivers/usb/host/ehci-timer.c list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) { n 1241 drivers/usb/host/fotg210-hcd.c struct fotg210_itd *itd, *n; n 1246 drivers/usb/host/fotg210-hcd.c list_for_each_entry_safe(itd, n, &fotg210->cached_itd_list, itd_list) { n 4694 drivers/usb/host/fotg210-hcd.c int n; n 4697 drivers/usb/host/fotg210-hcd.c n = scnprintf(buf, PAGE_SIZE, "%d\n", fotg210->uframe_periodic_max); n 4698 drivers/usb/host/fotg210-hcd.c return n; n 117 drivers/usb/host/isp116x.h #define HCHWCFG_DBWIDTH(n) (((n) << 3) & HCHWCFG_DBWIDTH_MASK) n 123 drivers/usb/host/isp116x.h #define HCDMACFG_BURST_LEN(n) (((n) << 5) & HCDMACFG_BURST_LEN_MASK) n 102 drivers/usb/host/isp1362.h #define HCHWCFG_DBWIDTH(n) (((n) << 3) & HCHWCFG_DBWIDTH_MASK) n 110 drivers/usb/host/isp1362.h #define HCDMACFG_BURST_LEN(n) (((n) << 5) & HCDMACFG_BURST_LEN_MASK) n 116 drivers/usb/host/isp1362.h #define HCDMACFG_BUF_TYPE(n) (((n) << 1) & HCDMACFG_BUF_TYPE_MASK) 
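Most of the header entries in this stretch (dwc3/core.h, bdc/bdc.h, fusb300_udc.h, isp1362.h, renesas_usb3.c) follow one common pattern: a per-instance register offset is computed as base + n * stride, and a field value is shifted into position and masked. A standalone sketch of that pattern follows; the EXAMPLE_* names are made up for illustration, only the arithmetic is taken from the listing (the field macro mirrors the shape of DWC3_GDBGFIFOSPACE_TYPE above).

/*
 * Illustrative sketch only.  EXAMPLE_EVNTADRLO(n) computes the offset of
 * the n-th copy of a register block; EXAMPLE_FIELD(n) packs a 4-bit value
 * into bits 8:5 of a register word.
 */
#include <stdio.h>

#define EXAMPLE_EVNTADRLO(n)    (0xc400 + ((n) * 0x10))   /* base + n * stride */
#define EXAMPLE_FIELD(n)        (((n) << 5) & 0x1e0)      /* 4-bit field at bits 8:5 */

int main(void)
{
        /* offset of event buffer 2's low-address register: 0xc420 */
        printf("reg offset: 0x%x\n", EXAMPLE_EVNTADRLO(2));

        /* value 0x9 packed into the field: 0x120 */
        printf("field bits: 0x%x\n", EXAMPLE_FIELD(0x9));
        return 0;
}

Keeping the mask in the macro, as these headers do, means an out-of-range argument is silently truncated to the field width instead of corrupting neighbouring bits.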
n 164 drivers/usb/host/isp1362.h #define HCDIRADDR_ADDR(n) (((n) << 0) & HCDIRADDR_ADDR_MASK) n 166 drivers/usb/host/isp1362.h #define HCDIRADDR_COUNT(n) (((n) << 16) & HCDIRADDR_COUNT_MASK) n 492 drivers/usb/host/isp1362.h static inline const char *ISP1362_INT_NAME(int n) n 494 drivers/usb/host/isp1362.h switch (n) { n 603 drivers/usb/host/ohci-q.c int i, this_sg_len, n; n 657 drivers/usb/host/ohci-q.c n = min(this_sg_len, 4096); n 660 drivers/usb/host/ohci-q.c if (n >= data_len || (i == 1 && n >= this_sg_len)) { n 664 drivers/usb/host/ohci-q.c td_fill(ohci, info, data, n, urb, cnt); n 665 drivers/usb/host/ohci-q.c this_sg_len -= n; n 666 drivers/usb/host/ohci-q.c data_len -= n; n 667 drivers/usb/host/ohci-q.c data += n; n 69 drivers/usb/host/u132-hcd.c #define INT_MODULE_PARM(n, v) static int n = v;module_param(n, int, 0444) n 363 drivers/usb/host/xhci-dbgtty.c unsigned int n, size = req->actual; n 366 drivers/usb/host/xhci-dbgtty.c n = port->n_read; n 367 drivers/usb/host/xhci-dbgtty.c if (n) { n 368 drivers/usb/host/xhci-dbgtty.c packet += n; n 369 drivers/usb/host/xhci-dbgtty.c size -= n; n 141 drivers/usb/host/xhci-debugfs.c size_t n, const char *cap_name) n 145 drivers/usb/host/xhci-debugfs.c size_t psic, nregs = n; n 152 drivers/usb/host/xhci-debugfs.c nregs = min(4 + psic, n); n 1905 drivers/usb/host/xhci-mem.c struct xhci_tt_bw_info *tt, *n; n 1906 drivers/usb/host/xhci-mem.c list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) { n 630 drivers/usb/host/xhci.h #define GET_DEV_SPEED(n) (((n) & DEV_SPEED) >> 20) n 837 drivers/usb/isp1760/isp1760-hcd.c int n; n 867 drivers/usb/isp1760/isp1760-hcd.c n = 0; n 903 drivers/usb/isp1760/isp1760-hcd.c n++; n 904 drivers/usb/isp1760/isp1760-hcd.c if (n >= ENQUEUE_DEPTH) n 168 drivers/usb/isp1760/isp1760-regs.h #define DC_IEPTX(n) (1 << (11 + 2 * (n))) n 169 drivers/usb/isp1760/isp1760-regs.h #define DC_IEPRX(n) (1 << (10 + 2 * (n))) n 170 drivers/usb/isp1760/isp1760-regs.h #define DC_IEPRXTX(n) (3 << (10 + 2 * (n))) n 184 drivers/usb/isp1760/isp1760-regs.h #define DC_ENDPIDX(n) ((n) << 1) n 54 drivers/usb/misc/ftdi-elan.c #define INT_MODULE_PARM(n, v) static int n = v;module_param(n, int, 0444) n 766 drivers/usb/misc/sisusbvga/sisusb_init.c unsigned short data, data2, time, i, j, k, m, n, o; n 821 drivers/usb/misc/sisusbvga/sisusb_init.c for (n = 0; n < 3; n++) { n 823 drivers/usb/misc/sisusbvga/sisusb_init.c SiS_WriteDAC(SiS_Pr, DACData, sf, n, n 830 drivers/usb/misc/sisusbvga/sisusb_init.c SiS_WriteDAC(SiS_Pr, DACData, sf, n, n 1327 drivers/usb/mon/mon_bin.c int n; n 1330 drivers/usb/mon/mon_bin.c for (n = 0; n < npages; n++) { n 1333 drivers/usb/mon/mon_bin.c while (n-- != 0) n 1334 drivers/usb/mon/mon_bin.c free_page((unsigned long) map[n].ptr); n 1337 drivers/usb/mon/mon_bin.c map[n].ptr = (unsigned char *) vaddr; n 1338 drivers/usb/mon/mon_bin.c map[n].pg = virt_to_page((void *) vaddr); n 1345 drivers/usb/mon/mon_bin.c int n; n 1347 drivers/usb/mon/mon_bin.c for (n = 0; n < npages; n++) n 1348 drivers/usb/mon/mon_bin.c free_page((unsigned long) map[n].ptr); n 247 drivers/usb/mtu3/mtu3_hw_regs.h #define QMU_RX_ZLP_ERR(n) (BIT(16) << (n)) n 481 drivers/usb/musb/cppi_dma.c int n = 100; n 490 drivers/usb/musb/cppi_dma.c } while (n-- > 0); n 46 drivers/usb/musb/da8xx.c #define DA8XX_USB_GENERIC_RNDIS_EP_SIZE_REG(n) (0x50 + (((n) - 1) << 2)) n 1322 drivers/usb/musb/musb_core.c unsigned i, n; n 1328 drivers/usb/musb/musb_core.c n = musb->config->fifo_cfg_size; n 1338 drivers/usb/musb/musb_core.c n = ARRAY_SIZE(mode_0_cfg); n 1342 
drivers/usb/musb/musb_core.c n = ARRAY_SIZE(mode_1_cfg); n 1346 drivers/usb/musb/musb_core.c n = ARRAY_SIZE(mode_2_cfg); n 1350 drivers/usb/musb/musb_core.c n = ARRAY_SIZE(mode_3_cfg); n 1354 drivers/usb/musb/musb_core.c n = ARRAY_SIZE(mode_4_cfg); n 1358 drivers/usb/musb/musb_core.c n = ARRAY_SIZE(mode_5_cfg); n 1373 drivers/usb/musb/musb_core.c for (i = 0; i < n; i++) { n 1393 drivers/usb/musb/musb_core.c n + 1, musb->config->num_eps * 2 - 1, n 1735 drivers/usb/musb/musb_core.c const char *buf, size_t n) n 1752 drivers/usb/musb/musb_core.c return (status == 0) ? n : status; n 1758 drivers/usb/musb/musb_core.c const char *buf, size_t n) n 1777 drivers/usb/musb/musb_core.c return n; n 1814 drivers/usb/musb/musb_core.c const char *buf, size_t n) n 1828 drivers/usb/musb/musb_core.c return n; n 188 drivers/usb/musb/musb_cppi41.c struct cppi41_dma_channel *cppi41_channel, *n; n 198 drivers/usb/musb/musb_cppi41.c list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list, n 37 drivers/usb/renesas_usbhs/fifo.h #define usbhsf_get_dnfifo(p, n) (&((p)->fifo_info.dfifo[n])) n 179 drivers/usb/roles/class.c usb_role_switch_is_visible(struct kobject *kobj, struct attribute *attr, int n) n 154 drivers/usb/serial/bus.c struct usb_dynid *dynid, *n; n 157 drivers/usb/serial/bus.c list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) { n 386 drivers/usb/serial/garmin_gps.c int n = 0; n 409 drivers/usb/serial/garmin_gps.c while (n < size) { n 411 drivers/usb/serial/garmin_gps.c n++; n 159 drivers/usb/serial/io_ionsp.h #define IOSP_WRITE_UART_REG(n) ((n) & 0x07) // UartReg[ n ] := P1 n 187 drivers/usb/serial/keyspan_usa28msg.h n, // typically a count/status byte n 270 drivers/usb/serial/keyspan_usa49msg.h n, // typically a count/status byte n 281 drivers/usb/storage/usb.c int n; n 283 drivers/usb/storage/usb.c n = strlen(us->unusual_dev->vendorName); n 284 drivers/usb/storage/usb.c memcpy(data+8, us->unusual_dev->vendorName, min(8, n)); n 285 drivers/usb/storage/usb.c n = strlen(us->unusual_dev->productName); n 286 drivers/usb/storage/usb.c memcpy(data+16, us->unusual_dev->productName, min(16, n)); n 271 drivers/usb/typec/bus.c size_t n, u16 svid, u8 mode) n 275 drivers/usb/typec/bus.c for (i = 0; i < n; i++) { n 1060 drivers/usb/typec/ucsi/ucsi_ccg.c const char *buf, size_t n) n 1069 drivers/usb/typec/ucsi/ucsi_ccg.c return n; n 1077 drivers/usb/typec/ucsi/ucsi_ccg.c return n; n 94 drivers/vfio/platform/vfio_platform_private.h extern void __vfio_platform_register_reset(struct vfio_platform_reset_node *n); n 1191 drivers/vfio/vfio_iommu_type1.c struct rb_node *n; n 1197 drivers/vfio/vfio_iommu_type1.c n = rb_first(&iommu->dma_list); n 1199 drivers/vfio/vfio_iommu_type1.c for (; n; n = rb_next(n)) { n 1203 drivers/vfio/vfio_iommu_type1.c dma = rb_entry(n, struct vfio_dma, node); n 1234 drivers/vfio/vfio_iommu_type1.c size_t n = dma->iova + dma->size - iova; n 1238 drivers/vfio/vfio_iommu_type1.c n >> PAGE_SHIFT, n 1559 drivers/vfio/vfio_iommu_type1.c struct vfio_iova *n, *next; n 1570 drivers/vfio/vfio_iommu_type1.c list_for_each_entry_safe(n, next, iova, list) { n 1574 drivers/vfio/vfio_iommu_type1.c if (start > n->end || end < n->start) n 1583 drivers/vfio/vfio_iommu_type1.c if (start > n->start) n 1584 drivers/vfio/vfio_iommu_type1.c ret = vfio_iommu_iova_insert(&n->list, n->start, n 1586 drivers/vfio/vfio_iommu_type1.c if (!ret && end < n->end) n 1587 drivers/vfio/vfio_iommu_type1.c ret = vfio_iommu_iova_insert(&n->list, end + 1, n 1588 drivers/vfio/vfio_iommu_type1.c n->end); n 1592 
drivers/vfio/vfio_iommu_type1.c list_del(&n->list); n 1593 drivers/vfio/vfio_iommu_type1.c kfree(n); n 1605 drivers/vfio/vfio_iommu_type1.c struct iommu_resv_region *n, *next; n 1607 drivers/vfio/vfio_iommu_type1.c list_for_each_entry_safe(n, next, resv_regions, list) { n 1608 drivers/vfio/vfio_iommu_type1.c list_del(&n->list); n 1609 drivers/vfio/vfio_iommu_type1.c kfree(n); n 1615 drivers/vfio/vfio_iommu_type1.c struct vfio_iova *n, *next; n 1617 drivers/vfio/vfio_iommu_type1.c list_for_each_entry_safe(n, next, iova, list) { n 1618 drivers/vfio/vfio_iommu_type1.c list_del(&n->list); n 1619 drivers/vfio/vfio_iommu_type1.c kfree(n); n 1627 drivers/vfio/vfio_iommu_type1.c struct vfio_iova *n; n 1630 drivers/vfio/vfio_iommu_type1.c list_for_each_entry(n, iova, list) { n 1631 drivers/vfio/vfio_iommu_type1.c ret = vfio_iommu_iova_insert(iova_copy, n->start, n->end); n 1864 drivers/vfio/vfio_iommu_type1.c struct rb_node *n, *p; n 1866 drivers/vfio/vfio_iommu_type1.c n = rb_first(&iommu->dma_list); n 1867 drivers/vfio/vfio_iommu_type1.c for (; n; n = rb_next(n)) { n 1871 drivers/vfio/vfio_iommu_type1.c dma = rb_entry(n, struct vfio_dma, node); n 1887 drivers/vfio/vfio_iommu_type1.c struct rb_node *n; n 1889 drivers/vfio/vfio_iommu_type1.c n = rb_first(&iommu->dma_list); n 1890 drivers/vfio/vfio_iommu_type1.c for (; n; n = rb_next(n)) { n 1893 drivers/vfio/vfio_iommu_type1.c dma = rb_entry(n, struct vfio_dma, node); n 269 drivers/vhost/net.c static void vhost_net_clear_ubuf_info(struct vhost_net *n) n 274 drivers/vhost/net.c kfree(n->vqs[i].ubuf_info); n 275 drivers/vhost/net.c n->vqs[i].ubuf_info = NULL; n 279 drivers/vhost/net.c static int vhost_net_set_ubuf_info(struct vhost_net *n) n 288 drivers/vhost/net.c n->vqs[i].ubuf_info = n 290 drivers/vhost/net.c sizeof(*n->vqs[i].ubuf_info), n 292 drivers/vhost/net.c if (!n->vqs[i].ubuf_info) n 298 drivers/vhost/net.c vhost_net_clear_ubuf_info(n); n 302 drivers/vhost/net.c static void vhost_net_vq_reset(struct vhost_net *n) n 306 drivers/vhost/net.c vhost_net_clear_ubuf_info(n); n 309 drivers/vhost/net.c n->vqs[i].done_idx = 0; n 310 drivers/vhost/net.c n->vqs[i].upend_idx = 0; n 311 drivers/vhost/net.c n->vqs[i].ubufs = NULL; n 312 drivers/vhost/net.c n->vqs[i].vhost_hlen = 0; n 313 drivers/vhost/net.c n->vqs[i].sock_hlen = 0; n 314 drivers/vhost/net.c vhost_net_buf_init(&n->vqs[i].rxq); n 421 drivers/vhost/net.c static void vhost_net_disable_vq(struct vhost_net *n, n 426 drivers/vhost/net.c struct vhost_poll *poll = n->poll + (nvq - n->vqs); n 432 drivers/vhost/net.c static int vhost_net_enable_vq(struct vhost_net *n, n 437 drivers/vhost/net.c struct vhost_poll *poll = n->poll + (nvq - n->vqs); n 1275 drivers/vhost/net.c struct vhost_net *n; n 1282 drivers/vhost/net.c n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL); n 1283 drivers/vhost/net.c if (!n) n 1287 drivers/vhost/net.c kvfree(n); n 1295 drivers/vhost/net.c kvfree(n); n 1298 drivers/vhost/net.c n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue; n 1303 drivers/vhost/net.c kvfree(n); n 1307 drivers/vhost/net.c n->vqs[VHOST_NET_VQ_TX].xdp = xdp; n 1309 drivers/vhost/net.c dev = &n->dev; n 1310 drivers/vhost/net.c vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq; n 1311 drivers/vhost/net.c vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq; n 1312 drivers/vhost/net.c n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick; n 1313 drivers/vhost/net.c n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick; n 1315 drivers/vhost/net.c n->vqs[i].ubufs = NULL; n 1316 drivers/vhost/net.c 
n->vqs[i].ubuf_info = NULL; n 1317 drivers/vhost/net.c n->vqs[i].upend_idx = 0; n 1318 drivers/vhost/net.c n->vqs[i].done_idx = 0; n 1319 drivers/vhost/net.c n->vqs[i].batched_xdp = 0; n 1320 drivers/vhost/net.c n->vqs[i].vhost_hlen = 0; n 1321 drivers/vhost/net.c n->vqs[i].sock_hlen = 0; n 1322 drivers/vhost/net.c n->vqs[i].rx_ring = NULL; n 1323 drivers/vhost/net.c vhost_net_buf_init(&n->vqs[i].rxq); n 1329 drivers/vhost/net.c vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev); n 1330 drivers/vhost/net.c vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev); n 1332 drivers/vhost/net.c f->private_data = n; n 1333 drivers/vhost/net.c n->page_frag.page = NULL; n 1334 drivers/vhost/net.c n->refcnt_bias = 0; n 1339 drivers/vhost/net.c static struct socket *vhost_net_stop_vq(struct vhost_net *n, n 1348 drivers/vhost/net.c vhost_net_disable_vq(n, vq); n 1356 drivers/vhost/net.c static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock, n 1359 drivers/vhost/net.c *tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq); n 1360 drivers/vhost/net.c *rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq); n 1363 drivers/vhost/net.c static void vhost_net_flush_vq(struct vhost_net *n, int index) n 1365 drivers/vhost/net.c vhost_poll_flush(n->poll + index); n 1366 drivers/vhost/net.c vhost_poll_flush(&n->vqs[index].vq.poll); n 1369 drivers/vhost/net.c static void vhost_net_flush(struct vhost_net *n) n 1371 drivers/vhost/net.c vhost_net_flush_vq(n, VHOST_NET_VQ_TX); n 1372 drivers/vhost/net.c vhost_net_flush_vq(n, VHOST_NET_VQ_RX); n 1373 drivers/vhost/net.c if (n->vqs[VHOST_NET_VQ_TX].ubufs) { n 1374 drivers/vhost/net.c mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); n 1375 drivers/vhost/net.c n->tx_flush = true; n 1376 drivers/vhost/net.c mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); n 1378 drivers/vhost/net.c vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs); n 1379 drivers/vhost/net.c mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); n 1380 drivers/vhost/net.c n->tx_flush = false; n 1381 drivers/vhost/net.c atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1); n 1382 drivers/vhost/net.c mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); n 1388 drivers/vhost/net.c struct vhost_net *n = f->private_data; n 1392 drivers/vhost/net.c vhost_net_stop(n, &tx_sock, &rx_sock); n 1393 drivers/vhost/net.c vhost_net_flush(n); n 1394 drivers/vhost/net.c vhost_dev_stop(&n->dev); n 1395 drivers/vhost/net.c vhost_dev_cleanup(&n->dev); n 1396 drivers/vhost/net.c vhost_net_vq_reset(n); n 1405 drivers/vhost/net.c vhost_net_flush(n); n 1406 drivers/vhost/net.c kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue); n 1407 drivers/vhost/net.c kfree(n->vqs[VHOST_NET_VQ_TX].xdp); n 1408 drivers/vhost/net.c kfree(n->dev.vqs); n 1409 drivers/vhost/net.c if (n->page_frag.page) n 1410 drivers/vhost/net.c __page_frag_cache_drain(n->page_frag.page, n->refcnt_bias); n 1411 drivers/vhost/net.c kvfree(n); n 1490 drivers/vhost/net.c static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) n 1498 drivers/vhost/net.c mutex_lock(&n->dev.mutex); n 1499 drivers/vhost/net.c r = vhost_dev_check_owner(&n->dev); n 1507 drivers/vhost/net.c vq = &n->vqs[index].vq; n 1508 drivers/vhost/net.c nvq = &n->vqs[index]; n 1532 drivers/vhost/net.c vhost_net_disable_vq(n, vq); n 1538 drivers/vhost/net.c r = vhost_net_enable_vq(n, vq); n 1547 drivers/vhost/net.c n->tx_packets = 0; n 1548 drivers/vhost/net.c n->tx_zcopy_err = 0; n 1549 drivers/vhost/net.c n->tx_flush = false; 
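Several entries in this listing (usb/core/driver.c, gadget/function/rndis.c, host/ehci-timer.c, vfio/vfio_iommu_type1.c, vhost/vhost.c) use n as the spare cursor of list_for_each_entry_safe(): the macro loads the next node before the loop body runs, so the current node can be unlinked and freed without breaking the walk. The plain singly linked list below is a made-up analogue of that idea, not the kernel's list API, just to show why the extra cursor is needed.

/*
 * Illustrative sketch only.  free_all() saves pos->next in n before
 * freeing pos, the same "remember the next element first" discipline
 * that list_for_each_entry_safe() provides for kernel lists.
 */
#include <stdlib.h>
#include <stdio.h>

struct item {
        int val;
        struct item *next;
};

static void free_all(struct item *head)
{
        struct item *pos, *n;

        for (pos = head; pos; pos = n) {
                n = pos->next;   /* grab the next pointer first ... */
                free(pos);       /* ... so freeing pos is safe */
        }
}

int main(void)
{
        struct item *head = NULL;

        for (int i = 0; i < 3; i++) {
                struct item *it = malloc(sizeof(*it));

                if (!it)
                        break;
                it->val = i;
                it->next = head;
                head = it;
        }
        free_all(head);
        printf("list torn down safely\n");
        return 0;
}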
n 1557 drivers/vhost/net.c vhost_zerocopy_signal_used(n, vq); n 1562 drivers/vhost/net.c vhost_net_flush_vq(n, index); n 1566 drivers/vhost/net.c mutex_unlock(&n->dev.mutex); n 1571 drivers/vhost/net.c vhost_net_enable_vq(n, vq); n 1580 drivers/vhost/net.c mutex_unlock(&n->dev.mutex); n 1584 drivers/vhost/net.c static long vhost_net_reset_owner(struct vhost_net *n) n 1591 drivers/vhost/net.c mutex_lock(&n->dev.mutex); n 1592 drivers/vhost/net.c err = vhost_dev_check_owner(&n->dev); n 1600 drivers/vhost/net.c vhost_net_stop(n, &tx_sock, &rx_sock); n 1601 drivers/vhost/net.c vhost_net_flush(n); n 1602 drivers/vhost/net.c vhost_dev_stop(&n->dev); n 1603 drivers/vhost/net.c vhost_dev_reset_owner(&n->dev, umem); n 1604 drivers/vhost/net.c vhost_net_vq_reset(n); n 1606 drivers/vhost/net.c mutex_unlock(&n->dev.mutex); n 1614 drivers/vhost/net.c static int vhost_net_set_backend_features(struct vhost_net *n, u64 features) n 1618 drivers/vhost/net.c mutex_lock(&n->dev.mutex); n 1620 drivers/vhost/net.c mutex_lock(&n->vqs[i].vq.mutex); n 1621 drivers/vhost/net.c n->vqs[i].vq.acked_backend_features = features; n 1622 drivers/vhost/net.c mutex_unlock(&n->vqs[i].vq.mutex); n 1624 drivers/vhost/net.c mutex_unlock(&n->dev.mutex); n 1629 drivers/vhost/net.c static int vhost_net_set_features(struct vhost_net *n, u64 features) n 1647 drivers/vhost/net.c mutex_lock(&n->dev.mutex); n 1649 drivers/vhost/net.c !vhost_log_access_ok(&n->dev)) n 1653 drivers/vhost/net.c if (vhost_init_device_iotlb(&n->dev, true)) n 1658 drivers/vhost/net.c mutex_lock(&n->vqs[i].vq.mutex); n 1659 drivers/vhost/net.c n->vqs[i].vq.acked_features = features; n 1660 drivers/vhost/net.c n->vqs[i].vhost_hlen = vhost_hlen; n 1661 drivers/vhost/net.c n->vqs[i].sock_hlen = sock_hlen; n 1662 drivers/vhost/net.c mutex_unlock(&n->vqs[i].vq.mutex); n 1664 drivers/vhost/net.c mutex_unlock(&n->dev.mutex); n 1668 drivers/vhost/net.c mutex_unlock(&n->dev.mutex); n 1672 drivers/vhost/net.c static long vhost_net_set_owner(struct vhost_net *n) n 1676 drivers/vhost/net.c mutex_lock(&n->dev.mutex); n 1677 drivers/vhost/net.c if (vhost_dev_has_owner(&n->dev)) { n 1681 drivers/vhost/net.c r = vhost_net_set_ubuf_info(n); n 1684 drivers/vhost/net.c r = vhost_dev_set_owner(&n->dev); n 1686 drivers/vhost/net.c vhost_net_clear_ubuf_info(n); n 1687 drivers/vhost/net.c vhost_net_flush(n); n 1689 drivers/vhost/net.c mutex_unlock(&n->dev.mutex); n 1696 drivers/vhost/net.c struct vhost_net *n = f->private_data; n 1707 drivers/vhost/net.c return vhost_net_set_backend(n, backend.index, backend.fd); n 1718 drivers/vhost/net.c return vhost_net_set_features(n, features); n 1729 drivers/vhost/net.c return vhost_net_set_backend_features(n, features); n 1731 drivers/vhost/net.c return vhost_net_reset_owner(n); n 1733 drivers/vhost/net.c return vhost_net_set_owner(n); n 1735 drivers/vhost/net.c mutex_lock(&n->dev.mutex); n 1736 drivers/vhost/net.c r = vhost_dev_ioctl(&n->dev, ioctl, argp); n 1738 drivers/vhost/net.c r = vhost_vring_ioctl(&n->dev, ioctl, argp); n 1740 drivers/vhost/net.c vhost_net_flush(n); n 1741 drivers/vhost/net.c mutex_unlock(&n->dev.mutex); n 1757 drivers/vhost/net.c struct vhost_net *n = file->private_data; n 1758 drivers/vhost/net.c struct vhost_dev *dev = &n->dev; n 1768 drivers/vhost/net.c struct vhost_net *n = file->private_data; n 1769 drivers/vhost/net.c struct vhost_dev *dev = &n->dev; n 1776 drivers/vhost/net.c struct vhost_net *n = file->private_data; n 1777 drivers/vhost/net.c struct vhost_dev *dev = &n->dev; n 642 drivers/vhost/scsi.c 
unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes); n 643 drivers/vhost/scsi.c sg_set_page(sg++, pages[npages++], n, offset); n 644 drivers/vhost/scsi.c bytes -= n; n 43 drivers/vhost/test.c static void handle_vq(struct vhost_test *n) n 45 drivers/vhost/test.c struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ]; n 58 drivers/vhost/test.c vhost_disable_notify(&n->dev, vq); n 70 drivers/vhost/test.c if (unlikely(vhost_enable_notify(&n->dev, vq))) { n 71 drivers/vhost/test.c vhost_disable_notify(&n->dev, vq); n 87 drivers/vhost/test.c vhost_add_used_and_signal(&n->dev, vq, head, 0); n 100 drivers/vhost/test.c struct vhost_test *n = container_of(vq->dev, struct vhost_test, dev); n 102 drivers/vhost/test.c handle_vq(n); n 107 drivers/vhost/test.c struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL); n 111 drivers/vhost/test.c if (!n) n 115 drivers/vhost/test.c kfree(n); n 119 drivers/vhost/test.c dev = &n->dev; n 120 drivers/vhost/test.c vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ]; n 121 drivers/vhost/test.c n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick; n 125 drivers/vhost/test.c f->private_data = n; n 130 drivers/vhost/test.c static void *vhost_test_stop_vq(struct vhost_test *n, n 142 drivers/vhost/test.c static void vhost_test_stop(struct vhost_test *n, void **privatep) n 144 drivers/vhost/test.c *privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ); n 147 drivers/vhost/test.c static void vhost_test_flush_vq(struct vhost_test *n, int index) n 149 drivers/vhost/test.c vhost_poll_flush(&n->vqs[index].poll); n 152 drivers/vhost/test.c static void vhost_test_flush(struct vhost_test *n) n 154 drivers/vhost/test.c vhost_test_flush_vq(n, VHOST_TEST_VQ); n 159 drivers/vhost/test.c struct vhost_test *n = f->private_data; n 162 drivers/vhost/test.c vhost_test_stop(n, &private); n 163 drivers/vhost/test.c vhost_test_flush(n); n 164 drivers/vhost/test.c vhost_dev_stop(&n->dev); n 165 drivers/vhost/test.c vhost_dev_cleanup(&n->dev); n 168 drivers/vhost/test.c vhost_test_flush(n); n 169 drivers/vhost/test.c kfree(n); n 173 drivers/vhost/test.c static long vhost_test_run(struct vhost_test *n, int test) n 182 drivers/vhost/test.c mutex_lock(&n->dev.mutex); n 183 drivers/vhost/test.c r = vhost_dev_check_owner(&n->dev); n 187 drivers/vhost/test.c for (index = 0; index < n->dev.nvqs; ++index) { n 189 drivers/vhost/test.c if (!vhost_vq_access_ok(&n->vqs[index])) { n 195 drivers/vhost/test.c for (index = 0; index < n->dev.nvqs; ++index) { n 196 drivers/vhost/test.c vq = n->vqs + index; n 198 drivers/vhost/test.c priv = test ? 
n : NULL; n 204 drivers/vhost/test.c r = vhost_vq_init_access(&n->vqs[index]); n 212 drivers/vhost/test.c vhost_test_flush_vq(n, index); n 216 drivers/vhost/test.c mutex_unlock(&n->dev.mutex); n 220 drivers/vhost/test.c mutex_unlock(&n->dev.mutex); n 224 drivers/vhost/test.c static long vhost_test_reset_owner(struct vhost_test *n) n 230 drivers/vhost/test.c mutex_lock(&n->dev.mutex); n 231 drivers/vhost/test.c err = vhost_dev_check_owner(&n->dev); n 239 drivers/vhost/test.c vhost_test_stop(n, &priv); n 240 drivers/vhost/test.c vhost_test_flush(n); n 241 drivers/vhost/test.c vhost_dev_stop(&n->dev); n 242 drivers/vhost/test.c vhost_dev_reset_owner(&n->dev, umem); n 244 drivers/vhost/test.c mutex_unlock(&n->dev.mutex); n 248 drivers/vhost/test.c static int vhost_test_set_features(struct vhost_test *n, u64 features) n 252 drivers/vhost/test.c mutex_lock(&n->dev.mutex); n 254 drivers/vhost/test.c !vhost_log_access_ok(&n->dev)) { n 255 drivers/vhost/test.c mutex_unlock(&n->dev.mutex); n 258 drivers/vhost/test.c vq = &n->vqs[VHOST_TEST_VQ]; n 262 drivers/vhost/test.c mutex_unlock(&n->dev.mutex); n 269 drivers/vhost/test.c struct vhost_test *n = f->private_data; n 279 drivers/vhost/test.c return vhost_test_run(n, test); n 293 drivers/vhost/test.c return vhost_test_set_features(n, features); n 295 drivers/vhost/test.c return vhost_test_reset_owner(n); n 297 drivers/vhost/test.c mutex_lock(&n->dev.mutex); n 298 drivers/vhost/test.c r = vhost_dev_ioctl(&n->dev, ioctl, argp); n 300 drivers/vhost/test.c r = vhost_vring_ioctl(&n->dev, ioctl, argp); n 301 drivers/vhost/test.c vhost_test_flush(n); n 302 drivers/vhost/test.c mutex_unlock(&n->dev.mutex); n 640 drivers/vhost/vhost.c struct vhost_msg_node *node, *n; n 644 drivers/vhost/vhost.c list_for_each_entry_safe(node, n, &dev->read_list, node) { n 649 drivers/vhost/vhost.c list_for_each_entry_safe(node, n, &dev->pending_list, node) { n 1061 drivers/vhost/vhost.c struct vhost_msg_node *node, *n; n 1065 drivers/vhost/vhost.c list_for_each_entry_safe(node, n, &d->pending_list, node) { n 2350 drivers/vhost/vhost.c void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n) n 2352 drivers/vhost/vhost.c vq->last_avail_idx -= n; n 2406 drivers/vhost/vhost.c int start, n, r; n 2409 drivers/vhost/vhost.c n = vq->num - start; n 2410 drivers/vhost/vhost.c if (n < count) { n 2411 drivers/vhost/vhost.c r = __vhost_add_used_n(vq, heads, n); n 2414 drivers/vhost/vhost.c heads += n; n 2415 drivers/vhost/vhost.c count -= n; n 197 drivers/vhost/vhost.h void vhost_discard_vq_desc(struct vhost_virtqueue *, int n); n 272 drivers/vhost/vsock.c struct virtio_vsock_pkt *pkt, *n; n 285 drivers/vhost/vsock.c list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) { n 292 drivers/vhost/vsock.c list_for_each_entry_safe(pkt, n, &freeme, list) { n 30 drivers/video/backlight/bd6107.c #define BD6107_PORTSEL_LEDM(n) (1 << (n)) n 236 drivers/video/backlight/lm3533_bl.c struct attribute *attr, int n) n 266 drivers/video/backlight/pwm_bl.c unsigned int i, j, n = 0; n 333 drivers/video/backlight/pwm_bl.c n = (data->levels[i + 1] - value) / num_steps; n 334 drivers/video/backlight/pwm_bl.c if (n > 0) { n 337 drivers/video/backlight/pwm_bl.c value += n; n 650 drivers/video/console/sticore.c unsigned char *n, *p, *q; n 653 drivers/video/console/sticore.c n = kcalloc(4, size, STI_LOWMEM); n 654 drivers/video/console/sticore.c if (!n) n 656 drivers/video/console/sticore.c p = n + 3; n 662 drivers/video/console/sticore.c return n + 3; n 2599 drivers/video/fbdev/amifb.c int src_idx, u32 
n) n 2606 drivers/video/fbdev/amifb.c if (!n) n 2611 drivers/video/fbdev/amifb.c last = ~(~0UL >> ((dst_idx + n) % BITS_PER_LONG)); n 2616 drivers/video/fbdev/amifb.c if (dst_idx + n <= BITS_PER_LONG) { n 2628 drivers/video/fbdev/amifb.c n -= BITS_PER_LONG - dst_idx; n 2632 drivers/video/fbdev/amifb.c n /= BITS_PER_LONG; n 2633 drivers/video/fbdev/amifb.c while (n >= 8) { n 2642 drivers/video/fbdev/amifb.c n -= 8; n 2644 drivers/video/fbdev/amifb.c while (n--) n 2657 drivers/video/fbdev/amifb.c if (dst_idx + n <= BITS_PER_LONG) { n 2664 drivers/video/fbdev/amifb.c } else if (src_idx + n <= BITS_PER_LONG) { n 2682 drivers/video/fbdev/amifb.c n -= BITS_PER_LONG - dst_idx; n 2690 drivers/video/fbdev/amifb.c n -= BITS_PER_LONG - dst_idx; n 2694 drivers/video/fbdev/amifb.c m = n % BITS_PER_LONG; n 2695 drivers/video/fbdev/amifb.c n /= BITS_PER_LONG; n 2696 drivers/video/fbdev/amifb.c while (n >= 4) { n 2709 drivers/video/fbdev/amifb.c n -= 4; n 2711 drivers/video/fbdev/amifb.c while (n--) { n 2739 drivers/video/fbdev/amifb.c const unsigned long *src, int src_idx, u32 n) n 2746 drivers/video/fbdev/amifb.c if (!n) n 2749 drivers/video/fbdev/amifb.c dst += (n - 1) / BITS_PER_LONG; n 2750 drivers/video/fbdev/amifb.c src += (n - 1) / BITS_PER_LONG; n 2751 drivers/video/fbdev/amifb.c if ((n - 1) % BITS_PER_LONG) { n 2752 drivers/video/fbdev/amifb.c dst_idx += (n - 1) % BITS_PER_LONG; n 2755 drivers/video/fbdev/amifb.c src_idx += (n - 1) % BITS_PER_LONG; n 2762 drivers/video/fbdev/amifb.c last = ~(~0UL << (BITS_PER_LONG - 1 - ((dst_idx - n) % BITS_PER_LONG))); n 2767 drivers/video/fbdev/amifb.c if ((unsigned long)dst_idx + 1 >= n) { n 2779 drivers/video/fbdev/amifb.c n -= dst_idx + 1; n 2783 drivers/video/fbdev/amifb.c n /= BITS_PER_LONG; n 2784 drivers/video/fbdev/amifb.c while (n >= 8) { n 2793 drivers/video/fbdev/amifb.c n -= 8; n 2795 drivers/video/fbdev/amifb.c while (n--) n 2808 drivers/video/fbdev/amifb.c if ((unsigned long)dst_idx + 1 >= n) { n 2815 drivers/video/fbdev/amifb.c } else if (1 + (unsigned long)src_idx >= n) { n 2833 drivers/video/fbdev/amifb.c n -= dst_idx + 1; n 2841 drivers/video/fbdev/amifb.c n -= dst_idx + 1; n 2845 drivers/video/fbdev/amifb.c m = n % BITS_PER_LONG; n 2846 drivers/video/fbdev/amifb.c n /= BITS_PER_LONG; n 2847 drivers/video/fbdev/amifb.c while (n >= 4) { n 2860 drivers/video/fbdev/amifb.c n -= 4; n 2862 drivers/video/fbdev/amifb.c while (n--) { n 2891 drivers/video/fbdev/amifb.c const unsigned long *src, int src_idx, u32 n) n 2898 drivers/video/fbdev/amifb.c if (!n) n 2903 drivers/video/fbdev/amifb.c last = ~(~0UL >> ((dst_idx + n) % BITS_PER_LONG)); n 2908 drivers/video/fbdev/amifb.c if (dst_idx + n <= BITS_PER_LONG) { n 2920 drivers/video/fbdev/amifb.c n -= BITS_PER_LONG - dst_idx; n 2924 drivers/video/fbdev/amifb.c n /= BITS_PER_LONG; n 2925 drivers/video/fbdev/amifb.c while (n >= 8) { n 2934 drivers/video/fbdev/amifb.c n -= 8; n 2936 drivers/video/fbdev/amifb.c while (n--) n 2949 drivers/video/fbdev/amifb.c if (dst_idx + n <= BITS_PER_LONG) { n 2956 drivers/video/fbdev/amifb.c } else if (src_idx + n <= BITS_PER_LONG) { n 2974 drivers/video/fbdev/amifb.c n -= BITS_PER_LONG - dst_idx; n 2982 drivers/video/fbdev/amifb.c n -= BITS_PER_LONG - dst_idx; n 2986 drivers/video/fbdev/amifb.c m = n % BITS_PER_LONG; n 2987 drivers/video/fbdev/amifb.c n /= BITS_PER_LONG; n 2988 drivers/video/fbdev/amifb.c while (n >= 4) { n 3001 drivers/video/fbdev/amifb.c n -= 4; n 3003 drivers/video/fbdev/amifb.c while (n--) { n 3030 drivers/video/fbdev/amifb.c static void 
bitfill32(unsigned long *dst, int dst_idx, u32 pat, u32 n) n 3035 drivers/video/fbdev/amifb.c if (!n) n 3043 drivers/video/fbdev/amifb.c last = ~(~0UL >> ((dst_idx + n) % BITS_PER_LONG)); n 3045 drivers/video/fbdev/amifb.c if (dst_idx + n <= BITS_PER_LONG) { n 3056 drivers/video/fbdev/amifb.c n -= BITS_PER_LONG - dst_idx; n 3060 drivers/video/fbdev/amifb.c n /= BITS_PER_LONG; n 3061 drivers/video/fbdev/amifb.c while (n >= 8) { n 3070 drivers/video/fbdev/amifb.c n -= 8; n 3072 drivers/video/fbdev/amifb.c while (n--) n 3086 drivers/video/fbdev/amifb.c static void bitxor32(unsigned long *dst, int dst_idx, u32 pat, u32 n) n 3091 drivers/video/fbdev/amifb.c if (!n) n 3099 drivers/video/fbdev/amifb.c last = ~(~0UL >> ((dst_idx + n) % BITS_PER_LONG)); n 3101 drivers/video/fbdev/amifb.c if (dst_idx + n <= BITS_PER_LONG) { n 3112 drivers/video/fbdev/amifb.c n -= BITS_PER_LONG - dst_idx; n 3116 drivers/video/fbdev/amifb.c n /= BITS_PER_LONG; n 3117 drivers/video/fbdev/amifb.c while (n >= 4) { n 3122 drivers/video/fbdev/amifb.c n -= 4; n 3124 drivers/video/fbdev/amifb.c while (n--) n 3134 drivers/video/fbdev/amifb.c unsigned long *dst, int dst_idx, u32 n, n 3140 drivers/video/fbdev/amifb.c bitfill32(dst, dst_idx, color & 1 ? ~0 : 0, n); n 3149 drivers/video/fbdev/amifb.c unsigned long *dst, int dst_idx, u32 n, n 3155 drivers/video/fbdev/amifb.c bitxor32(dst, dst_idx, color & 1 ? ~0 : 0, n); n 3209 drivers/video/fbdev/amifb.c unsigned long *src, int src_idx, u32 n) n 3216 drivers/video/fbdev/amifb.c bitcpy(dst, dst_idx, src, src_idx, n); n 3226 drivers/video/fbdev/amifb.c unsigned long *src, int src_idx, u32 n) n 3233 drivers/video/fbdev/amifb.c bitcpy_rev(dst, dst_idx, src, src_idx, n); n 3307 drivers/video/fbdev/amifb.c unsigned long *dst, int dst_idx, u32 n, n 3321 drivers/video/fbdev/amifb.c bitcpy(dst, dst_idx, src, src_idx, n); n 3323 drivers/video/fbdev/amifb.c bitcpy_not(dst, dst_idx, src, src_idx, n); n 3326 drivers/video/fbdev/amifb.c bitfill32(dst, dst_idx, fgcolor & 1 ? ~0 : 0, n); n 410 drivers/video/fbdev/arkfb.c u16 m, n, r; n 415 drivers/video/fbdev/arkfb.c freq, &m, &n, &r, 0); n 420 drivers/video/fbdev/arkfb.c u8 code[6] = {4, 3, 5, m-2, 5, (n-2) | (r << 5)}; n 114 drivers/video/fbdev/asiliantfb.c unsigned n; n 137 drivers/video/fbdev/asiliantfb.c for (n = 3; n <= 257; n++) { n 138 drivers/video/fbdev/asiliantfb.c unsigned m = n * ratio + (n * remainder) / Fref; n 142 drivers/video/fbdev/asiliantfb.c unsigned new_error = Ftarget * n >= Fref * m ? n 143 drivers/video/fbdev/asiliantfb.c ((Ftarget * n) - (Fref * m)) : ((Fref * m) - (Ftarget * n)); n 145 drivers/video/fbdev/asiliantfb.c best_n = n; n 154 drivers/video/fbdev/asiliantfb.c unsigned new_error = Ftarget * n >= Fref * (m & ~3) ? 
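Several framebuffer helpers in this listing (amifb's `bitfill32` above, and the `bitfill_*` routines in cfbfillrect/sysfillrect further down) fill `n` bits starting at an arbitrary bit offset by masking the partial first and last words and writing whole words in between. The sketch below shows that structure in plain C with LSB-first bit numbering, which is a simplifying assumption; the kernel versions choose the shift direction per endianness and pixel order.

```c
#include <stdio.h>
#include <limits.h>

#define BPL (sizeof(unsigned long) * CHAR_BIT)   /* bits per long */

/* Set n bits to 'pat' (all-ones or all-zeros) starting at bit dst_idx
 * of dst[], using LSB-first bit numbering within each word. */
static void bitfill(unsigned long *dst, unsigned dst_idx,
		    unsigned long pat, unsigned n)
{
	unsigned long first, last;

	if (!n)
		return;

	dst += dst_idx / BPL;
	dst_idx %= BPL;

	first = ~0UL << dst_idx;                         /* keep upper part */
	last = (dst_idx + n) % BPL ?
		~(~0UL << ((dst_idx + n) % BPL)) : ~0UL; /* keep lower part */

	if (dst_idx + n <= BPL) {
		/* the whole run fits in one word */
		first &= last;
		*dst = (*dst & ~first) | (pat & first);
		return;
	}

	/* partial first word */
	*dst = (*dst & ~first) | (pat & first);
	dst++;
	n -= BPL - dst_idx;

	/* full words in the middle */
	while (n >= BPL) {
		*dst++ = pat;
		n -= BPL;
	}

	/* partial last word */
	if (n)
		*dst = (*dst & ~last) | (pat & last);
}

int main(void)
{
	unsigned long buf[3] = { 0, 0, 0 };

	bitfill(buf, 4, ~0UL, BPL + 10);   /* set bits 4 .. BPL+13 */
	for (int i = 0; i < 3; i++)
		printf("%016lx\n", buf[i]);
	return 0;
}
```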
n 155 drivers/video/fbdev/asiliantfb.c ((Ftarget * n) - (Fref * (m & ~3))) : ((Fref * (m & ~3)) - (Ftarget * n)); n 157 drivers/video/fbdev/asiliantfb.c best_n = n; n 469 drivers/video/fbdev/aty/aty128fb.c #define round_div(n, d) ((n+(d/2))/d) n 1374 drivers/video/fbdev/aty/aty128fb.c u32 n, d; n 1397 drivers/video/fbdev/aty/aty128fb.c n = c.ref_divider * output_freq; n 1400 drivers/video/fbdev/aty/aty128fb.c pll->feedback_divider = round_div(n, d); n 1439 drivers/video/fbdev/aty/aty128fb.c u32 n, d, bpp; n 1444 drivers/video/fbdev/aty/aty128fb.c n = xclk * fifo_width; n 1446 drivers/video/fbdev/aty/aty128fb.c x = round_div(n, d); n 1467 drivers/video/fbdev/aty/aty128fb.c n <<= (11 - p); n 1468 drivers/video/fbdev/aty/aty128fb.c x = round_div(n, d); n 72 drivers/video/fbdev/aty/atyfb.h u8 n; n 133 drivers/video/fbdev/aty/mach64_gx.c u8 n; /* ref_div_count */ n 155 drivers/video/fbdev/aty/mach64_gx.c pll->ibm514.n = RGB514_clocks[i].n; n 169 drivers/video/fbdev/aty/mach64_gx.c ref_div_count = pll->ibm514.n; n 187 drivers/video/fbdev/aty/mach64_gx.c aty_st_514(0x21, pll->ibm514.n, par); /* F1 / N0 */ n 622 drivers/video/fbdev/aty/mach64_gx.c u16 m, n, k = 0, save_m, save_n, twoToKth; n 654 drivers/video/fbdev/aty/mach64_gx.c for (n = MIN_N; n <= MAX_N; n++) { n 656 drivers/video/fbdev/aty/mach64_gx.c tempA *= (n + 8); /* 43..256 */ n 668 drivers/video/fbdev/aty/mach64_gx.c save_n = n; n 583 drivers/video/fbdev/aty/radeon_base.c int hTotal, vTotal, num, denom, m, n; n 633 drivers/video/fbdev/aty/radeon_base.c n = ((INPLL(M_SPLL_REF_FB_DIV) >> 16) & 0xff); n 635 drivers/video/fbdev/aty/radeon_base.c num = 2*n; n 639 drivers/video/fbdev/aty/radeon_base.c n = ((INPLL(M_SPLL_REF_FB_DIV) >> 8) & 0xff); n 641 drivers/video/fbdev/aty/radeon_base.c num = 2*n; n 649 drivers/video/fbdev/aty/radeon_base.c n = (INPLL(PPLL_DIV_0 + ppll_div_sel) & 0x7ff); n 652 drivers/video/fbdev/aty/radeon_base.c num *= n; n 32 drivers/video/fbdev/c2p_core.h static __always_inline u32 get_mask(unsigned int n) n 34 drivers/video/fbdev/c2p_core.h switch (n) { n 60 drivers/video/fbdev/c2p_core.h static __always_inline void transp8(u32 d[], unsigned int n, unsigned int m) n 62 drivers/video/fbdev/c2p_core.h u32 mask = get_mask(n); n 67 drivers/video/fbdev/c2p_core.h _transp(d, 0, 1, n, mask); n 69 drivers/video/fbdev/c2p_core.h _transp(d, 2, 3, n, mask); n 71 drivers/video/fbdev/c2p_core.h _transp(d, 4, 5, n, mask); n 73 drivers/video/fbdev/c2p_core.h _transp(d, 6, 7, n, mask); n 78 drivers/video/fbdev/c2p_core.h _transp(d, 0, 2, n, mask); n 79 drivers/video/fbdev/c2p_core.h _transp(d, 1, 3, n, mask); n 81 drivers/video/fbdev/c2p_core.h _transp(d, 4, 6, n, mask); n 82 drivers/video/fbdev/c2p_core.h _transp(d, 5, 7, n, mask); n 87 drivers/video/fbdev/c2p_core.h _transp(d, 0, 4, n, mask); n 88 drivers/video/fbdev/c2p_core.h _transp(d, 1, 5, n, mask); n 89 drivers/video/fbdev/c2p_core.h _transp(d, 2, 6, n, mask); n 90 drivers/video/fbdev/c2p_core.h _transp(d, 3, 7, n, mask); n 102 drivers/video/fbdev/c2p_core.h static __always_inline void transp4(u32 d[], unsigned int n, unsigned int m) n 104 drivers/video/fbdev/c2p_core.h u32 mask = get_mask(n); n 109 drivers/video/fbdev/c2p_core.h _transp(d, 0, 1, n, mask); n 111 drivers/video/fbdev/c2p_core.h _transp(d, 2, 3, n, mask); n 116 drivers/video/fbdev/c2p_core.h _transp(d, 0, 2, n, mask); n 117 drivers/video/fbdev/c2p_core.h _transp(d, 1, 3, n, mask); n 129 drivers/video/fbdev/c2p_core.h static __always_inline void transp4x(u32 d[], unsigned int n, unsigned int m) n 131 
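The `round_div(n, d)` macro listed here for aty128fb rounds an integer quotient to the nearest whole number by biasing the numerator with half the divisor before dividing (the macro itself does not parenthesise its arguments, so it is safest with simple operands). A standalone check of the same arithmetic:

```c
#include <stdio.h>

/* Round-to-nearest integer division for non-negative values: adding d/2
 * before dividing pushes quotients with a remainder >= d/2 up by one. */
static unsigned round_div(unsigned n, unsigned d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	printf("%u %u %u\n",
	       round_div(7, 3),    /* 7/3  = 2.33 -> 2 */
	       round_div(8, 3),    /* 8/3  = 2.67 -> 3 */
	       round_div(10, 4));  /* 10/4 = 2.5  -> 3 */
	return 0;
}
```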
drivers/video/fbdev/c2p_core.h u32 mask = get_mask(n); n 136 drivers/video/fbdev/c2p_core.h _transp(d, 2, 0, n, mask); n 137 drivers/video/fbdev/c2p_core.h _transp(d, 3, 1, n, mask); n 2740 drivers/video/fbdev/cirrusfb.c int n, d; n 2756 drivers/video/fbdev/cirrusfb.c for (n = 32; n < 128; n++) { n 2759 drivers/video/fbdev/cirrusfb.c d = (14318 * n) / freq; n 2767 drivers/video/fbdev/cirrusfb.c h = ((14318 * n) / temp) >> s; n 2771 drivers/video/fbdev/cirrusfb.c *nom = n; n 2782 drivers/video/fbdev/cirrusfb.c h = ((14318 * n) / d) >> s; n 2786 drivers/video/fbdev/cirrusfb.c *nom = n; n 781 drivers/video/fbdev/controlfb.c unsigned long p0, p1, p2, k, l, m, n, min; n 794 drivers/video/fbdev/controlfb.c n = m / l; n 796 drivers/video/fbdev/controlfb.c if (n && (n < 128) && rem < min) { n 798 drivers/video/fbdev/controlfb.c p1 = n; n 48 drivers/video/fbdev/core/cfbcopyarea.c unsigned n, u32 bswapmask) n 59 drivers/video/fbdev/core/cfbcopyarea.c (char *)src + ((src_idx & (bits - 1))) / 8, n / 8); n 64 drivers/video/fbdev/core/cfbcopyarea.c last = ~fb_shifted_pixels_mask_long(p, (dst_idx+n) % bits, bswapmask); n 69 drivers/video/fbdev/core/cfbcopyarea.c if (dst_idx+n <= bits) { n 82 drivers/video/fbdev/core/cfbcopyarea.c n -= bits - dst_idx; n 86 drivers/video/fbdev/core/cfbcopyarea.c n /= bits; n 87 drivers/video/fbdev/core/cfbcopyarea.c while (n >= 8) { n 96 drivers/video/fbdev/core/cfbcopyarea.c n -= 8; n 98 drivers/video/fbdev/core/cfbcopyarea.c while (n--) n 113 drivers/video/fbdev/core/cfbcopyarea.c if (dst_idx+n <= bits) { n 122 drivers/video/fbdev/core/cfbcopyarea.c } else if (src_idx+n <= bits) { n 147 drivers/video/fbdev/core/cfbcopyarea.c n -= bits - dst_idx; n 154 drivers/video/fbdev/core/cfbcopyarea.c n -= bits - dst_idx; n 162 drivers/video/fbdev/core/cfbcopyarea.c m = n % bits; n 163 drivers/video/fbdev/core/cfbcopyarea.c n /= bits; n 164 drivers/video/fbdev/core/cfbcopyarea.c while ((n >= 4) && !bswapmask) { n 177 drivers/video/fbdev/core/cfbcopyarea.c n -= 4; n 179 drivers/video/fbdev/core/cfbcopyarea.c while (n--) { n 214 drivers/video/fbdev/core/cfbcopyarea.c unsigned n, u32 bswapmask) n 225 drivers/video/fbdev/core/cfbcopyarea.c (char *)src + ((src_idx & (bits - 1))) / 8, n / 8); n 229 drivers/video/fbdev/core/cfbcopyarea.c dst += (dst_idx + n - 1) / bits; n 230 drivers/video/fbdev/core/cfbcopyarea.c src += (src_idx + n - 1) / bits; n 231 drivers/video/fbdev/core/cfbcopyarea.c dst_idx = (dst_idx + n - 1) % bits; n 232 drivers/video/fbdev/core/cfbcopyarea.c src_idx = (src_idx + n - 1) % bits; n 237 drivers/video/fbdev/core/cfbcopyarea.c last = fb_shifted_pixels_mask_long(p, (bits + dst_idx + 1 - n) % bits, bswapmask); n 242 drivers/video/fbdev/core/cfbcopyarea.c if ((unsigned long)dst_idx+1 >= n) { n 255 drivers/video/fbdev/core/cfbcopyarea.c n -= dst_idx+1; n 259 drivers/video/fbdev/core/cfbcopyarea.c n /= bits; n 260 drivers/video/fbdev/core/cfbcopyarea.c while (n >= 8) { n 269 drivers/video/fbdev/core/cfbcopyarea.c n -= 8; n 271 drivers/video/fbdev/core/cfbcopyarea.c while (n--) n 286 drivers/video/fbdev/core/cfbcopyarea.c if ((unsigned long)dst_idx+1 >= n) { n 294 drivers/video/fbdev/core/cfbcopyarea.c } else if (1+(unsigned long)src_idx >= n) { n 333 drivers/video/fbdev/core/cfbcopyarea.c n -= dst_idx+1; n 336 drivers/video/fbdev/core/cfbcopyarea.c m = n % bits; n 337 drivers/video/fbdev/core/cfbcopyarea.c n /= bits; n 338 drivers/video/fbdev/core/cfbcopyarea.c while ((n >= 4) && !bswapmask) { n 351 drivers/video/fbdev/core/cfbcopyarea.c n -= 4; n 353 
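Many of the display drivers in this listing (asiliantfb, cirrusfb, i740fb, intelfbhw, matroxfb, pm2fb, sstfb, tdfxfb and others) pick their PLL multiplier/divider values the same way: iterate over the legal ranges, compute the frequency each combination would produce, and keep the one with the smallest error against the target. The sketch below is a generic version of that search for f_out = f_ref * m / (n << p); the reference clock, ranges and variable names are made up for illustration and do not correspond to any particular chip.

```c
#include <stdio.h>

#define F_REF 14318U        /* reference clock in kHz (illustrative) */

/* Search m in [8,127], n in [2,31], p in [0,3] for the combination whose
 * F_REF * m / (n << p) lands closest to 'target' kHz. */
static unsigned pll_search(unsigned target,
			   unsigned *best_m, unsigned *best_n, unsigned *best_p)
{
	unsigned best_err = ~0U, best_f = 0;

	for (unsigned p = 0; p <= 3; p++) {
		for (unsigned n = 2; n <= 31; n++) {
			for (unsigned m = 8; m <= 127; m++) {
				unsigned f = F_REF * m / (n << p);
				unsigned err = f > target ? f - target
							  : target - f;
				if (err < best_err) {
					best_err = err;
					best_f = f;
					*best_m = m;
					*best_n = n;
					*best_p = p;
				}
			}
		}
	}
	return best_f;
}

int main(void)
{
	unsigned m, n, p;
	unsigned f = pll_search(65000, &m, &n, &p);   /* ~65 MHz pixel clock */

	printf("m=%u n=%u p=%u -> %u kHz\n", m, n, p, f);
	return 0;
}
```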
drivers/video/fbdev/core/cfbcopyarea.c while (n--) { n 36 drivers/video/fbdev/core/cfbfillrect.c unsigned long pat, unsigned n, int bits, u32 bswapmask) n 40 drivers/video/fbdev/core/cfbfillrect.c if (!n) n 44 drivers/video/fbdev/core/cfbfillrect.c last = ~fb_shifted_pixels_mask_long(p, (dst_idx+n) % bits, bswapmask); n 46 drivers/video/fbdev/core/cfbfillrect.c if (dst_idx+n <= bits) { n 58 drivers/video/fbdev/core/cfbfillrect.c n -= bits - dst_idx; n 62 drivers/video/fbdev/core/cfbfillrect.c n /= bits; n 63 drivers/video/fbdev/core/cfbfillrect.c while (n >= 8) { n 72 drivers/video/fbdev/core/cfbfillrect.c n -= 8; n 74 drivers/video/fbdev/core/cfbfillrect.c while (n--) n 93 drivers/video/fbdev/core/cfbfillrect.c unsigned long pat, int left, int right, unsigned n, int bits) n 97 drivers/video/fbdev/core/cfbfillrect.c if (!n) n 101 drivers/video/fbdev/core/cfbfillrect.c last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits)); n 103 drivers/video/fbdev/core/cfbfillrect.c if (dst_idx+n <= bits) { n 115 drivers/video/fbdev/core/cfbfillrect.c n -= bits - dst_idx; n 119 drivers/video/fbdev/core/cfbfillrect.c n /= bits; n 120 drivers/video/fbdev/core/cfbfillrect.c while (n >= 4) { n 129 drivers/video/fbdev/core/cfbfillrect.c n -= 4; n 131 drivers/video/fbdev/core/cfbfillrect.c while (n--) { n 147 drivers/video/fbdev/core/cfbfillrect.c int dst_idx, unsigned long pat, unsigned n, int bits, n 153 drivers/video/fbdev/core/cfbfillrect.c if (!n) n 157 drivers/video/fbdev/core/cfbfillrect.c last = ~fb_shifted_pixels_mask_long(p, (dst_idx+n) % bits, bswapmask); n 159 drivers/video/fbdev/core/cfbfillrect.c if (dst_idx+n <= bits) { n 172 drivers/video/fbdev/core/cfbfillrect.c n -= bits - dst_idx; n 176 drivers/video/fbdev/core/cfbfillrect.c n /= bits; n 177 drivers/video/fbdev/core/cfbfillrect.c while (n >= 8) { n 194 drivers/video/fbdev/core/cfbfillrect.c n -= 8; n 196 drivers/video/fbdev/core/cfbfillrect.c while (n--) { n 219 drivers/video/fbdev/core/cfbfillrect.c unsigned n, int bits) n 223 drivers/video/fbdev/core/cfbfillrect.c if (!n) n 227 drivers/video/fbdev/core/cfbfillrect.c last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits)); n 229 drivers/video/fbdev/core/cfbfillrect.c if (dst_idx+n <= bits) { n 244 drivers/video/fbdev/core/cfbfillrect.c n -= bits - dst_idx; n 248 drivers/video/fbdev/core/cfbfillrect.c n /= bits; n 249 drivers/video/fbdev/core/cfbfillrect.c while (n >= 4) { n 262 drivers/video/fbdev/core/cfbfillrect.c n -= 4; n 264 drivers/video/fbdev/core/cfbfillrect.c while (n--) { n 309 drivers/video/fbdev/core/cfbfillrect.c unsigned long pat, unsigned n, int bits, n 335 drivers/video/fbdev/core/cfbfillrect.c int right, unsigned n, int bits) = NULL; n 83 drivers/video/fbdev/core/cfbimgblt.c int i, n, bpp = p->var.bits_per_pixel; n 91 drivers/video/fbdev/core/cfbimgblt.c n = image->width; n 102 drivers/video/fbdev/core/cfbimgblt.c while (n--) { n 1620 drivers/video/fbdev/core/fbcon.c unsigned long n; n 1626 drivers/video/fbdev/core/fbcon.c n = softback_curr + delta * vc->vc_size_row; n 1629 drivers/video/fbdev/core/fbcon.c if (softback_curr < softback_top && n < softback_buf) { n 1630 drivers/video/fbdev/core/fbcon.c n += softback_end - softback_buf; n 1631 drivers/video/fbdev/core/fbcon.c if (n < softback_top) { n 1633 drivers/video/fbdev/core/fbcon.c (softback_top - n) / vc->vc_size_row; n 1634 drivers/video/fbdev/core/fbcon.c n = softback_top; n 1637 drivers/video/fbdev/core/fbcon.c && n < softback_top) { n 1639 drivers/video/fbdev/core/fbcon.c (softback_top - n) / vc->vc_size_row; n 1640 
drivers/video/fbdev/core/fbcon.c n = softback_top; n 1643 drivers/video/fbdev/core/fbcon.c if (softback_curr > softback_in && n >= softback_end) { n 1644 drivers/video/fbdev/core/fbcon.c n += softback_buf - softback_end; n 1645 drivers/video/fbdev/core/fbcon.c if (n > softback_in) { n 1646 drivers/video/fbdev/core/fbcon.c n = softback_in; n 1649 drivers/video/fbdev/core/fbcon.c } else if (softback_curr <= softback_in && n > softback_in) { n 1650 drivers/video/fbdev/core/fbcon.c n = softback_in; n 1654 drivers/video/fbdev/core/fbcon.c if (n == softback_curr) n 1656 drivers/video/fbdev/core/fbcon.c softback_curr = n; n 188 drivers/video/fbdev/core/fbmem.c static inline unsigned safe_shift(unsigned d, int n) n 190 drivers/video/fbdev/core/fbmem.c return n < 0 ? d >> -n : d << n; n 200 drivers/video/fbdev/core/fbmem.c int i, j, n; n 210 drivers/video/fbdev/core/fbmem.c for (i = 0; i < logo->clutsize; i += n) { n 211 drivers/video/fbdev/core/fbmem.c n = logo->clutsize - i; n 213 drivers/video/fbdev/core/fbmem.c if (n > 16) n 214 drivers/video/fbdev/core/fbmem.c n = 16; n 216 drivers/video/fbdev/core/fbmem.c palette_cmap.len = n; n 217 drivers/video/fbdev/core/fbmem.c for (j = 0; j < n; ++j) { n 459 drivers/video/fbdev/core/fbmem.c unsigned int n) n 513 drivers/video/fbdev/core/fbmem.c while (n && (n * (logo->width + 8) - 8 > xres)) n 514 drivers/video/fbdev/core/fbmem.c --n; n 515 drivers/video/fbdev/core/fbmem.c image.dx = (xres - n * (logo->width + 8) - 8) / 2; n 532 drivers/video/fbdev/core/fbmem.c fb_do_show_logo(info, &image, rotate, n); n 548 drivers/video/fbdev/core/fbmem.c unsigned int n; n 552 drivers/video/fbdev/core/fbmem.c void fb_append_extra_logo(const struct linux_logo *logo, unsigned int n) n 554 drivers/video/fbdev/core/fbmem.c if (!n || fb_logo_ex_num == FB_LOGO_EX_NUM_MAX) n 558 drivers/video/fbdev/core/fbmem.c fb_logo_ex[fb_logo_ex_num].n = n; n 592 drivers/video/fbdev/core/fbmem.c fb_logo_ex[i].logo, y, fb_logo_ex[i].n); n 1947 drivers/video/fbdev/core/fbmem.c struct list_head *pos, *n; n 1952 drivers/video/fbdev/core/fbmem.c list_for_each_safe(pos, n, &info->modelist) { n 1095 drivers/video/fbdev/core/modedb.c struct list_head *pos, *n; n 1099 drivers/video/fbdev/core/modedb.c list_for_each_safe(pos, n, head) { n 1115 drivers/video/fbdev/core/modedb.c struct list_head *pos, *n; n 1117 drivers/video/fbdev/core/modedb.c list_for_each_safe(pos, n, head) { n 381 drivers/video/fbdev/core/svgalib.c int svga_compute_pll(const struct svga_pll *pll, u32 f_wanted, u16 *m, u16 *n, u16 *r, int node) n 409 drivers/video/fbdev/core/svgalib.c *n = 0; n 422 drivers/video/fbdev/core/svgalib.c *n = an; n 432 drivers/video/fbdev/core/svgalib.c f_current = (pll->f_base * *m) / *n; n 434 drivers/video/fbdev/core/svgalib.c pr_debug("fb%d: m = %d n = %d r = %d\n", node, (unsigned int) *m, (unsigned int) *n, (unsigned int) *r); n 29 drivers/video/fbdev/core/syscopyarea.c const unsigned long *src, unsigned src_idx, int bits, unsigned n) n 36 drivers/video/fbdev/core/syscopyarea.c last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits)); n 40 drivers/video/fbdev/core/syscopyarea.c if (dst_idx+n <= bits) { n 52 drivers/video/fbdev/core/syscopyarea.c n -= bits - dst_idx; n 56 drivers/video/fbdev/core/syscopyarea.c n /= bits; n 57 drivers/video/fbdev/core/syscopyarea.c while (n >= 8) { n 66 drivers/video/fbdev/core/syscopyarea.c n -= 8; n 68 drivers/video/fbdev/core/syscopyarea.c while (n--) n 83 drivers/video/fbdev/core/syscopyarea.c if (dst_idx+n <= bits) { n 90 drivers/video/fbdev/core/syscopyarea.c } 
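fbmem's `safe_shift()` shown in this stretch of the listing handles a shift count that may be negative by turning it into a right shift, since shifting by a negative amount is undefined behaviour in C. The same helper, standalone, with a couple of checks:

```c
#include <stdio.h>

/* Shift left for n >= 0, right for n < 0; avoids the undefined
 * behaviour of a negative shift count. */
static unsigned safe_shift(unsigned d, int n)
{
	return n < 0 ? d >> -n : d << n;
}

int main(void)
{
	printf("%u %u %u\n",
	       safe_shift(3, 4),    /* 3 << 4 = 48 */
	       safe_shift(48, -4),  /* 48 >> 4 = 3 */
	       safe_shift(7, 0));   /* unchanged = 7 */
	return 0;
}
```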
else if (src_idx+n <= bits) { n 114 drivers/video/fbdev/core/syscopyarea.c n -= bits - dst_idx; n 122 drivers/video/fbdev/core/syscopyarea.c n -= bits - dst_idx; n 126 drivers/video/fbdev/core/syscopyarea.c m = n % bits; n 127 drivers/video/fbdev/core/syscopyarea.c n /= bits; n 128 drivers/video/fbdev/core/syscopyarea.c while (n >= 4) { n 141 drivers/video/fbdev/core/syscopyarea.c n -= 4; n 143 drivers/video/fbdev/core/syscopyarea.c while (n--) { n 172 drivers/video/fbdev/core/syscopyarea.c unsigned n) n 177 drivers/video/fbdev/core/syscopyarea.c dst += (dst_idx + n - 1) / bits; n 178 drivers/video/fbdev/core/syscopyarea.c src += (src_idx + n - 1) / bits; n 179 drivers/video/fbdev/core/syscopyarea.c dst_idx = (dst_idx + n - 1) % bits; n 180 drivers/video/fbdev/core/syscopyarea.c src_idx = (src_idx + n - 1) % bits; n 185 drivers/video/fbdev/core/syscopyarea.c last = FB_SHIFT_HIGH(p, ~0UL, (bits + dst_idx + 1 - n) % bits); n 189 drivers/video/fbdev/core/syscopyarea.c if ((unsigned long)dst_idx+1 >= n) { n 202 drivers/video/fbdev/core/syscopyarea.c n -= dst_idx+1; n 206 drivers/video/fbdev/core/syscopyarea.c n /= bits; n 207 drivers/video/fbdev/core/syscopyarea.c while (n >= 8) { n 216 drivers/video/fbdev/core/syscopyarea.c n -= 8; n 218 drivers/video/fbdev/core/syscopyarea.c while (n--) n 230 drivers/video/fbdev/core/syscopyarea.c if ((unsigned long)dst_idx+1 >= n) { n 237 drivers/video/fbdev/core/syscopyarea.c } else if (1+(unsigned long)src_idx >= n) { n 273 drivers/video/fbdev/core/syscopyarea.c n -= dst_idx+1; n 276 drivers/video/fbdev/core/syscopyarea.c m = n % bits; n 277 drivers/video/fbdev/core/syscopyarea.c n /= bits; n 278 drivers/video/fbdev/core/syscopyarea.c while (n >= 4) { n 291 drivers/video/fbdev/core/syscopyarea.c n -= 4; n 293 drivers/video/fbdev/core/syscopyarea.c while (n--) { n 26 drivers/video/fbdev/core/sysfillrect.c unsigned long pat, unsigned n, int bits) n 30 drivers/video/fbdev/core/sysfillrect.c if (!n) n 34 drivers/video/fbdev/core/sysfillrect.c last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits)); n 36 drivers/video/fbdev/core/sysfillrect.c if (dst_idx+n <= bits) { n 48 drivers/video/fbdev/core/sysfillrect.c n -= bits - dst_idx; n 52 drivers/video/fbdev/core/sysfillrect.c n /= bits; n 53 drivers/video/fbdev/core/sysfillrect.c while (n >= 8) { n 62 drivers/video/fbdev/core/sysfillrect.c n -= 8; n 64 drivers/video/fbdev/core/sysfillrect.c while (n--) n 82 drivers/video/fbdev/core/sysfillrect.c unsigned long pat, int left, int right, unsigned n, int bits) n 86 drivers/video/fbdev/core/sysfillrect.c if (!n) n 90 drivers/video/fbdev/core/sysfillrect.c last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits)); n 92 drivers/video/fbdev/core/sysfillrect.c if (dst_idx+n <= bits) { n 104 drivers/video/fbdev/core/sysfillrect.c n -= bits - dst_idx; n 108 drivers/video/fbdev/core/sysfillrect.c n /= bits; n 109 drivers/video/fbdev/core/sysfillrect.c while (n >= 4) { n 118 drivers/video/fbdev/core/sysfillrect.c n -= 4; n 120 drivers/video/fbdev/core/sysfillrect.c while (n--) { n 136 drivers/video/fbdev/core/sysfillrect.c unsigned long pat, unsigned n, int bits) n 141 drivers/video/fbdev/core/sysfillrect.c if (!n) n 145 drivers/video/fbdev/core/sysfillrect.c last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits)); n 147 drivers/video/fbdev/core/sysfillrect.c if (dst_idx+n <= bits) { n 158 drivers/video/fbdev/core/sysfillrect.c n -= bits - dst_idx; n 162 drivers/video/fbdev/core/sysfillrect.c n /= bits; n 163 drivers/video/fbdev/core/sysfillrect.c while (n >= 8) { n 172 
drivers/video/fbdev/core/sysfillrect.c n -= 8; n 174 drivers/video/fbdev/core/sysfillrect.c while (n--) n 192 drivers/video/fbdev/core/sysfillrect.c unsigned long pat, int left, int right, unsigned n, n 197 drivers/video/fbdev/core/sysfillrect.c if (!n) n 201 drivers/video/fbdev/core/sysfillrect.c last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits)); n 203 drivers/video/fbdev/core/sysfillrect.c if (dst_idx+n <= bits) { n 216 drivers/video/fbdev/core/sysfillrect.c n -= bits - dst_idx; n 220 drivers/video/fbdev/core/sysfillrect.c n /= bits; n 221 drivers/video/fbdev/core/sysfillrect.c while (n >= 4) { n 230 drivers/video/fbdev/core/sysfillrect.c n -= 4; n 232 drivers/video/fbdev/core/sysfillrect.c while (n--) { n 272 drivers/video/fbdev/core/sysfillrect.c int dst_idx, unsigned long pat, unsigned n, n 298 drivers/video/fbdev/core/sysfillrect.c int right, unsigned n, int bits) = NULL; n 58 drivers/video/fbdev/core/sysimgblt.c int i, n, bpp = p->var.bits_per_pixel; n 65 drivers/video/fbdev/core/sysimgblt.c n = image->width; n 76 drivers/video/fbdev/core/sysimgblt.c while (n--) { n 376 drivers/video/fbdev/ffb.c static void FFBFifo(struct ffb_par *par, int n) n 381 drivers/video/fbdev/ffb.c if (cache - n < 0) { n 386 drivers/video/fbdev/ffb.c } while (cache - n < 0); n 388 drivers/video/fbdev/ffb.c par->fifo_cache = cache - n; n 238 drivers/video/fbdev/gxt4500.c int m, n, pdiv1, pdiv2, postdiv; n 257 drivers/video/fbdev/gxt4500.c n = intf * postdiv / period_ps; n 258 drivers/video/fbdev/gxt4500.c if (n < 3 || n > 160) n 260 drivers/video/fbdev/gxt4500.c t = par->refclk_ps * m * postdiv / n; n 264 drivers/video/fbdev/gxt4500.c par->pll_n = n; n 351 drivers/video/fbdev/i740fb.c int m, n; n 357 drivers/video/fbdev/i740fb.c n = 2; n 359 drivers/video/fbdev/i740fb.c n++; n 360 drivers/video/fbdev/i740fb.c m = ((f_vco * n) / I740_REF_FREQ + 2) / 4; n 367 drivers/video/fbdev/i740fb.c / n) + ((1 << p_best) / 2)) / (1 << p_best); n 373 drivers/video/fbdev/i740fb.c n_best = n; n 378 drivers/video/fbdev/i740fb.c ((n <= TARGET_MAX_N) || (abs(err_best) > err_max))); n 382 drivers/video/fbdev/i740fb.c n_best = n; n 25 drivers/video/fbdev/i810/i810_accel.c #define PUT_RING(n) { \ n 26 drivers/video/fbdev/i810/i810_accel.c i810_writel(par->cur_tail, par->iring.virtual, n); \ n 127 drivers/video/fbdev/i810/i810_gtf.c int n, blank_s, blank_e; n 133 drivers/video/fbdev/i810/i810_gtf.c n = ((xres + var->right_margin + var->hsync_len + n 135 drivers/video/fbdev/i810/i810_gtf.c par->regs.cr00 = (u8) n; n 136 drivers/video/fbdev/i810/i810_gtf.c par->regs.cr35 = (u8) ((n >> 8) & 1); n 160 drivers/video/fbdev/i810/i810_gtf.c n = yres + var->lower_margin + var->vsync_len + var->upper_margin - 2; n 161 drivers/video/fbdev/i810/i810_gtf.c par->regs.cr06 = (u8) (n & 0xFF); n 162 drivers/video/fbdev/i810/i810_gtf.c par->regs.cr30 = (u8) ((n >> 8) & 0x0F); n 165 drivers/video/fbdev/i810/i810_gtf.c n = yres + var->lower_margin; n 166 drivers/video/fbdev/i810/i810_gtf.c par->regs.cr10 = (u8) (n & 0xFF); n 167 drivers/video/fbdev/i810/i810_gtf.c par->regs.cr32 = (u8) ((n >> 8) & 0x0F); n 173 drivers/video/fbdev/i810/i810_gtf.c n = yres - 1; n 174 drivers/video/fbdev/i810/i810_gtf.c par->regs.cr12 = (u8) (n & 0xFF); n 175 drivers/video/fbdev/i810/i810_gtf.c par->regs.cr31 = (u8) ((n >> 8) & 0x0F); n 706 drivers/video/fbdev/i810/i810_main.c static void i810_calc_dclk(u32 freq, u32 *m, u32 *n, u32 *p) n 756 drivers/video/fbdev/i810/i810_main.c if (n) *n = (n_best - 2) & 0x3FF; n 659 drivers/video/fbdev/intelfb/intelfbhw.c static int 
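The i810 CRT timing entries here (and, further down, the HDMI N_SVAL writes) store a value wider than one register by splitting it: the low byte goes into one register and the remaining high bits into another. A small sketch of that packing and the matching unpacking; the register names and field widths are invented.

```c
#include <stdio.h>
#include <stdint.h>

/* Pretend 8-bit registers holding a 12-bit vertical total (illustrative). */
struct fake_regs {
	uint8_t cr06;   /* bits 0..7  */
	uint8_t cr30;   /* bits 8..11 */
};

static void pack_vtotal(struct fake_regs *r, unsigned vtotal)
{
	r->cr06 = vtotal & 0xFF;         /* low byte          */
	r->cr30 = (vtotal >> 8) & 0x0F;  /* remaining 4 bits  */
}

static unsigned unpack_vtotal(const struct fake_regs *r)
{
	return r->cr06 | ((unsigned)(r->cr30 & 0x0F) << 8);
}

int main(void)
{
	struct fake_regs r;

	pack_vtotal(&r, 0x31A);          /* 794 lines */
	printf("cr06=%02x cr30=%02x -> %u\n",
	       (unsigned)r.cr06, (unsigned)r.cr30, unpack_vtotal(&r));
	return 0;
}
```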
calc_vclock3(int index, int m, int n, int p) n 661 drivers/video/fbdev/intelfb/intelfbhw.c if (p == 0 || n == 0) n 663 drivers/video/fbdev/intelfb/intelfbhw.c return plls[index].ref_clk * m / n / p; n 666 drivers/video/fbdev/intelfb/intelfbhw.c static int calc_vclock(int index, int m1, int m2, int n, int p1, int p2, n 673 drivers/video/fbdev/intelfb/intelfbhw.c n += 2; n 674 drivers/video/fbdev/intelfb/intelfbhw.c vco = pll->ref_clk * m / n; n 716 drivers/video/fbdev/intelfb/intelfbhw.c int i, m1, m2, n, p1, p2; n 727 drivers/video/fbdev/intelfb/intelfbhw.c n = (hw->vga0_divisor >> FP_N_DIVISOR_SHIFT) & FP_DIVISOR_MASK; n 734 drivers/video/fbdev/intelfb/intelfbhw.c m1, m2, n, p1, p2); n 736 drivers/video/fbdev/intelfb/intelfbhw.c calc_vclock(index, m1, m2, n, p1, p2, 0)); n 738 drivers/video/fbdev/intelfb/intelfbhw.c n = (hw->vga1_divisor >> FP_N_DIVISOR_SHIFT) & FP_DIVISOR_MASK; n 744 drivers/video/fbdev/intelfb/intelfbhw.c m1, m2, n, p1, p2); n 746 drivers/video/fbdev/intelfb/intelfbhw.c calc_vclock(index, m1, m2, n, p1, p2, 0)); n 755 drivers/video/fbdev/intelfb/intelfbhw.c n = (hw->fpa0 >> FP_N_DIVISOR_SHIFT) & FP_DIVISOR_MASK; n 762 drivers/video/fbdev/intelfb/intelfbhw.c m1, m2, n, p1, p2); n 764 drivers/video/fbdev/intelfb/intelfbhw.c calc_vclock(index, m1, m2, n, p1, p2, 0)); n 766 drivers/video/fbdev/intelfb/intelfbhw.c n = (hw->fpa1 >> FP_N_DIVISOR_SHIFT) & FP_DIVISOR_MASK; n 773 drivers/video/fbdev/intelfb/intelfbhw.c m1, m2, n, p1, p2); n 775 drivers/video/fbdev/intelfb/intelfbhw.c calc_vclock(index, m1, m2, n, p1, p2, 0)); n 940 drivers/video/fbdev/intelfb/intelfbhw.c u32 m1, m2, n, p1, p2, n1, testm; n 968 drivers/video/fbdev/intelfb/intelfbhw.c n = pll->min_n; n 972 drivers/video/fbdev/intelfb/intelfbhw.c m = ROUND_UP_TO(f_vco * n, pll->ref_clk) / pll->ref_clk; n 978 drivers/video/fbdev/intelfb/intelfbhw.c f_out = calc_vclock3(index, testm, n, p); n 991 drivers/video/fbdev/intelfb/intelfbhw.c n_best = n; n 996 drivers/video/fbdev/intelfb/intelfbhw.c n++; n 997 drivers/video/fbdev/intelfb/intelfbhw.c } while ((n <= pll->max_n) && (f_out >= clock)); n 1006 drivers/video/fbdev/intelfb/intelfbhw.c n = n_best; n 1010 drivers/video/fbdev/intelfb/intelfbhw.c n1 = n - 2; n 1014 drivers/video/fbdev/intelfb/intelfbhw.c m, m1, m2, n, n1, p, p1, p2, n 1015 drivers/video/fbdev/intelfb/intelfbhw.c calc_vclock3(index, m, n, p), n 1017 drivers/video/fbdev/intelfb/intelfbhw.c calc_vclock3(index, m, n, p) * p); n 1046 drivers/video/fbdev/intelfb/intelfbhw.c u32 m1, m2, n, p1, p2, clock_target, clock; n 1116 drivers/video/fbdev/intelfb/intelfbhw.c &n, &p1, &p2, &clock)) { n 1130 drivers/video/fbdev/intelfb/intelfbhw.c if (check_overflow(n, FP_DIVISOR_MASK, "PLL N parameter")) n 1143 drivers/video/fbdev/intelfb/intelfbhw.c *fp0 = (n << FP_N_DIVISOR_SHIFT) | n 1504 drivers/video/fbdev/intelfb/intelfbhw.c static int wait_ring(struct intelfb_info *dinfo, int n) n 1511 drivers/video/fbdev/intelfb/intelfbhw.c DBG_MSG("wait_ring: %d\n", n); n 1515 drivers/video/fbdev/intelfb/intelfbhw.c while (dinfo->ring_space < n) { n 1534 drivers/video/fbdev/intelfb/intelfbhw.c dinfo->ring_space, n); n 534 drivers/video/fbdev/intelfb/intelfbhw.h #define OUT_RING(n) do { \ n 535 drivers/video/fbdev/intelfb/intelfbhw.h writel((n), (u32 __iomem *)(dinfo->ring.virtual + dinfo->ring_tail));\ n 540 drivers/video/fbdev/intelfb/intelfbhw.h #define START_RING(n) do { \ n 541 drivers/video/fbdev/intelfb/intelfbhw.h if (dinfo->ring_space < (n) * 4) \ n 542 drivers/video/fbdev/intelfb/intelfbhw.h wait_ring(dinfo,(n) * 4); \ n 
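intelfb's `START_RING`/`OUT_RING` pair above reserves space for `n` ring entries up front (spinning in `wait_ring()` until the hardware has consumed enough) and only then writes them, so a command is never emitted into a ring that cannot hold it. Below is a software-only sketch of the same reserve-then-write idea with a fixed-size byte ring; there is no hardware read pointer here, so "waiting" is reduced to a failure return, and all names are invented.

```c
#include <stdio.h>

#define RING_SIZE 16

struct ring {
	unsigned char buf[RING_SIZE];
	unsigned head;      /* next write position */
	unsigned space;     /* bytes known to be free */
};

/* Make sure at least n bytes are free before anything is written.
 * A real driver would poll the consumer's read pointer here. */
static int ring_start(struct ring *r, unsigned n)
{
	if (r->space < n)
		return -1;          /* caller must retry later */
	r->space -= n;
	return 0;
}

static void ring_out(struct ring *r, unsigned char byte)
{
	r->buf[r->head] = byte;
	r->head = (r->head + 1) % RING_SIZE;   /* wrap around */
}

int main(void)
{
	struct ring r = { .head = 0, .space = RING_SIZE };
	const char cmd[] = "CMD1";

	if (ring_start(&r, sizeof(cmd)) == 0) {
		for (size_t i = 0; i < sizeof(cmd); i++)
			ring_out(&r, (unsigned char)cmd[i]);
		printf("emitted %zu bytes, %u left\n", sizeof(cmd), r.space);
	}
	return 0;
}
```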
543 drivers/video/fbdev/intelfb/intelfbhw.h dinfo->ring_space -= (n) * 4; \ n 31 drivers/video/fbdev/kyro/STG4000Reg.h #define SET_BIT(n) (1<<(n)) n 32 drivers/video/fbdev/kyro/STG4000Reg.h #define CLEAR_BIT(n) (tmp &= ~(1<<n)) n 42 drivers/video/fbdev/kyro/STG4000Reg.h #define CLEAR_BIT_2(n) (usTemp &= ~(1<<n)) n 31 drivers/video/fbdev/matrox/g450_pll.c unsigned int m, n; n 34 drivers/video/fbdev/matrox/g450_pll.c n = ((mnp >> 7) & 0x1FE) + 4; n 35 drivers/video/fbdev/matrox/g450_pll.c return (minfo->features.pll.ref_freq * n + (m >> 1)) / m; n 59 drivers/video/fbdev/matrox/g450_pll.c unsigned int m, n, p; n 99 drivers/video/fbdev/matrox/g450_pll.c n = ((tvco * (m+1) + minfo->features.pll.ref_freq) / (minfo->features.pll.ref_freq * 2)) - 2; n 100 drivers/video/fbdev/matrox/g450_pll.c } while (n < 0x03 || n > 0x7A); n 101 drivers/video/fbdev/matrox/g450_pll.c return (m << 16) | (n << 8) | p; n 177 drivers/video/fbdev/matrox/g450_pll.c unsigned char n = mnp >> 8; n 183 drivers/video/fbdev/matrox/g450_pll.c matroxfb_DAC_in(minfo, M1064_XPIXPLLAN) != n || n 188 drivers/video/fbdev/matrox/g450_pll.c matroxfb_DAC_in(minfo, M1064_XPIXPLLBN) != n || n 193 drivers/video/fbdev/matrox/g450_pll.c matroxfb_DAC_in(minfo, M1064_XPIXPLLCN) != n || n 198 drivers/video/fbdev/matrox/g450_pll.c matroxfb_DAC_in(minfo, DAC1064_XSYSPLLN) != n || n 203 drivers/video/fbdev/matrox/g450_pll.c matroxfb_DAC_in(minfo, M1064_XVIDPLLN) != n || n 90 drivers/video/fbdev/matrox/matroxfb_DAC1064.c unsigned int m, n, p; n 94 drivers/video/fbdev/matrox/matroxfb_DAC1064.c DAC1064_calcclock(minfo, fout, minfo->max_pixel_clock, &m, &n, &p); n 96 drivers/video/fbdev/matrox/matroxfb_DAC1064.c minfo->hw.DACclk[1] = n; n 127 drivers/video/fbdev/matrox/matroxfb_DAC1064.c unsigned int m, n, p; n 142 drivers/video/fbdev/matrox/matroxfb_DAC1064.c DAC1064_calcclock(minfo, fmem, minfo->max_pixel_clock, &m, &n, &p); n 144 drivers/video/fbdev/matrox/matroxfb_DAC1064.c outDAC1064(minfo, DAC1064_XSYSPLLN, hw->DACclk[4] = n); n 611 drivers/video/fbdev/matrox/matroxfb_DAC1064.c int m, int n, int p) n 627 drivers/video/fbdev/matrox/matroxfb_DAC1064.c outDAC1064(minfo, reg++, n); n 659 drivers/video/fbdev/matrox/matroxfb_DAC1064.c unsigned int m, n, p; n 663 drivers/video/fbdev/matrox/matroxfb_DAC1064.c DAC1064_calcclock(minfo, freq, minfo->max_pixel_clock, &m, &n, &p); n 664 drivers/video/fbdev/matrox/matroxfb_DAC1064.c MGAG100_progPixClock(minfo, flags, m, n, p); n 674 drivers/video/fbdev/matrox/matroxfb_base.h #define mga_fifo(n) do {} while ((mga_inl(M_FIFOSTATUS) & 0xFF) < (n)) n 257 drivers/video/fbdev/matrox/matroxfb_maven.c unsigned int n; n 261 drivers/video/fbdev/matrox/matroxfb_maven.c n = (fwant * m) / fxtal; n 262 drivers/video/fbdev/matrox/matroxfb_maven.c if (n < pll->feed_div_min) n 264 drivers/video/fbdev/matrox/matroxfb_maven.c if (n > pll->feed_div_max) n 267 drivers/video/fbdev/matrox/matroxfb_maven.c ln = fxtal * n; n 279 drivers/video/fbdev/matrox/matroxfb_maven.c dprintk(KERN_DEBUG "Match: %u / %u / %u / %u\n", n, m, p, ln); n 285 drivers/video/fbdev/matrox/matroxfb_maven.c *feed = n; n 165 drivers/video/fbdev/matrox/matroxfb_misc.c unsigned int n; n 167 drivers/video/fbdev/matrox/matroxfb_misc.c n = (fwant * (m + 1) + (fxtal >> 1)) / fxtal - 1; n 168 drivers/video/fbdev/matrox/matroxfb_misc.c if (n > pll->feed_div_max) n 170 drivers/video/fbdev/matrox/matroxfb_misc.c if (n < pll->feed_div_min) n 171 drivers/video/fbdev/matrox/matroxfb_misc.c n = pll->feed_div_min; n 172 drivers/video/fbdev/matrox/matroxfb_misc.c fvco = 
(fxtal * (n + 1)) / (m + 1); n 181 drivers/video/fbdev/matrox/matroxfb_misc.c *feed = n; n 117 drivers/video/fbdev/mbx/mbxfb.c u8 n; n 124 drivers/video/fbdev/mbx/mbxfb.c u8 m, n, p; n 147 drivers/video/fbdev/mbx/mbxfb.c for (n = 2; n < 8; n++) { n 149 drivers/video/fbdev/mbx/mbxfb.c clk = (ref_clk * m) / (n * (1 << p)); n 156 drivers/video/fbdev/mbx/mbxfb.c div->n = n; n 284 drivers/video/fbdev/mbx/mbxfb.c write_reg_dly((Disp_Pll_M(div.m) | Disp_Pll_N(div.n) | n 205 drivers/video/fbdev/neofb.c int n, d, f; n 211 drivers/video/fbdev/neofb.c for (n = 0; n <= MAX_N; n++) { n 215 drivers/video/fbdev/neofb.c f_out = ((14318 * (n + 1)) / (d + 1)) >> f; n 219 drivers/video/fbdev/neofb.c n_best = n; n 22 drivers/video/fbdev/nvidia/nv_type.h #define SetBit(n) (1<<(n)) n 34 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_BA0(n) (DISPC_OVL_BASE(n) + \ n 35 drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_BA0_OFFSET(n)) n 36 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_BA1(n) (DISPC_OVL_BASE(n) + \ n 37 drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_BA1_OFFSET(n)) n 38 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_BA0_UV(n) (DISPC_OVL_BASE(n) + \ n 39 drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_BA0_UV_OFFSET(n)) n 40 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_BA1_UV(n) (DISPC_OVL_BASE(n) + \ n 41 drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_BA1_UV_OFFSET(n)) n 42 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_POSITION(n) (DISPC_OVL_BASE(n) + \ n 43 drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_POS_OFFSET(n)) n 44 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_SIZE(n) (DISPC_OVL_BASE(n) + \ n 45 drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_SIZE_OFFSET(n)) n 46 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_ATTRIBUTES(n) (DISPC_OVL_BASE(n) + \ n 47 drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_ATTR_OFFSET(n)) n 48 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_ATTRIBUTES2(n) (DISPC_OVL_BASE(n) + \ n 49 drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_ATTR2_OFFSET(n)) n 50 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_FIFO_THRESHOLD(n) (DISPC_OVL_BASE(n) + \ n 51 drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_FIFO_THRESH_OFFSET(n)) n 52 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_FIFO_SIZE_STATUS(n) (DISPC_OVL_BASE(n) + \ n 53 drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_FIFO_SIZE_STATUS_OFFSET(n)) n 54 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_ROW_INC(n) (DISPC_OVL_BASE(n) + \ n 55 drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_ROW_INC_OFFSET(n)) n 56 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_PIXEL_INC(n) (DISPC_OVL_BASE(n) + \ n 57 drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_PIX_INC_OFFSET(n)) n 58 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_WINDOW_SKIP(n) (DISPC_OVL_BASE(n) + \ n 59 drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_WINDOW_SKIP_OFFSET(n)) n 60 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_TABLE_BA(n) (DISPC_OVL_BASE(n) + \ n 61 drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_TABLE_BA_OFFSET(n)) n 62 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_FIR(n) (DISPC_OVL_BASE(n) + \ n 63 drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_FIR_OFFSET(n)) n 64 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_FIR2(n) (DISPC_OVL_BASE(n) + \ n 65 
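The long run of `DISPC_OVL_*(n)` definitions here, like the `DSI_VC_*(n)` and `LDBnB*(n)` families that follow, encodes "register of instance n" as a base address plus an n-dependent offset, so one macro serves every copy of the hardware block. A compact illustration with an invented register map:

```c
#include <stdio.h>

/* Hypothetical register map: each channel occupies a 0x20-byte window
 * starting at 0x0100, with CTRL at +0x00 and STATUS at +0x04. */
#define CHAN_BASE(n)	(0x0100u + (n) * 0x20u)
#define CHAN_CTRL(n)	(CHAN_BASE(n) + 0x00u)
#define CHAN_STATUS(n)	(CHAN_BASE(n) + 0x04u)

int main(void)
{
	for (unsigned n = 0; n < 3; n++)
		printf("chan %u: ctrl=0x%04x status=0x%04x\n",
		       n, CHAN_CTRL(n), CHAN_STATUS(n));
	return 0;
}
```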
drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_FIR2_OFFSET(n)) n 66 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_PICTURE_SIZE(n) (DISPC_OVL_BASE(n) + \ n 67 drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_PIC_SIZE_OFFSET(n)) n 68 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_ACCU0(n) (DISPC_OVL_BASE(n) + \ n 69 drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_ACCU0_OFFSET(n)) n 70 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_ACCU1(n) (DISPC_OVL_BASE(n) + \ n 71 drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_ACCU1_OFFSET(n)) n 72 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_ACCU2_0(n) (DISPC_OVL_BASE(n) + \ n 73 drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_ACCU2_0_OFFSET(n)) n 74 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_ACCU2_1(n) (DISPC_OVL_BASE(n) + \ n 75 drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_ACCU2_1_OFFSET(n)) n 76 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_FIR_COEF_H(n, i) (DISPC_OVL_BASE(n) + \ n 77 drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_FIR_COEF_H_OFFSET(n, i)) n 78 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_FIR_COEF_HV(n, i) (DISPC_OVL_BASE(n) + \ n 79 drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_FIR_COEF_HV_OFFSET(n, i)) n 80 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_FIR_COEF_H2(n, i) (DISPC_OVL_BASE(n) + \ n 81 drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_FIR_COEF_H2_OFFSET(n, i)) n 82 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_FIR_COEF_HV2(n, i) (DISPC_OVL_BASE(n) + \ n 83 drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_FIR_COEF_HV2_OFFSET(n, i)) n 84 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_CONV_COEF(n, i) (DISPC_OVL_BASE(n) + \ n 85 drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_CONV_COEF_OFFSET(n, i)) n 86 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_FIR_COEF_V(n, i) (DISPC_OVL_BASE(n) + \ n 87 drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_FIR_COEF_V_OFFSET(n, i)) n 88 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_FIR_COEF_V2(n, i) (DISPC_OVL_BASE(n) + \ n 89 drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_FIR_COEF_V2_OFFSET(n, i)) n 90 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_PRELOAD(n) (DISPC_OVL_BASE(n) + \ n 91 drivers/video/fbdev/omap2/omapfb/dss/dispc.h DISPC_PRELOAD_OFFSET(n)) n 92 drivers/video/fbdev/omap2/omapfb/dss/dispc.h #define DISPC_OVL_MFLAG_THRESHOLD(n) DISPC_MFLAG_THRESHOLD_OFFSET(n) n 193 drivers/video/fbdev/omap2/omapfb/dss/dpi.c static bool dpi_calc_pll_cb(int n, int m, unsigned long fint, n 199 drivers/video/fbdev/omap2/omapfb/dss/dpi.c ctx->dsi_cinfo.n = n; n 78 drivers/video/fbdev/omap2/omapfb/dss/dsi.c #define DSI_VC_CTRL(n) DSI_REG(DSI_PROTO, 0x0100 + (n * 0x20)) n 79 drivers/video/fbdev/omap2/omapfb/dss/dsi.c #define DSI_VC_TE(n) DSI_REG(DSI_PROTO, 0x0104 + (n * 0x20)) n 80 drivers/video/fbdev/omap2/omapfb/dss/dsi.c #define DSI_VC_LONG_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0108 + (n * 0x20)) n 81 drivers/video/fbdev/omap2/omapfb/dss/dsi.c #define DSI_VC_LONG_PACKET_PAYLOAD(n) DSI_REG(DSI_PROTO, 0x010C + (n * 0x20)) n 82 drivers/video/fbdev/omap2/omapfb/dss/dsi.c #define DSI_VC_SHORT_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0110 + (n * 0x20)) n 83 drivers/video/fbdev/omap2/omapfb/dss/dsi.c #define DSI_VC_IRQSTATUS(n) DSI_REG(DSI_PROTO, 0x0118 + (n * 0x20)) n 84 drivers/video/fbdev/omap2/omapfb/dss/dsi.c #define DSI_VC_IRQENABLE(n) 
DSI_REG(DSI_PROTO, 0x011C + (n * 0x20)) n 1480 drivers/video/fbdev/omap2/omapfb/dss/dsi.c seq_printf(s, "Fint\t\t%-16lun %u\n", cinfo->fint, cinfo->n); n 4434 drivers/video/fbdev/omap2/omapfb/dss/dsi.c static bool dsi_cm_calc_pll_cb(int n, int m, unsigned long fint, n 4439 drivers/video/fbdev/omap2/omapfb/dss/dsi.c ctx->dsi_cinfo.n = n; n 4732 drivers/video/fbdev/omap2/omapfb/dss/dsi.c static bool dsi_vm_calc_pll_cb(int n, int m, unsigned long fint, n 4737 drivers/video/fbdev/omap2/omapfb/dss/dsi.c ctx->dsi_cinfo.n = n; n 126 drivers/video/fbdev/omap2/omapfb/dss/dss.h u16 n; n 477 drivers/video/fbdev/omap2/omapfb/dss/dss.h typedef bool (*dss_pll_calc_func)(int n, int m, unsigned long fint, n 210 drivers/video/fbdev/omap2/omapfb/dss/hdmi.h u32 n; n 317 drivers/video/fbdev/omap2/omapfb/dss/hdmi.h int hdmi_compute_acr(u32 pclk, u32 sample_freq, u32 *n, u32 *cts); n 529 drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c REG_FLD_MOD(av_base, HDMI_CORE_AV_N_SVAL1, cfg->n, 7, 0); n 530 drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c REG_FLD_MOD(av_base, HDMI_CORE_AV_N_SVAL2, cfg->n >> 8, 7, 0); n 531 drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c REG_FLD_MOD(av_base, HDMI_CORE_AV_N_SVAL3, cfg->n >> 16, 7, 0); n 682 drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c int err, n, cts, channel_count; n 744 drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c err = hdmi_compute_acr(pclk, fs_nr, &n, &cts); n 747 drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c acore.n = n; n 142 drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.h #define HDMI_CORE_AV_AVI_DBYTE(n) (n * 4 + 0x110) n 147 drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.h #define HDMI_CORE_AV_SPD_DBYTE(n) (n * 4 + 0x190) n 152 drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.h #define HDMI_CORE_AV_AUD_DBYTE(n) (n * 4 + 0x210) n 157 drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.h #define HDMI_CORE_AV_MPEG_DBYTE(n) (n * 4 + 0x290) n 158 drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.h #define HDMI_CORE_AV_GEN_DBYTE(n) (n * 4 + 0x300) n 160 drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.h #define HDMI_CORE_AV_GEN2_DBYTE(n) (n * 4 + 0x380) n 190 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c int r, n, i; n 202 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c n = edid[0x7e]; n 204 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c if (n > max_ext_blocks) n 205 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c n = max_ext_blocks; n 207 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c for (i = 1; i <= n; i++) { n 644 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c REG_FLD_MOD(base, HDMI_CORE_AUD_N1, cfg->n, 7, 0); n 645 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c REG_FLD_MOD(base, HDMI_CORE_AUD_N2, cfg->n >> 8, 7, 0); n 646 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c REG_FLD_MOD(base, HDMI_CORE_AUD_N3, cfg->n >> 16, 3, 0); n 793 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c int err, n, cts, channel_count; n 836 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c err = hdmi_compute_acr(pclk, fs_nr, &n, &cts); n 837 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c core_cfg.n = n; n 104 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.h #define HDMI_CORE_FC_VSDPAYLOAD(n) (n * 4 + 0x040C8) n 105 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.h #define HDMI_CORE_FC_SPDVENDORNAME(n) (n * 4 + 0x04128) n 106 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.h #define HDMI_CORE_FC_SPDPRODUCTNAME(n) (n * 4 + 0x04148) n 112 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.h #define HDMI_CORE_FC_AUDSCHNLS(n) (n * 4 + 0x0419C) n 116 
drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.h #define HDMI_CORE_FC_ACP(n) ((16-n) * 4 + 0x04208) n 118 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.h #define HDMI_CORE_FC_ISCR1(n) ((16-n) * 4 + 0x0424C) n 119 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.h #define HDMI_CORE_FC_ISCR2(n) ((15-n) * 4 + 0x0428C) n 125 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.h #define HDMI_CORE_FC_RDRB(n) (n * 4 + 0x042E0) n 144 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.h #define HDMI_CORE_FC_GMD_PB(n) (n * 4 + 0x04414) n 52 drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c int hdmi_compute_acr(u32 pclk, u32 sample_freq, u32 *n, u32 *cts) n 57 drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c if (n == NULL || cts == NULL) n 95 drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c *n = 8192; n 98 drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c *n = 12544; n 101 drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c *n = 8192; n 104 drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c *n = 25088; n 107 drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c *n = 16384; n 110 drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c *n = 50176; n 113 drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c *n = 32768; n 121 drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c *n = 4096; n 124 drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c *n = 6272; n 127 drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c *n = 6144; n 130 drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c *n = 12544; n 133 drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c *n = 12288; n 136 drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c *n = 25088; n 139 drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c *n = 24576; n 146 drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c *cts = (pclk/1000) * (*n / 128) * deep_color / (sample_freq / 10); n 45 drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c unsigned n, m, mf, m2, sd; n 56 drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c n = DIV_ROUND_UP(clkin, hw->fint_max); n 57 drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c fint = clkin / n; n 85 drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c n, m, mf, m2, sd); n 88 drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c pi->n = n; n 113 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c struct dss_conv_node *n = kmalloc(sizeof(struct dss_conv_node), n 115 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c if (n) { n 116 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c n->node = node; n 117 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c n->root = root; n 118 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c list_add(&n->list, &dss_conv_list); n 124 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c struct dss_conv_node *n; n 126 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c list_for_each_entry(n, &dss_conv_list, list) { n 127 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c if (n->node == node) n 136 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c struct device_node *n; n 144 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c n = of_get_child_by_name(node, "ports"); n 145 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c if (!n) n 146 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c n = of_get_child_by_name(node, "port"); n 147 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c if (!n) n 150 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c of_node_put(n); n 152 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c n = NULL; n 153 
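The `hdmi_compute_acr()` entries in this listing pick the audio clock regeneration value N from a per-sample-rate table and then derive CTS so the sink can reconstruct the audio clock; the underlying relation from the HDMI specification is 128 * f_s = f_TMDS * N / CTS, i.e. CTS = f_TMDS * N / (128 * f_s) (the listed helper additionally scales for deep colour). A small check with the common 48 kHz / 148.5 MHz case, assuming the usual N of 6144; treat the figures as illustrative.

```c
#include <stdio.h>
#include <stdint.h>

/* CTS from the HDMI ACR relation: 128 * f_s = f_tmds * N / CTS. */
static uint32_t compute_cts(uint64_t f_tmds_hz, uint32_t n, uint32_t f_s_hz)
{
	return (uint32_t)(f_tmds_hz * n / (128ULL * f_s_hz));
}

int main(void)
{
	/* 1080p60 TMDS clock with 48 kHz audio and N = 6144. */
	uint32_t cts = compute_cts(148500000ULL, 6144, 48000);

	printf("N=6144 CTS=%u\n", cts);   /* expect 148500 */
	return 0;
}
```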
drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c while ((n = of_graph_get_next_endpoint(node, n)) != NULL) { n 156 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c pn = of_graph_get_remote_port_parent(n); n 202 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c struct dss_conv_node *n; n 204 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c n = list_first_entry(&dss_conv_list, struct dss_conv_node, n 207 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c if (!n->root) n 208 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c omapdss_omapify_node(n->node); n 210 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c list_del(&n->list); n 211 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c of_node_put(n->node); n 212 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c kfree(n); n 151 drivers/video/fbdev/omap2/omapfb/dss/pll.c int n, n_start, n_stop; n 167 drivers/video/fbdev/omap2/omapfb/dss/pll.c for (n = n_start; n <= n_stop; ++n) { n 168 drivers/video/fbdev/omap2/omapfb/dss/pll.c fint = clkin / n; n 179 drivers/video/fbdev/omap2/omapfb/dss/pll.c if (func(n, m, fint, clkdco, data)) n 249 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, cinfo->n - 1, hw->n_msb, hw->n_lsb); /* PLL_REGN */ n 339 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, cinfo->n - 1, 8, 1); /* PLL_REGN */ n 254 drivers/video/fbdev/pm2fb.c unsigned char n; n 261 drivers/video/fbdev/pm2fb.c for (n = 2; n < 15; n++) { n 263 drivers/video/fbdev/pm2fb.c f = PM2_REFERENCE_CLOCK * m / n; n 270 drivers/video/fbdev/pm2fb.c *nn = n; n 283 drivers/video/fbdev/pm2fb.c unsigned char n; n 290 drivers/video/fbdev/pm2fb.c for (n = 2 * m + 1; n; n++) { n 292 drivers/video/fbdev/pm2fb.c f = (PM2_REFERENCE_CLOCK >> (p + 1)) * n / m; n 296 drivers/video/fbdev/pm2fb.c *nn = n; n 449 drivers/video/fbdev/pm2fb.c unsigned char m, n, p; n 453 drivers/video/fbdev/pm2fb.c pm2v_mnp(clk/2, &m, &n, &p); n 458 drivers/video/fbdev/pm2fb.c pm2v_RDAC_WR(par, PM2VI_RD_MCLK_FEEDBACK, n); n 468 drivers/video/fbdev/pm2fb.c pm2_mnp(clk, &m, &n, &p); n 472 drivers/video/fbdev/pm2fb.c pm2_RDAC_WR(par, PM2I_RD_MEMORY_CLOCK_2, n); n 486 drivers/video/fbdev/pm2fb.c unsigned char m, n, p; n 490 drivers/video/fbdev/pm2fb.c pm2_mnp(clk, &m, &n, &p); n 494 drivers/video/fbdev/pm2fb.c pm2_RDAC_WR(par, PM2I_RD_PIXEL_CLOCK_A2, n); n 503 drivers/video/fbdev/pm2fb.c pm2v_mnp(clk/2, &m, &n, &p); n 507 drivers/video/fbdev/pm2fb.c pm2v_RDAC_WR(par, PM2VI_RD_CLK0_FEEDBACK, n); n 104 drivers/video/fbdev/pm3fb.c static inline void PM3_WAIT(struct pm3_par *par, u32 n) n 106 drivers/video/fbdev/pm3fb.c while (PM3_READ_REG(par, PM3InFIFOSpace) < n) n 825 drivers/video/fbdev/pm3fb.c unsigned char uninitialized_var(n); /* ClkFeedBackScale */ n 829 drivers/video/fbdev/pm3fb.c (void)pm3fb_calculate_clock(pixclock, &m, &n, &p); n 832 drivers/video/fbdev/pm3fb.c pixclock, (int) m, (int) n, (int) p); n 835 drivers/video/fbdev/pm3fb.c PM3_WRITE_DAC_REG(par, PM3RD_DClk0FeedbackScale, n); n 84 drivers/video/fbdev/riva/fbdev.c #define SetBit(n) (1<<(n)) n 1249 drivers/video/fbdev/riva/riva_hw.c uninitialized_var(n), uninitialized_var(p); n 1261 drivers/video/fbdev/riva/riva_hw.c if (!CalcVClock(dotClock, &VClk, &m, &n, &p, chip)) n 1329 drivers/video/fbdev/riva/riva_hw.c state->vpll = (p << 16) | (n << 8) | m; n 461 drivers/video/fbdev/s3fb.c u16 m, n, r; n 466 drivers/video/fbdev/s3fb.c 1000000000 / pixclock, &m, &n, &r, info->node); n 483 drivers/video/fbdev/s3fb.c vga_wseq(par->state.vgabase, 0x12, (n - 2) | ((r & 
3) << 6)); /* n and two bits of r */ n 486 drivers/video/fbdev/s3fb.c vga_wseq(par->state.vgabase, 0x12, (n - 2) | (r << 5)); n 550 drivers/video/fbdev/s3fb.c u16 m, n, r; n 590 drivers/video/fbdev/s3fb.c rv = svga_compute_pll(&s3_pll, PICOS2KHZ(var->pixclock), &m, &n, &r, n 975 drivers/video/fbdev/savage/savagefb_driver.c unsigned int m, n, r; n 1081 drivers/video/fbdev/savage/savagefb_driver.c SavageCalcClock(dclk, 1, 1, 127, 0, 4, 180000, 360000, &m, &n, &r); n 1094 drivers/video/fbdev/savage/savagefb_driver.c reg->SR12 = (r << 6) | (n & 0x3f); n 1096 drivers/video/fbdev/savage/savagefb_driver.c reg->SR29 = (r & 4) | (m & 0x100) >> 5 | (n & 0x40) >> 2; n 1810 drivers/video/fbdev/savage/savagefb_driver.c unsigned char config1, m, n, n1, n2, sr8, cr3f, cr66 = 0, tmp; n 1927 drivers/video/fbdev/savage/savagefb_driver.c n = vga_in8(0x3c5, par); n 1933 drivers/video/fbdev/savage/savagefb_driver.c n1 = n & 0x1f; n 1934 drivers/video/fbdev/savage/savagefb_driver.c n2 = (n >> 5) & 0x03; n 12 drivers/video/fbdev/sbuslib.h #define SBUS_MMAP_FBSIZE(n) (-n) n 41 drivers/video/fbdev/sh_mobile_lcdcfb.c #define LDBCR_UPC(n) (1 << ((n) + 16)) n 42 drivers/video/fbdev/sh_mobile_lcdcfb.c #define LDBCR_UPF(n) (1 << ((n) + 8)) n 43 drivers/video/fbdev/sh_mobile_lcdcfb.c #define LDBCR_UPD(n) (1 << ((n) + 0)) n 44 drivers/video/fbdev/sh_mobile_lcdcfb.c #define LDBnBSIFR(n) (0xb20 + (n) * 0x20 + 0x00) n 75 drivers/video/fbdev/sh_mobile_lcdcfb.c #define LDBnBSSZR(n) (0xb20 + (n) * 0x20 + 0x04) n 80 drivers/video/fbdev/sh_mobile_lcdcfb.c #define LDBnBLOCR(n) (0xb20 + (n) * 0x20 + 0x08) n 85 drivers/video/fbdev/sh_mobile_lcdcfb.c #define LDBnBSMWR(n) (0xb20 + (n) * 0x20 + 0x0c) n 90 drivers/video/fbdev/sh_mobile_lcdcfb.c #define LDBnBSAYR(n) (0xb20 + (n) * 0x20 + 0x10) n 99 drivers/video/fbdev/sh_mobile_lcdcfb.c #define LDBnBSACR(n) (0xb20 + (n) * 0x20 + 0x14) n 108 drivers/video/fbdev/sh_mobile_lcdcfb.c #define LDBnBSAAR(n) (0xb20 + (n) * 0x20 + 0x18) n 117 drivers/video/fbdev/sh_mobile_lcdcfb.c #define LDBnBPPCR(n) (0xb20 + (n) * 0x20 + 0x1c) n 126 drivers/video/fbdev/sh_mobile_lcdcfb.c #define LDBnBBGCL(n) (0xb10 + (n) * 0x04) n 2876 drivers/video/fbdev/sis/init.c unsigned short data, data2, time, i, j, k, m, n, o; n 2929 drivers/video/fbdev/sis/init.c for(n = 0; n < 3; n++) { n 2931 drivers/video/fbdev/sis/init.c SiS_WriteDAC(SiS_Pr, DACData, sf, n, table[di], table[bx], table[si]); n 2936 drivers/video/fbdev/sis/init.c SiS_WriteDAC(SiS_Pr, DACData, sf, n, table[di], table[si], table[bx]); n 293 drivers/video/fbdev/sstfb.c int m, m2, n, p, best_err, fout; n 304 drivers/video/fbdev/sstfb.c for (n = 1; n < 32; n++) { n 306 drivers/video/fbdev/sstfb.c m2 = (2 * freq * (1 << p) * (n + 2) ) / DAC_FREF - 4 ; n 311 drivers/video/fbdev/sstfb.c fout = (DAC_FREF * (m + 2)) / ((1 << p) * (n + 2)); n 313 drivers/video/fbdev/sstfb.c best_n = n; n 323 drivers/video/fbdev/sstfb.c t->n = best_n; n 325 drivers/video/fbdev/sstfb.c *freq_out = (DAC_FREF * (t->m + 2)) / ((1 << t->p) * (t->n + 2)); n 327 drivers/video/fbdev/sstfb.c t->m, t->n, t->p, *freq_out); n 997 drivers/video/fbdev/sstfb.c dac_i_write(DACREG_AC1_I, t->p << 6 | t->n); n 1003 drivers/video/fbdev/sstfb.c dac_i_write(DACREG_BD1_I, t->p << 6 | t->n); n 1032 drivers/video/fbdev/sstfb.c sst_dac_write(DACREG_ICS_PLLDATA, t->p << 5 | t->n); n 1043 drivers/video/fbdev/sstfb.c sst_dac_write(DACREG_ICS_PLLDATA, t->p << 5 | t->n); n 291 drivers/video/fbdev/tdfxfb.c int m, n, k, best_m, best_n, best_k, best_error; n 306 drivers/video/fbdev/tdfxfb.c for (n = max(0, 
n_estimated); n 307 drivers/video/fbdev/tdfxfb.c n <= min(255, n_estimated + 1); n 308 drivers/video/fbdev/tdfxfb.c n++) { n 313 drivers/video/fbdev/tdfxfb.c int f = (fref * (n + 2) / (m + 2)) >> k; n 322 drivers/video/fbdev/tdfxfb.c best_n = n; n 330 drivers/video/fbdev/tdfxfb.c n = best_n; n 333 drivers/video/fbdev/tdfxfb.c *freq_out = (fref * (n + 2) / (m + 2)) >> k; n 335 drivers/video/fbdev/tdfxfb.c return (n << 8) | (m << 2) | k; n 429 drivers/video/fbdev/tgafb.c int n, shift, base, min_diff, target; n 479 drivers/video/fbdev/tgafb.c for (n = base < 7 ? 7 : base; n < base + target && n < 449; n++) { n 480 drivers/video/fbdev/tgafb.c m = ((n + 3) / 7) - 1; n 485 drivers/video/fbdev/tgafb.c m = (n / 6) - 1; n 486 drivers/video/fbdev/tgafb.c if ((a = n % 6)) n 487 drivers/video/fbdev/tgafb.c DIFFCHECK(n); n 835 drivers/video/fbdev/tmiofb.c #define CCR_PR(n) printk(KERN_DEBUG "\t" #n " = \t%04x\n",\ n 836 drivers/video/fbdev/tmiofb.c tmio_ioread16(par->ccr + CCR_ ## n)); n 850 drivers/video/fbdev/tmiofb.c #define LCR_PR(n) printk(KERN_DEBUG "\t" #n " = \t%04x\n",\ n 851 drivers/video/fbdev/tmiofb.c tmio_ioread16(par->lcr + LCR_ ## n)); n 838 drivers/video/fbdev/tridentfb.c int m, n, k; n 847 drivers/video/fbdev/tridentfb.c n = ((m + 2) << shift) - 8; n 848 drivers/video/fbdev/tridentfb.c for (n = (n < 0 ? 0 : n); n < 122; n++) { n 849 drivers/video/fbdev/tridentfb.c fi = ((14318l * (n + 8)) / (m + 2)) >> k; n 853 drivers/video/fbdev/tridentfb.c best_n = n; n 42 drivers/video/fbdev/via/via_aux.c struct via_aux_drv *pos, *n; n 47 drivers/video/fbdev/via/via_aux.c list_for_each_entry_safe(pos, n, &bus->drivers, chain) { n 340 drivers/video/fbdev/via/viamode.c const struct fb_videomode *modes, int n, n 346 drivers/video/fbdev/via/viamode.c for (i = 0; i < n; i++) { n 253 drivers/video/fbdev/vt8623fb.c u16 m, n, r; n 257 drivers/video/fbdev/vt8623fb.c rv = svga_compute_pll(&vt8623_pll, 1000000000 / pixclock, &m, &n, &r, info->node); n 268 drivers/video/fbdev/vt8623fb.c vga_wseq(par->state.vgabase, 0x46, (n | (r << 6))); n 821 drivers/virt/fsl_hypervisor.c struct doorbell_isr *dbisr, *n; n 902 drivers/virt/fsl_hypervisor.c list_for_each_entry_safe(dbisr, n, &isr_list, list) { n 920 drivers/virt/fsl_hypervisor.c struct doorbell_isr *dbisr, *n; n 922 drivers/virt/fsl_hypervisor.c list_for_each_entry_safe(dbisr, n, &isr_list, list) { n 340 drivers/virtio/virtio_mmio.c struct virtqueue *vq, *n; n 342 drivers/virtio/virtio_mmio.c list_for_each_entry_safe(vq, n, &vdev->vqs, list) n 229 drivers/virtio/virtio_pci_common.c struct virtqueue *vq, *n; n 232 drivers/virtio/virtio_pci_common.c list_for_each_entry_safe(vq, n, &vdev->vqs, list) { n 427 drivers/virtio/virtio_ring.c unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx; n 481 drivers/virtio/virtio_ring.c for (n = 0; n < out_sgs; n++) { n 482 drivers/virtio/virtio_ring.c for (sg = sgs[n]; sg; sg = sg_next(sg)) { n 494 drivers/virtio/virtio_ring.c for (; n < (out_sgs + in_sgs); n++) { n 495 drivers/virtio/virtio_ring.c for (sg = sgs[n]; sg; sg = sg_next(sg)) { n 575 drivers/virtio/virtio_ring.c for (n = 0; n < total_sg; n++) { n 989 drivers/virtio/virtio_ring.c unsigned int i, n, err_idx; n 1007 drivers/virtio/virtio_ring.c for (n = 0; n < out_sgs + in_sgs; n++) { n 1008 drivers/virtio/virtio_ring.c for (sg = sgs[n]; sg; sg = sg_next(sg)) { n 1009 drivers/virtio/virtio_ring.c addr = vring_map_one_sg(vq, sg, n < out_sgs ? n 1014 drivers/virtio/virtio_ring.c desc[i].flags = cpu_to_le16(n < out_sgs ? 
n 1055 drivers/virtio/virtio_ring.c n = head + 1; n 1056 drivers/virtio/virtio_ring.c if (n >= vq->packed.vring.num) { n 1057 drivers/virtio/virtio_ring.c n = 0; n 1063 drivers/virtio/virtio_ring.c vq->packed.next_avail_idx = n; n 1103 drivers/virtio/virtio_ring.c unsigned int i, n, c, descs_used, err_idx; n 1146 drivers/virtio/virtio_ring.c for (n = 0; n < out_sgs + in_sgs; n++) { n 1147 drivers/virtio/virtio_ring.c for (sg = sgs[n]; sg; sg = sg_next(sg)) { n 1148 drivers/virtio/virtio_ring.c dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ? n 1155 drivers/virtio/virtio_ring.c (n < out_sgs ? 0 : VRING_DESC_F_WRITE)); n 1219 drivers/virtio/virtio_ring.c for (n = 0; n < total_sg; n++) { n 1600 drivers/vme/bridges/vme_ca91cx42.c struct list_head *pos = NULL, *n; n 1798 drivers/vme/bridges/vme_ca91cx42.c list_for_each_safe(pos, n, &ca91cx42_bridge->lm_resources) { n 1805 drivers/vme/bridges/vme_ca91cx42.c list_for_each_safe(pos, n, &ca91cx42_bridge->dma_resources) { n 1812 drivers/vme/bridges/vme_ca91cx42.c list_for_each_safe(pos, n, &ca91cx42_bridge->slave_resources) { n 1819 drivers/vme/bridges/vme_ca91cx42.c list_for_each_safe(pos, n, &ca91cx42_bridge->master_resources) { n 1845 drivers/vme/bridges/vme_ca91cx42.c struct list_head *pos = NULL, *n; n 1882 drivers/vme/bridges/vme_ca91cx42.c list_for_each_safe(pos, n, &ca91cx42_bridge->lm_resources) { n 1889 drivers/vme/bridges/vme_ca91cx42.c list_for_each_safe(pos, n, &ca91cx42_bridge->dma_resources) { n 1896 drivers/vme/bridges/vme_ca91cx42.c list_for_each_safe(pos, n, &ca91cx42_bridge->slave_resources) { n 1903 drivers/vme/bridges/vme_ca91cx42.c list_for_each_safe(pos, n, &ca91cx42_bridge->master_resources) { n 385 drivers/vme/bridges/vme_fake.c struct list_head *pos = NULL, *n; n 391 drivers/vme/bridges/vme_fake.c list_for_each_safe(pos, n, &fake_bridge->lm_resources) { n 1067 drivers/vme/bridges/vme_fake.c struct list_head *pos = NULL, *n; n 1212 drivers/vme/bridges/vme_fake.c list_for_each_safe(pos, n, &fake_bridge->lm_resources) { n 1219 drivers/vme/bridges/vme_fake.c list_for_each_safe(pos, n, &fake_bridge->slave_resources) { n 1226 drivers/vme/bridges/vme_fake.c list_for_each_safe(pos, n, &fake_bridge->master_resources) { n 2279 drivers/vme/bridges/vme_tsi148.c struct list_head *pos = NULL, *n; n 2519 drivers/vme/bridges/vme_tsi148.c list_for_each_safe(pos, n, &tsi148_bridge->lm_resources) { n 2526 drivers/vme/bridges/vme_tsi148.c list_for_each_safe(pos, n, &tsi148_bridge->dma_resources) { n 2533 drivers/vme/bridges/vme_tsi148.c list_for_each_safe(pos, n, &tsi148_bridge->slave_resources) { n 2540 drivers/vme/bridges/vme_tsi148.c list_for_each_safe(pos, n, &tsi148_bridge->master_resources) { n 371 drivers/w1/w1.c struct list_head *ent, *n; n 376 drivers/w1/w1.c list_for_each_safe(ent, n, &md->slist) { n 23 drivers/w1/w1_family.c struct list_head *ent, *n; n 28 drivers/w1/w1_family.c list_for_each_safe(ent, n, &w1_families) { n 56 drivers/w1/w1_family.c struct list_head *ent, *n; n 60 drivers/w1/w1_family.c list_for_each_safe(ent, n, &w1_families) { n 88 drivers/w1/w1_family.c struct list_head *ent, *n; n 92 drivers/w1/w1_family.c list_for_each_safe(ent, n, &w1_families) { n 169 drivers/watchdog/machzwd.c static inline void zf_set_timer(unsigned short new, unsigned char n) n 171 drivers/watchdog/machzwd.c switch (n) { n 26 drivers/watchdog/mtk_wdt.c #define WDT_LENGTH_TIMEOUT(n) ((n) << 5) n 67 drivers/watchdog/watchdog_core.c struct list_head *p, *n; n 70 drivers/watchdog/watchdog_core.c list_for_each_safe(p, n, 
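The virtio_ring fragments above walk the caller's scatterlists in two passes, device-readable ("out") buffers first and device-writable ("in") buffers second, marking only the second group with a write flag, and they advance the ring index with an explicit wrap back to zero. The sketch below mimics just the flagging and the wrap with plain arrays; the descriptor layout and flag value are invented and are not the virtio ABI.

```c
#include <stdio.h>

#define DESC_F_WRITE 0x2u     /* illustrative flag: device may write */
#define RING_NUM     8u

struct desc {
	const void *addr;
	unsigned len;
	unsigned flags;
};

/* Fill descriptors for out_n device-readable buffers followed by in_n
 * device-writable ones, starting at 'head' and wrapping at RING_NUM. */
static unsigned add_buffers(struct desc *ring, unsigned head,
			    const void **bufs, const unsigned *lens,
			    unsigned out_n, unsigned in_n)
{
	unsigned i = head;

	for (unsigned n = 0; n < out_n + in_n; n++) {
		ring[i].addr = bufs[n];
		ring[i].len = lens[n];
		ring[i].flags = n < out_n ? 0 : DESC_F_WRITE;
		i = (i + 1 == RING_NUM) ? 0 : i + 1;   /* explicit wrap */
	}
	return i;                /* next free slot */
}

int main(void)
{
	struct desc ring[RING_NUM] = { 0 };
	char req[16], resp[64];
	const void *bufs[] = { req, resp };
	unsigned lens[] = { sizeof(req), sizeof(resp) };

	unsigned next = add_buffers(ring, 6, bufs, lens, 1, 1);

	printf("next=%u slot6.flags=%u slot7.flags=%u\n",
	       next, ring[6].flags, ring[7].flags);
	return 0;
}
```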
&wtd_deferred_reg_list) { n 569 drivers/watchdog/watchdog_dev.c int n) n 27 drivers/watchdog/zx2967_wdt.c #define ZX2967_WDT_CFG_DIV(n) ((((n) & 0xff) - 1) << 8) n 114 drivers/xen/gntalloc.c struct gntalloc_gref *gref, *n; n 115 drivers/xen/gntalloc.c list_for_each_entry_safe(gref, n, &gref_list, next_gref) { n 351 drivers/xen/gntalloc.c struct gntalloc_gref *gref, *n; n 368 drivers/xen/gntalloc.c n = list_entry(gref->next_file.next, n 372 drivers/xen/gntalloc.c gref = n; n 95 drivers/xen/privcmd.c struct page *p, *n; n 97 drivers/xen/privcmd.c list_for_each_entry_safe(p, n, pages, lru) n 469 drivers/xen/pvcalls-back.c struct sock_mapping *map, *n; n 477 drivers/xen/pvcalls-back.c list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) { n 975 drivers/xen/pvcalls-back.c struct sock_mapping *map, *n; n 984 drivers/xen/pvcalls-back.c list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) { n 1089 drivers/xen/pvcalls-front.c struct sock_mapping *map = NULL, *n; n 1097 drivers/xen/pvcalls-front.c list_for_each_entry_safe(map, n, &bedata->socket_mappings, list) { n 1108 drivers/xen/pvcalls-front.c list_for_each_entry_safe(map, n, &bedata->socket_mappings, list) { n 134 drivers/xen/xen-acpi-memhotplug.c struct acpi_memory_info *info, *n; n 143 drivers/xen/xen-acpi-memhotplug.c list_for_each_entry_safe(info, n, &mem_device->res_list, list) n 214 drivers/xen/xen-scsiback.c int i = free_pages_num + num, n = num; n 219 drivers/xen/xen-scsiback.c n = min(num, i - scsiback_max_buffer_pages); n 220 drivers/xen/xen-scsiback.c gnttab_free_pages(n, page + num - n); n 221 drivers/xen/xen-scsiback.c n = num - n; n 224 drivers/xen/xen-scsiback.c for (i = 0; i < n; i++) n 226 drivers/xen/xen-scsiback.c free_pages_num += n; n 91 drivers/zorro/names.c char *n = name + sprintf(name, "%s %s", manuf_p->name, prod_p->name); n 95 drivers/zorro/names.c sprintf(n, " (#%d)", nr); n 81 fs/9p/fid.c int n = 0, i; n 86 fs/9p/fid.c n++; n 88 fs/9p/fid.c wnames = kmalloc_array(n, sizeof(char *), GFP_KERNEL); n 92 fs/9p/fid.c for (ds = dentry, i = (n-1); i >= 0; i--, ds = ds->d_parent) n 96 fs/9p/fid.c return n; n 106 fs/9p/fid.c int i, n, l, clone, access; n 158 fs/9p/fid.c n = build_path_from_dentry(v9ses, dentry, &wnames); n 159 fs/9p/fid.c if (n < 0) { n 160 fs/9p/fid.c fid = ERR_PTR(n); n 165 fs/9p/fid.c while (i < n) { n 166 fs/9p/fid.c l = min(n - i, P9_MAXWELEM); n 556 fs/9p/v9fs.c ssize_t n = 0, count = 0, limit = PAGE_SIZE; n 562 fs/9p/v9fs.c n = snprintf(buf, limit, "%s\n", v9ses->cachetag); n 563 fs/9p/v9fs.c if (n < 0) { n 564 fs/9p/v9fs.c count = n; n 568 fs/9p/v9fs.c count += n; n 569 fs/9p/v9fs.c limit -= n; n 239 fs/9p/vfs_addr.c ssize_t n; n 242 fs/9p/vfs_addr.c n = p9_client_write(file->private_data, pos, iter, &err); n 243 fs/9p/vfs_addr.c if (n) { n 246 fs/9p/vfs_addr.c if (pos + n > i_size) n 247 fs/9p/vfs_addr.c inode_add_bytes(inode, pos + n - i_size); n 250 fs/9p/vfs_addr.c n = p9_client_read(file->private_data, pos, iter, &err); n 252 fs/9p/vfs_addr.c return n ? 
n : err; n 54 fs/9p/vfs_dentry.c struct hlist_node *p, *n; n 57 fs/9p/vfs_dentry.c hlist_for_each_safe(p, n, (struct hlist_head *)&dentry->d_fsdata) n 110 fs/9p/vfs_dir.c int n; n 112 fs/9p/vfs_dir.c n = p9_client_read(file->private_data, ctx->pos, &to, n 116 fs/9p/vfs_dir.c if (n == 0) n 120 fs/9p/vfs_dir.c rdir->tail = n; n 210 fs/affs/super.c int token, n, option; n 217 fs/affs/super.c if (match_int(&args[0], &n)) n 219 fs/affs/super.c if (n != 512 && n != 1024 && n != 2048 n 220 fs/affs/super.c && n != 4096) { n 224 fs/affs/super.c *blocksize = n; n 52 fs/afs/cell.c int n, seq = 0, ret = 0; n 88 fs/afs/cell.c n = strncasecmp(cell->name, name, n 90 fs/afs/cell.c if (n == 0) n 91 fs/afs/cell.c n = cell->name_len - namesz; n 92 fs/afs/cell.c if (n < 0) { n 94 fs/afs/cell.c } else if (n > 0) { n 231 fs/afs/cell.c int ret, n; n 267 fs/afs/cell.c n = strncasecmp(cursor->name, name, n 269 fs/afs/cell.c if (n == 0) n 270 fs/afs/cell.c n = cursor->name_len - namesz; n 271 fs/afs/cell.c if (n < 0) n 273 fs/afs/cell.c else if (n > 0) n 219 fs/afs/dir.c int nr_pages, nr_inline, i, n; n 266 fs/afs/dir.c n = find_get_pages_contig(dvnode->vfs_inode.i_mapping, i, n 269 fs/afs/dir.c _debug("find %u at %u/%u", n, i, req->nr_pages); n 270 fs/afs/dir.c if (n == 0) { n 291 fs/afs/dir.c i += n; n 27 fs/afs/dir_edit.c int bit, n; n 43 fs/afs/dir_edit.c n = ffz(bitmap); n 45 fs/afs/dir_edit.c n = ((u32)bitmap) != 0 ? n 48 fs/afs/dir_edit.c bitmap >>= n; n 49 fs/afs/dir_edit.c bit += n; n 57 fs/afs/dir_edit.c n = __ffs(bitmap); n 58 fs/afs/dir_edit.c bitmap >>= n; n 59 fs/afs/dir_edit.c bit += n; n 115 fs/afs/dir_edit.c int d, len, n; n 143 fs/afs/dir_edit.c n = round_up(12 + len + 1 + 4, AFS_DIR_DIRENT_SIZE); n 144 fs/afs/dir_edit.c n /= AFS_DIR_DIRENT_SIZE; n 145 fs/afs/dir_edit.c d += n - 1; n 449 fs/afs/file.c int ret, n, i; n 456 fs/afs/file.c n = 1; n 462 fs/afs/file.c n++; n 465 fs/afs/file.c req = kzalloc(struct_size(req, array, n), GFP_NOFS); n 500 fs/afs/file.c } while (req->nr_pages < n); n 1065 fs/afs/internal.h #define afs_stat_v(vnode, n) __afs_stat(&afs_v2net(vnode)->n) n 168 fs/afs/rxrpc.c int n = atomic_dec_return(&call->usage); n 171 fs/afs/rxrpc.c trace_afs_call(call, afs_call_trace_put, n, o, n 174 fs/afs/rxrpc.c ASSERTCMP(n, >=, 0); n 175 fs/afs/rxrpc.c if (n == 0) { n 287 fs/afs/rxrpc.c unsigned int nr, n, i, to, bytes = 0; n 290 fs/afs/rxrpc.c n = find_get_pages_contig(call->mapping, first, nr, pages); n 291 fs/afs/rxrpc.c ASSERTCMP(n, ==, nr); n 887 fs/afs/rxrpc.c int n; n 902 fs/afs/rxrpc.c n = rxrpc_kernel_send_data(net->socket, call->rxcall, &msg, len, n 904 fs/afs/rxrpc.c if (n >= 0) { n 910 fs/afs/rxrpc.c if (n == -ENOMEM) { n 59 fs/afs/vlclient.c int n = entry->nr_servers; n 66 fs/afs/vlclient.c entry->fs_mask[n] |= AFS_VOL_VTM_RW; n 68 fs/afs/vlclient.c entry->fs_mask[n] |= AFS_VOL_VTM_BAK; n 71 fs/afs/vlclient.c entry->fs_mask[n] |= AFS_VOL_VTM_RO; n 72 fs/afs/vlclient.c if (!entry->fs_mask[n]) n 76 fs/afs/vlclient.c uuid = (struct afs_uuid *)&entry->fs_server[n]; n 459 fs/afs/write.c unsigned n, offset, to, f, t; n 493 fs/afs/write.c n = final_page - start + 1; n 494 fs/afs/write.c if (n > ARRAY_SIZE(pages)) n 495 fs/afs/write.c n = ARRAY_SIZE(pages); n 496 fs/afs/write.c n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages); n 497 fs/afs/write.c _debug("fgpc %u", n); n 498 fs/afs/write.c if (n == 0) n 502 fs/afs/write.c put_page(pages[--n]); n 503 fs/afs/write.c } while (n > 0); n 507 fs/afs/write.c for (loop = 0; loop < n; loop++) { n 542 fs/afs/write.c if (loop 
< n) { n 543 fs/afs/write.c for (; loop < n; loop++) n 636 fs/afs/write.c int ret, n; n 641 fs/afs/write.c n = find_get_pages_range_tag(mapping, &index, end, n 643 fs/afs/write.c if (!n) n 38 fs/afs/yfsclient.c static __be32 *xdr_encode_u32(__be32 *bp, u32 n) n 40 fs/afs/yfsclient.c *bp++ = htonl(n); n 44 fs/afs/yfsclient.c static __be32 *xdr_encode_u64(__be32 *bp, u64 n) n 48 fs/afs/yfsclient.c *x = u64_to_xdr(n); n 217 fs/befs/debug.c befs_block_run n = fsrun_to_cpu(sb, run); n 219 fs/befs/debug.c befs_debug(sb, "[%u, %hu, %hu]", n.allocation_group, n.start, n.len); n 16 fs/befs/endian.h fs64_to_cpu(const struct super_block *sb, fs64 n) n 19 fs/befs/endian.h return le64_to_cpu((__force __le64)n); n 21 fs/befs/endian.h return be64_to_cpu((__force __be64)n); n 25 fs/befs/endian.h cpu_to_fs64(const struct super_block *sb, u64 n) n 28 fs/befs/endian.h return (__force fs64)cpu_to_le64(n); n 30 fs/befs/endian.h return (__force fs64)cpu_to_be64(n); n 34 fs/befs/endian.h fs32_to_cpu(const struct super_block *sb, fs32 n) n 37 fs/befs/endian.h return le32_to_cpu((__force __le32)n); n 39 fs/befs/endian.h return be32_to_cpu((__force __be32)n); n 43 fs/befs/endian.h cpu_to_fs32(const struct super_block *sb, u32 n) n 46 fs/befs/endian.h return (__force fs32)cpu_to_le32(n); n 48 fs/befs/endian.h return (__force fs32)cpu_to_be32(n); n 52 fs/befs/endian.h fs16_to_cpu(const struct super_block *sb, fs16 n) n 55 fs/befs/endian.h return le16_to_cpu((__force __le16)n); n 57 fs/befs/endian.h return be16_to_cpu((__force __be16)n); n 61 fs/befs/endian.h cpu_to_fs16(const struct super_block *sb, u16 n) n 64 fs/befs/endian.h return (__force fs16)cpu_to_le16(n); n 66 fs/befs/endian.h return (__force fs16)cpu_to_be16(n); n 72 fs/befs/endian.h fsrun_to_cpu(const struct super_block *sb, befs_disk_block_run n) n 77 fs/befs/endian.h run.allocation_group = le32_to_cpu((__force __le32)n.allocation_group); n 78 fs/befs/endian.h run.start = le16_to_cpu((__force __le16)n.start); n 79 fs/befs/endian.h run.len = le16_to_cpu((__force __le16)n.len); n 81 fs/befs/endian.h run.allocation_group = be32_to_cpu((__force __be32)n.allocation_group); n 82 fs/befs/endian.h run.start = be16_to_cpu((__force __be16)n.start); n 83 fs/befs/endian.h run.len = be16_to_cpu((__force __be16)n.len); n 89 fs/befs/endian.h cpu_to_fsrun(const struct super_block *sb, befs_block_run n) n 94 fs/befs/endian.h run.allocation_group = cpu_to_le32(n.allocation_group); n 95 fs/befs/endian.h run.start = cpu_to_le16(n.start); n 96 fs/befs/endian.h run.len = cpu_to_le16(n.len); n 98 fs/befs/endian.h run.allocation_group = cpu_to_be32(n.allocation_group); n 99 fs/befs/endian.h run.start = cpu_to_be16(n.start); n 100 fs/befs/endian.h run.len = cpu_to_be16(n.len); n 106 fs/befs/endian.h fsds_to_cpu(const struct super_block *sb, const befs_disk_data_stream *n) n 112 fs/befs/endian.h data.direct[i] = fsrun_to_cpu(sb, n->direct[i]); n 114 fs/befs/endian.h data.max_direct_range = fs64_to_cpu(sb, n->max_direct_range); n 115 fs/befs/endian.h data.indirect = fsrun_to_cpu(sb, n->indirect); n 116 fs/befs/endian.h data.max_indirect_range = fs64_to_cpu(sb, n->max_indirect_range); n 117 fs/befs/endian.h data.double_indirect = fsrun_to_cpu(sb, n->double_indirect); n 119 fs/befs/endian.h n-> n 121 fs/befs/endian.h data.size = fs64_to_cpu(sb, n->size); n 1584 fs/binfmt_elf.c unsigned count, size, names_ofs, remaining, n; n 1627 fs/binfmt_elf.c n = (name_curpos + remaining) - filename; n 1629 fs/binfmt_elf.c memmove(name_curpos, filename, n); n 1630 fs/binfmt_elf.c name_curpos += n; 
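The fs/befs/endian.h hits above decode on-disk integers whose byte order is only known once the superblock has been read, so each helper picks a little- or big-endian conversion at run time. Here is a minimal user-space sketch of the same dispatch, using glibc's <endian.h> conversions and a hypothetical struct sb flag in place of the BeFS superblock field; it is an illustration, not the kernel helpers themselves.

    /*
     * Sketch only: choose LE vs BE decoding from a flag recorded in a
     * (hypothetical) superblock, mirroring fs64_to_cpu()/fs32_to_cpu()
     * in the listing above.
     */
    #define _DEFAULT_SOURCE            /* expose le32toh()/be32toh() on glibc */
    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    struct sb { int big_endian; };     /* stand-in for the on-disk byte-order flag */

    static uint64_t fs64_to_cpu(const struct sb *sb, uint64_t on_disk)
    {
        return sb->big_endian ? be64toh(on_disk) : le64toh(on_disk);
    }

    static uint32_t fs32_to_cpu(const struct sb *sb, uint32_t on_disk)
    {
        return sb->big_endian ? be32toh(on_disk) : le32toh(on_disk);
    }

    int main(void)
    {
        struct sb le_sb = { 0 }, be_sb = { 1 };
        uint32_t raw = 0x01020304;     /* pretend these 4 bytes came off disk */

        printf("as LE fs: 0x%08x\n", (unsigned)fs32_to_cpu(&le_sb, raw));
        printf("as BE fs: 0x%08x\n", (unsigned)fs32_to_cpu(&be_sb, raw));
        return 0;
    }

The real helpers additionally carry __force casts so sparse can check that raw disk values never mix with CPU-order values; that annotation layer is dropped in this sketch.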
n 1645 fs/binfmt_elf.c n = current->mm->map_count - count; n 1646 fs/binfmt_elf.c if (n != 0) { n 1647 fs/binfmt_elf.c unsigned shift_bytes = n * 3 * sizeof(data[0]); n 1727 fs/binfmt_elf.c for (i = 1; i < view->n; ++i) { n 1786 fs/binfmt_elf.c for (i = 0; i < view->n; ++i) n 768 fs/btrfs/backref.c struct rb_node *n; n 776 fs/btrfs/backref.c for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) { n 777 fs/btrfs/backref.c node = rb_entry(n, struct btrfs_delayed_ref_node, n 195 fs/btrfs/block-group.c struct rb_node *n; n 199 fs/btrfs/block-group.c n = info->block_group_cache_tree.rb_node; n 201 fs/btrfs/block-group.c while (n) { n 202 fs/btrfs/block-group.c cache = rb_entry(n, struct btrfs_block_group_cache, n 210 fs/btrfs/block-group.c n = n->rb_left; n 216 fs/btrfs/block-group.c n = n->rb_right; n 3098 fs/btrfs/block-group.c struct rb_node *n; n 3120 fs/btrfs/block-group.c while ((n = rb_last(&info->block_group_cache_tree)) != NULL) { n 3121 fs/btrfs/block-group.c block_group = rb_entry(n, struct btrfs_block_group_cache, n 254 fs/btrfs/check-integrity.c static void btrfsic_block_link_init(struct btrfsic_block_link *n); n 256 fs/btrfs/check-integrity.c static void btrfsic_block_link_free(struct btrfsic_block_link *n); n 1242 fs/btrfs/compression.c static inline u32 ilog2_w(u64 n) n 1244 fs/btrfs/compression.c return ilog2(n * n * n * n); n 1223 fs/btrfs/ctree.c u32 n; n 1230 fs/btrfs/ctree.c n = btrfs_header_nritems(eb); n 1240 fs/btrfs/ctree.c BUG_ON(tm->slot < n); n 1248 fs/btrfs/ctree.c n++; n 1251 fs/btrfs/ctree.c BUG_ON(tm->slot >= n); n 1259 fs/btrfs/ctree.c n--; n 1287 fs/btrfs/ctree.c btrfs_set_header_nritems(eb, n); n 1952 fs/btrfs/delayed-inode.c int i, n; n 1956 fs/btrfs/delayed-inode.c n = radix_tree_gang_lookup(&root->delayed_nodes_tree, n 1959 fs/btrfs/delayed-inode.c if (!n) { n 1964 fs/btrfs/delayed-inode.c inode_id = delayed_nodes[n - 1]->inode_id + 1; n 1965 fs/btrfs/delayed-inode.c for (i = 0; i < n; i++) { n 1975 fs/btrfs/delayed-inode.c for (i = 0; i < n; i++) { n 344 fs/btrfs/delayed-ref.c struct rb_node *n; n 347 fs/btrfs/delayed-ref.c n = rb_first_cached(&dr->href_root); n 348 fs/btrfs/delayed-ref.c if (!n) n 351 fs/btrfs/delayed-ref.c entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node); n 366 fs/btrfs/delayed-ref.c struct rb_node *n; n 369 fs/btrfs/delayed-ref.c n = root->rb_node; n 371 fs/btrfs/delayed-ref.c while (n) { n 372 fs/btrfs/delayed-ref.c entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node); n 375 fs/btrfs/delayed-ref.c n = n->rb_left; n 377 fs/btrfs/delayed-ref.c n = n->rb_right; n 383 fs/btrfs/delayed-ref.c n = rb_next(&entry->href_node); n 384 fs/btrfs/delayed-ref.c if (!n) n 386 fs/btrfs/delayed-ref.c entry = rb_entry(n, struct btrfs_delayed_ref_head, n 4284 fs/btrfs/disk-io.c struct rb_node *n; n 4293 fs/btrfs/disk-io.c while ((n = rb_first_cached(&head->ref_tree)) != NULL) { n 4294 fs/btrfs/disk-io.c ref = rb_entry(n, struct btrfs_delayed_ref_node, n 2081 fs/btrfs/extent-tree.c struct rb_node *n = root->rb_node; n 2087 fs/btrfs/extent-tree.c n = rb_first(root); n 2088 fs/btrfs/extent-tree.c if (n) { n 2089 fs/btrfs/extent-tree.c entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); n 2092 fs/btrfs/extent-tree.c n = rb_last(root); n 2093 fs/btrfs/extent-tree.c if (n) { n 2094 fs/btrfs/extent-tree.c entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); n 2097 fs/btrfs/extent-tree.c n = root->rb_node; n 2099 fs/btrfs/extent-tree.c while (n) { n 2100 fs/btrfs/extent-tree.c entry = rb_entry(n, struct 
btrfs_delayed_ref_node, rb_node); n 2106 fs/btrfs/extent-tree.c n = n->rb_left; n 2108 fs/btrfs/extent-tree.c n = n->rb_right; n 387 fs/btrfs/extent_io.c struct rb_node **n = &root->rb_node; n 393 fs/btrfs/extent_io.c while (*n) { n 394 fs/btrfs/extent_io.c prev = *n; n 399 fs/btrfs/extent_io.c n = &(*n)->rb_left; n 401 fs/btrfs/extent_io.c n = &(*n)->rb_right; n 403 fs/btrfs/extent_io.c return *n; n 407 fs/btrfs/extent_io.c *p_ret = n; n 149 fs/btrfs/extent_map.c struct rb_node *n = root->rb_node; n 155 fs/btrfs/extent_map.c while (n) { n 156 fs/btrfs/extent_map.c entry = rb_entry(n, struct extent_map, rb_node); n 157 fs/btrfs/extent_map.c prev = n; n 161 fs/btrfs/extent_map.c n = n->rb_left; n 163 fs/btrfs/extent_map.c n = n->rb_right; n 165 fs/btrfs/extent_map.c return n; n 640 fs/btrfs/free-space-cache.c struct rb_node *n; n 644 fs/btrfs/free-space-cache.c for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) { n 645 fs/btrfs/free-space-cache.c e = rb_entry(n, struct btrfs_free_space, offset_index); n 675 fs/btrfs/free-space-cache.c struct btrfs_free_space *e, *n; n 804 fs/btrfs/free-space-cache.c list_for_each_entry_safe(e, n, &bitmaps, list) { n 1511 fs/btrfs/free-space-cache.c struct rb_node *n = ctl->free_space_offset.rb_node; n 1516 fs/btrfs/free-space-cache.c if (!n) { n 1521 fs/btrfs/free-space-cache.c entry = rb_entry(n, struct btrfs_free_space, offset_index); n 1525 fs/btrfs/free-space-cache.c n = n->rb_left; n 1527 fs/btrfs/free-space-cache.c n = n->rb_right; n 1542 fs/btrfs/free-space-cache.c n = rb_next(n); n 1543 fs/btrfs/free-space-cache.c if (!n) n 1545 fs/btrfs/free-space-cache.c entry = rb_entry(n, struct btrfs_free_space, offset_index); n 1557 fs/btrfs/free-space-cache.c n = rb_prev(&entry->offset_index); n 1558 fs/btrfs/free-space-cache.c if (n) { n 1559 fs/btrfs/free-space-cache.c prev = rb_entry(n, struct btrfs_free_space, n 1575 fs/btrfs/free-space-cache.c n = rb_prev(&entry->offset_index); n 1576 fs/btrfs/free-space-cache.c if (n) { n 1577 fs/btrfs/free-space-cache.c entry = rb_entry(n, struct btrfs_free_space, n 1589 fs/btrfs/free-space-cache.c n = rb_prev(&entry->offset_index); n 1590 fs/btrfs/free-space-cache.c if (n) { n 1591 fs/btrfs/free-space-cache.c prev = rb_entry(n, struct btrfs_free_space, n 1615 fs/btrfs/free-space-cache.c n = rb_next(&entry->offset_index); n 1616 fs/btrfs/free-space-cache.c if (!n) n 1618 fs/btrfs/free-space-cache.c entry = rb_entry(n, struct btrfs_free_space, offset_index); n 2493 fs/btrfs/free-space-cache.c struct rb_node *n; n 2497 fs/btrfs/free-space-cache.c for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) { n 2498 fs/btrfs/free-space-cache.c info = rb_entry(n, struct btrfs_free_space, offset_index); n 3686 fs/btrfs/free-space-cache.c struct rb_node *n; n 3703 fs/btrfs/free-space-cache.c n = rb_prev(&info->offset_index); n 3704 fs/btrfs/free-space-cache.c while (n) { n 3705 fs/btrfs/free-space-cache.c tmp = rb_entry(n, struct btrfs_free_space, n 3710 fs/btrfs/free-space-cache.c n = rb_prev(&tmp->offset_index); n 3717 fs/btrfs/free-space-cache.c n = rb_next(&info->offset_index); n 3718 fs/btrfs/free-space-cache.c while (n) { n 3719 fs/btrfs/free-space-cache.c tmp = rb_entry(n, struct btrfs_free_space, n 3724 fs/btrfs/free-space-cache.c n = rb_next(&tmp->offset_index); n 257 fs/btrfs/inode-map.c struct rb_node *n; n 265 fs/btrfs/inode-map.c n = rb_first(rbroot); n 266 fs/btrfs/inode-map.c if (!n) { n 271 fs/btrfs/inode-map.c info = rb_entry(n, struct btrfs_free_space, offset_index); n 299 fs/btrfs/inode-map.c 
struct rb_node *n; n 303 fs/btrfs/inode-map.c n = rb_last(&ctl->free_space_offset); n 304 fs/btrfs/inode-map.c if (!n) { n 308 fs/btrfs/inode-map.c info = rb_entry(n, struct btrfs_free_space, offset_index); n 70 fs/btrfs/ordered-data.c struct rb_node *n = root->rb_node; n 76 fs/btrfs/ordered-data.c while (n) { n 77 fs/btrfs/ordered-data.c entry = rb_entry(n, struct btrfs_ordered_extent, rb_node); n 78 fs/btrfs/ordered-data.c prev = n; n 82 fs/btrfs/ordered-data.c n = n->rb_left; n 84 fs/btrfs/ordered-data.c n = n->rb_right; n 86 fs/btrfs/ordered-data.c return n; n 159 fs/btrfs/qgroup.c static inline struct btrfs_qgroup* unode_aux_to_qgroup(struct ulist_node *n) n 161 fs/btrfs/qgroup.c return (struct btrfs_qgroup *)(uintptr_t)n->aux; n 173 fs/btrfs/qgroup.c struct rb_node *n = fs_info->qgroup_tree.rb_node; n 176 fs/btrfs/qgroup.c while (n) { n 177 fs/btrfs/qgroup.c qgroup = rb_entry(n, struct btrfs_qgroup, node); n 179 fs/btrfs/qgroup.c n = n->rb_left; n 181 fs/btrfs/qgroup.c n = n->rb_right; n 516 fs/btrfs/qgroup.c struct rb_node *n; n 519 fs/btrfs/qgroup.c while ((n = rb_first(&fs_info->qgroup_tree))) { n 520 fs/btrfs/qgroup.c qgroup = rb_entry(n, struct btrfs_qgroup, node); n 521 fs/btrfs/qgroup.c rb_erase(n, &fs_info->qgroup_tree); n 3294 fs/btrfs/qgroup.c struct rb_node *n; n 3299 fs/btrfs/qgroup.c for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) { n 3300 fs/btrfs/qgroup.c qgroup = rb_entry(n, struct btrfs_qgroup, node); n 100 fs/btrfs/ref-verify.c struct rb_node *n; n 103 fs/btrfs/ref-verify.c n = root->rb_node; n 104 fs/btrfs/ref-verify.c while (n) { n 105 fs/btrfs/ref-verify.c entry = rb_entry(n, struct block_entry, node); n 107 fs/btrfs/ref-verify.c n = n->rb_right; n 109 fs/btrfs/ref-verify.c n = n->rb_left; n 189 fs/btrfs/ref-verify.c struct rb_node *n; n 192 fs/btrfs/ref-verify.c n = root->rb_node; n 193 fs/btrfs/ref-verify.c while (n) { n 194 fs/btrfs/ref-verify.c entry = rb_entry(n, struct root_entry, node); n 196 fs/btrfs/ref-verify.c n = n->rb_right; n 198 fs/btrfs/ref-verify.c n = n->rb_left; n 237 fs/btrfs/ref-verify.c struct rb_node *n; n 239 fs/btrfs/ref-verify.c while ((n = rb_first(&be->roots))) { n 240 fs/btrfs/ref-verify.c re = rb_entry(n, struct root_entry, node); n 245 fs/btrfs/ref-verify.c while((n = rb_first(&be->refs))) { n 246 fs/btrfs/ref-verify.c ref = rb_entry(n, struct ref_entry, node); n 636 fs/btrfs/ref-verify.c struct rb_node *n; n 643 fs/btrfs/ref-verify.c for (n = rb_first(&be->refs); n; n = rb_next(n)) { n 644 fs/btrfs/ref-verify.c ref = rb_entry(n, struct ref_entry, node); n 651 fs/btrfs/ref-verify.c for (n = rb_first(&be->roots); n; n = rb_next(n)) { n 652 fs/btrfs/ref-verify.c re = rb_entry(n, struct root_entry, node); n 897 fs/btrfs/ref-verify.c struct rb_node *n; n 903 fs/btrfs/ref-verify.c while ((n = rb_first(&fs_info->block_tree))) { n 904 fs/btrfs/ref-verify.c be = rb_entry(n, struct block_entry, node); n 916 fs/btrfs/ref-verify.c struct rb_node *n; n 922 fs/btrfs/ref-verify.c n = fs_info->block_tree.rb_node; n 923 fs/btrfs/ref-verify.c while (n) { n 924 fs/btrfs/ref-verify.c entry = rb_entry(n, struct block_entry, node); n 926 fs/btrfs/ref-verify.c n = n->rb_right; n 928 fs/btrfs/ref-verify.c n = n->rb_left; n 949 fs/btrfs/ref-verify.c n = &be->node; n 950 fs/btrfs/ref-verify.c while (n) { n 951 fs/btrfs/ref-verify.c be = rb_entry(n, struct block_entry, node); n 952 fs/btrfs/ref-verify.c n = rb_next(n); n 308 fs/btrfs/relocation.c struct rb_node *n = root->rb_node; n 311 fs/btrfs/relocation.c while (n) { n 312 
fs/btrfs/relocation.c entry = rb_entry(n, struct tree_entry, rb_node); n 315 fs/btrfs/relocation.c n = n->rb_left; n 317 fs/btrfs/relocation.c n = n->rb_right; n 319 fs/btrfs/relocation.c return n; n 2892 fs/btrfs/send.c struct rb_node *n = sctx->orphan_dirs.rb_node; n 2895 fs/btrfs/send.c while (n) { n 2896 fs/btrfs/send.c entry = rb_entry(n, struct orphan_dir_info, node); n 2898 fs/btrfs/send.c n = n->rb_left; n 2900 fs/btrfs/send.c n = n->rb_right; n 3061 fs/btrfs/send.c struct rb_node *n = sctx->waiting_dir_moves.rb_node; n 3064 fs/btrfs/send.c while (n) { n 3065 fs/btrfs/send.c entry = rb_entry(n, struct waiting_dir_move, node); n 3067 fs/btrfs/send.c n = n->rb_left; n 3069 fs/btrfs/send.c n = n->rb_right; n 3156 fs/btrfs/send.c struct rb_node *n = sctx->pending_dir_moves.rb_node; n 3159 fs/btrfs/send.c while (n) { n 3160 fs/btrfs/send.c entry = rb_entry(n, struct pending_dir_move, node); n 3162 fs/btrfs/send.c n = n->rb_left; n 3164 fs/btrfs/send.c n = n->rb_right; n 7309 fs/btrfs/send.c struct rb_node *n; n 7312 fs/btrfs/send.c n = rb_first(&sctx->pending_dir_moves); n 7313 fs/btrfs/send.c pm = rb_entry(n, struct pending_dir_move, node); n 7326 fs/btrfs/send.c struct rb_node *n; n 7329 fs/btrfs/send.c n = rb_first(&sctx->waiting_dir_moves); n 7330 fs/btrfs/send.c dm = rb_entry(n, struct waiting_dir_move, node); n 7337 fs/btrfs/send.c struct rb_node *n; n 7340 fs/btrfs/send.c n = rb_first(&sctx->orphan_dirs); n 7341 fs/btrfs/send.c odi = rb_entry(n, struct orphan_dir_info, node); n 4375 fs/btrfs/tree-log.c struct extent_map *em, *n; n 4387 fs/btrfs/tree-log.c list_for_each_entry_safe(em, n, &tree->modified_extents, list) { n 5340 fs/btrfs/tree-log.c struct extent_map *em, *n; n 5361 fs/btrfs/tree-log.c list_for_each_entry_safe(em, n, &em_tree->modified_extents, n 120 fs/btrfs/ulist.c struct rb_node *n = ulist->root.rb_node; n 123 fs/btrfs/ulist.c while (n) { n 124 fs/btrfs/ulist.c u = rb_entry(n, struct ulist_node, rb_node); n 126 fs/btrfs/ulist.c n = n->rb_right; n 128 fs/btrfs/ulist.c n = n->rb_left; n 1866 fs/btrfs/volumes.c struct rb_node *n; n 1871 fs/btrfs/volumes.c n = rb_last(&em_tree->map.rb_root); n 1872 fs/btrfs/volumes.c if (n) { n 1873 fs/btrfs/volumes.c em = rb_entry(n, struct extent_map, rb_node); n 384 fs/btrfs/volumes.h #define map_lookup_size(n) (sizeof(struct map_lookup) + \ n 385 fs/btrfs/volumes.h (sizeof(struct btrfs_bio_stripe) * (n))) n 164 fs/cachefiles/daemon.c int n; n 179 fs/cachefiles/daemon.c n = snprintf(buffer, sizeof(buffer), n 199 fs/cachefiles/daemon.c if (n > buflen) n 202 fs/cachefiles/daemon.c if (copy_to_user(_buffer, buffer, n) != 0) n 205 fs/cachefiles/daemon.c return n; n 1006 fs/ceph/addr.c unsigned j, n = 0; n 1011 fs/ceph/addr.c if (n < j) n 1012 fs/ceph/addr.c pvec.pages[n] = pvec.pages[j]; n 1013 fs/ceph/addr.c n++; n 1015 fs/ceph/addr.c pvec.nr = n; n 2056 fs/ceph/addr.c struct rb_node *n; n 2059 fs/ceph/addr.c n = rb_first(&mdsc->pool_perm_tree); n 2060 fs/ceph/addr.c perm = rb_entry(n, struct ceph_pool_perm, node); n 2061 fs/ceph/addr.c rb_erase(n, &mdsc->pool_perm_tree); n 436 fs/ceph/caps.c struct rb_node *n = ci->i_caps.rb_node; n 438 fs/ceph/caps.c while (n) { n 439 fs/ceph/caps.c cap = rb_entry(n, struct ceph_cap, ci_node); n 441 fs/ceph/caps.c n = n->rb_left; n 443 fs/ceph/caps.c n = n->rb_right; n 1169 fs/ceph/file.c int n; n 1196 fs/ceph/file.c for (n = 0; n < num_pages; n++) { n 1198 fs/ceph/file.c ret = copy_page_from_iter(pages[n], 0, plen, from); n 158 fs/ceph/inode.c struct rb_node *n = ci->i_fragtree.rb_node; n 160 
fs/ceph/inode.c while (n) { n 162 fs/ceph/inode.c rb_entry(n, struct ceph_inode_frag, node); n 165 fs/ceph/inode.c n = n->rb_left; n 167 fs/ceph/inode.c n = n->rb_right; n 185 fs/ceph/inode.c u32 n; n 208 fs/ceph/inode.c n = ceph_frag_make_child(t, frag->split_by, i); n 209 fs/ceph/inode.c if (ceph_frag_contains_value(n, v)) { n 210 fs/ceph/inode.c t = n; n 535 fs/ceph/inode.c struct rb_node *n; n 573 fs/ceph/inode.c while ((n = rb_first(&ci->i_fragtree)) != NULL) { n 574 fs/ceph/inode.c frag = rb_entry(n, struct ceph_inode_frag, node); n 575 fs/ceph/inode.c rb_erase(n, &ci->i_fragtree); n 3042 fs/ceph/mds_client.c u32 n; n 3044 fs/ceph/mds_client.c ceph_decode_32_safe(p, end, n, bad); n 3045 fs/ceph/mds_client.c while (n-- > 0) { n 4257 fs/ceph/mds_client.c struct rb_node *n; n 4265 fs/ceph/mds_client.c n = rb_next(&req->r_node); n 4266 fs/ceph/mds_client.c if (n) n 4267 fs/ceph/mds_client.c nextreq = rb_entry(n, struct ceph_mds_request, r_node); n 22 fs/ceph/mdsmap.c int n = 0; n 32 fs/ceph/mdsmap.c n++; n 33 fs/ceph/mdsmap.c if (n == 0) n 37 fs/ceph/mdsmap.c n = prandom_u32() % n; n 38 fs/ceph/mdsmap.c for (i = 0; n > 0; i++, n--) n 54 fs/ceph/mdsmap.c u32 n; \ n 56 fs/ceph/mdsmap.c ceph_decode_32_safe(p, end, n, bad); \ n 57 fs/ceph/mdsmap.c need = sizeof(type) * n; \ n 64 fs/ceph/mdsmap.c u32 n; \ n 66 fs/ceph/mdsmap.c ceph_decode_32_safe(p, end, n, bad); \ n 67 fs/ceph/mdsmap.c need = (sizeof(ktype) + sizeof(vtype)) * n; \ n 78 fs/ceph/mdsmap.c u32 n; n 83 fs/ceph/mdsmap.c n = ceph_decode_32(p); n 84 fs/ceph/mdsmap.c while (n-- > 0) { n 109 fs/ceph/mdsmap.c int i, j, n; n 145 fs/ceph/mdsmap.c n = ceph_decode_32(p); n 146 fs/ceph/mdsmap.c for (i = 0; i < n; i++) { n 208 fs/ceph/mdsmap.c i+1, n, global_id, mds, inc, n 255 fs/ceph/mdsmap.c ceph_decode_32_safe(p, end, n, bad); n 256 fs/ceph/mdsmap.c m->m_num_data_pg_pools = n; n 257 fs/ceph/mdsmap.c m->m_data_pg_pools = kcalloc(n, sizeof(u64), GFP_NOFS); n 260 fs/ceph/mdsmap.c ceph_decode_need(p, end, sizeof(u64)*(n+1), bad); n 261 fs/ceph/mdsmap.c for (i = 0; i < n; i++) n 289 fs/ceph/mdsmap.c ceph_decode_32_safe(p, end, n, bad_ext); n 290 fs/ceph/mdsmap.c ceph_decode_need(p, end, sizeof(u32) * n, bad_ext); n 292 fs/ceph/mdsmap.c for (i = 0; i < n; i++) { n 301 fs/ceph/mdsmap.c if (n > m->m_num_mds) { n 303 fs/ceph/mdsmap.c n * sizeof(*m->m_info), n 309 fs/ceph/mdsmap.c m->m_num_mds = n; n 346 fs/ceph/mdsmap.c ceph_decode_32_safe(p, end, n, bad_ext); n 347 fs/ceph/mdsmap.c need = sizeof(u32) * n; n 350 fs/ceph/mdsmap.c m->m_damaged = n > 0; n 143 fs/ceph/snap.c struct rb_node *n = mdsc->snap_realms.rb_node; n 146 fs/ceph/snap.c while (n) { n 147 fs/ceph/snap.c r = rb_entry(n, struct ceph_snap_realm, node); n 149 fs/ceph/snap.c n = n->rb_left; n 151 fs/ceph/snap.c n = n->rb_right; n 441 fs/ceph/snap.c struct ceph_snap_context *n) n 443 fs/ceph/snap.c if (n->num_snaps == 0) n 446 fs/ceph/snap.c return n->snaps[0] > o->seq; n 204 fs/char_dev.c dev_t n, next; n 206 fs/char_dev.c for (n = from; n < to; n = next) { n 207 fs/char_dev.c next = MKDEV(MAJOR(n)+1, 0); n 210 fs/char_dev.c cd = __register_chrdev_region(MAJOR(n), MINOR(n), n 211 fs/char_dev.c next - n, name); n 217 fs/char_dev.c to = n; n 218 fs/char_dev.c for (n = from; n < to; n = next) { n 219 fs/char_dev.c next = MKDEV(MAJOR(n)+1, 0); n 220 fs/char_dev.c kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n)); n 314 fs/char_dev.c dev_t n, next; n 316 fs/char_dev.c for (n = from; n < to; n = next) { n 317 fs/char_dev.c next = MKDEV(MAJOR(n)+1, 0); n 320 fs/char_dev.c 
kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n)); n 210 fs/cifs/cifs_unicode.h UniStrncat(wchar_t *ucs1, const wchar_t *ucs2, size_t n) n 216 fs/cifs/cifs_unicode.h while (n-- && (*ucs1 = *ucs2)) { /* copy s2 after s1 */ n 228 fs/cifs/cifs_unicode.h UniStrncmp(const wchar_t *ucs1, const wchar_t *ucs2, size_t n) n 230 fs/cifs/cifs_unicode.h if (!n) n 232 fs/cifs/cifs_unicode.h while ((*ucs1 == *ucs2) && *ucs1 && --n) { n 243 fs/cifs/cifs_unicode.h UniStrncmp_le(const wchar_t *ucs1, const wchar_t *ucs2, size_t n) n 245 fs/cifs/cifs_unicode.h if (!n) n 247 fs/cifs/cifs_unicode.h while ((*ucs1 == __le16_to_cpu(*ucs2)) && *ucs1 && --n) { n 258 fs/cifs/cifs_unicode.h UniStrncpy(wchar_t *ucs1, const wchar_t *ucs2, size_t n) n 262 fs/cifs/cifs_unicode.h while (n-- && *ucs2) /* Copy the strings */ n 265 fs/cifs/cifs_unicode.h n++; n 266 fs/cifs/cifs_unicode.h while (n--) /* Pad with nulls */ n 275 fs/cifs/cifs_unicode.h UniStrncpy_le(wchar_t *ucs1, const wchar_t *ucs2, size_t n) n 279 fs/cifs/cifs_unicode.h while (n-- && *ucs2) /* Copy the strings */ n 282 fs/cifs/cifs_unicode.h n++; n 283 fs/cifs/cifs_unicode.h while (n--) /* Pad with nulls */ n 65 fs/cifs/cifsroot.c int n = snprintf(root_opts, n 68 fs/cifs/cifsroot.c if (n >= sizeof(root_opts)) { n 113 fs/cifs/dfs_cache.c struct dfs_cache_tgt *t, *n; n 115 fs/cifs/dfs_cache.c list_for_each_entry_safe(t, n, &ce->ce_tlist, t_list) { n 277 fs/cifs/dfs_cache.c #define dump_refs(r, n) n 3421 fs/cifs/file.c size_t n; n 3438 fs/cifs/file.c n = len; n 3441 fs/cifs/file.c n = segment_size; n 3444 fs/cifs/file.c len -= n; n 3448 fs/cifs/file.c page, page_offset, n, iter); n 3451 fs/cifs/file.c result = n; n 3455 fs/cifs/file.c server, page, page_offset, n); n 4146 fs/cifs/file.c size_t n; n 4153 fs/cifs/file.c n = to_read; n 4160 fs/cifs/file.c n = rdata->tailsz = len; n 4192 fs/cifs/file.c page, page_offset, n, iter); n 4195 fs/cifs/file.c result = n; n 4199 fs/cifs/file.c server, page, page_offset, n); n 3900 fs/cifs/smb2ops.c size_t n; n 3902 fs/cifs/smb2ops.c n = len; n 3905 fs/cifs/smb2ops.c n = PAGE_SIZE; n 3906 fs/cifs/smb2ops.c len -= n; n 3911 fs/cifs/smb2ops.c length = cifs_read_page_from_socket(server, page, 0, n); n 727 fs/configfs/dir.c struct config_group *g, *n; n 729 fs/configfs/dir.c list_for_each_entry_safe(g, n, &group->default_groups, group_entry) { n 1705 fs/configfs/dir.c loff_t n = file->f_pos - 2; n 1710 fs/configfs/dir.c while (n && p != &sd->s_children) { n 1715 fs/configfs/dir.c n--; n 831 fs/coredump.c ssize_t n; n 837 fs/coredump.c n = __kernel_write(file, addr, nr, &pos); n 838 fs/coredump.c if (n <= 0) n 841 fs/coredump.c cprm->written += n; n 842 fs/coredump.c cprm->pos += n; n 843 fs/coredump.c nr -= n; n 401 fs/crypto/crypto.c struct fscrypt_ctx *pos, *n; n 403 fs/crypto/crypto.c list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list) n 388 fs/crypto/fname.c const struct fscrypt_digested_name *n = n 390 fs/crypto/fname.c fname->hash = n->hash; n 391 fs/crypto/fname.c fname->minor_hash = n->minor_hash; n 2486 fs/dcache.c unsigned n = dir->i_dir_seq; n 2487 fs/dcache.c if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n) n 2488 fs/dcache.c return n; n 2493 fs/dcache.c static inline void end_dir_add(struct inode *dir, unsigned n) n 2495 fs/dcache.c smp_store_release(&dir->i_dir_seq, n + 2); n 2642 fs/dcache.c unsigned n; n 2646 fs/dcache.c n = start_dir_add(dir); n 2659 fs/dcache.c end_dir_add(dir, n); n 2807 fs/dcache.c unsigned n; n 2835 fs/dcache.c n = start_dir_add(dir); n 2872 fs/dcache.c 
end_dir_add(dir, n); n 426 fs/dlm/debug_fs.c loff_t n = *pos; n 430 fs/dlm/debug_fs.c bucket = n >> 32; n 431 fs/dlm/debug_fs.c entry = n & ((1LL << 32) - 1); n 439 fs/dlm/debug_fs.c if (n == 0) n 472 fs/dlm/debug_fs.c n &= ~((1LL << 32) - 1); n 476 fs/dlm/debug_fs.c n += 1LL << 32; n 492 fs/dlm/debug_fs.c *pos = n; n 506 fs/dlm/debug_fs.c loff_t n = *pos; n 510 fs/dlm/debug_fs.c bucket = n >> 32; n 537 fs/dlm/debug_fs.c n &= ~((1LL << 32) - 1); n 541 fs/dlm/debug_fs.c n += 1LL << 32; n 557 fs/dlm/debug_fs.c *pos = n; n 1085 fs/dlm/lock.c struct rb_node *n; n 1091 fs/dlm/lock.c for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) { n 1092 fs/dlm/lock.c r = rb_entry(n, struct dlm_rsb, res_hashnode); n 1650 fs/dlm/lock.c struct rb_node *n, *next; n 1667 fs/dlm/lock.c for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = next) { n 1668 fs/dlm/lock.c next = rb_next(n); n 1669 fs/dlm/lock.c r = rb_entry(n, struct dlm_rsb, res_hashnode); n 5473 fs/dlm/lock.c struct rb_node *n; n 5477 fs/dlm/lock.c for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) { n 5478 fs/dlm/lock.c r = rb_entry(n, struct dlm_rsb, res_hashnode); n 38 fs/dlm/lockspace.c int n; n 39 fs/dlm/lockspace.c int rc = kstrtoint(buf, 0, &n); n 47 fs/dlm/lockspace.c switch (n) { n 768 fs/dlm/lockspace.c struct rb_node *n; n 822 fs/dlm/lockspace.c while ((n = rb_first(&ls->ls_rsbtbl[i].keep))) { n 823 fs/dlm/lockspace.c rsb = rb_entry(n, struct dlm_rsb, res_hashnode); n 824 fs/dlm/lockspace.c rb_erase(n, &ls->ls_rsbtbl[i].keep); n 828 fs/dlm/lockspace.c while ((n = rb_first(&ls->ls_rsbtbl[i].toss))) { n 829 fs/dlm/lockspace.c rsb = rb_entry(n, struct dlm_rsb, res_hashnode); n 830 fs/dlm/lockspace.c rb_erase(n, &ls->ls_rsbtbl[i].toss); n 73 fs/dlm/lowcomms.c static void cbuf_add(struct cbuf *cb, int n) n 75 fs/dlm/lowcomms.c cb->len += n; n 89 fs/dlm/lowcomms.c static void cbuf_eat(struct cbuf *cb, int n) n 91 fs/dlm/lowcomms.c cb->len -= n; n 92 fs/dlm/lowcomms.c cb->base += n; n 238 fs/dlm/lowcomms.c struct hlist_node *n; n 242 fs/dlm/lowcomms.c hlist_for_each_entry_safe(con, n, &connection_hash[i], list) n 1691 fs/dlm/lowcomms.c struct hlist_node *n; n 1706 fs/dlm/lowcomms.c hlist_for_each_entry_safe(con, n, n 895 fs/dlm/recover.c struct rb_node *n; n 908 fs/dlm/recover.c for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) { n 909 fs/dlm/recover.c r = rb_entry(n, struct dlm_rsb, res_hashnode); n 937 fs/dlm/recover.c struct rb_node *n, *next; n 944 fs/dlm/recover.c for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) { n 945 fs/dlm/recover.c next = rb_next(n); n 946 fs/dlm/recover.c r = rb_entry(n, struct dlm_rsb, res_hashnode); n 947 fs/dlm/recover.c rb_erase(n, &ls->ls_rsbtbl[i].toss); n 427 fs/ecryptfs/messaging.c struct hlist_node *n; n 434 fs/ecryptfs/messaging.c hlist_for_each_entry_safe(daemon, n, n 17 fs/erofs/tagptr.h #define __MAKE_TAGPTR(n) \ n 20 fs/erofs/tagptr.h } tagptr##n##_t; n 36 fs/erofs/tagptr.h #define __tagptr_mask_1(ptr, n) \ n 37 fs/erofs/tagptr.h __builtin_types_compatible_p(typeof(ptr), struct __tagptr##n) ? 
\ n 38 fs/erofs/tagptr.h (1UL << (n)) - 1 : n 82 fs/erofs/tagptr.h typeof(_n) n = (_n); \ n 83 fs/erofs/tagptr.h (void)(&o == &n); \ n 85 fs/erofs/tagptr.h tagptr_init(o, cmpxchg(&ptptr->v, o.v, n.v)); }) n 61 fs/eventfd.c __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n) n 78 fs/eventfd.c if (ULLONG_MAX - ctx->count < n) n 79 fs/eventfd.c n = ULLONG_MAX - ctx->count; n 80 fs/eventfd.c ctx->count += n; n 86 fs/eventfd.c return n; n 23 fs/ext2/acl.c int n, count; n 42 fs/ext2/acl.c for (n=0; n < count; n++) { n 47 fs/ext2/acl.c acl->a_entries[n].e_tag = le16_to_cpu(entry->e_tag); n 48 fs/ext2/acl.c acl->a_entries[n].e_perm = le16_to_cpu(entry->e_perm); n 49 fs/ext2/acl.c switch(acl->a_entries[n].e_tag) { n 62 fs/ext2/acl.c acl->a_entries[n].e_uid = n 70 fs/ext2/acl.c acl->a_entries[n].e_gid = n 96 fs/ext2/acl.c size_t n; n 105 fs/ext2/acl.c for (n=0; n < acl->a_count; n++) { n 106 fs/ext2/acl.c const struct posix_acl_entry *acl_e = &acl->a_entries[n]; n 204 fs/ext2/balloc.c struct rb_node *n; n 209 fs/ext2/balloc.c n = rb_first(root); n 214 fs/ext2/balloc.c while (n) { n 215 fs/ext2/balloc.c rsv = rb_entry(n, struct ext2_reserve_window_node, rsv_node); n 237 fs/ext2/balloc.c n = rb_next(n); n 295 fs/ext2/balloc.c struct rb_node *n = root->rb_node; n 298 fs/ext2/balloc.c if (!n) n 302 fs/ext2/balloc.c rsv = rb_entry(n, struct ext2_reserve_window_node, rsv_node); n 305 fs/ext2/balloc.c n = n->rb_left; n 307 fs/ext2/balloc.c n = n->rb_right; n 310 fs/ext2/balloc.c } while (n); n 318 fs/ext2/balloc.c n = rb_prev(&rsv->rsv_node); n 319 fs/ext2/balloc.c rsv = rb_entry(n, struct ext2_reserve_window_node, rsv_node); n 199 fs/ext2/dir.c static struct page * ext2_get_page(struct inode *dir, unsigned long n, n 203 fs/ext2/dir.c struct page *page = read_mapping_page(mapping, n, NULL); n 270 fs/ext2/dir.c unsigned long n = pos >> PAGE_SHIFT; n 282 fs/ext2/dir.c for ( ; n < npages; n++, offset = 0) { n 285 fs/ext2/dir.c struct page *page = ext2_get_page(inode, n, 0); n 298 fs/ext2/dir.c ctx->pos = (n<<PAGE_SHIFT) + offset; n 304 fs/ext2/dir.c limit = kaddr + ext2_last_byte(inode, n) - EXT2_DIR_REC_LEN(1); n 346 fs/ext2/dir.c unsigned long start, n; n 362 fs/ext2/dir.c n = start; n 365 fs/ext2/dir.c page = ext2_get_page(dir, n, dir_has_error); n 369 fs/ext2/dir.c kaddr += ext2_last_byte(dir, n) - reclen; n 385 fs/ext2/dir.c if (++n >= npages) n 386 fs/ext2/dir.c n = 0; n 388 fs/ext2/dir.c if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) { n 395 fs/ext2/dir.c } while (n != start); n 401 fs/ext2/dir.c ei->i_dir_start_lookup = n; n 472 fs/ext2/dir.c unsigned long n; n 482 fs/ext2/dir.c for (n = 0; n <= npages; n++) { n 485 fs/ext2/dir.c page = ext2_get_page(dir, n, 0); n 491 fs/ext2/dir.c dir_end = kaddr + ext2_last_byte(dir, n); n 171 fs/ext2/inode.c int n = 0; n 178 fs/ext2/inode.c offsets[n++] = i_block; n 181 fs/ext2/inode.c offsets[n++] = EXT2_IND_BLOCK; n 182 fs/ext2/inode.c offsets[n++] = i_block; n 185 fs/ext2/inode.c offsets[n++] = EXT2_DIND_BLOCK; n 186 fs/ext2/inode.c offsets[n++] = i_block >> ptrs_bits; n 187 fs/ext2/inode.c offsets[n++] = i_block & (ptrs - 1); n 190 fs/ext2/inode.c offsets[n++] = EXT2_TIND_BLOCK; n 191 fs/ext2/inode.c offsets[n++] = i_block >> (ptrs_bits * 2); n 192 fs/ext2/inode.c offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1); n 193 fs/ext2/inode.c offsets[n++] = i_block & (ptrs - 1); n 202 fs/ext2/inode.c return n; n 483 fs/ext2/inode.c int i, n = 0; n 499 fs/ext2/inode.c for (n = 1; n <= indirect_blks; n++) { n 505 fs/ext2/inode.c bh = sb_getblk(inode->i_sb, 
new_blocks[n-1]); n 510 fs/ext2/inode.c branch[n].bh = bh; n 513 fs/ext2/inode.c branch[n].p = (__le32 *) bh->b_data + offsets[n]; n 514 fs/ext2/inode.c branch[n].key = cpu_to_le32(new_blocks[n]); n 515 fs/ext2/inode.c *branch[n].p = branch[n].key; n 516 fs/ext2/inode.c if ( n == indirect_blks) { n 517 fs/ext2/inode.c current_block = new_blocks[n]; n 524 fs/ext2/inode.c *(branch[n].p + i) = cpu_to_le32(++current_block); n 540 fs/ext2/inode.c for (i = 1; i < n; i++) n 1192 fs/ext2/inode.c int n; n 1202 fs/ext2/inode.c n = ext2_block_to_path(inode, iblock, offsets, NULL); n 1203 fs/ext2/inode.c if (n == 0) n 1212 fs/ext2/inode.c if (n == 1) { n 1218 fs/ext2/inode.c partial = ext2_find_shared(inode, n, offsets, chain, &nr); n 1225 fs/ext2/inode.c ext2_free_branches(inode, &nr, &nr+1, (chain+n-1) - partial); n 1232 fs/ext2/inode.c (chain+n-1) - partial); n 1410 fs/ext2/inode.c int n; n 1490 fs/ext2/inode.c for (n = 0; n < EXT2_N_BLOCKS; n++) n 1491 fs/ext2/inode.c ei->i_data[n] = raw_inode->i_block[n]; n 1544 fs/ext2/inode.c int n; n 1623 fs/ext2/inode.c } else for (n = 0; n < EXT2_N_BLOCKS; n++) n 1624 fs/ext2/inode.c raw_inode->i_block[n] = ei->i_data[n]; n 994 fs/ext2/xattr.c int n; n 996 fs/ext2/xattr.c for (n=0; n < entry->e_name_len; n++) { n 1005 fs/ext2/xattr.c for (n = (le32_to_cpu(entry->e_value_size) + n 1006 fs/ext2/xattr.c EXT2_XATTR_ROUND) >> EXT2_XATTR_PAD_BITS; n; n--) { n 21 fs/ext4/acl.c int n, count; n 40 fs/ext4/acl.c for (n = 0; n < count; n++) { n 45 fs/ext4/acl.c acl->a_entries[n].e_tag = le16_to_cpu(entry->e_tag); n 46 fs/ext4/acl.c acl->a_entries[n].e_perm = le16_to_cpu(entry->e_perm); n 48 fs/ext4/acl.c switch (acl->a_entries[n].e_tag) { n 61 fs/ext4/acl.c acl->a_entries[n].e_uid = n 69 fs/ext4/acl.c acl->a_entries[n].e_gid = n 95 fs/ext4/acl.c size_t n; n 104 fs/ext4/acl.c for (n = 0; n < acl->a_count; n++) { n 105 fs/ext4/acl.c const struct posix_acl_entry *acl_e = &acl->a_entries[n]; n 55 fs/ext4/block_validity.c struct ext4_system_zone *entry, *n; n 57 fs/ext4/block_validity.c rbtree_postorder_for_each_entry_safe(entry, n, n 72 fs/ext4/block_validity.c struct rb_node **n = &system_blks->root.rb_node, *node; n 75 fs/ext4/block_validity.c while (*n) { n 76 fs/ext4/block_validity.c parent = *n; n 79 fs/ext4/block_validity.c n = &(*n)->rb_left; n 81 fs/ext4/block_validity.c n = &(*n)->rb_right; n 87 fs/ext4/block_validity.c new_node = *n; n 103 fs/ext4/block_validity.c rb_link_node(new_node, parent, n); n 161 fs/ext4/block_validity.c struct rb_node *n; n 173 fs/ext4/block_validity.c n = system_blks->root.rb_node; n 174 fs/ext4/block_validity.c while (n) { n 175 fs/ext4/block_validity.c entry = rb_entry(n, struct ext4_system_zone, node); n 177 fs/ext4/block_validity.c n = n->rb_left; n 179 fs/ext4/block_validity.c n = n->rb_right; n 196 fs/ext4/block_validity.c int err = 0, n; n 209 fs/ext4/block_validity.c n = ext4_map_blocks(NULL, inode, &map, 0); n 210 fs/ext4/block_validity.c if (n < 0) { n 211 fs/ext4/block_validity.c err = n; n 214 fs/ext4/block_validity.c if (n == 0) { n 218 fs/ext4/block_validity.c map.m_pblk, n)) { n 225 fs/ext4/block_validity.c err = add_system_zone(system_blks, map.m_pblk, n); n 228 fs/ext4/block_validity.c i += n; n 4531 fs/ext4/extents.c unsigned int n; n 4546 fs/ext4/extents.c n = ext4_es_delayed_clu(inode, lblk, len); n 4547 fs/ext4/extents.c if (n > 0) n 4548 fs/ext4/extents.c ext4_da_update_reserve_space(inode, (int) n, 0); n 2029 fs/ext4/extents_status.c unsigned int n = 0; n 2049 fs/ext4/extents_status.c n += last_lclu - first_lclu; 
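The fs/ext2/inode.c and fs/ext4/indirect.c hits above (ext2_block_to_path(), ext4_block_to_path()) translate a logical block number into a path of array offsets, using n as the depth counter: twelve direct slots first, then single, double and triple indirection. Below is a user-space sketch of that classic calculation; the pointers-per-block count is passed in explicitly rather than derived from a real superblock, and division stands in for the shift/mask arithmetic the kernel uses.

    /*
     * Sketch of the ext2-style block-to-path computation shown in the
     * listing above.  Twelve direct slots plus three indirection slots
     * is the classic layout; nothing here touches a real filesystem.
     */
    #include <stdio.h>

    #define DIRECT_BLOCKS 12
    #define IND_SLOT  DIRECT_BLOCKS        /* i_block[12]: single indirect */
    #define DIND_SLOT (DIRECT_BLOCKS + 1)  /* i_block[13]: double indirect */
    #define TIND_SLOT (DIRECT_BLOCKS + 2)  /* i_block[14]: triple indirect */

    /* Fill offsets[] with the path to logical block 'block'; return depth. */
    static int block_to_path(unsigned long block, unsigned long ptrs,
                             unsigned long offsets[4])
    {
        int n = 0;

        if (block < DIRECT_BLOCKS) {
            offsets[n++] = block;
        } else if ((block -= DIRECT_BLOCKS) < ptrs) {
            offsets[n++] = IND_SLOT;
            offsets[n++] = block;
        } else if ((block -= ptrs) < ptrs * ptrs) {
            offsets[n++] = DIND_SLOT;
            offsets[n++] = block / ptrs;
            offsets[n++] = block % ptrs;
        } else if ((block -= ptrs * ptrs) < ptrs * ptrs * ptrs) {
            offsets[n++] = TIND_SLOT;
            offsets[n++] = block / (ptrs * ptrs);
            offsets[n++] = (block / ptrs) % ptrs;
            offsets[n++] = block % ptrs;
        }
        return n;                          /* 0 means the block number is too big */
    }

    int main(void)
    {
        unsigned long path[4];
        /* 1 KiB blocks => 256 block pointers per indirect block */
        int depth = block_to_path(100000, 256, path);

        printf("depth %d:", depth);
        for (int i = 0; i < depth; i++)
            printf(" %lu", path[i]);
        printf("\n");
        return 0;
    }

For example, with 256 pointers per indirect block, logical block 100000 resolves at depth 4, i.e. it lives under the triple-indirect slot.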
n 2051 fs/ext4/extents_status.c n += last_lclu - first_lclu + 1; n 2060 fs/ext4/extents_status.c return n; n 2078 fs/ext4/extents_status.c unsigned int n; n 2088 fs/ext4/extents_status.c n = __es_delayed_clu(inode, lblk, end); n 2092 fs/ext4/extents_status.c return n; n 21 fs/ext4/hash.c int n = 16; n 27 fs/ext4/hash.c } while (--n); n 83 fs/ext4/indirect.c int n = 0; n 87 fs/ext4/indirect.c offsets[n++] = i_block; n 90 fs/ext4/indirect.c offsets[n++] = EXT4_IND_BLOCK; n 91 fs/ext4/indirect.c offsets[n++] = i_block; n 94 fs/ext4/indirect.c offsets[n++] = EXT4_DIND_BLOCK; n 95 fs/ext4/indirect.c offsets[n++] = i_block >> ptrs_bits; n 96 fs/ext4/indirect.c offsets[n++] = i_block & (ptrs - 1); n 99 fs/ext4/indirect.c offsets[n++] = EXT4_TIND_BLOCK; n 100 fs/ext4/indirect.c offsets[n++] = i_block >> (ptrs_bits * 2); n 101 fs/ext4/indirect.c offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1); n 102 fs/ext4/indirect.c offsets[n++] = i_block & (ptrs - 1); n 111 fs/ext4/indirect.c return n; n 1104 fs/ext4/indirect.c int n = 0; n 1114 fs/ext4/indirect.c n = ext4_block_to_path(inode, last_block, offsets, NULL); n 1115 fs/ext4/indirect.c if (n == 0) n 1136 fs/ext4/indirect.c } else if (n == 1) { /* direct blocks */ n 1142 fs/ext4/indirect.c partial = ext4_find_shared(inode, n, offsets, chain, &nr); n 1148 fs/ext4/indirect.c &nr, &nr+1, (chain+n-1) - partial); n 1159 fs/ext4/indirect.c partial->p+1, (chain+n-1) - partial); n 1166 fs/ext4/indirect.c (chain+n-1) - partial); n 1222 fs/ext4/indirect.c int n = 0, n2 = 0; n 1232 fs/ext4/indirect.c n = ext4_block_to_path(inode, start, offsets, NULL); n 1235 fs/ext4/indirect.c BUG_ON(n > n2); n 1237 fs/ext4/indirect.c if ((n == 1) && (n == n2)) { n 1242 fs/ext4/indirect.c } else if (n2 > n) { n 1250 fs/ext4/indirect.c if (n == 1) { n 1261 fs/ext4/indirect.c partial = p = ext4_find_shared(inode, n, offsets, chain, &nr); n 1266 fs/ext4/indirect.c &nr, &nr+1, (chain+n-1) - partial); n 1273 fs/ext4/indirect.c partial->p+1, (chain+n-1) - partial); n 1285 fs/ext4/indirect.c (chain+n-1) - partial); n 1326 fs/ext4/indirect.c partial = p = ext4_find_shared(inode, n, offsets, chain, &nr); n 1347 fs/ext4/indirect.c (chain+n-1) - partial); n 1355 fs/ext4/indirect.c (chain+n-1) - partial); n 1371 fs/ext4/indirect.c int depth = (chain+n-1) - partial; n 1383 fs/ext4/indirect.c (chain+n-1) - partial); n 1398 fs/ext4/indirect.c (chain+n-1) - partial); n 1427 fs/ext4/indirect.c if (++n >= n2) n 1436 fs/ext4/indirect.c if (++n >= n2) n 1445 fs/ext4/indirect.c if (++n >= n2) n 3516 fs/ext4/mballoc.c struct rb_node *n; n 3521 fs/ext4/mballoc.c n = rb_first(&(grp->bb_free_root)); n 3523 fs/ext4/mballoc.c while (n) { n 3524 fs/ext4/mballoc.c entry = rb_entry(n, struct ext4_free_data, efd_node); n 3526 fs/ext4/mballoc.c n = rb_next(n); n 4656 fs/ext4/mballoc.c struct rb_node **n = &db->bb_free_root.rb_node, *node; n 4666 fs/ext4/mballoc.c if (!*n) { n 4675 fs/ext4/mballoc.c while (*n) { n 4676 fs/ext4/mballoc.c parent = *n; n 4679 fs/ext4/mballoc.c n = &(*n)->rb_left; n 4681 fs/ext4/mballoc.c n = &(*n)->rb_right; n 4691 fs/ext4/mballoc.c rb_link_node(new_node, parent, n); n 31 fs/ext4/mballoc.h #define mb_debug(n, fmt, ...) \ n 33 fs/ext4/mballoc.h if ((n) <= ext4_mballoc_debug) { \ n 39 fs/ext4/mballoc.h #define mb_debug(n, fmt, ...) 
no_printk(fmt, ##__VA_ARGS__) n 597 fs/ext4/namei.c int i, n = dx_get_count (entries); n 599 fs/ext4/namei.c for (i = 0; i < n; i++) { n 835 fs/ext4/namei.c unsigned n = count - 1; n 837 fs/ext4/namei.c while (n--) n 3135 fs/ext4/super.c struct list_head *pos, *n; n 3150 fs/ext4/super.c list_for_each_safe(pos, n, &eli->li_request_list) { n 3173 fs/ext4/super.c n = pos->next; n 3235 fs/ext4/super.c struct list_head *pos, *n; n 3239 fs/ext4/super.c list_for_each_safe(pos, n, &ext4_li_info->li_request_list) { n 45 fs/ext4/verity.c size_t n = min_t(size_t, count, n 56 fs/ext4/verity.c memcpy(buf, addr + offset_in_page(pos), n); n 61 fs/ext4/verity.c buf += n; n 62 fs/ext4/verity.c pos += n; n 63 fs/ext4/verity.c count -= n; n 79 fs/ext4/verity.c size_t n = min_t(size_t, count, n 86 fs/ext4/verity.c res = pagecache_write_begin(NULL, inode->i_mapping, pos, n, 0, n 92 fs/ext4/verity.c memcpy(addr + offset_in_page(pos), buf, n); n 95 fs/ext4/verity.c res = pagecache_write_end(NULL, inode->i_mapping, pos, n, n, n 99 fs/ext4/verity.c if (res != n) n 102 fs/ext4/verity.c buf += n; n 103 fs/ext4/verity.c pos += n; n 104 fs/ext4/verity.c count -= n; n 2523 fs/ext4/xattr.c void *from, size_t n) n 2540 fs/ext4/xattr.c memmove(to, from, n); n 982 fs/f2fs/dir.c unsigned int n = ((unsigned long)ctx->pos / NR_DENTRY_IN_BLOCK); n 1002 fs/f2fs/dir.c for (; n < npages; n++, ctx->pos = n * NR_DENTRY_IN_BLOCK) { n 1012 fs/f2fs/dir.c if (npages - n > 1 && !ra_has_index(ra, n)) n 1013 fs/f2fs/dir.c page_cache_sync_readahead(inode->i_mapping, ra, file, n, n 1014 fs/f2fs/dir.c min(npages - n, (pgoff_t)MAX_DIR_RA_PAGES)); n 1016 fs/f2fs/dir.c dentry_page = f2fs_find_data_page(inode, n); n 1032 fs/f2fs/dir.c n * NR_DENTRY_IN_BLOCK, &fstr); n 31 fs/f2fs/hash.c int n = 16; n 37 fs/f2fs/hash.c } while (--n); n 184 fs/f2fs/node.c static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n) n 188 fs/f2fs/node.c ne = radix_tree_lookup(&nm_i->nat_root, n); n 585 fs/f2fs/node.c static void f2fs_ra_node_pages(struct page *parent, int start, int n) n 595 fs/f2fs/node.c end = start + n; n 650 fs/f2fs/node.c int n = 0; n 656 fs/f2fs/node.c offset[n] = block; n 661 fs/f2fs/node.c offset[n++] = NODE_DIR1_BLOCK; n 662 fs/f2fs/node.c noffset[n] = 1; n 663 fs/f2fs/node.c offset[n] = block; n 669 fs/f2fs/node.c offset[n++] = NODE_DIR2_BLOCK; n 670 fs/f2fs/node.c noffset[n] = 2; n 671 fs/f2fs/node.c offset[n] = block; n 677 fs/f2fs/node.c offset[n++] = NODE_IND1_BLOCK; n 678 fs/f2fs/node.c noffset[n] = 3; n 679 fs/f2fs/node.c offset[n++] = block / direct_blks; n 680 fs/f2fs/node.c noffset[n] = 4 + offset[n - 1]; n 681 fs/f2fs/node.c offset[n] = block % direct_blks; n 687 fs/f2fs/node.c offset[n++] = NODE_IND2_BLOCK; n 688 fs/f2fs/node.c noffset[n] = 4 + dptrs_per_blk; n 689 fs/f2fs/node.c offset[n++] = block / direct_blks; n 690 fs/f2fs/node.c noffset[n] = 5 + dptrs_per_blk + offset[n - 1]; n 691 fs/f2fs/node.c offset[n] = block % direct_blks; n 697 fs/f2fs/node.c offset[n++] = NODE_DIND_BLOCK; n 698 fs/f2fs/node.c noffset[n] = 5 + (dptrs_per_blk * 2); n 699 fs/f2fs/node.c offset[n++] = block / indirect_blks; n 700 fs/f2fs/node.c noffset[n] = 6 + (dptrs_per_blk * 2) + n 701 fs/f2fs/node.c offset[n - 1] * (dptrs_per_blk + 1); n 702 fs/f2fs/node.c offset[n++] = (block / direct_blks) % dptrs_per_blk; n 703 fs/f2fs/node.c noffset[n] = 7 + (dptrs_per_blk * 2) + n 704 fs/f2fs/node.c offset[n - 2] * (dptrs_per_blk + 1) + n 705 fs/f2fs/node.c offset[n - 1]; n 706 fs/f2fs/node.c offset[n] = block % direct_blks; n 2049 
fs/f2fs/node.c nid_t n) n 2051 fs/f2fs/node.c return radix_tree_lookup(&nm_i->free_nid_root, n); n 64 fs/f2fs/node.h #define nat_set_nid(nat, n) ((nat)->ni.nid = (n)) n 2872 fs/f2fs/super.c unsigned int n = 0; n 2921 fs/f2fs/super.c set_bit(n, FDEV(devi).blkz_seq); n 2923 fs/f2fs/super.c n++; n 3318 fs/f2fs/super.c int n = (i == META) ? 1: NR_TEMP_TYPE; n 3323 fs/f2fs/super.c array_size(n, n 3331 fs/f2fs/super.c for (j = HOT; j < n; j++) { n 38 fs/f2fs/trace.h #define f2fs_trace_ios(i, n) n 45 fs/f2fs/verity.c size_t n = min_t(size_t, count, n 56 fs/f2fs/verity.c memcpy(buf, addr + offset_in_page(pos), n); n 61 fs/f2fs/verity.c buf += n; n 62 fs/f2fs/verity.c pos += n; n 63 fs/f2fs/verity.c count -= n; n 79 fs/f2fs/verity.c size_t n = min_t(size_t, count, n 86 fs/f2fs/verity.c res = pagecache_write_begin(NULL, inode->i_mapping, pos, n, 0, n 92 fs/f2fs/verity.c memcpy(addr + offset_in_page(pos), buf, n); n 95 fs/f2fs/verity.c res = pagecache_write_end(NULL, inode->i_mapping, pos, n, n, n 99 fs/f2fs/verity.c if (res != n) n 102 fs/f2fs/verity.c buf += n; n 103 fs/f2fs/verity.c pos += n; n 104 fs/f2fs/verity.c count -= n; n 1090 fs/fat/dir.c int err, i, n; n 1094 fs/fat/dir.c n = nr_used; n 1096 fs/fat/dir.c bhs[n] = sb_getblk(sb, blknr); n 1097 fs/fat/dir.c if (!bhs[n]) { n 1102 fs/fat/dir.c lock_buffer(bhs[n]); n 1103 fs/fat/dir.c memset(bhs[n]->b_data, 0, sb->s_blocksize); n 1104 fs/fat/dir.c set_buffer_uptodate(bhs[n]); n 1105 fs/fat/dir.c unlock_buffer(bhs[n]); n 1106 fs/fat/dir.c mark_buffer_dirty_inode(bhs[n], dir); n 1108 fs/fat/dir.c n++; n 1110 fs/fat/dir.c if (n == nr_bhs) { n 1112 fs/fat/dir.c err = fat_sync_bhs(bhs, n); n 1116 fs/fat/dir.c for (i = 0; i < n; i++) n 1118 fs/fat/dir.c n = 0; n 1122 fs/fat/dir.c err = fat_sync_bhs(bhs, n); n 1126 fs/fat/dir.c for (i = 0; i < n; i++) n 1132 fs/fat/dir.c for (i = 0; i < n; i++) n 1211 fs/fat/dir.c int err, i, n, offset, cluster[2]; n 1231 fs/fat/dir.c i = n = copy = 0; n 1236 fs/fat/dir.c bhs[n] = sb_getblk(sb, blknr); n 1237 fs/fat/dir.c if (!bhs[n]) { n 1245 fs/fat/dir.c lock_buffer(bhs[n]); n 1246 fs/fat/dir.c memcpy(bhs[n]->b_data, slots, copy); n 1247 fs/fat/dir.c set_buffer_uptodate(bhs[n]); n 1248 fs/fat/dir.c unlock_buffer(bhs[n]); n 1249 fs/fat/dir.c mark_buffer_dirty_inode(bhs[n], dir); n 1254 fs/fat/dir.c n++; n 1259 fs/fat/dir.c memset(bhs[n]->b_data + copy, 0, sb->s_blocksize - copy); n 1261 fs/fat/dir.c get_bh(bhs[n]); n 1262 fs/fat/dir.c *bh = bhs[n]; n 1267 fs/fat/dir.c err = fat_zeroed_cluster(dir, start_blknr, ++n, bhs, MAX_BUF_PER_PAGE); n 1276 fs/fat/dir.c n = 0; n 1278 fs/fat/dir.c for (i = 0; i < n; i++) n 379 fs/fat/fatent.c int err, n, copy; n 385 fs/fat/fatent.c for (n = 0; n < nr_bhs; n++) { n 386 fs/fat/fatent.c c_bh = sb_getblk(sb, backup_fat + bhs[n]->b_blocknr); n 393 fs/fat/fatent.c memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize); n 449 fs/fat/fatent.c int n, i; n 451 fs/fat/fatent.c for (n = 0; n < fatent->nr_bhs; n++) { n 453 fs/fat/fatent.c if (fatent->bhs[n] == bhs[i]) n 457 fs/fat/fatent.c get_bh(fatent->bhs[n]); n 458 fs/fat/fatent.c bhs[i] = fatent->bhs[n]; n 996 fs/file.c int iterate_fd(struct files_struct *files, unsigned n, n 1005 fs/file.c for (fdt = files_fdtable(files); n < fdt->max_fds; n++) { n 1007 fs/file.c file = rcu_dereference_check_fdtable(files, fdt->fd[n]); n 1010 fs/file.c res = f(p, file, n); n 392 fs/file_table.c unsigned long n; n 397 fs/file_table.c n = ((nr_pages - memreserve) * (PAGE_SIZE / 1024)) / 10; n 399 fs/file_table.c files_stat.max_files = max_t(unsigned long, 
n, NR_FILE); n 68 fs/freevxfs/vxfs_subr.c vxfs_get_page(struct address_space *mapping, u_long n) n 72 fs/freevxfs/vxfs_subr.c pp = read_mapping_page(mapping, n, NULL); n 26 fs/fscache/histogram.c unsigned n[5], t; n 37 fs/fscache/histogram.c n[0] = atomic_read(&fscache_obj_instantiate_histogram[index]); n 38 fs/fscache/histogram.c n[1] = atomic_read(&fscache_ops_histogram[index]); n 39 fs/fscache/histogram.c n[2] = atomic_read(&fscache_objs_histogram[index]); n 40 fs/fscache/histogram.c n[3] = atomic_read(&fscache_retrieval_delay_histogram[index]); n 41 fs/fscache/histogram.c n[4] = atomic_read(&fscache_retrieval_histogram[index]); n 42 fs/fscache/histogram.c if (!(n[0] | n[1] | n[2] | n[3] | n[4])) n 48 fs/fscache/histogram.c index, t, n[0], n[1], n[2], n[3], n[4]); n 31 fs/fscache/object.c #define __STATE_NAME(n) fscache_osm_##n n 32 fs/fscache/object.c #define STATE(n) (&__STATE_NAME(n)) n 41 fs/fscache/object.c #define WORK_STATE(n, sn, f) \ n 42 fs/fscache/object.c const struct fscache_state __STATE_NAME(n) = { \ n 43 fs/fscache/object.c .name = #n, \ n 62 fs/fscache/object.c #define WAIT_STATE(n, sn, ...) \ n 63 fs/fscache/object.c const struct fscache_state __STATE_NAME(n) = { \ n 64 fs/fscache/object.c .name = #n, \ n 121 fs/fscache/operation.c unsigned n; n 136 fs/fscache/operation.c n = 0; n 140 fs/fscache/operation.c n++; n 143 fs/fscache/operation.c kdebug("n=%u", n); n 795 fs/fscache/page.c unsigned n; n 838 fs/fscache/page.c n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1, n 840 fs/fscache/page.c trace_fscache_gang_lookup(cookie, &op->op, results, n, op->store_limit); n 841 fs/fscache/page.c if (n != 1) n 844 fs/fscache/page.c _debug("gang %d [%lx]", n, page->index); n 898 fs/fscache/page.c int n, i; n 904 fs/fscache/page.c n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, n 907 fs/fscache/page.c if (n == 0) { n 912 fs/fscache/page.c for (i = n - 1; i >= 0; i--) { n 921 fs/fscache/page.c for (i = n - 1; i >= 0; i--) n 33 fs/fsopen.c int index, n; n 53 fs/fsopen.c n = strlen(p); n 54 fs/fsopen.c if (n > len) n 57 fs/fsopen.c if (copy_to_user(_buf, p, n) != 0) n 59 fs/fsopen.c ret = n; n 2617 fs/fuse/file.c size_t n; n 2620 fs/fuse/file.c for (n = 0; n < count; n++, iov++) { n 137 fs/gfs2/bmap.c unsigned int n = 1; n 138 fs/gfs2/bmap.c error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL); n 670 fs/gfs2/bmap.c unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0; n 709 fs/gfs2/bmap.c n = blks - alloced; n 710 fs/gfs2/bmap.c ret = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL); n 713 fs/gfs2/bmap.c alloced += n; n 715 fs/gfs2/bmap.c gfs2_trans_remove_revoke(sdp, bn, n); n 724 fs/gfs2/bmap.c for (; i - 1 < mp->mp_fheight - ip->i_height && n > 0; n 725 fs/gfs2/bmap.c i++, n--) n 747 fs/gfs2/bmap.c if (n == 0) n 753 fs/gfs2/bmap.c for (; i < mp->mp_fheight && n > 0; i++, n--) n 758 fs/gfs2/bmap.c if (n == 0) n 762 fs/gfs2/bmap.c BUG_ON(n > dblks); n 765 fs/gfs2/bmap.c dblks = n; n 769 fs/gfs2/bmap.c while (n-- > 0) n 868 fs/gfs2/dir.c unsigned int n = 1; n 876 fs/gfs2/dir.c error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL); n 2033 fs/gfs2/glock.c static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n) n 2038 fs/gfs2/glock.c if (n == 0) n 2047 fs/gfs2/glock.c n = 1; n 2055 fs/gfs2/glock.c if (n <= 1) { n 2062 fs/gfs2/glock.c n--; n 2072 fs/gfs2/glock.c loff_t n; n 2081 fs/gfs2/glock.c n = *pos + 1; n 2083 fs/gfs2/glock.c n = *pos - gi->last_pos; n 2088 fs/gfs2/glock.c gfs2_glock_iter_next(gi, n); n 646 fs/gfs2/lops.c unsigned n; n 664 
fs/gfs2/lops.c n = 0; n 671 fs/gfs2/lops.c if (++n >= num) n 679 fs/gfs2/lops.c n = 0; n 702 fs/gfs2/lops.c if (++n >= num) n 326 fs/gfs2/rgrp.c u32 n; n 329 fs/gfs2/rgrp.c for (n = 0; n < n_unaligned; n++) { n 510 fs/gfs2/rgrp.c struct rb_node *n, *next; n 514 fs/gfs2/rgrp.c n = sdp->sd_rindex_tree.rb_node; n 515 fs/gfs2/rgrp.c while (n) { n 516 fs/gfs2/rgrp.c cur = rb_entry(n, struct gfs2_rgrpd, rd_node); n 519 fs/gfs2/rgrp.c next = n->rb_left; n 521 fs/gfs2/rgrp.c next = n->rb_right; n 532 fs/gfs2/rgrp.c n = next; n 548 fs/gfs2/rgrp.c const struct rb_node *n; n 552 fs/gfs2/rgrp.c n = rb_first(&sdp->sd_rindex_tree); n 553 fs/gfs2/rgrp.c rgd = rb_entry(n, struct gfs2_rgrpd, rd_node); n 569 fs/gfs2/rgrp.c const struct rb_node *n; n 572 fs/gfs2/rgrp.c n = rb_next(&rgd->rd_node); n 573 fs/gfs2/rgrp.c if (n == NULL) n 574 fs/gfs2/rgrp.c n = rb_first(&sdp->sd_rindex_tree); n 576 fs/gfs2/rgrp.c if (unlikely(&rgd->rd_node == n)) { n 580 fs/gfs2/rgrp.c rgd = rb_entry(n, struct gfs2_rgrpd, rd_node); n 709 fs/gfs2/rgrp.c struct rb_node *n; n 713 fs/gfs2/rgrp.c while ((n = rb_first(&rgd->rd_rstree))) { n 714 fs/gfs2/rgrp.c rs = rb_entry(n, struct gfs2_blkreserv, rs_node); n 722 fs/gfs2/rgrp.c struct rb_node *n; n 726 fs/gfs2/rgrp.c while ((n = rb_first(&sdp->sd_rindex_tree))) { n 727 fs/gfs2/rgrp.c rgd = rb_entry(n, struct gfs2_rgrpd, rd_node); n 730 fs/gfs2/rgrp.c rb_erase(n, &sdp->sd_rindex_tree); n 1621 fs/gfs2/rgrp.c struct rb_node *n; n 1625 fs/gfs2/rgrp.c n = rgd->rd_rstree.rb_node; n 1626 fs/gfs2/rgrp.c while (n) { n 1627 fs/gfs2/rgrp.c rs = rb_entry(n, struct gfs2_blkreserv, rs_node); n 1630 fs/gfs2/rgrp.c n = n->rb_left; n 1632 fs/gfs2/rgrp.c n = n->rb_right; n 1637 fs/gfs2/rgrp.c if (n) { n 1640 fs/gfs2/rgrp.c n = n->rb_right; n 1641 fs/gfs2/rgrp.c if (n == NULL) n 1643 fs/gfs2/rgrp.c rs = rb_entry(n, struct gfs2_blkreserv, rs_node); n 2188 fs/gfs2/rgrp.c unsigned int *n) n 2191 fs/gfs2/rgrp.c const unsigned int elen = *n; n 2195 fs/gfs2/rgrp.c *n = 1; n 2200 fs/gfs2/rgrp.c while (*n < elen) { n 2206 fs/gfs2/rgrp.c (*n)++; n 2260 fs/gfs2/rgrp.c const struct rb_node *n; n 2278 fs/gfs2/rgrp.c for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) { n 2279 fs/gfs2/rgrp.c trs = rb_entry(n, struct gfs2_blkreserv, rs_node); n 45 fs/gfs2/rgrp.h extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n, n 91 fs/gfs2/sys.c int error, n; n 93 fs/gfs2/sys.c error = kstrtoint(buf, 0, &n); n 100 fs/gfs2/sys.c switch (n) { n 112 fs/gfs2/sys.c fs_warn(sdp, "freeze %d error %d\n", n, error); n 257 fs/gfs2/trans.c unsigned int n = len; n 269 fs/gfs2/trans.c if (--n == 0) n 624 fs/gfs2/xattr.c unsigned int n = 1; n 628 fs/gfs2/xattr.c error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL); n 688 fs/gfs2/xattr.c unsigned int n = 1; n 690 fs/gfs2/xattr.c error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL); n 993 fs/gfs2/xattr.c unsigned int n = 1; n 994 fs/gfs2/xattr.c error = gfs2_alloc_blocks(ip, &blk, &n, 0, NULL); n 32 fs/hfs/bitmap.c u32 mask, start, len, n; n 46 fs/hfs/bitmap.c n = be32_to_cpu(val); n 50 fs/hfs/bitmap.c if (!(n & mask)) n 59 fs/hfs/bitmap.c n = be32_to_cpu(val); n 62 fs/hfs/bitmap.c if (!(n & mask)) n 76 fs/hfs/bitmap.c n |= mask; n 80 fs/hfs/bitmap.c if (!--len || n & mask) n 85 fs/hfs/bitmap.c *curr++ = cpu_to_be32(n); n 88 fs/hfs/bitmap.c n = be32_to_cpu(*curr); n 91 fs/hfs/bitmap.c if (n) { n 101 fs/hfs/bitmap.c if (n & mask) n 103 fs/hfs/bitmap.c n |= mask; n 107 fs/hfs/bitmap.c *curr = cpu_to_be32(n); n 26 fs/hfsplus/bitmap.c u32 mask, start, len, n; n 54 
fs/hfsplus/bitmap.c n = be32_to_cpu(val); n 57 fs/hfsplus/bitmap.c if (!(n & mask)) n 68 fs/hfsplus/bitmap.c n = be32_to_cpu(val); n 71 fs/hfsplus/bitmap.c if (!(n & mask)) n 106 fs/hfsplus/bitmap.c n |= mask; n 110 fs/hfsplus/bitmap.c if (!--len || n & mask) n 115 fs/hfsplus/bitmap.c *curr++ = cpu_to_be32(n); n 119 fs/hfsplus/bitmap.c n = be32_to_cpu(*curr); n 122 fs/hfsplus/bitmap.c if (n) { n 146 fs/hfsplus/bitmap.c if (n & mask) n 148 fs/hfsplus/bitmap.c n |= mask; n 152 fs/hfsplus/bitmap.c *curr = cpu_to_be32(n); n 142 fs/hostfs/hostfs_kern.c int len, n; n 147 fs/hostfs/hostfs_kern.c n = -ENOMEM; n 151 fs/hostfs/hostfs_kern.c n = hostfs_do_readlink(link, name, PATH_MAX); n 152 fs/hostfs/hostfs_kern.c if (n < 0) n 154 fs/hostfs/hostfs_kern.c else if (n == PATH_MAX) { n 155 fs/hostfs/hostfs_kern.c n = -E2BIG; n 171 fs/hostfs/hostfs_kern.c n = -ENOMEM; n 182 fs/hostfs/hostfs_kern.c return ERR_PTR(n); n 126 fs/hostfs/hostfs_user.c int n; n 128 fs/hostfs/hostfs_user.c n = pread64(fd, buf, len, *offset); n 129 fs/hostfs/hostfs_user.c if (n < 0) n 131 fs/hostfs/hostfs_user.c *offset += n; n 132 fs/hostfs/hostfs_user.c return n; n 137 fs/hostfs/hostfs_user.c int n; n 139 fs/hostfs/hostfs_user.c n = pwrite64(fd, buf, len, *offset); n 140 fs/hostfs/hostfs_user.c if (n < 0) n 142 fs/hostfs/hostfs_user.c *offset += n; n 143 fs/hostfs/hostfs_user.c return n; n 339 fs/hostfs/hostfs_user.c int n; n 341 fs/hostfs/hostfs_user.c n = readlink(file, buf, size); n 342 fs/hostfs/hostfs_user.c if (n < 0) n 344 fs/hostfs/hostfs_user.c if (n < size) n 345 fs/hostfs/hostfs_user.c buf[n] = '\0'; n 346 fs/hostfs/hostfs_user.c return n; n 115 fs/hpfs/alloc.c static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigned forward) n 120 fs/hpfs/alloc.c unsigned nr = (near & 0x3fff) & ~(n - 1); n 125 fs/hpfs/alloc.c if (n != 1 && n != 4) { n 126 fs/hpfs/alloc.c hpfs_error(s, "Bad allocation size: %d", n); n 134 fs/hpfs/alloc.c if (!tstbits(bmp, nr, n + forward)) { n 138 fs/hpfs/alloc.c q = nr + n; b = 0; n 139 fs/hpfs/alloc.c while ((a = tstbits(bmp, q, n + forward)) != 0) { n 141 fs/hpfs/alloc.c if (n != 1) q = ((q-1)&~(n-1))+n; n 158 fs/hpfs/alloc.c if (n + forward >= 0x3f && le32_to_cpu(bmp[i]) != 0xffffffff) goto cont; n 166 fs/hpfs/alloc.c if (n != 1) q = ((q-1)&~(n-1))+n; n 167 fs/hpfs/alloc.c while ((a = tstbits(bmp, q, n + forward)) != 0) { n 169 fs/hpfs/alloc.c if (n != 1) q = ((q-1)&~(n-1))+n; n 181 fs/hpfs/alloc.c if (hpfs_sb(s)->sb_chk && ((ret >> 14) != (bs >> 14) || (le32_to_cpu(bmp[(ret & 0x3fff) >> 5]) | ~(((1 << n) - 1) << (ret & 0x1f))) != 0xffffffff)) { n 182 fs/hpfs/alloc.c hpfs_error(s, "Allocation doesn't work! Wanted %d, allocated at %08x", n, ret); n 186 fs/hpfs/alloc.c bmp[(ret & 0x3fff) >> 5] &= cpu_to_le32(~(((1 << n) - 1) << (ret & 0x1f))); n 203 fs/hpfs/alloc.c secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forward) n 217 fs/hpfs/alloc.c if ((sec = alloc_in_bmp(s, near, n, f_p ? 
forward : forward/4))) goto ret; n 231 fs/hpfs/alloc.c if (near_bmp+i < n_bmps && ((sec = alloc_in_bmp(s, (near_bmp+i) << 14, n, forward)))) { n 236 fs/hpfs/alloc.c if (near_bmp-i-1 >= 0 && ((sec = alloc_in_bmp(s, (near_bmp-i-1) << 14, n, forward)))) { n 241 fs/hpfs/alloc.c if (near_bmp+i >= n_bmps && ((sec = alloc_in_bmp(s, (near_bmp+i-n_bmps) << 14, n, forward)))) { n 246 fs/hpfs/alloc.c if (i == 1 && sbi->sb_c_bitmap != -1 && ((sec = alloc_in_bmp(s, (sbi->sb_c_bitmap) << 14, n, forward)))) { n 263 fs/hpfs/alloc.c while (unlikely(++i < n)); n 267 fs/hpfs/alloc.c if (!hpfs_alloc_if_possible(s, sec + n + i)) { n 315 fs/hpfs/alloc.c void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n) n 321 fs/hpfs/alloc.c if (!n) return; n 326 fs/hpfs/alloc.c sbi->sb_max_fwd_alloc += n > 0xffff ? 0xffff : n; n 340 fs/hpfs/alloc.c if (!--n) { n 359 fs/hpfs/alloc.c int hpfs_check_free_dnodes(struct super_block *s, int n) n 370 fs/hpfs/alloc.c for (k = le32_to_cpu(bmp[j]); k; k >>= 1) if (k & 1) if (!--n) { n 393 fs/hpfs/alloc.c if (!--n) { n 69 fs/hpfs/anode.c int n; n 81 fs/hpfs/anode.c if ((n = btree->n_used_nodes - 1) < -!!fnod) { n 87 fs/hpfs/anode.c a = le32_to_cpu(btree->u.internal[n].down); n 88 fs/hpfs/anode.c btree->u.internal[n].file_secno = cpu_to_le32(-1); n 97 fs/hpfs/anode.c if (n >= 0) { n 98 fs/hpfs/anode.c if (le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length) != fsecno) { n 100 fs/hpfs/anode.c le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length), fsecno, n 105 fs/hpfs/anode.c if (hpfs_alloc_if_possible(s, se = le32_to_cpu(btree->u.external[n].disk_secno) + le32_to_cpu(btree->u.external[n].length))) { n 106 fs/hpfs/anode.c le32_add_cpu(&btree->u.external[n].length, 1); n 123 fs/hpfs/anode.c fs = n < 0 ? 
0 : le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length); n 156 fs/hpfs/anode.c btree->n_free_nodes--; n = btree->n_used_nodes++; n 158 fs/hpfs/anode.c btree->u.external[n].disk_secno = cpu_to_le32(se); n 159 fs/hpfs/anode.c btree->u.external[n].file_secno = cpu_to_le32(fs); n 160 fs/hpfs/anode.c btree->u.external[n].length = cpu_to_le32(1); n 177 fs/hpfs/anode.c btree->n_free_nodes--; n = btree->n_used_nodes++; n 179 fs/hpfs/anode.c btree->u.internal[n].file_secno = cpu_to_le32(-1); n 180 fs/hpfs/anode.c btree->u.internal[n].down = cpu_to_le32(na); n 181 fs/hpfs/anode.c btree->u.internal[n-1].file_secno = cpu_to_le32(fs); n 245 fs/hpfs/anode.c if (bp_internal(&ranode->btree)) for (n = 0; n < ranode->btree.n_used_nodes; n++) { n 247 fs/hpfs/anode.c if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) { n 26 fs/hpfs/buffer.c unsigned hpfs_search_hotfix_map_for_range(struct super_block *s, secno sec, unsigned n) n 31 fs/hpfs/buffer.c if (sbi->hotfix_from[i] >= sec && sbi->hotfix_from[i] < sec + n) { n 32 fs/hpfs/buffer.c n = sbi->hotfix_from[i] - sec; n 35 fs/hpfs/buffer.c return n; n 38 fs/hpfs/buffer.c void hpfs_prefetch_sectors(struct super_block *s, unsigned secno, int n) n 43 fs/hpfs/buffer.c if (n <= 0 || unlikely(secno >= hpfs_sb(s)->sb_fs_size)) n 46 fs/hpfs/buffer.c if (unlikely(hpfs_search_hotfix_map_for_range(s, secno, n) != n)) n 59 fs/hpfs/buffer.c while (n > 0) { n 64 fs/hpfs/buffer.c n--; n 99 fs/hpfs/dnode.c int n = (*p & 0x3f) + c; n 100 fs/hpfs/dnode.c if (n > 0x3f) n 104 fs/hpfs/dnode.c *p = (*p & ~0x3f) | n; n 111 fs/hpfs/dnode.c int n = (*p & 0x3f) - c; n 112 fs/hpfs/dnode.c if (n < 1) n 116 fs/hpfs/dnode.c *p = (*p & ~0x3f) | n; n 796 fs/hpfs/dnode.c static struct hpfs_dirent *map_nth_dirent(struct super_block *s, dnode_secno dno, int n, n 808 fs/hpfs/dnode.c if (i == n) { n 814 fs/hpfs/dnode.c hpfs_error(s, "map_nth_dirent: n too high; dnode = %08x, requested %08x", dno, n); n 268 fs/hpfs/ea.c secno n; n 271 fs/hpfs/ea.c if (!(n = hpfs_alloc_sector(s, fno, 1, 0))) return; n 272 fs/hpfs/ea.c if (!(data = hpfs_get_sector(s, n, &bh))) { n 273 fs/hpfs/ea.c hpfs_free_sectors(s, n, 1); n 279 fs/hpfs/ea.c fnode->ea_secno = cpu_to_le32(n); n 42 fs/hpfs/file.c unsigned n, disk_secno; n 46 fs/hpfs/file.c n = file_secno - hpfs_inode->i_file_sec; n 47 fs/hpfs/file.c if (n < hpfs_inode->i_n_secs) { n 48 fs/hpfs/file.c *n_secs = hpfs_inode->i_n_secs - n; n 49 fs/hpfs/file.c return hpfs_inode->i_disk_sec + n; n 55 fs/hpfs/file.c n = file_secno - hpfs_inode->i_file_sec; n 56 fs/hpfs/file.c if (n < hpfs_inode->i_n_secs) { n 57 fs/hpfs/file.c *n_secs = hpfs_inode->i_n_secs - n; n 58 fs/hpfs/file.c return hpfs_inode->i_disk_sec + n; n 178 fs/hpfs/hpfs_fn.h int n; n 181 fs/hpfs/hpfs_fn.h n = dst->not_8x3; n 184 fs/hpfs/hpfs_fn.h dst->not_8x3 = n; n 187 fs/hpfs/hpfs_fn.h static inline unsigned tstbits(__le32 *bmp, unsigned b, unsigned n) n 190 fs/hpfs/hpfs_fn.h if ((b >= 0x4000) || (b + n - 1 >= 0x4000)) return n; n 192 fs/hpfs/hpfs_fn.h for (i = 1; i < n; i++) n 225 fs/hpfs/hpfs_fn.h unsigned hpfs_search_hotfix_map_for_range(struct super_block *s, secno sec, unsigned n); n 115 fs/hpfs/map.c int n = (hpfs_sb(s)->sb_fs_size + 0x200000 - 1) >> 21; n 118 fs/hpfs/map.c if (!(b = kmalloc_array(n, 512, GFP_KERNEL))) { n 122 fs/hpfs/map.c for (i=0;i<n;i++) { n 123 fs/hpfs/map.c __le32 *d = hpfs_map_sector(s, bmp+i, &bh, n - i - 1); n 149 fs/hpfs/super.c unsigned n, count, n_bands; n 152 fs/hpfs/super.c for (n = 0; n < COUNT_RD_AHEAD; 
n++) { n 153 fs/hpfs/super.c hpfs_prefetch_bitmap(s, n); n 155 fs/hpfs/super.c for (n = 0; n < n_bands; n++) { n 157 fs/hpfs/super.c hpfs_prefetch_bitmap(s, n + COUNT_RD_AHEAD); n 158 fs/hpfs/super.c c = hpfs_count_one_bitmap(s, le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[n])); n 248 fs/hugetlbfs/inode.c size_t n; n 254 fs/hugetlbfs/inode.c n = copy_page_to_iter(&page[i], offset, chunksize, to); n 255 fs/hugetlbfs/inode.c copied += n; n 256 fs/hugetlbfs/inode.c if (n != chunksize) n 257 fs/iomap/direct-io.c size_t n; n 283 fs/iomap/direct-io.c n = bio->bi_iter.bi_size; n 290 fs/iomap/direct-io.c task_io_account_write(n); n 297 fs/iomap/direct-io.c iov_iter_advance(dio->submit.iter, n); n 299 fs/iomap/direct-io.c dio->size += n; n 300 fs/iomap/direct-io.c pos += n; n 301 fs/iomap/direct-io.c copied += n; n 368 fs/isofs/inode.c unsigned n; n 414 fs/isofs/inode.c n = option; n 419 fs/isofs/inode.c if (n >= 99) n 421 fs/isofs/inode.c popt->session = n + 1; n 468 fs/isofs/inode.c n = option; n 469 fs/isofs/inode.c if (n != 512 && n != 1024 && n != 2048) n 471 fs/isofs/inode.c popt->blocksize = n; n 1119 fs/jbd2/journal.c int n; n 1160 fs/jbd2/journal.c n = journal->j_blocksize / sizeof(journal_block_tag_t); n 1161 fs/jbd2/journal.c journal->j_wbufsize = n; n 1162 fs/jbd2/journal.c journal->j_wbuf = kmalloc_array(n, sizeof(struct buffer_head *), n 2717 fs/jbd2/journal.c int n = atomic_read(&nr_journal_heads); n 2718 fs/jbd2/journal.c if (n) n 2719 fs/jbd2/journal.c printk(KERN_ERR "JBD2: leaked %d journal_heads!\n", n); n 47 fs/jbd2/recovery.c static void journal_brelse_array(struct buffer_head *b[], int n) n 49 fs/jbd2/recovery.c while (--n >= 0) n 50 fs/jbd2/recovery.c brelse (b[n]); n 48 fs/jffs2/gc.c int n = jiffies % 128; n 58 fs/jffs2/gc.c } else if (n < 50 && !list_empty(&c->erasable_list)) { n 63 fs/jffs2/gc.c } else if (n < 110 && !list_empty(&c->very_dirty_list)) { n 67 fs/jffs2/gc.c } else if (n < 126 && !list_empty(&c->dirty_list)) { n 583 fs/jffs2/nodemgmt.c struct jffs2_unknown_node n; n 758 fs/jffs2/nodemgmt.c ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n); n 764 fs/jffs2/nodemgmt.c if (retlen != sizeof(n)) { n 769 fs/jffs2/nodemgmt.c if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) { n 771 fs/jffs2/nodemgmt.c je32_to_cpu(n.totlen), freed_len); n 774 fs/jffs2/nodemgmt.c if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) { n 776 fs/jffs2/nodemgmt.c ref_offset(ref), je16_to_cpu(n.nodetype)); n 780 fs/jffs2/nodemgmt.c n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE); n 781 fs/jffs2/nodemgmt.c ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n); n 787 fs/jffs2/nodemgmt.c if (retlen != sizeof(n)) { n 1378 fs/jffs2/readinode.c struct jffs2_raw_inode n; n 1389 fs/jffs2/readinode.c ret = jffs2_do_read_inode_internal(c, f, &n); n 1132 fs/jffs2/scan.c struct list_head *n = head->next; n 1136 fs/jffs2/scan.c n = n->next; n 1138 fs/jffs2/scan.c list_add(head, n); n 111 fs/jffs2/wbuf.c static int n; n 122 fs/jffs2/wbuf.c if ((jiffies + (n++)) & 127) { n 1357 fs/jfs/jfs_dmap.c int rc, ti, i, k, m, n, agperlev; n 1450 fs/jfs/jfs_dmap.c for (n = 0, m = (ti << 2) + 1; n < 4; n++) { n 1451 fs/jfs/jfs_dmap.c if (l2nb <= dcp->stree[m + n]) { n 1452 fs/jfs/jfs_dmap.c ti = m + n; n 1456 fs/jfs/jfs_dmap.c if (n == 4) { n 1836 fs/jfs/jfs_dmap.c s64 b, lblkno, n; n 1869 fs/jfs/jfs_dmap.c for (n = nblocks, b = blkno; n > 0; n -= nb, b += nb) { n 1892 fs/jfs/jfs_dmap.c nb = min_t(s64, n, BPERDMAP); n 1921 fs/jfs/jfs_dmap.c for (n = 
nblocks - n, b = blkno; n > 0; n 1922 fs/jfs/jfs_dmap.c n -= BPERDMAP, b += BPERDMAP) { n 2961 fs/jfs/jfs_dmap.c int ti, n = 0, k, x = 0; n 2974 fs/jfs/jfs_dmap.c k > 0; k--, ti = ((ti + n) << 2) + 1) { n 2978 fs/jfs/jfs_dmap.c for (x = ti, n = 0; n < 4; n++) { n 2982 fs/jfs/jfs_dmap.c if (l2nb <= tp->dmt_stree[x + n]) n 2989 fs/jfs/jfs_dmap.c assert(n < 4); n 2995 fs/jfs/jfs_dmap.c *leafidx = x + n - le32_to_cpu(tp->dmt_leafidx); n 3099 fs/jfs/jfs_dmap.c int n; n 3101 fs/jfs/jfs_dmap.c for (n = 0; n < 32; n++, word >>= 1) { n 3106 fs/jfs/jfs_dmap.c return (n); n 3124 fs/jfs/jfs_dmap.c int n; n 3126 fs/jfs/jfs_dmap.c for (n = 0; n < 32; n++, value <<= 1) { n 3130 fs/jfs/jfs_dmap.c return (n); n 3375 fs/jfs/jfs_dmap.c int i, i0 = true, j, j0 = true, k, n; n 3426 fs/jfs/jfs_dmap.c for (i = 0, n = 0; i < agno; n++) { n 3427 fs/jfs/jfs_dmap.c bmp->db_agfree[n] = 0; /* init collection point */ n 3432 fs/jfs/jfs_dmap.c bmp->db_agfree[n] += bmp->db_agfree[i]; n 3437 fs/jfs/jfs_dmap.c for (; n < MAXAG; n++) n 3438 fs/jfs/jfs_dmap.c bmp->db_agfree[n] = 0; n 3540 fs/jfs/jfs_dmap.c if ((n = blkno & (BPERDMAP - 1))) { n 3546 fs/jfs/jfs_dmap.c n = min(nblocks, (s64)BPERDMAP - n); n 3554 fs/jfs/jfs_dmap.c n = min_t(s64, nblocks, BPERDMAP); n 3558 fs/jfs/jfs_dmap.c *l0leaf = dbInitDmap(dp, blkno, n); n 3560 fs/jfs/jfs_dmap.c bmp->db_nfree += n; n 3562 fs/jfs/jfs_dmap.c bmp->db_agfree[agno] += n; n 3569 fs/jfs/jfs_dmap.c blkno += n; n 3570 fs/jfs/jfs_dmap.c nblocks -= n; n 3648 fs/jfs/jfs_dmap.c int i, n; n 3708 fs/jfs/jfs_dmap.c for (i = 5 - bmp->db_agheight, bmp->db_agstart = 0, n = 1; i > 0; n 3710 fs/jfs/jfs_dmap.c bmp->db_agstart += n; n 3711 fs/jfs/jfs_dmap.c n <<= 2; n 265 fs/jfs/jfs_dmap.h #define NLSTOL2BSZ(n) (31 - cntlz((n)) + BUDMIN) n 268 fs/jfs/jfs_dmap.h #define LITOL2BSZ(n,m,b) ((((n) == 0) ? 
(m) : cnttz((n))) + (b)) n 820 fs/jfs/jfs_dtree.c int n; n 841 fs/jfs/jfs_dtree.c n = NDTLEAF(name->namlen); n 845 fs/jfs/jfs_dtree.c n = NDTLEAF_LEGACY(name->namlen); n 857 fs/jfs/jfs_dtree.c if (n > p->header.freecnt) { n 860 fs/jfs/jfs_dtree.c split.nslot = n; n 893 fs/jfs/jfs_dtree.c n = index >> L2DTSLOTSIZE; n 894 fs/jfs/jfs_dtree.c lv->offset = p->header.stblindex + n; n 896 fs/jfs/jfs_dtree.c ((p->header.nextindex - 1) >> L2DTSLOTSIZE) - n + 1; n 938 fs/jfs/jfs_dtree.c int n; n 969 fs/jfs/jfs_dtree.c n = sbi->bsize >> L2DTSLOTSIZE; n 970 fs/jfs/jfs_dtree.c n -= (n + 31) >> L2DTSLOTSIZE; /* stbl size */ n 971 fs/jfs/jfs_dtree.c n -= DTROOTMAXSLOT - sp->header.freecnt; /* header + entries */ n 972 fs/jfs/jfs_dtree.c if (n <= split->nslot) n 1011 fs/jfs/jfs_dtree.c n = xsize >> L2DTSLOTSIZE; n 1012 fs/jfs/jfs_dtree.c n -= (n + 31) >> L2DTSLOTSIZE; /* stbl size */ n 1013 fs/jfs/jfs_dtree.c if ((n + sp->header.freecnt) <= split->nslot) n 1014 fs/jfs/jfs_dtree.c n = xlen + (xlen << 1); n 1016 fs/jfs/jfs_dtree.c n = xlen; n 1019 fs/jfs/jfs_dtree.c rc = dquot_alloc_block(ip, n); n 1022 fs/jfs/jfs_dtree.c quota_allocation += n; n 1025 fs/jfs/jfs_dtree.c (s64) n, &nxaddr))) n 1032 fs/jfs/jfs_dtree.c PXDlength(pxd, xlen + n); n 1042 fs/jfs/jfs_dtree.c xlen = lengthPXD(pxd) - n; n 1044 fs/jfs/jfs_dtree.c dbFree(ip, xaddr, (s64) n); n 1066 fs/jfs/jfs_dtree.c n = btstack->nsplit; n 1069 fs/jfs/jfs_dtree.c for (pxd = pxdlist.pxd; n > 0; n--, pxd++) { n 1196 fs/jfs/jfs_dtree.c n = NDTINTERNAL(key.namlen); n 1201 fs/jfs/jfs_dtree.c n = NDTINTERNAL(key.namlen); n 1220 fs/jfs/jfs_dtree.c if (n > sp->header.freecnt) { n 1224 fs/jfs/jfs_dtree.c split->nslot = n; n 1266 fs/jfs/jfs_dtree.c n = skip >> L2DTSLOTSIZE; n 1267 fs/jfs/jfs_dtree.c lv->offset = sp->header.stblindex + n; n 1270 fs/jfs/jfs_dtree.c 1) >> L2DTSLOTSIZE) - n + 1; n 1289 fs/jfs/jfs_dtree.c n = pxdlist.npxd; n 1290 fs/jfs/jfs_dtree.c pxd = &pxdlist.pxd[n]; n 1291 fs/jfs/jfs_dtree.c for (; n < pxdlist.maxnpxd; n++, pxd++) n 1338 fs/jfs/jfs_dtree.c int n; n 1412 fs/jfs/jfs_dtree.c n = PSIZE >> L2DTSLOTSIZE; n 1413 fs/jfs/jfs_dtree.c rp->header.maxslot = n; n 1414 fs/jfs/jfs_dtree.c stblsize = (n + 31) >> L2DTSLOTSIZE; /* in unit of slot */ n 1507 fs/jfs/jfs_dtree.c n = split->nslot; n 1514 fs/jfs/jfs_dtree.c n = NDTLEAF(ldtentry->namlen); n 1516 fs/jfs/jfs_dtree.c n = NDTLEAF_LEGACY(ldtentry-> n 1522 fs/jfs/jfs_dtree.c n = NDTINTERNAL(idtentry->namlen); n 1532 fs/jfs/jfs_dtree.c left += n; n 1574 fs/jfs/jfs_dtree.c for (n = 0; n < rp->header.nextindex; n++) { n 1575 fs/jfs/jfs_dtree.c ldtentry = (struct ldtentry *) & rp->slot[stbl[n]]; n 1577 fs/jfs/jfs_dtree.c rbn, n, &mp, &lblock); n 1594 fs/jfs/jfs_dtree.c n = skip >> L2DTSLOTSIZE; n 1595 fs/jfs/jfs_dtree.c slv->offset = sp->header.stblindex + n; n 1597 fs/jfs/jfs_dtree.c ((sp->header.nextindex - 1) >> L2DTSLOTSIZE) - n + 1; n 1645 fs/jfs/jfs_dtree.c int n; n 1698 fs/jfs/jfs_dtree.c for (n = 0; n < sp->header.nextindex; n++) { n 1700 fs/jfs/jfs_dtree.c (struct ldtentry *) & sp->slot[stbl[n]]; n 1703 fs/jfs/jfs_dtree.c xaddr, n, &mp, &lblock); n 1735 fs/jfs/jfs_dtree.c n = xsize >> L2DTSLOTSIZE; n 1736 fs/jfs/jfs_dtree.c newstblsize = (n + 31) >> L2DTSLOTSIZE; n 1764 fs/jfs/jfs_dtree.c sp->header.maxslot = n; n 1774 fs/jfs/jfs_dtree.c for (n = 0; n < oldstblsize; n++, fsi++, f++) { n 1785 fs/jfs/jfs_dtree.c fsi = n = newstblindex + newstblsize; n 1794 fs/jfs/jfs_dtree.c sp->header.freelist = n; n 1801 fs/jfs/jfs_dtree.c f->next = n; n 1804 fs/jfs/jfs_dtree.c sp->header.freecnt += 
sp->header.maxslot - n; n 1816 fs/jfs/jfs_dtree.c n = sp->header.maxslot >> 2; n 1817 fs/jfs/jfs_dtree.c if (sp->header.freelist < n) n 1818 fs/jfs/jfs_dtree.c dtLinelockFreelist(sp, n, &dtlck); n 1876 fs/jfs/jfs_dtree.c int fsi, stblsize, n; n 1940 fs/jfs/jfs_dtree.c n = xsize >> L2DTSLOTSIZE; n 1941 fs/jfs/jfs_dtree.c rp->header.maxslot = n; n 1942 fs/jfs/jfs_dtree.c stblsize = (n + 31) >> L2DTSLOTSIZE; n 1957 fs/jfs/jfs_dtree.c fsi = n = DTROOTMAXSLOT + stblsize; n 1966 fs/jfs/jfs_dtree.c rp->header.freelist = n; n 1975 fs/jfs/jfs_dtree.c f->next = n; n 1978 fs/jfs/jfs_dtree.c rp->header.freecnt = sp->header.freecnt + rp->header.maxslot - n; n 1989 fs/jfs/jfs_dtree.c for (n = 0; n < rp->header.nextindex; n++) { n 1990 fs/jfs/jfs_dtree.c ldtentry = (struct ldtentry *) & rp->slot[stbl[n]]; n 1992 fs/jfs/jfs_dtree.c rbn, n, &mp, &lblock); n 3906 fs/jfs/jfs_dtree.c int xsi, n; n 3954 fs/jfs/jfs_dtree.c n = 1; n 3970 fs/jfs/jfs_dtree.c lv->length = n; n 3982 fs/jfs/jfs_dtree.c n = 0; n 3989 fs/jfs/jfs_dtree.c n++; n 3995 fs/jfs/jfs_dtree.c lv->length = n; n 4025 fs/jfs/jfs_dtree.c for (n = index + 1; n <= nextindex; n++) { n 4026 fs/jfs/jfs_dtree.c lh = (struct ldtentry *) & (p->slot[stbl[n]]); n 4028 fs/jfs/jfs_dtree.c le32_to_cpu(lh->index), bn, n, n 4260 fs/jfs/jfs_dtree.c int xsi, n; n 4282 fs/jfs/jfs_dtree.c n = freecnt = 1; n 4290 fs/jfs/jfs_dtree.c lv->length = n; n 4302 fs/jfs/jfs_dtree.c n = 0; n 4305 fs/jfs/jfs_dtree.c n++; n 4315 fs/jfs/jfs_dtree.c lv->length = n; n 4355 fs/jfs/jfs_dtree.c int fsi, xsi, n; n 4375 fs/jfs/jfs_dtree.c n = 1; n 4385 fs/jfs/jfs_dtree.c lv->length = n; n 4397 fs/jfs/jfs_dtree.c n = 0; n 4400 fs/jfs/jfs_dtree.c n++; n 4410 fs/jfs/jfs_dtree.c lv->length = n; n 4436 fs/jfs/jfs_dtree.c int xsi, n; n 4448 fs/jfs/jfs_dtree.c n = 1; n 4459 fs/jfs/jfs_dtree.c lv->length = n; n 4471 fs/jfs/jfs_dtree.c n = 0; n 4474 fs/jfs/jfs_dtree.c n++; n 4482 fs/jfs/jfs_dtree.c lv->length = n; n 599 fs/jfs/jfs_imap.c int n; n 690 fs/jfs/jfs_imap.c for (n = 0; n < ilinelock->index; n++, lv++) { n 697 fs/jfs/jfs_imap.c for (n = XTENTRYSTART; n 698 fs/jfs/jfs_imap.c n < le16_to_cpu(xp->header.nextindex); n++, xad++) n 726 fs/jfs/jfs_imap.c for (n = 0; n < ilinelock->index; n++, lv++) { n 733 fs/jfs/jfs_imap.c for (n = XTENTRYSTART; n 734 fs/jfs/jfs_imap.c n < le16_to_cpu(xp->header.nextindex); n++, xad++) n 750 fs/jfs/jfs_imap.c for (n = 0; n < ilinelock->index; n++, lv++) { n 2846 fs/jfs/jfs_imap.c int i, n, head; n 2894 fs/jfs/jfs_imap.c n = agstart >> mp->db_agl2size; n 2895 fs/jfs/jfs_imap.c iagp->agstart = cpu_to_le64((s64)n << mp->db_agl2size); n 2902 fs/jfs/jfs_imap.c imap->im_agctl[n].numinos += numinos; n 2908 fs/jfs/jfs_imap.c if ((head = imap->im_agctl[n].inofree) == -1) { n 2923 fs/jfs/jfs_imap.c imap->im_agctl[n].inofree = n 2927 fs/jfs/jfs_imap.c imap->im_agctl[n].numfree += n 2934 fs/jfs/jfs_imap.c if ((head = imap->im_agctl[n].extfree) == -1) { n 2949 fs/jfs/jfs_imap.c imap->im_agctl[n].extfree = n 1134 fs/jfs/jfs_txnmgr.c int k, n; n 1192 fs/jfs/jfs_txnmgr.c for (n = k + 1; n < cd.nip; n++) { n 1193 fs/jfs/jfs_txnmgr.c ip = cd.iplist[n]; n 1196 fs/jfs/jfs_txnmgr.c cd.iplist[n] = cd.iplist[k]; n 2434 fs/jfs/jfs_txnmgr.c int n; n 2442 fs/jfs/jfs_txnmgr.c for (n = 0; n < xadlistlock->count; n++, xad++) { n 2463 fs/jfs/jfs_txnmgr.c for (n = 0; n < pxdlistlock->count; n++, pxd++) { n 2492 fs/jfs/jfs_txnmgr.c int n; n 2504 fs/jfs/jfs_txnmgr.c for (n = 0; n < xadlistlock->count; n++, xad++) { n 2526 fs/jfs/jfs_txnmgr.c for (n = 0; n < pxdlistlock->count; n++, pxd++) { 
n 2544 fs/jfs/jfs_txnmgr.c for (n = 0; n < xadlistlock->count; n++, xad++) { n 2563 fs/jfs/jfs_txnmgr.c for (n = 0; n < pxdlistlock->count; n++, pxd++) { n 80 fs/jfs/jfs_types.h __u64 n = le32_to_cpu(pxd->len_addr) & ~0xffffff; n 81 fs/jfs/jfs_types.h return (n << 8) + le32_to_cpu(pxd->addr2); n 43 fs/jfs/jfs_unicode.h size_t n) n 47 fs/jfs/jfs_unicode.h while (n-- && *ucs2) /* Copy the strings */ n 50 fs/jfs/jfs_unicode.h n++; n 51 fs/jfs/jfs_unicode.h while (n--) /* Pad with nulls */ n 60 fs/jfs/jfs_unicode.h size_t n) n 62 fs/jfs/jfs_unicode.h if (!n) n 64 fs/jfs/jfs_unicode.h while ((*ucs1 == __le16_to_cpu(*ucs2)) && *ucs1 && --n) { n 75 fs/jfs/jfs_unicode.h size_t n) n 79 fs/jfs/jfs_unicode.h while (n-- && *ucs2) /* Copy the strings */ n 82 fs/jfs/jfs_unicode.h n++; n 83 fs/jfs/jfs_unicode.h while (n--) /* Pad with nulls */ n 92 fs/jfs/jfs_unicode.h size_t n) n 96 fs/jfs/jfs_unicode.h while (n-- && *ucs2) /* Copy the strings */ n 99 fs/jfs/jfs_unicode.h n++; n 100 fs/jfs/jfs_unicode.h while (n--) /* Pad with nulls */ n 957 fs/jfs/jfs_xtree.c int skip, maxentry, middle, righthalf, n; n 1140 fs/jfs/jfs_xtree.c n = skip - middle; n 1142 fs/jfs/jfs_xtree.c n << L2XTSLOTSIZE); n 1145 fs/jfs/jfs_xtree.c n += XTENTRYSTART; n 1146 fs/jfs/jfs_xtree.c xad = &rp->xad[n]; n 1152 fs/jfs/jfs_xtree.c memmove(&rp->xad[n + 1], &sp->xad[skip], n 740 fs/libfs.c void simple_transaction_set(struct file *file, size_t n) n 744 fs/libfs.c BUG_ON(n > SIMPLE_TRANSACTION_LIMIT); n 751 fs/libfs.c ar->size = n; n 69 fs/lockd/host.c static unsigned int __nlm_hash32(const __be32 n) n 71 fs/lockd/host.c unsigned int hash = (__force u32)n ^ ((__force u32)n >> 16); n 67 fs/minix/dir.c static struct page * dir_get_page(struct inode *dir, unsigned long n) n 70 fs/minix/dir.c struct page *page = read_mapping_page(mapping, n, NULL); n 90 fs/minix/dir.c unsigned long n; n 97 fs/minix/dir.c n = pos >> PAGE_SHIFT; n 99 fs/minix/dir.c for ( ; n < npages; n++, offset = 0) { n 101 fs/minix/dir.c struct page *page = dir_get_page(inode, n); n 107 fs/minix/dir.c limit = kaddr + minix_last_byte(inode, n) - chunk_size; n 158 fs/minix/dir.c unsigned long n; n 167 fs/minix/dir.c for (n = 0; n < npages; n++) { n 170 fs/minix/dir.c page = dir_get_page(dir, n); n 175 fs/minix/dir.c limit = kaddr + minix_last_byte(dir, n) - sbi->s_dirsize; n 209 fs/minix/dir.c unsigned long n; n 223 fs/minix/dir.c for (n = 0; n <= npages; n++) { n 226 fs/minix/dir.c page = dir_get_page(dir, n); n 232 fs/minix/dir.c dir_end = kaddr + minix_last_byte(dir, n); n 75 fs/minix/itree_common.c int n = 0; n 80 fs/minix/itree_common.c if (parent) for (n = 1; n < num; n++) { n 86 fs/minix/itree_common.c branch[n].key = cpu_to_block(nr); n 90 fs/minix/itree_common.c branch[n].bh = bh; n 91 fs/minix/itree_common.c branch[n].p = (block_t*) bh->b_data + offsets[n]; n 92 fs/minix/itree_common.c *branch[n].p = branch[n].key; n 98 fs/minix/itree_common.c if (n == num) n 102 fs/minix/itree_common.c for (i = 1; i < n; i++) n 104 fs/minix/itree_common.c for (i = 0; i < n; i++) n 302 fs/minix/itree_common.c int n; n 309 fs/minix/itree_common.c n = block_to_path(inode, iblock, offsets); n 310 fs/minix/itree_common.c if (!n) n 313 fs/minix/itree_common.c if (n == 1) { n 320 fs/minix/itree_common.c partial = find_shared(inode, n, offsets, chain, &nr); n 326 fs/minix/itree_common.c free_branches(inode, &nr, &nr+1, (chain+n-1) - partial); n 331 fs/minix/itree_common.c (chain+n-1) - partial); n 10 fs/minix/itree_v1.c static inline unsigned long block_to_cpu(block_t n) n 12 
fs/minix/itree_v1.c return n; n 15 fs/minix/itree_v1.c static inline block_t cpu_to_block(unsigned long n) n 17 fs/minix/itree_v1.c return n; n 27 fs/minix/itree_v1.c int n = 0; n 38 fs/minix/itree_v1.c offsets[n++] = block; n 40 fs/minix/itree_v1.c offsets[n++] = 7; n 41 fs/minix/itree_v1.c offsets[n++] = block; n 44 fs/minix/itree_v1.c offsets[n++] = 8; n 45 fs/minix/itree_v1.c offsets[n++] = block>>9; n 46 fs/minix/itree_v1.c offsets[n++] = block & 511; n 48 fs/minix/itree_v1.c return n; n 9 fs/minix/itree_v2.c static inline unsigned long block_to_cpu(block_t n) n 11 fs/minix/itree_v2.c return n; n 14 fs/minix/itree_v2.c static inline block_t cpu_to_block(unsigned long n) n 16 fs/minix/itree_v2.c return n; n 29 fs/minix/itree_v2.c int n = 0; n 42 fs/minix/itree_v2.c offsets[n++] = block; n 44 fs/minix/itree_v2.c offsets[n++] = DIRCOUNT; n 45 fs/minix/itree_v2.c offsets[n++] = block; n 47 fs/minix/itree_v2.c offsets[n++] = DIRCOUNT + 1; n 48 fs/minix/itree_v2.c offsets[n++] = block / INDIRCOUNT(sb); n 49 fs/minix/itree_v2.c offsets[n++] = block % INDIRCOUNT(sb); n 52 fs/minix/itree_v2.c offsets[n++] = DIRCOUNT + 2; n 53 fs/minix/itree_v2.c offsets[n++] = (block / INDIRCOUNT(sb)) / INDIRCOUNT(sb); n 54 fs/minix/itree_v2.c offsets[n++] = (block / INDIRCOUNT(sb)) % INDIRCOUNT(sb); n 55 fs/minix/itree_v2.c offsets[n++] = block % INDIRCOUNT(sb); n 57 fs/minix/itree_v2.c return n; n 145 fs/namespace.c static inline void mnt_add_count(struct mount *mnt, int n) n 148 fs/namespace.c this_cpu_add(mnt->mnt_pcp->mnt_count, n); n 151 fs/namespace.c mnt->mnt_count += n; n 881 fs/namespace.c struct mnt_namespace *n = parent->mnt_ns; n 887 fs/namespace.c m->mnt_ns = n; n 889 fs/namespace.c list_splice(&head, n->list.prev); n 891 fs/namespace.c n->mounts += n->pending_mounts; n 892 fs/namespace.c n->pending_mounts = 0; n 895 fs/namespace.c touch_mnt_namespace(n); n 2050 fs/namespace.c struct hlist_node *n; n 2093 fs/namespace.c hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) { n 2989 fs/namespace.c unsigned long n) n 2995 fs/namespace.c if (!access_ok(from, n)) n 2996 fs/namespace.c return n; n 2998 fs/namespace.c while (n) { n 3000 fs/namespace.c memset(t, 0, n); n 3005 fs/namespace.c n--; n 3007 fs/namespace.c return n; n 264 fs/nfs/callback_xdr.c int n, i; n 273 fs/nfs/callback_xdr.c n = ntohl(*p++); n 274 fs/nfs/callback_xdr.c if (n <= 0) n 276 fs/nfs/callback_xdr.c if (n > ULONG_MAX / sizeof(*args->devs)) { n 281 fs/nfs/callback_xdr.c args->devs = kmalloc_array(n, sizeof(*args->devs), GFP_KERNEL); n 288 fs/nfs/callback_xdr.c for (i = 0; i < n; i++) { n 2268 fs/nfs/dir.c struct rb_node *n; n 2272 fs/nfs/dir.c while ((n = rb_first(root_node)) != NULL) { n 2273 fs/nfs/dir.c entry = rb_entry(n, struct nfs_access_entry, rb_node); n 2274 fs/nfs/dir.c rb_erase(n, root_node); n 2301 fs/nfs/dir.c struct rb_node *n = NFS_I(inode)->access_cache.rb_node; n 2303 fs/nfs/dir.c while (n != NULL) { n 2305 fs/nfs/dir.c rb_entry(n, struct nfs_access_entry, rb_node); n 2309 fs/nfs/dir.c n = n->rb_left; n 2311 fs/nfs/dir.c n = n->rb_right; n 62 fs/nfs/flexfilelayout/flexfilelayout.c struct nfs4_ff_layout_ds_err *err, *n; n 64 fs/nfs/flexfilelayout/flexfilelayout.c list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list, n 2208 fs/nfs/flexfilelayout/flexfilelayout.c size_t n = 0; n 2211 fs/nfs/flexfilelayout/flexfilelayout.c errors[n].offset = pos->offset; n 2212 fs/nfs/flexfilelayout/flexfilelayout.c errors[n].length = pos->length; n 2213 fs/nfs/flexfilelayout/flexfilelayout.c 
nfs4_stateid_copy(&errors[n].stateid, &pos->stateid); n 2214 fs/nfs/flexfilelayout/flexfilelayout.c errors[n].errors[0].dev_id = pos->deviceid; n 2215 fs/nfs/flexfilelayout/flexfilelayout.c errors[n].errors[0].status = pos->status; n 2216 fs/nfs/flexfilelayout/flexfilelayout.c errors[n].errors[0].opnum = pos->opnum; n 2217 fs/nfs/flexfilelayout/flexfilelayout.c n++; n 2219 fs/nfs/flexfilelayout/flexfilelayout.c n < NFS42_LAYOUTERROR_MAX) n 2221 fs/nfs/flexfilelayout/flexfilelayout.c if (nfs42_proc_layouterror(lseg, errors, n) < 0) n 2223 fs/nfs/flexfilelayout/flexfilelayout.c n = 0; n 513 fs/nfs/flexfilelayout/flexfilelayoutdev.c struct nfs4_ff_layout_ds_err *err, *n; n 517 fs/nfs/flexfilelayout/flexfilelayoutdev.c list_for_each_entry_safe(err, n, &flo->error_list, list) { n 25 fs/nfs/nfs42.h size_t n); n 796 fs/nfs/nfs42proc.c const struct nfs42_layout_error *errors, size_t n) n 813 fs/nfs/nfs42proc.c if (n > NFS42_LAYOUTERROR_MAX) n 818 fs/nfs/nfs42proc.c for (i = 0; i < n; i++) { n 37 fs/nfs/nfs4namespace.c int n; n 42 fs/nfs/nfs4namespace.c n = pathname->ncomponents; n 43 fs/nfs/nfs4namespace.c while (--n >= 0) { n 44 fs/nfs/nfs4namespace.c const struct nfs4_string *component = &pathname->components[n]; n 969 fs/nfs/nfs4xdr.c static void encode_uint32(struct xdr_stream *xdr, u32 n) n 971 fs/nfs/nfs4xdr.c WARN_ON_ONCE(xdr_stream_encode_u32(xdr, n) < 0); n 974 fs/nfs/nfs4xdr.c static void encode_uint64(struct xdr_stream *xdr, u64 n) n 976 fs/nfs/nfs4xdr.c WARN_ON_ONCE(xdr_stream_encode_u64(xdr, n) < 0); n 3618 fs/nfs/nfs4xdr.c u32 n; n 3625 fs/nfs/nfs4xdr.c n = be32_to_cpup(p); n 3626 fs/nfs/nfs4xdr.c if (n == 0) n 3629 fs/nfs/nfs4xdr.c if (n > NFS4_PATHNAME_MAXCOMPONENTS) { n 3630 fs/nfs/nfs4xdr.c dprintk("cannot parse %d components in path\n", n); n 3633 fs/nfs/nfs4xdr.c for (path->ncomponents = 0; path->ncomponents < n; path->ncomponents++) { n 3640 fs/nfs/nfs4xdr.c (path->ncomponents != n ? 
"/ " : ""), n 3660 fs/nfs/nfs4xdr.c int n; n 3681 fs/nfs/nfs4xdr.c n = be32_to_cpup(p); n 3682 fs/nfs/nfs4xdr.c if (n <= 0) n 3684 fs/nfs/nfs4xdr.c for (res->nlocations = 0; res->nlocations < n; res->nlocations++) { n 138 fs/nfs/nfsroot.c size_t n = strlen(line) + sizeof(NFS_ROOT) - 1; n 139 fs/nfs/nfsroot.c if (n >= sizeof(nfs_root_parms)) n 204 fs/nfsd/nfs2acl.c int n; n 215 fs/nfsd/nfs2acl.c n = nfsacl_decode(&rqstp->rq_arg, base, NULL, n 218 fs/nfsd/nfs2acl.c if (n > 0) n 219 fs/nfsd/nfs2acl.c n = nfsacl_decode(&rqstp->rq_arg, base + n, NULL, n 222 fs/nfsd/nfs2acl.c return (n > 0); n 268 fs/nfsd/nfs2acl.c int n; n 295 fs/nfsd/nfs2acl.c n = nfsacl_encode(&rqstp->rq_res, base, inode, n 298 fs/nfsd/nfs2acl.c if (n > 0) n 299 fs/nfsd/nfs2acl.c n = nfsacl_encode(&rqstp->rq_res, base + n, inode, n 303 fs/nfsd/nfs2acl.c return (n > 0); n 146 fs/nfsd/nfs3acl.c int n; n 157 fs/nfsd/nfs3acl.c n = nfsacl_decode(&rqstp->rq_arg, base, NULL, n 160 fs/nfsd/nfs3acl.c if (n > 0) n 161 fs/nfsd/nfs3acl.c n = nfsacl_decode(&rqstp->rq_arg, base + n, NULL, n 164 fs/nfsd/nfs3acl.c return (n > 0); n 182 fs/nfsd/nfs3acl.c int n; n 199 fs/nfsd/nfs3acl.c n = nfsacl_encode(&rqstp->rq_res, base, inode, n 202 fs/nfsd/nfs3acl.c if (n > 0) n 203 fs/nfsd/nfs3acl.c n = nfsacl_encode(&rqstp->rq_res, base + n, inode, n 207 fs/nfsd/nfs3acl.c if (n <= 0) n 435 fs/nfsd/nfs4acl.c int n; n 511 fs/nfsd/nfs4acl.c if (!state->users->n && !state->groups->n) n 514 fs/nfsd/nfs4acl.c nace = 4 + state->users->n + state->groups->n; n 523 fs/nfsd/nfs4acl.c for (i=0; i < state->users->n; i++) { n 537 fs/nfsd/nfs4acl.c for (i=0; i < state->groups->n; i++) { n 546 fs/nfsd/nfs4acl.c if (state->users->n || state->groups->n) { n 576 fs/nfsd/nfs4acl.c for (i = 0; i < a->n; i++) n 580 fs/nfsd/nfs4acl.c a->n++; n 593 fs/nfsd/nfs4acl.c for (i = 0; i < a->n; i++) n 597 fs/nfsd/nfs4acl.c a->n++; n 609 fs/nfsd/nfs4acl.c for (i=0; i < a->n; i++) n 617 fs/nfsd/nfs4acl.c for (i=0; i < a->n; i++) n 386 fs/nfsd/nfs4layouts.c struct nfs4_layout_stateid *l, *n; n 391 fs/nfsd/nfs4layouts.c list_for_each_entry_safe(l, n, &fp->fi_lo_states, ls_perfile) { n 494 fs/nfsd/nfs4layouts.c struct nfs4_layout *lp, *n; n 508 fs/nfsd/nfs4layouts.c list_for_each_entry_safe(lp, n, &ls->ls_layouts, lo_perstate) { n 536 fs/nfsd/nfs4layouts.c struct nfs4_layout_stateid *ls, *n; n 544 fs/nfsd/nfs4layouts.c list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) { n 579 fs/nfsd/nfs4layouts.c struct nfs4_layout_stateid *ls, *n; n 583 fs/nfsd/nfs4layouts.c list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) n 593 fs/nfsd/nfs4layouts.c struct nfs4_layout_stateid *ls, *n; n 597 fs/nfsd/nfs4layouts.c list_for_each_entry_safe(ls, n, &fp->fi_lo_states, ls_perfile) { n 781 fs/nfsd/nfs4layouts.c struct nfsd4_deviceid_map *map, *n; n 783 fs/nfsd/nfs4layouts.c list_for_each_entry_safe(map, n, &nfsd_devid_hash[i], hash) n 872 fs/nfsd/nfs4state.c long n; n 875 fs/nfsd/nfs4state.c n = atomic_long_inc_return(&num_delegations); n 876 fs/nfsd/nfs4state.c if (n < 0 || n > max_delegations) n 83 fs/nfsd/nfsd.h int nfsd_get_nrthreads(int n, int *, struct net *); n 84 fs/nfsd/nfsd.h int nfsd_set_nrthreads(int n, int *, struct net *); n 298 fs/nfsd/nfsd.h #define isdotent(n, l) (l < 3 && n[0] == '.' 
&& (l == 1 || n[1] == '.')) n 641 fs/nfsd/nfssvc.c int nfsd_get_nrthreads(int n, int *nthreads, struct net *net) n 647 fs/nfsd/nfssvc.c for (i = 0; i < nn->nfsd_serv->sv_nrpools && i < n; i++) n 666 fs/nfsd/nfssvc.c int nfsd_set_nrthreads(int n, int *nthreads, struct net *net) n 675 fs/nfsd/nfssvc.c if (nn->nfsd_serv == NULL || n <= 0) n 678 fs/nfsd/nfssvc.c if (n > nn->nfsd_serv->sv_nrpools) n 679 fs/nfsd/nfssvc.c n = nn->nfsd_serv->sv_nrpools; n 683 fs/nfsd/nfssvc.c for (i = 0; i < n; i++) { n 689 fs/nfsd/nfssvc.c for (i = 0; i < n && tot > 0; i++) { n 694 fs/nfsd/nfssvc.c for (i = 0; i < n && tot > 0; i++) { n 709 fs/nfsd/nfssvc.c for (i = 0; i < n; i++) { n 44 fs/nfsd/xdr4.h #define XDR_LEN(n) (((n) + 3) & ~3) n 149 fs/nilfs2/alloc.c spinlock_t *lock, u32 n) n 154 fs/nilfs2/alloc.c le32_add_cpu(&desc->pg_nfrees, n); n 186 fs/nilfs2/alloc.c unsigned long n = nilfs_palloc_groups_per_desc_block(inode); n 190 fs/nilfs2/alloc.c while (n-- > 0) { n 507 fs/nilfs2/alloc.c unsigned long n, entries_per_group; n 517 fs/nilfs2/alloc.c for (i = 0; i < ngroups; i += n) { n 530 fs/nilfs2/alloc.c n = nilfs_palloc_rest_groups_in_desc_block(inode, group, n 532 fs/nilfs2/alloc.c for (j = 0; j < n; j++, desc++, group++) { n 741 fs/nilfs2/alloc.c int nempties = 0, n = 0; n 771 fs/nilfs2/alloc.c n++; n 820 fs/nilfs2/alloc.c nfree = nilfs_palloc_group_desc_add_entries(desc, lock, n); n 102 fs/nilfs2/bmap.c int ret, n; n 107 fs/nilfs2/bmap.c n = bmap->b_ops->bop_gather_data( n 109 fs/nilfs2/bmap.c if (n < 0) n 110 fs/nilfs2/bmap.c return n; n 112 fs/nilfs2/bmap.c bmap, key, ptr, keys, ptrs, n); n 157 fs/nilfs2/bmap.c int ret, n; n 162 fs/nilfs2/bmap.c n = bmap->b_ops->bop_gather_data( n 164 fs/nilfs2/bmap.c if (n < 0) n 165 fs/nilfs2/bmap.c return n; n 167 fs/nilfs2/bmap.c bmap, key, keys, ptrs, n); n 184 fs/nilfs2/btree.c int n, int lncmax, int rncmax) n 198 fs/nilfs2/btree.c memcpy(ldkeys + lnchildren, rdkeys, n * sizeof(*rdkeys)); n 199 fs/nilfs2/btree.c memcpy(ldptrs + lnchildren, rdptrs, n * sizeof(*rdptrs)); n 200 fs/nilfs2/btree.c memmove(rdkeys, rdkeys + n, (rnchildren - n) * sizeof(*rdkeys)); n 201 fs/nilfs2/btree.c memmove(rdptrs, rdptrs + n, (rnchildren - n) * sizeof(*rdptrs)); n 203 fs/nilfs2/btree.c lnchildren += n; n 204 fs/nilfs2/btree.c rnchildren -= n; n 212 fs/nilfs2/btree.c int n, int lncmax, int rncmax) n 226 fs/nilfs2/btree.c memmove(rdkeys + n, rdkeys, rnchildren * sizeof(*rdkeys)); n 227 fs/nilfs2/btree.c memmove(rdptrs + n, rdptrs, rnchildren * sizeof(*rdptrs)); n 228 fs/nilfs2/btree.c memcpy(rdkeys, ldkeys + lnchildren - n, n * sizeof(*rdkeys)); n 229 fs/nilfs2/btree.c memcpy(rdptrs, ldptrs + lnchildren - n, n * sizeof(*rdptrs)); n 231 fs/nilfs2/btree.c lnchildren -= n; n 232 fs/nilfs2/btree.c rnchildren += n; n 487 fs/nilfs2/btree.c int i, n; n 491 fs/nilfs2/btree.c for (n = ra->max_ra_blocks, i = ra->index + 1; n 492 fs/nilfs2/btree.c n > 0 && i < ra->ncmax; n--, i++) { n 830 fs/nilfs2/btree.c int nchildren, lnchildren, n, move, ncblk; n 839 fs/nilfs2/btree.c n = (nchildren + lnchildren + 1) / 2 - lnchildren; n 840 fs/nilfs2/btree.c if (n > path[level].bp_index) { n 842 fs/nilfs2/btree.c n--; n 846 fs/nilfs2/btree.c nilfs_btree_node_move_left(left, node, n, ncblk, ncblk); n 865 fs/nilfs2/btree.c path[level].bp_index -= n; n 876 fs/nilfs2/btree.c int nchildren, rnchildren, n, move, ncblk; n 885 fs/nilfs2/btree.c n = (nchildren + rnchildren + 1) / 2 - rnchildren; n 886 fs/nilfs2/btree.c if (n > nchildren - path[level].bp_index) { n 888 fs/nilfs2/btree.c n--; n 892 fs/nilfs2/btree.c 
nilfs_btree_node_move_right(node, right, n, ncblk, ncblk); n 923 fs/nilfs2/btree.c int nchildren, n, move, ncblk; n 931 fs/nilfs2/btree.c n = (nchildren + 1) / 2; n 932 fs/nilfs2/btree.c if (n > nchildren - path[level].bp_index) { n 933 fs/nilfs2/btree.c n--; n 937 fs/nilfs2/btree.c nilfs_btree_node_move_right(node, right, n, ncblk, ncblk); n 973 fs/nilfs2/btree.c int n, ncblk; n 979 fs/nilfs2/btree.c n = nilfs_btree_node_get_nchildren(root); n 981 fs/nilfs2/btree.c nilfs_btree_node_move_right(root, child, n, n 1278 fs/nilfs2/btree.c int nchildren, lnchildren, n, ncblk; n 1288 fs/nilfs2/btree.c n = (nchildren + lnchildren) / 2 - nchildren; n 1290 fs/nilfs2/btree.c nilfs_btree_node_move_right(left, node, n, ncblk, ncblk); n 1302 fs/nilfs2/btree.c path[level].bp_index += n; n 1310 fs/nilfs2/btree.c int nchildren, rnchildren, n, ncblk; n 1320 fs/nilfs2/btree.c n = (nchildren + rnchildren) / 2 - nchildren; n 1322 fs/nilfs2/btree.c nilfs_btree_node_move_left(node, right, n, ncblk, ncblk); n 1343 fs/nilfs2/btree.c int n, ncblk; n 1351 fs/nilfs2/btree.c n = nilfs_btree_node_get_nchildren(node); n 1353 fs/nilfs2/btree.c nilfs_btree_node_move_left(left, node, n, ncblk, ncblk); n 1369 fs/nilfs2/btree.c int n, ncblk; n 1377 fs/nilfs2/btree.c n = nilfs_btree_node_get_nchildren(right); n 1379 fs/nilfs2/btree.c nilfs_btree_node_move_left(node, right, n, ncblk, ncblk); n 1394 fs/nilfs2/btree.c int n, ncblk; n 1405 fs/nilfs2/btree.c n = nilfs_btree_node_get_nchildren(child); n 1406 fs/nilfs2/btree.c nilfs_btree_node_move_left(root, child, n, n 1782 fs/nilfs2/btree.c int n, n 1809 fs/nilfs2/btree.c nilfs_btree_node_init(node, 0, 1, n, ncblk, keys, ptrs); n 1810 fs/nilfs2/btree.c nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr, ncblk); n 1829 fs/nilfs2/btree.c nilfs_btree_node_init(node, NILFS_BTREE_NODE_ROOT, 1, n, n 1832 fs/nilfs2/btree.c nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr, n 1853 fs/nilfs2/btree.c const __u64 *keys, const __u64 *ptrs, int n) n 1860 fs/nilfs2/btree.c if (n + 1 <= NILFS_BTREE_ROOT_NCHILDREN_MAX) { n 1863 fs/nilfs2/btree.c } else if ((n + 1) <= NILFS_BTREE_NODE_NCHILDREN_MAX( n 1877 fs/nilfs2/btree.c nilfs_btree_commit_convert_and_insert(btree, key, ptr, keys, ptrs, n, n 72 fs/nilfs2/cpfile.c unsigned int n) n 77 fs/nilfs2/cpfile.c count = le32_to_cpu(cp->cp_checkpoints_count) + n; n 86 fs/nilfs2/cpfile.c unsigned int n) n 91 fs/nilfs2/cpfile.c WARN_ON(le32_to_cpu(cp->cp_checkpoints_count) < n); n 92 fs/nilfs2/cpfile.c count = le32_to_cpu(cp->cp_checkpoints_count) - n; n 120 fs/nilfs2/cpfile.c int n = nilfs_cpfile_checkpoints_per_block(cpfile); n 122 fs/nilfs2/cpfile.c while (n-- > 0) { n 433 fs/nilfs2/cpfile.c int n, ret; n 440 fs/nilfs2/cpfile.c for (n = 0; n < nci; cno += ncps) { n 452 fs/nilfs2/cpfile.c for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) { n 457 fs/nilfs2/cpfile.c n++; n 464 fs/nilfs2/cpfile.c ret = n; n 465 fs/nilfs2/cpfile.c if (n > 0) { n 486 fs/nilfs2/cpfile.c int n = 0, ret; n 516 fs/nilfs2/cpfile.c while (n < nci) { n 524 fs/nilfs2/cpfile.c n++; n 547 fs/nilfs2/cpfile.c ret = n; n 428 fs/nilfs2/dat.c int i, j, n, ret; n 430 fs/nilfs2/dat.c for (i = 0; i < nvi; i += n) { n 441 fs/nilfs2/dat.c for (j = i, n = 0; n 444 fs/nilfs2/dat.c j++, n++, vinfo = (void *)vinfo + visz) { n 189 fs/nilfs2/dir.c static struct page *nilfs_get_page(struct inode *dir, unsigned long n) n 192 fs/nilfs2/dir.c struct page *page = read_mapping_page(mapping, n, NULL); n 269 fs/nilfs2/dir.c unsigned long n = pos >> PAGE_SHIFT; n 275 fs/nilfs2/dir.c for ( ; n < 
npages; n++, offset = 0) { n 278 fs/nilfs2/dir.c struct page *page = nilfs_get_page(inode, n); n 287 fs/nilfs2/dir.c limit = kaddr + nilfs_last_byte(inode, n) - n 331 fs/nilfs2/dir.c unsigned long start, n; n 346 fs/nilfs2/dir.c n = start; n 350 fs/nilfs2/dir.c page = nilfs_get_page(dir, n); n 354 fs/nilfs2/dir.c kaddr += nilfs_last_byte(dir, n) - reclen; n 368 fs/nilfs2/dir.c if (++n >= npages) n 369 fs/nilfs2/dir.c n = 0; n 371 fs/nilfs2/dir.c if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) { n 378 fs/nilfs2/dir.c } while (n != start); n 384 fs/nilfs2/dir.c ei->i_dir_start_lookup = n; n 449 fs/nilfs2/dir.c unsigned long n; n 459 fs/nilfs2/dir.c for (n = 0; n <= npages; n++) { n 462 fs/nilfs2/dir.c page = nilfs_get_page(dir, n); n 468 fs/nilfs2/dir.c dir_end = kaddr + nilfs_last_byte(dir, n); n 207 fs/nilfs2/direct.c int n; n 211 fs/nilfs2/direct.c n = 0; n 215 fs/nilfs2/direct.c keys[n] = key; n 216 fs/nilfs2/direct.c ptrs[n] = ptr; n 217 fs/nilfs2/direct.c n++; n 220 fs/nilfs2/direct.c return n; n 224 fs/nilfs2/direct.c __u64 key, __u64 *keys, __u64 *ptrs, int n) n 243 fs/nilfs2/direct.c if ((j < n) && (i == keys[j])) { n 41 fs/nilfs2/inode.c void nilfs_inode_add_blocks(struct inode *inode, int n) n 45 fs/nilfs2/inode.c inode_add_bytes(inode, i_blocksize(inode) * n); n 47 fs/nilfs2/inode.c atomic64_add(n, &root->blocks_count); n 50 fs/nilfs2/inode.c void nilfs_inode_sub_blocks(struct inode *inode, int n) n 54 fs/nilfs2/inode.c inode_sub_bytes(inode, i_blocksize(inode) * n); n 56 fs/nilfs2/inode.c atomic64_sub(n, &root->blocks_count); n 1006 fs/nilfs2/inode.c int ret, n; n 1059 fs/nilfs2/inode.c n = nilfs_bmap_lookup_contig( n 1063 fs/nilfs2/inode.c if (n < 0) { n 1066 fs/nilfs2/inode.c if (unlikely(n != -ENOENT)) n 1091 fs/nilfs2/inode.c size += n << blkbits; n 1104 fs/nilfs2/inode.c size = n << blkbits; n 1111 fs/nilfs2/inode.c size = n << blkbits; n 1113 fs/nilfs2/inode.c blkoff += n; n 54 fs/nilfs2/ioctl.c size_t maxmembs, total, n; n 81 fs/nilfs2/ioctl.c for (i = 0; i < argv->v_nmembs; i += n) { n 82 fs/nilfs2/ioctl.c n = (argv->v_nmembs - i < maxmembs) ? 
n 86 fs/nilfs2/ioctl.c argv->v_size * n)) { n 92 fs/nilfs2/ioctl.c n); n 104 fs/nilfs2/ioctl.c if ((size_t)nr < n) n 107 fs/nilfs2/ioctl.c pos += n; n 618 fs/nilfs2/ioctl.c struct buffer_head *bh, *n; n 657 fs/nilfs2/ioctl.c list_for_each_entry_safe(bh, n, &buffers, b_assoc_buffers) { n 669 fs/nilfs2/ioctl.c list_for_each_entry_safe(bh, n, &buffers, b_assoc_buffers) { n 874 fs/nilfs2/ioctl.c int n, ret; n 907 fs/nilfs2/ioctl.c for (n = 0; n < 4; n++) { n 909 fs/nilfs2/ioctl.c if (argv[n].v_size != argsz[n]) n 912 fs/nilfs2/ioctl.c if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment) n 915 fs/nilfs2/ioctl.c if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size) n 918 fs/nilfs2/ioctl.c len = argv[n].v_size * argv[n].v_nmembs; n 919 fs/nilfs2/ioctl.c base = (void __user *)(unsigned long)argv[n].v_base; n 921 fs/nilfs2/ioctl.c kbufs[n] = NULL; n 925 fs/nilfs2/ioctl.c kbufs[n] = vmalloc(len); n 926 fs/nilfs2/ioctl.c if (!kbufs[n]) { n 930 fs/nilfs2/ioctl.c if (copy_from_user(kbufs[n], base, len)) { n 932 fs/nilfs2/ioctl.c vfree(kbufs[n]); n 963 fs/nilfs2/ioctl.c while (--n >= 0) n 964 fs/nilfs2/ioctl.c vfree(kbufs[n]); n 580 fs/nilfs2/mdt.c int n; n 585 fs/nilfs2/mdt.c n = bh_offset(bh) >> inode->i_blkbits; n 586 fs/nilfs2/mdt.c bh_frozen = nilfs_page_get_nth_block(page, n); n 252 fs/nilfs2/nilfs.h void nilfs_inode_add_blocks(struct inode *inode, int n); n 253 fs/nilfs2/nilfs.h void nilfs_inode_sub_blocks(struct inode *inode, int n); n 299 fs/nilfs2/page.c unsigned int i, n; n 304 fs/nilfs2/page.c n = pagevec_lookup(&pvec, smap, &index); n 305 fs/nilfs2/page.c if (!n) n 420 fs/nilfs2/recovery.c struct nilfs_segment_entry *ent, *n; n 449 fs/nilfs2/recovery.c list_for_each_entry_safe(ent, n, head, list) { n 498 fs/nilfs2/recovery.c struct nilfs_recovery_block *rb, *n; n 504 fs/nilfs2/recovery.c list_for_each_entry_safe(rb, n, head, list) { n 243 fs/nilfs2/segbuf.c struct buffer_head *bh, *n; n 245 fs/nilfs2/segbuf.c list_for_each_entry_safe(bh, n, list, b_assoc_buffers) { n 272 fs/nilfs2/segbuf.c struct nilfs_segment_buffer *n, *segbuf; n 275 fs/nilfs2/segbuf.c list_for_each_entry_safe_continue(segbuf, n, logs, sb_list) { n 771 fs/nilfs2/segment.c struct nilfs_inode_info *ii, *n; n 777 fs/nilfs2/segment.c list_for_each_entry_safe(ii, n, head, i_dirty) { n 1008 fs/nilfs2/segment.c struct buffer_head *bh, *n; n 1012 fs/nilfs2/segment.c list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) { n 1048 fs/nilfs2/segment.c size_t n, rest = nilfs_segctor_buffer_rest(sci); n 1050 fs/nilfs2/segment.c n = nilfs_lookup_dirty_data_buffers( n 1052 fs/nilfs2/segment.c if (n > rest) { n 1096 fs/nilfs2/segment.c size_t n, rest = nilfs_segctor_buffer_rest(sci); n 1099 fs/nilfs2/segment.c n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1, n 1107 fs/nilfs2/segment.c BUG_ON(n > rest); n 1929 fs/nilfs2/segment.c struct nilfs_inode_info *ii, *n; n 1934 fs/nilfs2/segment.c list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) { n 1972 fs/nilfs2/segment.c struct nilfs_inode_info *ii, *n; n 1977 fs/nilfs2/segment.c list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) { n 2194 fs/nilfs2/segment.c struct nilfs_segctor_wait_request *wrq, *n; n 2198 fs/nilfs2/segment.c list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) { n 2406 fs/nilfs2/segment.c struct nilfs_inode_info *ii, *n; n 2408 fs/nilfs2/segment.c list_for_each_entry_safe(ii, n, head, i_dirty) { n 165 fs/nilfs2/sufile.c size_t nerr = 0, n = 0; n 214 fs/nilfs2/sufile.c n = seg - segnumv; n 220 
fs/nilfs2/sufile.c *ndone = n; n 652 fs/nilfs2/sufile.c ssize_t n, nc; n 669 fs/nilfs2/sufile.c for (segnum = start; segnum <= end; segnum += n) { n 670 fs/nilfs2/sufile.c n = min_t(unsigned long, n 686 fs/nilfs2/sufile.c for (j = 0; j < n; j++, su = (void *)su + susz) { n 697 fs/nilfs2/sufile.c for (su = su2, j = 0; j < n; j++, su = (void *)su + susz) { n 710 fs/nilfs2/sufile.c if (n == segusages_per_block) { n 822 fs/nilfs2/sufile.c ssize_t n; n 831 fs/nilfs2/sufile.c for (i = 0; i < nsegs; i += n, segnum += n) { n 832 fs/nilfs2/sufile.c n = min_t(unsigned long, n 842 fs/nilfs2/sufile.c memset(si, 0, sisz * n); n 843 fs/nilfs2/sufile.c si = (void *)si + sisz * n; n 850 fs/nilfs2/sufile.c for (j = 0; j < n; n 1023 fs/nilfs2/sufile.c size_t n, i, susz = NILFS_MDT(sufile)->mi_entry_size; n 1058 fs/nilfs2/sufile.c n = nilfs_sufile_segment_usages_in_block(sufile, segnum, n 1067 fs/nilfs2/sufile.c segnum += n; n 1074 fs/nilfs2/sufile.c for (i = 0; i < n; ++i, ++segnum, su = (void *)su + susz) { n 718 fs/nilfs2/the_nilfs.c struct rb_node *n; n 722 fs/nilfs2/the_nilfs.c n = nilfs->ns_cptree.rb_node; n 723 fs/nilfs2/the_nilfs.c while (n) { n 724 fs/nilfs2/the_nilfs.c root = rb_entry(n, struct nilfs_root, rb_node); n 727 fs/nilfs2/the_nilfs.c n = n->rb_left; n 729 fs/nilfs2/the_nilfs.c n = n->rb_right; n 359 fs/nilfs2/the_nilfs.h static inline int nilfs_segment_is_active(struct the_nilfs *nilfs, __u64 n) n 361 fs/nilfs2/the_nilfs.h return n == nilfs->ns_segnum || n == nilfs->ns_nextnum; n 11054 fs/nls/nls_cp936.c int n; n 11076 fs/nls/nls_cp936.c n = 2; n 11083 fs/nls/nls_cp936.c n = 1; n 11085 fs/nls/nls_cp936.c return n; n 13867 fs/nls/nls_cp949.c int n; n 13881 fs/nls/nls_cp949.c n = 2; n 13884 fs/nls/nls_cp949.c n = 1; n 13889 fs/nls/nls_cp949.c return n; n 13897 fs/nls/nls_cp949.c int n; n 13915 fs/nls/nls_cp949.c n = 2; n 13918 fs/nls/nls_cp949.c n = 1; n 13920 fs/nls/nls_cp949.c return n; n 9403 fs/nls/nls_cp950.c int n; n 9417 fs/nls/nls_cp950.c n = 2; n 9420 fs/nls/nls_cp950.c n = 1; n 9425 fs/nls/nls_cp950.c return n; n 9433 fs/nls/nls_cp950.c int n; n 9451 fs/nls/nls_cp950.c n = 2; n 9454 fs/nls/nls_cp950.c n = 1; n 9456 fs/nls/nls_cp950.c return n; n 353 fs/nls/nls_euc-jp.c int n; n 366 fs/nls/nls_euc-jp.c if ((n = euc2sjisibm_g3upper(sjis, euc_hi, euc_lo))) { n 367 fs/nls/nls_euc-jp.c return n; n 368 fs/nls/nls_euc-jp.c } else if ((n = euc2sjisibm_jisx0212(sjis, euc_hi, euc_lo))) { n 369 fs/nls/nls_euc-jp.c return n; n 412 fs/nls/nls_euc-jp.c int n; n 416 fs/nls/nls_euc-jp.c if ((n = p_nls->uni2char(uni, out, boundlen)) < 0) n 417 fs/nls/nls_euc-jp.c return n; n 420 fs/nls/nls_euc-jp.c if (n == 1) { n 430 fs/nls/nls_euc-jp.c } else if (n == 2) { n 444 fs/nls/nls_euc-jp.c n = 3; ch = out[0]; cl = out[1]; n 451 fs/nls/nls_euc-jp.c n = sjisibm2euc(euc, out[0], out[1]); n 452 fs/nls/nls_euc-jp.c if (boundlen < n) n 454 fs/nls/nls_euc-jp.c for (i = 0; i < n; i++) n 476 fs/nls/nls_euc-jp.c return n; n 483 fs/nls/nls_euc-jp.c int euc_offset, n; n 546 fs/nls/nls_euc-jp.c if ( (n = p_nls->char2uni(sjis_temp, sizeof(sjis_temp), uni)) < 0) n 547 fs/nls/nls_euc-jp.c return n; n 42 fs/nls/nls_koi8-ru.c int n; n 50 fs/nls/nls_koi8-ru.c n = p_nls->char2uni(rawstring, boundlen, uni); n 51 fs/nls/nls_koi8-ru.c return n; n 16 fs/nls/nls_utf8.c int n; n 21 fs/nls/nls_utf8.c n = utf32_to_utf8(uni, out, boundlen); n 22 fs/nls/nls_utf8.c if (n < 0) { n 26 fs/nls/nls_utf8.c return n; n 31 fs/nls/nls_utf8.c int n; n 34 fs/nls/nls_utf8.c n = utf8_to_utf32(rawstring, boundlen, &u); n 35 fs/nls/nls_utf8.c if (n 
< 0 || u > MAX_WCHAR_T) { n 40 fs/nls/nls_utf8.c return n; n 102 fs/ntfs/ntfs.h extern int ntfs_ucsncmp(const ntfschar *s1, const ntfschar *s2, size_t n); n 103 fs/ntfs/ntfs.h extern int ntfs_ucsncasecmp(const ntfschar *s1, const ntfschar *s2, size_t n, n 1070 fs/ntfs/runlist.c static inline int ntfs_get_nr_significant_bytes(const s64 n) n 1072 fs/ntfs/runlist.c s64 l = n; n 1081 fs/ntfs/runlist.c j = (n >> 8 * (i - 1)) & 0xff; n 1083 fs/ntfs/runlist.c if ((n < 0 && j >= 0) || (n > 0 && j < 0)) n 1239 fs/ntfs/runlist.c const s64 n) n 1241 fs/ntfs/runlist.c s64 l = n; n 1253 fs/ntfs/runlist.c j = (n >> 8 * (i - 1)) & 0xff; n 1255 fs/ntfs/runlist.c if (n < 0 && j >= 0) { n 1260 fs/ntfs/runlist.c } else if (n > 0 && j < 0) { n 135 fs/ntfs/unistr.c int ntfs_ucsncmp(const ntfschar *s1, const ntfschar *s2, size_t n) n 140 fs/ntfs/unistr.c for (i = 0; i < n; ++i) { n 171 fs/ntfs/unistr.c int ntfs_ucsncasecmp(const ntfschar *s1, const ntfschar *s2, size_t n, n 177 fs/ntfs/unistr.c for (i = 0; i < n; ++i) { n 37 fs/ocfs2/acl.c int n, count; n 50 fs/ocfs2/acl.c for (n = 0; n < count; n++) { n 54 fs/ocfs2/acl.c acl->a_entries[n].e_tag = le16_to_cpu(entry->e_tag); n 55 fs/ocfs2/acl.c acl->a_entries[n].e_perm = le16_to_cpu(entry->e_perm); n 56 fs/ocfs2/acl.c switch(acl->a_entries[n].e_tag) { n 58 fs/ocfs2/acl.c acl->a_entries[n].e_uid = n 63 fs/ocfs2/acl.c acl->a_entries[n].e_gid = n 83 fs/ocfs2/acl.c size_t n; n 92 fs/ocfs2/acl.c for (n = 0; n < acl->a_count; n++, entry++) { n 93 fs/ocfs2/acl.c entry->e_tag = cpu_to_le16(acl->a_entries[n].e_tag); n 94 fs/ocfs2/acl.c entry->e_perm = cpu_to_le16(acl->a_entries[n].e_perm); n 95 fs/ocfs2/acl.c switch(acl->a_entries[n].e_tag) { n 99 fs/ocfs2/acl.c acl->a_entries[n].e_uid)); n 104 fs/ocfs2/acl.c acl->a_entries[n].e_gid)); n 872 fs/ocfs2/cluster/tcp.c struct o2net_msg_handler *nmh, *n; n 875 fs/ocfs2/cluster/tcp.c list_for_each_entry_safe(nmh, n, list, nh_unregister_item) { n 214 fs/ocfs2/dir.c int n = 16; n 220 fs/ocfs2/dir.c } while (--n); n 407 fs/ocfs2/dlm/dlmdomain.c int i, num, n, ret = 0; n 419 fs/ocfs2/dlm/dlmdomain.c n = 0; n 423 fs/ocfs2/dlm/dlmdomain.c n++; n 446 fs/ocfs2/dlm/dlmdomain.c num += n; n 694 fs/ocfs2/dlm/dlmthread.c int n = DLM_THREAD_MAX_DIRTY; n 790 fs/ocfs2/dlm/dlmthread.c if (!--n) { n 801 fs/ocfs2/dlm/dlmthread.c if (!n) { n 104 fs/ocfs2/extent_map.c struct ocfs2_extent_map_item *emi, *n; n 111 fs/ocfs2/extent_map.c list_for_each_entry_safe(emi, n, &em->em_list, ei_list) { n 128 fs/ocfs2/extent_map.c list_for_each_entry_safe(emi, n, &tmp_list, ei_list) { n 1224 fs/ocfs2/journal.c struct ocfs2_la_recovery_item *item, *n; n 1236 fs/ocfs2/journal.c list_for_each_entry_safe(item, n, &tmp_la_list, lri_list) { n 581 fs/ocfs2/ocfs2.h static inline void ocfs2_add_links_count(struct ocfs2_dinode *di, int n) n 585 fs/ocfs2/ocfs2.h links += n; n 196 fs/ocfs2/refcounttree.c struct rb_node *n = osb->osb_rf_lock_tree.rb_node; n 199 fs/ocfs2/refcounttree.c while (n) { n 200 fs/ocfs2/refcounttree.c tree = rb_entry(n, struct ocfs2_refcount_tree, rf_node); n 203 fs/ocfs2/refcounttree.c n = n->rb_left; n 205 fs/ocfs2/refcounttree.c n = n->rb_right; n 212 fs/ocfs2/uptodate.c struct rb_node * n = ci->ci_cache.ci_tree.rb_node; n 215 fs/ocfs2/uptodate.c while (n) { n 216 fs/ocfs2/uptodate.c item = rb_entry(n, struct ocfs2_meta_cache_item, c_node); n 219 fs/ocfs2/uptodate.c n = n->rb_left; n 221 fs/ocfs2/uptodate.c n = n->rb_right; n 78 fs/openpromfs/inode.c int n = strlen(pval); n 83 fs/openpromfs/inode.c pval += n + 1; n 84 fs/openpromfs/inode.c len -= 
n + 1; n 205 fs/openpromfs/inode.c int n = strlen(node_name); n 207 fs/openpromfs/inode.c if (len == n && n 219 fs/openpromfs/inode.c int n = strlen(prop->name); n 221 fs/openpromfs/inode.c if (len == n && !strncmp(prop->name, name, len)) { n 87 fs/orangefs/orangefs-bufmap.c long n = left, t; n 98 fs/orangefs/orangefs-bufmap.c if (n > ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ) n 99 fs/orangefs/orangefs-bufmap.c n = ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ; n 102 fs/orangefs/orangefs-bufmap.c t = schedule_timeout(n); n 104 fs/orangefs/orangefs-bufmap.c if (unlikely(!t) && n != left && m->c < 0) n 107 fs/orangefs/orangefs-bufmap.c left = t + (left - n); n 502 fs/orangefs/orangefs-bufmap.c size_t n = size; n 503 fs/orangefs/orangefs-bufmap.c if (n > PAGE_SIZE) n 504 fs/orangefs/orangefs-bufmap.c n = PAGE_SIZE; n 505 fs/orangefs/orangefs-bufmap.c if (copy_page_from_iter(page, 0, n, iter) != n) n 507 fs/orangefs/orangefs-bufmap.c size -= n; n 531 fs/orangefs/orangefs-bufmap.c size_t n = size; n 532 fs/orangefs/orangefs-bufmap.c if (n > PAGE_SIZE) n 533 fs/orangefs/orangefs-bufmap.c n = PAGE_SIZE; n 534 fs/orangefs/orangefs-bufmap.c n = copy_page_to_iter(page, 0, n, iter); n 535 fs/orangefs/orangefs-bufmap.c if (!n) n 537 fs/orangefs/orangefs-bufmap.c size -= n; n 323 fs/orangefs/waitqueue.c long n; n 334 fs/orangefs/waitqueue.c n = wait_for_completion_io_timeout(&op->waitq, timeout); n 336 fs/orangefs/waitqueue.c n = wait_for_completion_interruptible_timeout(&op->waitq, n 339 fs/orangefs/waitqueue.c n = wait_for_completion_killable_timeout(&op->waitq, timeout); n 346 fs/orangefs/waitqueue.c if (unlikely(n < 0)) { n 608 fs/overlayfs/namei.c char *n, *s; n 610 fs/overlayfs/namei.c n = kcalloc(fh->len, 2, GFP_KERNEL); n 611 fs/overlayfs/namei.c if (!n) n 614 fs/overlayfs/namei.c s = bin2hex(n, fh, fh->len); n 615 fs/overlayfs/namei.c *name = (struct qstr) QSTR_INIT(n, s - n); n 61 fs/overlayfs/readdir.c static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n) n 63 fs/overlayfs/readdir.c return rb_entry(n, struct ovl_cache_entry, node); n 220 fs/overlayfs/readdir.c struct ovl_cache_entry *n; n 222 fs/overlayfs/readdir.c list_for_each_entry_safe(p, n, list, l_node) n 556 fs/overlayfs/readdir.c struct ovl_cache_entry *p, *n; n 571 fs/overlayfs/readdir.c list_for_each_entry_safe(p, n, list, l_node) { n 921 fs/overlayfs/readdir.c struct ovl_cache_entry *p, *n; n 933 fs/overlayfs/readdir.c list_for_each_entry_safe(p, n, list, l_node) { n 238 fs/pnode.c struct mount *n, *p; n 240 fs/pnode.c for (n = m; ; n = p) { n 241 fs/pnode.c p = n->mnt_master; n 250 fs/pnode.c if (done && peers(n, parent)) n 291 fs/pnode.c struct mount *m, *n; n 307 fs/pnode.c for (n = next_peer(dest_mnt); n != dest_mnt; n = next_peer(n)) { n 308 fs/pnode.c ret = propagate_one(n); n 317 fs/pnode.c n = m; n 319 fs/pnode.c ret = propagate_one(n); n 322 fs/pnode.c n = next_peer(n); n 323 fs/pnode.c } while (n != m); n 327 fs/pnode.c hlist_for_each_entry(n, tree_list, mnt_hash) { n 328 fs/pnode.c m = n->mnt_parent; n 799 fs/posix_acl.c int real_size, n; n 810 fs/posix_acl.c for (n=0; n < acl->a_count; n++, ext_entry++) { n 811 fs/posix_acl.c const struct posix_acl_entry *acl_e = &acl->a_entries[n]; n 156 fs/proc/base.c unsigned int n) n 162 fs/proc/base.c for (i = 0; i < n; ++i) { n 1372 fs/proc/base.c unsigned int n; n 1374 fs/proc/base.c err = kstrtouint_from_user(buf, count, 0, &n); n 1381 fs/proc/base.c task->fail_nth = n; n 203 fs/proc/task_nommu.c loff_t n = *pos; n 221 fs/proc/task_nommu.c if (n-- == 0) n 8 
fs/proc/util.c unsigned n = 0; n 16 fs/proc/util.c if (n >= (~0U-9)/10) n 18 fs/proc/util.c n *= 10; n 19 fs/proc/util.c n += c; n 21 fs/proc/util.c return n; n 27 fs/qnx6/dir.c static struct page *qnx6_get_page(struct inode *dir, unsigned long n) n 30 fs/qnx6/dir.c struct page *page = read_mapping_page(mapping, n, NULL); n 51 fs/qnx6/dir.c u32 n = s >> (PAGE_SHIFT - sb->s_blocksize_bits); /* in pages */ n 55 fs/qnx6/dir.c struct page *page = read_mapping_page(mapping, n, NULL); n 119 fs/qnx6/dir.c unsigned long n = pos >> PAGE_SHIFT; n 127 fs/qnx6/dir.c for ( ; !done && n < npages; n++, start = 0) { n 128 fs/qnx6/dir.c struct page *page = qnx6_get_page(inode, n); n 129 fs/qnx6/dir.c int limit = last_entry(inode, n); n 135 fs/qnx6/dir.c ctx->pos = (n + 1) << PAGE_SHIFT; n 219 fs/qnx6/dir.c unsigned long start, n; n 232 fs/qnx6/dir.c n = start; n 235 fs/qnx6/dir.c page = qnx6_get_page(dir, n); n 237 fs/qnx6/dir.c int limit = last_entry(dir, n); n 262 fs/qnx6/dir.c if (++n >= npages) n 263 fs/qnx6/dir.c n = 0; n 264 fs/qnx6/dir.c } while (n != start); n 269 fs/qnx6/dir.c ei->i_dir_start_lookup = n; n 530 fs/qnx6/inode.c u32 n, offs; n 548 fs/qnx6/inode.c n = (ino - 1) >> (PAGE_SHIFT - QNX6_INODE_SIZE_BITS); n 551 fs/qnx6/inode.c page = read_mapping_page(mapping, n, NULL); n 78 fs/qnx6/qnx6.h static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n) n 81 fs/qnx6/qnx6.h return le64_to_cpu((__force __le64)n); n 83 fs/qnx6/qnx6.h return be64_to_cpu((__force __be64)n); n 86 fs/qnx6/qnx6.h static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n) n 89 fs/qnx6/qnx6.h return (__force __fs64)cpu_to_le64(n); n 91 fs/qnx6/qnx6.h return (__force __fs64)cpu_to_be64(n); n 94 fs/qnx6/qnx6.h static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n) n 97 fs/qnx6/qnx6.h return le32_to_cpu((__force __le32)n); n 99 fs/qnx6/qnx6.h return be32_to_cpu((__force __be32)n); n 102 fs/qnx6/qnx6.h static inline __fs32 cpu_to_fs32(struct qnx6_sb_info *sbi, __u32 n) n 105 fs/qnx6/qnx6.h return (__force __fs32)cpu_to_le32(n); n 107 fs/qnx6/qnx6.h return (__force __fs32)cpu_to_be32(n); n 110 fs/qnx6/qnx6.h static inline __u16 fs16_to_cpu(struct qnx6_sb_info *sbi, __fs16 n) n 113 fs/qnx6/qnx6.h return le16_to_cpu((__force __le16)n); n 115 fs/qnx6/qnx6.h return be16_to_cpu((__force __be16)n); n 118 fs/qnx6/qnx6.h static inline __fs16 cpu_to_fs16(struct qnx6_sb_info *sbi, __u16 n) n 121 fs/qnx6/qnx6.h return (__force __fs16)cpu_to_le16(n); n 123 fs/qnx6/qnx6.h return (__force __fs16)cpu_to_be16(n); n 149 fs/reiserfs/do_balan.c int n = B_NR_ITEMS(tbS0); n 165 fs/reiserfs/do_balan.c leaf_move_items(LEAF_FROM_S_TO_L, tb, n, -1, n 178 fs/reiserfs/do_balan.c leaf_move_items(LEAF_FROM_S_TO_R, tb, n, -1, NULL); n 194 fs/reiserfs/do_balan.c leaf_shift_left(tb, n, -1); n 206 fs/reiserfs/do_balan.c RFALSE((tb->lnum[0] + tb->rnum[0] < n) || n 207 fs/reiserfs/do_balan.c (tb->lnum[0] + tb->rnum[0] > n + 1), n 210 fs/reiserfs/do_balan.c tb->rnum[0], tb->lnum[0], n); n 211 fs/reiserfs/do_balan.c RFALSE((tb->lnum[0] + tb->rnum[0] == n) && n 216 fs/reiserfs/do_balan.c RFALSE((tb->lnum[0] + tb->rnum[0] == n + 1) && n 243 fs/reiserfs/do_balan.c int n; n 267 fs/reiserfs/do_balan.c n = B_NR_ITEMS(tbS0); n 276 fs/reiserfs/do_balan.c leaf_shift_right(tb, n, -1); n 292 fs/reiserfs/do_balan.c int n = B_NR_ITEMS(tb->L[0]); n 313 fs/reiserfs/do_balan.c leaf_insert_into_buf(&bi, n + tb->item_pos - ret, ih, body, n 343 fs/reiserfs/do_balan.c leaf_insert_into_buf(&bi, n + tb->item_pos - ret, ih, body, n 355 
fs/reiserfs/do_balan.c int n = B_NR_ITEMS(tb->L[0]); n 380 fs/reiserfs/do_balan.c leaf_paste_in_buffer(&bi, n + tb->item_pos - ret, n 395 fs/reiserfs/do_balan.c leaf_paste_entries(&bi, n + tb->item_pos - ret, n 418 fs/reiserfs/do_balan.c int n = B_NR_ITEMS(tb->L[0]); n 461 fs/reiserfs/do_balan.c tbL0_ih = item_head(tb->L[0], n + tb->item_pos - ret); n 465 fs/reiserfs/do_balan.c leaf_paste_in_buffer(&bi, n + tb->item_pos - ret, n 478 fs/reiserfs/do_balan.c leaf_key(tb->L[0], n + tb->item_pos - ret)), n 537 fs/reiserfs/do_balan.c int n = B_NR_ITEMS(tb->L[0]); n 549 fs/reiserfs/do_balan.c pasted = item_head(tb->L[0], n - 1); n 564 fs/reiserfs/do_balan.c leaf_paste_in_buffer(&bi, n + tb->item_pos - ret, tb->pos_in_item, n 568 fs/reiserfs/do_balan.c pasted = item_head(tb->L[0], n + tb->item_pos - ret); n 570 fs/reiserfs/do_balan.c leaf_paste_entries(&bi, n + tb->item_pos - ret, n 627 fs/reiserfs/do_balan.c int n = B_NR_ITEMS(tbS0); n 631 fs/reiserfs/do_balan.c if (n - tb->rnum[0] >= tb->item_pos) { n 639 fs/reiserfs/do_balan.c if (tb->item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1) { n 696 fs/reiserfs/do_balan.c leaf_insert_into_buf(&bi, tb->item_pos - n + tb->rnum[0] - 1, n 699 fs/reiserfs/do_balan.c if (tb->item_pos - n + tb->rnum[0] - 1 == 0) n 841 fs/reiserfs/do_balan.c int n = B_NR_ITEMS(tbS0); n 851 fs/reiserfs/do_balan.c leaf_paste_in_buffer(&bi, tb->item_pos - n + tb->rnum[0], n 857 fs/reiserfs/do_balan.c pasted = item_head(tb->R[0], tb->item_pos - n + tb->rnum[0]); n 859 fs/reiserfs/do_balan.c leaf_paste_entries(&bi, tb->item_pos - n + tb->rnum[0], n 866 fs/reiserfs/do_balan.c RFALSE(tb->item_pos - n + tb->rnum[0], n 885 fs/reiserfs/do_balan.c int n = B_NR_ITEMS(tbS0); n 888 fs/reiserfs/do_balan.c if (n - tb->rnum[0] > tb->item_pos) { n 895 fs/reiserfs/do_balan.c if (tb->item_pos == n - tb->rnum[0] && tb->rbytes != -1) n 927 fs/reiserfs/do_balan.c int n = B_NR_ITEMS(tbS0); n 932 fs/reiserfs/do_balan.c if (n - tb->snum[i] >= tb->item_pos) { n 941 fs/reiserfs/do_balan.c if (tb->item_pos == n - tb->snum[i] + 1 && tb->sbytes[i] != -1) { n 1001 fs/reiserfs/do_balan.c leaf_insert_into_buf(&bi, tb->item_pos - n + tb->snum[i] - 1, n 1146 fs/reiserfs/do_balan.c int n = B_NR_ITEMS(tbS0); n 1171 fs/reiserfs/do_balan.c leaf_paste_in_buffer(&bi, tb->item_pos - n + tb->snum[i], n 1175 fs/reiserfs/do_balan.c pasted = item_head(tb->S_new[i], tb->item_pos - n + n 1178 fs/reiserfs/do_balan.c leaf_paste_entries(&bi, tb->item_pos - n + tb->snum[i], n 1198 fs/reiserfs/do_balan.c int n = B_NR_ITEMS(tbS0); n 1201 fs/reiserfs/do_balan.c if (n - tb->snum[i] > tb->item_pos) { n 1209 fs/reiserfs/do_balan.c if (tb->item_pos == n - tb->snum[i] && tb->sbytes[i] != -1) n 1787 fs/reiserfs/fix_node.c int n; n 1791 fs/reiserfs/fix_node.c ((n = n 1794 fs/reiserfs/fix_node.c 0) ? B_NR_ITEMS(tb->FL[h]) : n - 1; n 1795 fs/reiserfs/fix_node.c n = dc_size(B_N_CHILD(tb->FL[h], order_L)) / n 1797 fs/reiserfs/fix_node.c set_parameters(tb, h, -n - 1, 0, 0, NULL, -1, n 1804 fs/reiserfs/fix_node.c int n; n 1808 fs/reiserfs/fix_node.c ((n = n 1811 fs/reiserfs/fix_node.c B_NR_ITEMS(Fh)) ? 0 : n + 1; n 1812 fs/reiserfs/fix_node.c n = dc_size(B_N_CHILD(tb->FR[h], order_R)) / n 1814 fs/reiserfs/fix_node.c set_parameters(tb, h, 0, -n - 1, 0, NULL, -1, n 1849 fs/reiserfs/fix_node.c int n; n 1853 fs/reiserfs/fix_node.c ((n = n 1856 fs/reiserfs/fix_node.c 0) ? 
B_NR_ITEMS(tb->FL[h]) : n - 1; n 1857 fs/reiserfs/fix_node.c n = dc_size(B_N_CHILD(tb->FL[h], order_L)) / (DC_SIZE + n 1859 fs/reiserfs/fix_node.c set_parameters(tb, h, -n - 1, 0, 0, NULL, -1, -1); n 1865 fs/reiserfs/fix_node.c int n; n 1869 fs/reiserfs/fix_node.c ((n = n 1871 fs/reiserfs/fix_node.c h)) == B_NR_ITEMS(Fh)) ? 0 : (n + 1); n 1872 fs/reiserfs/fix_node.c n = dc_size(B_N_CHILD(tb->FR[h], order_R)) / (DC_SIZE + n 1874 fs/reiserfs/fix_node.c set_parameters(tb, h, 0, -n - 1, 0, NULL, -1, -1); n 27 fs/reiserfs/hashes.c int n = rounds; \ n 38 fs/reiserfs/hashes.c } while(--n); \ n 277 fs/reiserfs/ibalance.c static void internal_delete_childs(struct buffer_info *cur_bi, int from, int n) n 287 fs/reiserfs/ibalance.c internal_delete_pointers_items(cur_bi, from, i_from, n); n 636 fs/reiserfs/ibalance.c int n; n 653 fs/reiserfs/ibalance.c n = B_NR_ITEMS(tbSh); n 660 fs/reiserfs/ibalance.c RFALSE(n n 663 fs/reiserfs/ibalance.c "buffer must have only 0 keys (%d)", n); n 703 fs/reiserfs/ibalance.c internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h, n + 1); n 715 fs/reiserfs/ibalance.c internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h, n + 1); n 742 fs/reiserfs/ibalance.c RFALSE(tb->rnum[h] == 0 || tb->lnum[h] + tb->rnum[h] != n + 1, n 744 fs/reiserfs/ibalance.c h, tb->lnum[h], h, tb->rnum[h], n); n 819 fs/reiserfs/ibalance.c int insert_num, n, k; n 860 fs/reiserfs/ibalance.c n = B_NR_ITEMS(tb->L[h]); /* number of items in L[h] */ n 877 fs/reiserfs/ibalance.c n + child_pos + 1, n 898 fs/reiserfs/ibalance.c n + child_pos + 1, k, n 929 fs/reiserfs/ibalance.c n = B_NR_ITEMS(tbSh); /* number of items in S[h] */ n 930 fs/reiserfs/ibalance.c if (n - tb->rnum[h] >= child_pos) n 934 fs/reiserfs/ibalance.c else if (n + insert_num - tb->rnum[h] < child_pos) { n 946 fs/reiserfs/ibalance.c child_pos - n - insert_num + n 955 fs/reiserfs/ibalance.c internal_shift1_right(tb, h, n - child_pos + 1); n 957 fs/reiserfs/ibalance.c k = tb->rnum[h] - n + child_pos - 1; n 1059 fs/reiserfs/ibalance.c n = B_NR_ITEMS(tbSh); /* number of items in S[h] */ n 1060 fs/reiserfs/ibalance.c snum = (insert_num + n + 1) / 2; n 1061 fs/reiserfs/ibalance.c if (n - snum >= child_pos) { n 1065 fs/reiserfs/ibalance.c memcpy(&new_insert_key, internal_key(tbSh, n - snum), n 1070 fs/reiserfs/ibalance.c } else if (n + insert_num - snum < child_pos) { n 1078 fs/reiserfs/ibalance.c internal_key(tbSh, n + insert_num - snum), n 1091 fs/reiserfs/ibalance.c child_pos - n - insert_num + n 1104 fs/reiserfs/ibalance.c n - child_pos + 1, 1); n 1106 fs/reiserfs/ibalance.c k = snum - n + child_pos - 1; n 1143 fs/reiserfs/ibalance.c n = B_NR_ITEMS(tbSh); /*number of items in S[h] */ n 1145 fs/reiserfs/ibalance.c if (0 <= child_pos && child_pos <= n && insert_num > 0) { n 887 fs/reiserfs/reiserfs.h #define _ROUND_UP(x,n) (((x)+(n)-1u) & ~((n)-1u)) n 47 fs/reiserfs/stree.c __u32 n; n 48 fs/reiserfs/stree.c n = le32_to_cpu(le_key->k_dir_id); n 49 fs/reiserfs/stree.c if (n < cpu_key->on_disk_key.k_dir_id) n 51 fs/reiserfs/stree.c if (n > cpu_key->on_disk_key.k_dir_id) n 53 fs/reiserfs/stree.c n = le32_to_cpu(le_key->k_objectid); n 54 fs/reiserfs/stree.c if (n < cpu_key->on_disk_key.k_objectid) n 56 fs/reiserfs/stree.c if (n > cpu_key->on_disk_key.k_objectid) n 433 fs/reiserfs/xattr.c static struct page *reiserfs_get_page(struct inode *dir, size_t n) n 442 fs/reiserfs/xattr.c page = read_mapping_page(mapping, n >> PAGE_SHIFT, NULL); n 68 fs/reiserfs/xattr_acl.c int n, count; n 87 fs/reiserfs/xattr_acl.c for (n = 0; n < count; n++) { n 91 
fs/reiserfs/xattr_acl.c acl->a_entries[n].e_tag = le16_to_cpu(entry->e_tag); n 92 fs/reiserfs/xattr_acl.c acl->a_entries[n].e_perm = le16_to_cpu(entry->e_perm); n 93 fs/reiserfs/xattr_acl.c switch (acl->a_entries[n].e_tag) { n 106 fs/reiserfs/xattr_acl.c acl->a_entries[n].e_uid = n 114 fs/reiserfs/xattr_acl.c acl->a_entries[n].e_gid = n 139 fs/reiserfs/xattr_acl.c int n; n 150 fs/reiserfs/xattr_acl.c for (n = 0; n < acl->a_count; n++) { n 151 fs/reiserfs/xattr_acl.c const struct posix_acl_entry *acl_e = &acl->a_entries[n]; n 153 fs/reiserfs/xattr_acl.c entry->e_tag = cpu_to_le16(acl->a_entries[n].e_tag); n 154 fs/reiserfs/xattr_acl.c entry->e_perm = cpu_to_le16(acl->a_entries[n].e_perm); n 155 fs/reiserfs/xattr_acl.c switch (acl->a_entries[n].e_tag) { n 39 fs/romfs/storage.c ssize_t n = 0; n 53 fs/romfs/storage.c return n + (p - buf); n 56 fs/romfs/storage.c n += len; n 59 fs/romfs/storage.c return n; n 135 fs/romfs/storage.c ssize_t n = 0; n 150 fs/romfs/storage.c return n + (p - buf); n 153 fs/romfs/storage.c n += segment; n 156 fs/romfs/storage.c return n; n 413 fs/select.c #define FDS_IN(fds, n) (fds->in + n) n 414 fs/select.c #define FDS_OUT(fds, n) (fds->out + n) n 415 fs/select.c #define FDS_EX(fds, n) (fds->ex + n) n 417 fs/select.c #define BITS(fds, n) (*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n)) n 419 fs/select.c static int max_select_fd(unsigned long n, fd_set_bits *fds) n 427 fs/select.c set = ~(~0UL << (n & (BITS_PER_LONG-1))); n 428 fs/select.c n /= BITS_PER_LONG; n 430 fs/select.c open_fds = fdt->open_fds + n; n 433 fs/select.c set &= BITS(fds, n); n 440 fs/select.c while (n) { n 442 fs/select.c n--; n 443 fs/select.c set = BITS(fds, n); n 455 fs/select.c max += n * BITS_PER_LONG; n 476 fs/select.c static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time) n 487 fs/select.c retval = max_select_fd(n, fds); n 492 fs/select.c n = retval; n 512 fs/select.c for (i = 0; i < n; ++rinp, ++routp, ++rexp) { n 526 fs/select.c if (i >= n) n 621 fs/select.c int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, n 633 fs/select.c if (n < 0) n 641 fs/select.c if (n > max_fds) n 642 fs/select.c n = max_fds; n 649 fs/select.c size = FDS_BYTES(n); n 669 fs/select.c if ((ret = get_fd_set(n, inp, fds.in)) || n 670 fs/select.c (ret = get_fd_set(n, outp, fds.out)) || n 671 fs/select.c (ret = get_fd_set(n, exp, fds.ex))) n 673 fs/select.c zero_fd_set(n, fds.res_in); n 674 fs/select.c zero_fd_set(n, fds.res_out); n 675 fs/select.c zero_fd_set(n, fds.res_ex); n 677 fs/select.c ret = do_select(n, &fds, end_time); n 688 fs/select.c if (set_fd_set(n, inp, fds.res_in) || n 689 fs/select.c set_fd_set(n, outp, fds.res_out) || n 690 fs/select.c set_fd_set(n, exp, fds.res_ex)) n 700 fs/select.c static int kern_select(int n, fd_set __user *inp, fd_set __user *outp, n 718 fs/select.c ret = core_sys_select(n, inp, outp, exp, to); n 722 fs/select.c SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp, n 725 fs/select.c return kern_select(n, inp, outp, exp, tvp); n 728 fs/select.c static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp, n 759 fs/select.c ret = core_sys_select(n, inp, outp, exp, to); n 769 fs/select.c SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp, n 784 fs/select.c return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize, PT_TIMESPEC); n 789 fs/select.c SYSCALL_DEFINE6(pselect6_time32, int, n, fd_set __user *, inp, fd_set __user *, outp, n 804 fs/select.c return do_pselect(n, inp, outp, exp, 
tsp, up, sigsetsize, PT_OLD_TIMESPEC); n 811 fs/select.c unsigned long n; n 822 fs/select.c return kern_select(a.n, a.inp, a.outp, a.exp, a.tvp); n 1174 fs/select.c static int compat_core_sys_select(int n, compat_ulong_t __user *inp, n 1184 fs/select.c if (n < 0) n 1192 fs/select.c if (n > max_fds) n 1193 fs/select.c n = max_fds; n 1200 fs/select.c size = FDS_BYTES(n); n 1215 fs/select.c if ((ret = compat_get_fd_set(n, inp, fds.in)) || n 1216 fs/select.c (ret = compat_get_fd_set(n, outp, fds.out)) || n 1217 fs/select.c (ret = compat_get_fd_set(n, exp, fds.ex))) n 1219 fs/select.c zero_fd_set(n, fds.res_in); n 1220 fs/select.c zero_fd_set(n, fds.res_out); n 1221 fs/select.c zero_fd_set(n, fds.res_ex); n 1223 fs/select.c ret = do_select(n, &fds, end_time); n 1234 fs/select.c if (compat_set_fd_set(n, inp, fds.res_in) || n 1235 fs/select.c compat_set_fd_set(n, outp, fds.res_out) || n 1236 fs/select.c compat_set_fd_set(n, exp, fds.res_ex)) n 1245 fs/select.c static int do_compat_select(int n, compat_ulong_t __user *inp, n 1264 fs/select.c ret = compat_core_sys_select(n, inp, outp, exp, to); n 1268 fs/select.c COMPAT_SYSCALL_DEFINE5(select, int, n, compat_ulong_t __user *, inp, n 1272 fs/select.c return do_compat_select(n, inp, outp, exp, tvp); n 1276 fs/select.c compat_ulong_t n; n 1289 fs/select.c return do_compat_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp), n 1293 fs/select.c static long do_compat_pselect(int n, compat_ulong_t __user *inp, n 1324 fs/select.c ret = compat_core_sys_select(n, inp, outp, exp, to); n 1328 fs/select.c COMPAT_SYSCALL_DEFINE6(pselect6_time64, int, n, compat_ulong_t __user *, inp, n 1344 fs/select.c return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(up), n 1350 fs/select.c COMPAT_SYSCALL_DEFINE6(pselect6_time32, int, n, compat_ulong_t __user *, inp, n 1366 fs/select.c return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(up), n 157 fs/seq_file.c size_t n; n 210 fs/seq_file.c n = min(m->count, size); n 211 fs/seq_file.c err = copy_to_user(buf, m->buf + m->from, n); n 214 fs/seq_file.c m->count -= n; n 215 fs/seq_file.c m->from += n; n 216 fs/seq_file.c size -= n; n 217 fs/seq_file.c buf += n; n 218 fs/seq_file.c copied += n; n 276 fs/seq_file.c n = min(m->count, size); n 277 fs/seq_file.c err = copy_to_user(buf, m->buf, n); n 280 fs/seq_file.c copied += n; n 281 fs/seq_file.c m->count -= n; n 282 fs/seq_file.c m->from = n; n 703 fs/splice.c int n, idx; n 722 fs/splice.c for (n = 0, idx = pipe->curbuf; left && n < pipe->nrbufs; n++, idx++) { n 739 fs/splice.c array[n].bv_page = buf->page; n 740 fs/splice.c array[n].bv_len = this_len; n 741 fs/splice.c array[n].bv_offset = buf->offset; n 745 fs/splice.c iov_iter_bvec(&from, WRITE, array, n, sd.total_len - left); n 1223 fs/splice.c int n; n 1231 fs/splice.c for (n = 0; copied; n++, start = 0) { n 1234 fs/splice.c buf.page = pages[n]; n 1245 fs/splice.c put_page(pages[n]); n 1256 fs/splice.c int n = copy_page_to_iter(buf->page, buf->offset, sd->len, sd->u.data); n 1257 fs/splice.c return n == sd->len ? 
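The fs/select.c entries above (core_sys_select(), do_select(), the pselect6 and compat variants) are the kernel side of select(2). For orientation, a minimal userspace caller of that interface; this is standard POSIX usage, not anything specific to this listing.

#include <stdio.h>
#include <sys/select.h>
#include <unistd.h>

int main(void)
{
	fd_set rfds;
	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
	int n;

	FD_ZERO(&rfds);
	FD_SET(STDIN_FILENO, &rfds);

	/* The first argument is the highest-numbered fd plus one. */
	n = select(STDIN_FILENO + 1, &rfds, NULL, NULL, &tv);
	if (n < 0)
		perror("select");
	else if (n == 0)
		printf("timed out\n");
	else if (FD_ISSET(STDIN_FILENO, &rfds))
		printf("stdin is readable\n");
	return 0;
}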
n : -EFAULT; n 55 fs/squashfs/cache.c int i, n; n 61 fs/squashfs/cache.c for (i = cache->curr_blk, n = 0; n < cache->entries; n++) { n 69 fs/squashfs/cache.c if (n == cache->entries) { n 89 fs/squashfs/cache.c for (n = 0; n < cache->entries; n++) { n 160 fs/squashfs/file.c static long long read_indexes(struct super_block *sb, int n, n 172 fs/squashfs/file.c while (n) { n 173 fs/squashfs/file.c int blocks = min_t(int, n, PAGE_SIZE >> 2); n 191 fs/squashfs/file.c n -= blocks; n 36 fs/squashfs/file_direct.c int i, n, pages, missing_pages, bytes, res = -ENOMEM; n 59 fs/squashfs/file_direct.c for (missing_pages = 0, i = 0, n = start_index; i < pages; i++, n++) { n 60 fs/squashfs/file_direct.c page[i] = (n == target_page->index) ? target_page : n 61 fs/squashfs/file_direct.c grab_cache_page_nowait(target_page->mapping, n); n 150 fs/squashfs/file_direct.c int res = buffer->error, n, offset = 0; n 158 fs/squashfs/file_direct.c for (n = 0; n < pages && bytes > 0; n++, n 162 fs/squashfs/file_direct.c if (page[n] == NULL) n 165 fs/squashfs/file_direct.c squashfs_fill_page(page[n], buffer, offset, avail); n 166 fs/squashfs/file_direct.c unlock_page(page[n]); n 167 fs/squashfs/file_direct.c if (page[n] != target_page) n 168 fs/squashfs/file_direct.c put_page(page[n]); n 43 fs/squashfs/xz_wrapper.c int err = 0, n; n 61 fs/squashfs/xz_wrapper.c n = ffs(opts->dict_size) - 1; n 62 fs/squashfs/xz_wrapper.c if (opts->dict_size != (1 << n) && opts->dict_size != (1 << n) + n 63 fs/squashfs/xz_wrapper.c (1 << (n + 1))) { n 167 fs/sysv/balloc.c int n; n 185 fs/sysv/balloc.c n = fs16_to_cpu(sbi, *sbi->s_bcache_count); n 189 fs/sysv/balloc.c if (n > sbi->s_flc_size) n 192 fs/sysv/balloc.c while (n && (zone = blocks[--n]) != 0) n 207 fs/sysv/balloc.c n = fs16_to_cpu(sbi, *(__fs16*)bh->b_data); n 55 fs/sysv/dir.c static struct page * dir_get_page(struct inode *dir, unsigned long n) n 58 fs/sysv/dir.c struct page *page = read_mapping_page(mapping, n, NULL); n 71 fs/sysv/dir.c unsigned long n; n 78 fs/sysv/dir.c n = pos >> PAGE_SHIFT; n 80 fs/sysv/dir.c for ( ; n < npages; n++, offset = 0) { n 83 fs/sysv/dir.c struct page *page = dir_get_page(inode, n); n 132 fs/sysv/dir.c unsigned long start, n; n 142 fs/sysv/dir.c n = start; n 146 fs/sysv/dir.c page = dir_get_page(dir, n); n 161 fs/sysv/dir.c if (++n >= npages) n 162 fs/sysv/dir.c n = 0; n 163 fs/sysv/dir.c } while (n != start); n 168 fs/sysv/dir.c SYSV_I(dir)->i_dir_start_lookup = n; n 181 fs/sysv/dir.c unsigned long n; n 187 fs/sysv/dir.c for (n = 0; n <= npages; n++) { n 188 fs/sysv/dir.c page = dir_get_page(dir, n); n 30 fs/sysv/itree.c int n = 0; n 35 fs/sysv/itree.c offsets[n++] = block; n 37 fs/sysv/itree.c offsets[n++] = DIRECT; n 38 fs/sysv/itree.c offsets[n++] = block; n 40 fs/sysv/itree.c offsets[n++] = DIRECT+1; n 41 fs/sysv/itree.c offsets[n++] = block >> ptrs_bits; n 42 fs/sysv/itree.c offsets[n++] = block & (indirect_blocks - 1); n 44 fs/sysv/itree.c offsets[n++] = DIRECT+2; n 45 fs/sysv/itree.c offsets[n++] = block >> (ptrs_bits * 2); n 46 fs/sysv/itree.c offsets[n++] = (block >> ptrs_bits) & (indirect_blocks - 1); n 47 fs/sysv/itree.c offsets[n++] = block & (indirect_blocks - 1); n 51 fs/sysv/itree.c return n; n 131 fs/sysv/itree.c int n = 0; n 135 fs/sysv/itree.c if (branch[0].key) for (n = 1; n < num; n++) { n 139 fs/sysv/itree.c branch[n].key = sysv_new_block(inode->i_sb); n 140 fs/sysv/itree.c if (!branch[n].key) n 146 fs/sysv/itree.c parent = block_to_cpu(SYSV_SB(inode->i_sb), branch[n-1].key); n 150 fs/sysv/itree.c branch[n].bh = bh; n 151 
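The fs/sysv/itree.c offsets[n++] entries above implement the usual "logical block to path of offsets" walk over direct, indirect, double- and triple-indirect pointers (a near-identical helper appears later in this listing under fs/ufs/inode.c). A minimal sketch of that computation follows; DEMO_DIRECT and DEMO_PTRS are made-up geometry standing in for values a real filesystem derives from its block size.

#include <stdio.h>

#define DEMO_DIRECT	10	/* direct pointers held in the inode */
#define DEMO_PTRS	256	/* pointers per indirect block */

static int demo_block_to_path(long block, long offsets[4])
{
	int n = 0;

	if (block < DEMO_DIRECT) {
		offsets[n++] = block;
	} else if ((block -= DEMO_DIRECT) < DEMO_PTRS) {
		offsets[n++] = DEMO_DIRECT;		/* indirect slot */
		offsets[n++] = block;
	} else if ((block -= DEMO_PTRS) < (long)DEMO_PTRS * DEMO_PTRS) {
		offsets[n++] = DEMO_DIRECT + 1;		/* double indirect */
		offsets[n++] = block / DEMO_PTRS;
		offsets[n++] = block % DEMO_PTRS;
	} else {
		block -= (long)DEMO_PTRS * DEMO_PTRS;
		offsets[n++] = DEMO_DIRECT + 2;		/* triple indirect */
		offsets[n++] = block / ((long)DEMO_PTRS * DEMO_PTRS);
		offsets[n++] = (block / DEMO_PTRS) % DEMO_PTRS;
		offsets[n++] = block % DEMO_PTRS;
	}
	return n;	/* depth of the path, 1..4 */
}

int main(void)
{
	long offsets[4];
	int i, depth = demo_block_to_path(70000, offsets);

	printf("depth %d:", depth);
	for (i = 0; i < depth; i++)
		printf(" %ld", offsets[i]);
	printf("\n");
	return 0;
}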
fs/sysv/itree.c branch[n].p = (sysv_zone_t*) bh->b_data + offsets[n]; n 152 fs/sysv/itree.c *branch[n].p = branch[n].key; n 157 fs/sysv/itree.c if (n == num) n 161 fs/sysv/itree.c for (i = 1; i < n; i++) n 163 fs/sysv/itree.c for (i = 0; i < n; i++) n 371 fs/sysv/itree.c int n; n 385 fs/sysv/itree.c n = block_to_path(inode, iblock, offsets); n 386 fs/sysv/itree.c if (n == 0) n 389 fs/sysv/itree.c if (n == 1) { n 394 fs/sysv/itree.c partial = find_shared(inode, n, offsets, chain, &nr); n 401 fs/sysv/itree.c free_branches(inode, &nr, &nr+1, (chain+n-1) - partial); n 406 fs/sysv/itree.c (chain+n-1) - partial); n 413 fs/sysv/itree.c while (n < DEPTH) { n 414 fs/sysv/itree.c nr = i_data[DIRECT + n - 1]; n 416 fs/sysv/itree.c i_data[DIRECT + n - 1] = 0; n 418 fs/sysv/itree.c free_branches(inode, &nr, &nr+1, n); n 420 fs/sysv/itree.c n++; n 188 fs/sysv/sysv.h static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n) n 191 fs/sysv/sysv.h return PDP_swab((__force __u32)n); n 193 fs/sysv/sysv.h return le32_to_cpu((__force __le32)n); n 195 fs/sysv/sysv.h return be32_to_cpu((__force __be32)n); n 198 fs/sysv/sysv.h static inline __fs32 cpu_to_fs32(struct sysv_sb_info *sbi, __u32 n) n 201 fs/sysv/sysv.h return (__force __fs32)PDP_swab(n); n 203 fs/sysv/sysv.h return (__force __fs32)cpu_to_le32(n); n 205 fs/sysv/sysv.h return (__force __fs32)cpu_to_be32(n); n 208 fs/sysv/sysv.h static inline __fs32 fs32_add(struct sysv_sb_info *sbi, __fs32 *n, int d) n 211 fs/sysv/sysv.h *(__u32*)n = PDP_swab(PDP_swab(*(__u32*)n)+d); n 213 fs/sysv/sysv.h le32_add_cpu((__le32 *)n, d); n 215 fs/sysv/sysv.h be32_add_cpu((__be32 *)n, d); n 216 fs/sysv/sysv.h return *n; n 219 fs/sysv/sysv.h static inline __u16 fs16_to_cpu(struct sysv_sb_info *sbi, __fs16 n) n 222 fs/sysv/sysv.h return le16_to_cpu((__force __le16)n); n 224 fs/sysv/sysv.h return be16_to_cpu((__force __be16)n); n 227 fs/sysv/sysv.h static inline __fs16 cpu_to_fs16(struct sysv_sb_info *sbi, __u16 n) n 230 fs/sysv/sysv.h return (__force __fs16)cpu_to_le16(n); n 232 fs/sysv/sysv.h return (__force __fs16)cpu_to_be16(n); n 235 fs/sysv/sysv.h static inline __fs16 fs16_add(struct sysv_sb_info *sbi, __fs16 *n, int d) n 238 fs/sysv/sysv.h le16_add_cpu((__le16 *)n, d); n 240 fs/sysv/sysv.h be16_add_cpu((__be16 *)n, d); n 241 fs/sysv/sysv.h return *n; n 296 fs/ubifs/debug.c int i, n; n 499 fs/ubifs/debug.c n = le16_to_cpu(idx->child_cnt); n 500 fs/ubifs/debug.c pr_err("\tchild_cnt %d\n", n); n 504 fs/ubifs/debug.c for (i = 0; i < n && i < c->fanout - 1; i++) { n 528 fs/ubifs/debug.c n = (le32_to_cpu(ch->len) - UBIFS_ORPH_NODE_SZ) >> 3; n 529 fs/ubifs/debug.c pr_err("\t%d orphan inode numbers:\n", n); n 530 fs/ubifs/debug.c for (i = 0; i < n; i++) n 851 fs/ubifs/debug.c int n; n 871 fs/ubifs/debug.c for (n = 0; n < znode->child_cnt; n++) { n 872 fs/ubifs/debug.c zbr = &znode->zbranch[n]; n 875 fs/ubifs/debug.c n, zbr->znode, zbr->lnum, zbr->offs, zbr->len, n 880 fs/ubifs/debug.c n, zbr->znode, zbr->lnum, zbr->offs, zbr->len, n 1267 fs/ubifs/debug.c int n, err, cmp; n 1320 fs/ubifs/debug.c err = ubifs_search_zbranch(c, zp, &zbr->key, &n); n 1332 fs/ubifs/debug.c if (znode->iip != n) { n 1334 fs/ubifs/debug.c if (keys_cmp(c, &zp->zbranch[n].key, n 1339 fs/ubifs/debug.c n = znode->iip; n 1353 fs/ubifs/debug.c if (n + 1 < zp->child_cnt) { n 1354 fs/ubifs/debug.c max = &zp->zbranch[n + 1].key; n 1380 fs/ubifs/debug.c for (n = 1; n < znode->child_cnt; n++) { n 1381 fs/ubifs/debug.c cmp = keys_cmp(c, &znode->zbranch[n - 1].key, n 1382 fs/ubifs/debug.c 
&znode->zbranch[n].key); n 1389 fs/ubifs/debug.c if (!is_hash_key(c, &znode->zbranch[n].key)) { n 1401 fs/ubifs/debug.c err = dbg_check_key_order(c, &znode->zbranch[n - 1], n 1402 fs/ubifs/debug.c &znode->zbranch[n]); n 1412 fs/ubifs/debug.c for (n = 0; n < znode->child_cnt; n++) { n 1413 fs/ubifs/debug.c if (!znode->zbranch[n].znode && n 1414 fs/ubifs/debug.c (znode->zbranch[n].lnum == 0 || n 1415 fs/ubifs/debug.c znode->zbranch[n].len == 0)) { n 1420 fs/ubifs/debug.c if (znode->zbranch[n].lnum != 0 && n 1421 fs/ubifs/debug.c znode->zbranch[n].len == 0) { n 1426 fs/ubifs/debug.c if (znode->zbranch[n].lnum == 0 && n 1427 fs/ubifs/debug.c znode->zbranch[n].len != 0) { n 1432 fs/ubifs/debug.c if (znode->zbranch[n].lnum == 0 && n 1433 fs/ubifs/debug.c znode->zbranch[n].offs != 0) { n 1438 fs/ubifs/debug.c if (znode->level != 0 && znode->zbranch[n].znode) n 1439 fs/ubifs/debug.c if (znode->zbranch[n].znode->parent != znode) { n 1907 fs/ubifs/debug.c int n, err; n 1919 fs/ubifs/debug.c err = ubifs_lookup_level0(c, &key, &znode, &n); n 1929 fs/ubifs/debug.c zbr = &znode->zbranch[n]; n 2124 fs/ubifs/debug.c struct fsck_inode *fscki, *n; n 2126 fs/ubifs/debug.c rbtree_postorder_for_each_entry_safe(fscki, n, &fsckd->inodes, rb) n 2142 fs/ubifs/debug.c int n, err; n 2219 fs/ubifs/debug.c err = ubifs_lookup_level0(c, &key, &znode, &n); n 2230 fs/ubifs/debug.c zbr = &znode->zbranch[n]; n 2447 fs/ubifs/debug.c static inline int chance(unsigned int n, unsigned int out_of) n 2449 fs/ubifs/debug.c return !!((prandom_u32() % out_of) + 1 <= n); n 2812 fs/ubifs/debug.c int n; n 2816 fs/ubifs/debug.c n = snprintf(d->dfs_dir_name, UBIFS_DFS_DIR_LEN + 1, UBIFS_DFS_DIR_NAME, n 2818 fs/ubifs/debug.c if (n == UBIFS_DFS_DIR_LEN) { n 607 fs/ubifs/file.c struct bu_info *bu, int *n) n 609 fs/ubifs/file.c int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0; n 696 fs/ubifs/file.c *n = nn; n 724 fs/ubifs/file.c int err, page_idx, page_cnt, ret = 0, n = 0; n 771 fs/ubifs/file.c err = populate_page(c, page1, bu, &n); n 795 fs/ubifs/file.c err = populate_page(c, page, bu, &n); n 44 fs/ubifs/find.c int n, cat = lprops->flags & LPROPS_CAT_MASK; n 58 fs/ubifs/find.c n = c->lst.empty_lebs + c->freeable_cnt - n 60 fs/ubifs/find.c if (n < c->lsave_cnt) n 721 fs/ubifs/io.c int err, written, n, aligned_len = ALIGN(len, 8); n 831 fs/ubifs/io.c n = aligned_len >> c->max_write_shift; n 832 fs/ubifs/io.c if (n) { n 833 fs/ubifs/io.c n <<= c->max_write_shift; n 834 fs/ubifs/io.c dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum, n 837 fs/ubifs/io.c wbuf->offs, n); n 840 fs/ubifs/io.c wbuf->offs += n; n 841 fs/ubifs/io.c aligned_len -= n; n 842 fs/ubifs/io.c len -= n; n 843 fs/ubifs/io.c written += n; n 595 fs/ubifs/log.c struct done_ref *dr, *n; n 597 fs/ubifs/log.c rbtree_postorder_for_each_entry_safe(dr, n, done_tree, rb) n 48 fs/ubifs/lpt.c int i, n, bits, per_leb_wastage, max_pnode_cnt; n 51 fs/ubifs/lpt.c n = c->main_lebs + c->max_leb_cnt - c->leb_cnt; n 52 fs/ubifs/lpt.c max_pnode_cnt = DIV_ROUND_UP(n, UBIFS_LPT_FANOUT); n 55 fs/ubifs/lpt.c n = UBIFS_LPT_FANOUT; n 56 fs/ubifs/lpt.c while (n < max_pnode_cnt) { n 58 fs/ubifs/lpt.c n <<= UBIFS_LPT_FANOUT_SHIFT; n 63 fs/ubifs/lpt.c n = DIV_ROUND_UP(c->pnode_cnt, UBIFS_LPT_FANOUT); n 64 fs/ubifs/lpt.c c->nnode_cnt = n; n 66 fs/ubifs/lpt.c n = DIV_ROUND_UP(n, UBIFS_LPT_FANOUT); n 67 fs/ubifs/lpt.c c->nnode_cnt += n; n 75 fs/ubifs/lpt.c n = DIV_ROUND_UP(c->max_leb_cnt, UBIFS_LPT_FANOUT); n 76 fs/ubifs/lpt.c c->pcnt_bits = fls(n - 1); n 576 fs/ubifs/lpt.c int i, n = 
c->lpt_hght - 1, pnum = parent->num, num = 0; n 578 fs/ubifs/lpt.c for (i = 0; i < n; i++) { n 1514 fs/ubifs/lpt.c struct ubifs_nnode *n; n 1527 fs/ubifs/lpt.c n = kmemdup(nnode, sizeof(struct ubifs_nnode), GFP_NOFS); n 1528 fs/ubifs/lpt.c if (unlikely(!n)) n 1531 fs/ubifs/lpt.c n->cnext = NULL; n 1532 fs/ubifs/lpt.c __set_bit(DIRTY_CNODE, &n->flags); n 1533 fs/ubifs/lpt.c __clear_bit(COW_CNODE, &n->flags); n 1537 fs/ubifs/lpt.c struct ubifs_nbranch *branch = &n->nbranch[i]; n 1540 fs/ubifs/lpt.c branch->cnode->parent = n; n 1549 fs/ubifs/lpt.c nnode->parent->nbranch[n->iip].nnode = n; n 1551 fs/ubifs/lpt.c c->nroot = n; n 1552 fs/ubifs/lpt.c return n; n 151 fs/ubifs/lpt_commit.c int i, n; n 153 fs/ubifs/lpt_commit.c n = *lnum - c->lpt_first + 1; n 154 fs/ubifs/lpt_commit.c for (i = n; i < c->lpt_lebs; i++) { n 164 fs/ubifs/lpt_commit.c for (i = 0; i < n; i++) { n 335 fs/ubifs/lpt_commit.c int i, n; n 337 fs/ubifs/lpt_commit.c n = *lnum - c->lpt_first + 1; n 338 fs/ubifs/lpt_commit.c for (i = n; i < c->lpt_lebs; i++) n 345 fs/ubifs/lpt_commit.c for (i = 0; i < n; i++) n 632 fs/ubifs/orphan.c int i, n, err, first = 1; n 683 fs/ubifs/orphan.c n = (le32_to_cpu(orph->ch.len) - UBIFS_ORPH_NODE_SZ) >> 3; n 684 fs/ubifs/orphan.c for (i = 0; i < n; i++) { n 904 fs/ubifs/orphan.c struct check_orphan *o, *n; n 906 fs/ubifs/orphan.c rbtree_postorder_for_each_entry_safe(o, n, root, rb) n 948 fs/ubifs/orphan.c int i, n, err; n 955 fs/ubifs/orphan.c n = (le32_to_cpu(orph->ch.len) - UBIFS_ORPH_NODE_SZ) >> 3; n 956 fs/ubifs/orphan.c for (i = 0; i < n; i++) { n 1319 fs/ubifs/recovery.c struct size_entry *e, *n; n 1321 fs/ubifs/recovery.c rbtree_postorder_for_each_entry_safe(e, n, &c->size_tree, rb) { n 675 fs/ubifs/replay.c int n_nodes, n = 0; n 818 fs/ubifs/replay.c n++; n 819 fs/ubifs/replay.c if (n == n_nodes) n 894 fs/ubifs/super.c struct ubifs_bud *bud, *n; n 896 fs/ubifs/super.c rbtree_postorder_for_each_entry_safe(bud, n, &c->buds, rb) n 174 fs/ubifs/tnc.c struct ubifs_old_idx *old_idx, *n; n 176 fs/ubifs/tnc.c rbtree_postorder_for_each_entry_safe(old_idx, n, &c->old_idx, rb) n 207 fs/ubifs/tnc.c const int n = zn->child_cnt; n 210 fs/ubifs/tnc.c for (i = 0; i < n; i++) { n 582 fs/ubifs/tnc.c struct ubifs_znode *znode, int n) n 586 fs/ubifs/tnc.c zbr = &znode->zbranch[n]; n 590 fs/ubifs/tnc.c znode = ubifs_load_znode(c, zbr, znode, n); n 603 fs/ubifs/tnc.c static int tnc_next(struct ubifs_info *c, struct ubifs_znode **zn, int *n) n 606 fs/ubifs/tnc.c int nn = *n; n 610 fs/ubifs/tnc.c *n = nn; n 635 fs/ubifs/tnc.c *n = nn; n 648 fs/ubifs/tnc.c static int tnc_prev(struct ubifs_info *c, struct ubifs_znode **zn, int *n) n 651 fs/ubifs/tnc.c int nn = *n; n 654 fs/ubifs/tnc.c *n = nn - 1; n 680 fs/ubifs/tnc.c *n = nn; n 701 fs/ubifs/tnc.c struct ubifs_znode **zn, int *n, n 706 fs/ubifs/tnc.c err = matches_name(c, &(*zn)->zbranch[*n], nm); n 715 fs/ubifs/tnc.c err = tnc_prev(c, zn, n); n 717 fs/ubifs/tnc.c ubifs_assert(c, *n == 0); n 718 fs/ubifs/tnc.c *n = -1; n 723 fs/ubifs/tnc.c if (keys_cmp(c, &(*zn)->zbranch[*n].key, key)) { n 753 fs/ubifs/tnc.c if (*n == (*zn)->child_cnt - 1) { n 754 fs/ubifs/tnc.c err = tnc_next(c, zn, n); n 762 fs/ubifs/tnc.c ubifs_assert(c, *n == 0); n 763 fs/ubifs/tnc.c *n = -1; n 767 fs/ubifs/tnc.c err = matches_name(c, &(*zn)->zbranch[*n], nm); n 777 fs/ubifs/tnc.c int nn = *n; n 795 fs/ubifs/tnc.c *n = nn; n 890 fs/ubifs/tnc.c struct ubifs_znode **zn, int *n, n 895 fs/ubifs/tnc.c int uninitialized_var(o_n), err, cmp, unsure = 0, nn = *n; n 917 fs/ubifs/tnc.c err = tnc_prev(c, 
zn, n); n 919 fs/ubifs/tnc.c ubifs_assert(c, *n == 0); n 920 fs/ubifs/tnc.c *n = -1; n 925 fs/ubifs/tnc.c if (keys_cmp(c, &(*zn)->zbranch[*n].key, key)) { n 927 fs/ubifs/tnc.c if (*n == (*zn)->child_cnt - 1) { n 928 fs/ubifs/tnc.c err = tnc_next(c, zn, n); n 936 fs/ubifs/tnc.c ubifs_assert(c, *n == 0); n 937 fs/ubifs/tnc.c *n = -1; n 941 fs/ubifs/tnc.c err = fallible_matches_name(c, &(*zn)->zbranch[*n], nm); n 948 fs/ubifs/tnc.c o_n = *n; n 963 fs/ubifs/tnc.c *n = nn; n 978 fs/ubifs/tnc.c *n = nn; n 996 fs/ubifs/tnc.c *n = o_n; n 1035 fs/ubifs/tnc.c struct ubifs_znode **zn, int *n, n 1042 fs/ubifs/tnc.c nn = *n; n 1057 fs/ubifs/tnc.c *n = nn; n 1064 fs/ubifs/tnc.c nn = *n; n 1074 fs/ubifs/tnc.c *n = nn; n 1110 fs/ubifs/tnc.c int n; n 1115 fs/ubifs/tnc.c n = znode->iip; n 1117 fs/ubifs/tnc.c path[p++] = n; n 1171 fs/ubifs/tnc.c struct ubifs_znode **zn, int *n) n 1192 fs/ubifs/tnc.c exact = ubifs_search_zbranch(c, znode, key, n); n 1197 fs/ubifs/tnc.c if (*n < 0) n 1198 fs/ubifs/tnc.c *n = 0; n 1199 fs/ubifs/tnc.c zbr = &znode->zbranch[*n]; n 1208 fs/ubifs/tnc.c znode = ubifs_load_znode(c, zbr, znode, *n); n 1214 fs/ubifs/tnc.c if (exact || !is_hash_key(c, key) || *n != -1) { n 1215 fs/ubifs/tnc.c dbg_tnc("found %d, lvl %d, n %d", exact, znode->level, *n); n 1262 fs/ubifs/tnc.c err = tnc_prev(c, &znode, n); n 1265 fs/ubifs/tnc.c *n = -1; n 1270 fs/ubifs/tnc.c if (keys_cmp(c, key, &znode->zbranch[*n].key)) { n 1272 fs/ubifs/tnc.c *n = -1; n 1276 fs/ubifs/tnc.c dbg_tnc("found 1, lvl %d, n %d", znode->level, *n); n 1307 fs/ubifs/tnc.c struct ubifs_znode **zn, int *n) n 1331 fs/ubifs/tnc.c exact = ubifs_search_zbranch(c, znode, key, n); n 1336 fs/ubifs/tnc.c if (*n < 0) n 1337 fs/ubifs/tnc.c *n = 0; n 1338 fs/ubifs/tnc.c zbr = &znode->zbranch[*n]; n 1349 fs/ubifs/tnc.c znode = ubifs_load_znode(c, zbr, znode, *n); n 1358 fs/ubifs/tnc.c if (exact || !is_hash_key(c, key) || *n != -1) { n 1359 fs/ubifs/tnc.c dbg_tnc("found %d, lvl %d, n %d", exact, znode->level, *n); n 1367 fs/ubifs/tnc.c err = tnc_prev(c, &znode, n); n 1369 fs/ubifs/tnc.c *n = -1; n 1375 fs/ubifs/tnc.c if (keys_cmp(c, key, &znode->zbranch[*n].key)) { n 1376 fs/ubifs/tnc.c *n = -1; n 1387 fs/ubifs/tnc.c dbg_tnc("found 1, lvl %d, n %d", znode->level, *n); n 1444 fs/ubifs/tnc.c int found, n, err, safely = 0, gc_seq1; n 1450 fs/ubifs/tnc.c found = ubifs_lookup_level0(c, key, &znode, &n); n 1458 fs/ubifs/tnc.c zt = &znode->zbranch[n]; n 1476 fs/ubifs/tnc.c zbr = znode->zbranch[n]; n 1517 fs/ubifs/tnc.c int n, err = 0, lnum = -1, uninitialized_var(offs); n 1528 fs/ubifs/tnc.c err = ubifs_lookup_level0(c, &bu->key, &znode, &n); n 1533 fs/ubifs/tnc.c len = znode->zbranch[n].len; n 1540 fs/ubifs/tnc.c bu->zbranch[bu->cnt++] = znode->zbranch[n]; n 1542 fs/ubifs/tnc.c lnum = znode->zbranch[n].lnum; n 1543 fs/ubifs/tnc.c offs = ALIGN(znode->zbranch[n].offs + len, 8); n 1551 fs/ubifs/tnc.c err = tnc_next(c, &znode, &n); n 1554 fs/ubifs/tnc.c zbr = &znode->zbranch[n]; n 1812 fs/ubifs/tnc.c int found, n, err; n 1817 fs/ubifs/tnc.c found = ubifs_lookup_level0(c, key, &znode, &n); n 1826 fs/ubifs/tnc.c ubifs_assert(c, n >= 0); n 1828 fs/ubifs/tnc.c err = resolve_collision(c, key, &znode, &n, nm); n 1829 fs/ubifs/tnc.c dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n); n 1837 fs/ubifs/tnc.c err = tnc_read_hashed_node(c, &znode->zbranch[n], node); n 1885 fs/ubifs/tnc.c struct ubifs_znode **zn, int *n, int exact) n 1893 fs/ubifs/tnc.c err = tnc_next(c, &znode, n); n 1899 fs/ubifs/tnc.c zbr = &znode->zbranch[*n]; n 1917 fs/ubifs/tnc.c err = 
tnc_next(c, &znode, n); n 1926 fs/ubifs/tnc.c int n, err; n 1935 fs/ubifs/tnc.c err = ubifs_lookup_level0(c, &start_key, &znode, &n); n 1939 fs/ubifs/tnc.c err = search_dh_cookie(c, key, dent, cookie, &znode, &n, err); n 2030 fs/ubifs/tnc.c const struct ubifs_zbranch *zbr, int n) n 2037 fs/ubifs/tnc.c for (i = znode->child_cnt; i > n; i--) { n 2043 fs/ubifs/tnc.c zbr->znode->iip = n; n 2045 fs/ubifs/tnc.c for (i = znode->child_cnt; i > n; i--) n 2048 fs/ubifs/tnc.c znode->zbranch[n] = *zbr; n 2065 fs/ubifs/tnc.c if (n == 0) n 2082 fs/ubifs/tnc.c struct ubifs_zbranch *zbr, int n) n 2088 fs/ubifs/tnc.c ubifs_assert(c, n >= 0 && n <= c->fanout); n 2094 fs/ubifs/tnc.c ubifs_assert(c, n != c->fanout); n 2095 fs/ubifs/tnc.c dbg_tnck(key, "inserted at %d level %d, key ", n, znode->level); n 2097 fs/ubifs/tnc.c insert_zbranch(c, znode, zbr, n); n 2100 fs/ubifs/tnc.c if (n == 0 && zp && znode->iip == 0) n 2128 fs/ubifs/tnc.c if (n == c->fanout) { n 2129 fs/ubifs/tnc.c key1 = &znode->zbranch[n - 1].key; n 2135 fs/ubifs/tnc.c } else if (appending && n != c->fanout) { n 2139 fs/ubifs/tnc.c if (n >= (c->fanout + 1) / 2) { n 2143 fs/ubifs/tnc.c key1 = &znode->zbranch[n].key; n 2146 fs/ubifs/tnc.c keep = n; n 2168 fs/ubifs/tnc.c if (n < keep) { n 2176 fs/ubifs/tnc.c n -= keep; n 2204 fs/ubifs/tnc.c dbg_tnck(key, "inserting at %d level %d, key ", n, zn->level); n 2206 fs/ubifs/tnc.c insert_zbranch(c, zi, zbr, n); n 2210 fs/ubifs/tnc.c if (n == 0 && zi == znode && znode->iip == 0) n 2214 fs/ubifs/tnc.c n = znode->iip + 1; n 2277 fs/ubifs/tnc.c int found, n, err = 0; n 2282 fs/ubifs/tnc.c found = lookup_level0_dirty(c, key, &znode, &n); n 2292 fs/ubifs/tnc.c err = tnc_insert(c, znode, &zbr, n + 1); n 2294 fs/ubifs/tnc.c struct ubifs_zbranch *zbr = &znode->zbranch[n]; n 2328 fs/ubifs/tnc.c int found, n, err = 0; n 2334 fs/ubifs/tnc.c found = lookup_level0_dirty(c, key, &znode, &n); n 2341 fs/ubifs/tnc.c struct ubifs_zbranch *zbr = &znode->zbranch[n]; n 2354 fs/ubifs/tnc.c found = resolve_collision_directly(c, key, &znode, &n, n 2357 fs/ubifs/tnc.c found, znode, n, old_lnum, old_offs); n 2372 fs/ubifs/tnc.c zbr = &znode->zbranch[n]; n 2413 fs/ubifs/tnc.c int found, n, err = 0; n 2418 fs/ubifs/tnc.c found = lookup_level0_dirty(c, key, &znode, &n); n 2426 fs/ubifs/tnc.c found = fallible_resolve_collision(c, key, &znode, &n, n 2429 fs/ubifs/tnc.c found = resolve_collision(c, key, &znode, &n, nm); n 2430 fs/ubifs/tnc.c dbg_tnc("rc returned %d, znode %p, n %d", found, znode, n); n 2446 fs/ubifs/tnc.c struct ubifs_zbranch *zbr = &znode->zbranch[n]; n 2467 fs/ubifs/tnc.c err = tnc_insert(c, znode, &zbr, n + 1); n 2503 fs/ubifs/tnc.c static int tnc_delete(struct ubifs_info *c, struct ubifs_znode *znode, int n) n 2511 fs/ubifs/tnc.c ubifs_assert(c, n >= 0 && n < c->fanout); n 2512 fs/ubifs/tnc.c dbg_tnck(&znode->zbranch[n].key, "deleting key "); n 2514 fs/ubifs/tnc.c zbr = &znode->zbranch[n]; n 2524 fs/ubifs/tnc.c for (i = n; i < znode->child_cnt - 1; i++) n 2541 fs/ubifs/tnc.c n = znode->iip; n 2561 fs/ubifs/tnc.c for (i = n; i < znode->child_cnt; i++) { n 2618 fs/ubifs/tnc.c int found, n, err = 0; n 2623 fs/ubifs/tnc.c found = lookup_level0_dirty(c, key, &znode, &n); n 2629 fs/ubifs/tnc.c err = tnc_delete(c, znode, n); n 2649 fs/ubifs/tnc.c int n, err; n 2654 fs/ubifs/tnc.c err = lookup_level0_dirty(c, key, &znode, &n); n 2660 fs/ubifs/tnc.c err = fallible_resolve_collision(c, key, &znode, &n, n 2663 fs/ubifs/tnc.c err = resolve_collision(c, key, &znode, &n, nm); n 2664 fs/ubifs/tnc.c dbg_tnc("rc returned %d, znode 
%p, n %d", err, znode, n); n 2676 fs/ubifs/tnc.c err = tnc_delete(c, znode, n); n 2698 fs/ubifs/tnc.c int n, err; n 2707 fs/ubifs/tnc.c err = lookup_level0_dirty(c, key, &znode, &n); n 2711 fs/ubifs/tnc.c zbr = &znode->zbranch[n]; n 2728 fs/ubifs/tnc.c err = ubifs_lookup_level0(c, &start_key, &znode, &n); n 2732 fs/ubifs/tnc.c err = search_dh_cookie(c, key, dent, cookie, &znode, &n, err); n 2744 fs/ubifs/tnc.c err = tnc_delete(c, znode, n); n 2787 fs/ubifs/tnc.c int i, n, k, err = 0; n 2794 fs/ubifs/tnc.c err = ubifs_lookup_level0(c, from_key, &znode, &n); n 2801 fs/ubifs/tnc.c err = tnc_next(c, &znode, &n); n 2808 fs/ubifs/tnc.c key = &znode->zbranch[n].key; n 2825 fs/ubifs/tnc.c for (i = n + 1, k = 0; i < znode->child_cnt; i++, k++) { n 2839 fs/ubifs/tnc.c for (i = n + 1 + k; i < znode->child_cnt; i++) n 2845 fs/ubifs/tnc.c err = tnc_delete(c, znode, n); n 2952 fs/ubifs/tnc.c int n, err, type = key_type(c, key); n 2962 fs/ubifs/tnc.c err = ubifs_lookup_level0(c, key, &znode, &n); n 2970 fs/ubifs/tnc.c err = fallible_resolve_collision(c, key, &znode, &n, n 2973 fs/ubifs/tnc.c err = resolve_collision(c, key, &znode, &n, nm); n 2975 fs/ubifs/tnc.c err, znode, n); n 2981 fs/ubifs/tnc.c err = tnc_next(c, &znode, &n); n 2996 fs/ubifs/tnc.c err = tnc_next(c, &znode, &n); n 3002 fs/ubifs/tnc.c zbr = &znode->zbranch[n]; n 3065 fs/ubifs/tnc.c long n, freed; n 3067 fs/ubifs/tnc.c n = atomic_long_read(&c->clean_zn_cnt); n 3069 fs/ubifs/tnc.c ubifs_assert(c, freed == n); n 3070 fs/ubifs/tnc.c atomic_long_sub(n, &ubifs_clean_zn_cnt); n 3091 fs/ubifs/tnc.c int n = znode->iip - 1; n 3097 fs/ubifs/tnc.c if (n >= 0) { n 3099 fs/ubifs/tnc.c znode = get_znode(c, znode, n); n 3103 fs/ubifs/tnc.c n = znode->child_cnt - 1; n 3104 fs/ubifs/tnc.c znode = get_znode(c, znode, n); n 3128 fs/ubifs/tnc.c int n = znode->iip + 1; n 3134 fs/ubifs/tnc.c if (n < znode->child_cnt) { n 3136 fs/ubifs/tnc.c znode = get_znode(c, znode, n); n 3180 fs/ubifs/tnc.c int n, nn; n 3205 fs/ubifs/tnc.c ubifs_search_zbranch(c, znode, key, &n); n 3206 fs/ubifs/tnc.c if (n < 0) { n 3220 fs/ubifs/tnc.c ubifs_search_zbranch(c, znode, key, &n); n 3221 fs/ubifs/tnc.c ubifs_assert(c, n >= 0); n 3225 fs/ubifs/tnc.c znode = get_znode(c, znode, n); n 3230 fs/ubifs/tnc.c if (znode->zbranch[n].lnum == lnum && znode->zbranch[n].offs == offs) n 3231 fs/ubifs/tnc.c return get_znode(c, znode, n); n 3240 fs/ubifs/tnc.c nn = n; n 3244 fs/ubifs/tnc.c if (n) n 3245 fs/ubifs/tnc.c n -= 1; n 3252 fs/ubifs/tnc.c n = znode->child_cnt - 1; n 3255 fs/ubifs/tnc.c if (znode->zbranch[n].lnum == lnum && n 3256 fs/ubifs/tnc.c znode->zbranch[n].offs == offs) n 3257 fs/ubifs/tnc.c return get_znode(c, znode, n); n 3259 fs/ubifs/tnc.c if (keys_cmp(c, &znode->zbranch[n].key, key) < 0) n 3264 fs/ubifs/tnc.c n = nn; n 3268 fs/ubifs/tnc.c if (++n >= znode->child_cnt) { n 3274 fs/ubifs/tnc.c n = 0; n 3277 fs/ubifs/tnc.c if (znode->zbranch[n].lnum == lnum && n 3278 fs/ubifs/tnc.c znode->zbranch[n].offs == offs) n 3279 fs/ubifs/tnc.c return get_znode(c, znode, n); n 3281 fs/ubifs/tnc.c if (keys_cmp(c, &znode->zbranch[n].key, key) > 0) n 3336 fs/ubifs/tnc.c int n, found, err, nn; n 3339 fs/ubifs/tnc.c found = ubifs_lookup_level0(c, key, &znode, &n); n 3344 fs/ubifs/tnc.c zbr = &znode->zbranch[n]; n 3354 fs/ubifs/tnc.c nn = n; n 3357 fs/ubifs/tnc.c err = tnc_prev(c, &znode, &n); n 3362 fs/ubifs/tnc.c if (keys_cmp(c, key, &znode->zbranch[n].key)) n 3364 fs/ubifs/tnc.c zbr = &znode->zbranch[n]; n 3370 fs/ubifs/tnc.c n = nn; n 3372 fs/ubifs/tnc.c err = tnc_next(c, &znode, &n); n 
3378 fs/ubifs/tnc.c if (keys_cmp(c, key, &znode->zbranch[n].key)) n 3380 fs/ubifs/tnc.c zbr = &znode->zbranch[n]; n 3480 fs/ubifs/tnc.c int err, n; n 3495 fs/ubifs/tnc.c err = ubifs_lookup_level0(c, &from_key, &znode, &n); n 3504 fs/ubifs/tnc.c err = tnc_next(c, &znode, &n); n 3513 fs/ubifs/tnc.c key = &znode->zbranch[n].key; n 622 fs/ubifs/tnc_commit.c int n = znode->iip + 1; n 627 fs/ubifs/tnc_commit.c for (; n < znode->child_cnt; n++) { n 628 fs/ubifs/tnc_commit.c struct ubifs_zbranch *zbr = &znode->zbranch[n]; n 127 fs/ubifs/tnc_misc.c const union ubifs_key *key, int *n) n 143 fs/ubifs/tnc_misc.c *n = mid; n 148 fs/ubifs/tnc_misc.c *n = end - 1; n 151 fs/ubifs/tnc_misc.c ubifs_assert(c, *n >= -1 && *n < znode->child_cnt); n 152 fs/ubifs/tnc_misc.c if (*n == -1) n 155 fs/ubifs/tnc_misc.c ubifs_assert(c, keys_cmp(c, key, &zbr[*n].key) > 0); n 156 fs/ubifs/tnc_misc.c if (*n + 1 < znode->child_cnt) n 157 fs/ubifs/tnc_misc.c ubifs_assert(c, keys_cmp(c, key, &zbr[*n + 1].key) < 0); n 226 fs/ubifs/tnc_misc.c int n; n 230 fs/ubifs/tnc_misc.c for (n = 0; n < zn->child_cnt; n++) { n 231 fs/ubifs/tnc_misc.c if (!zn->zbranch[n].znode) n 235 fs/ubifs/tnc_misc.c !ubifs_zn_dirty(zn->zbranch[n].znode)) n 239 fs/ubifs/tnc_misc.c kfree(zn->zbranch[n].znode); n 1819 fs/ubifs/ubifs.h struct ubifs_znode **zn, int *n); n 1863 fs/ubifs/ubifs.h const union ubifs_key *key, int *n); n 474 fs/udf/super.c unsigned n; n 486 fs/udf/super.c n = option; n 487 fs/udf/super.c if (n != 512 && n != 1024 && n != 2048 && n != 4096) n 489 fs/udf/super.c uopt->blocksize = n; n 325 fs/ufs/balloc.c static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n, n 329 fs/ufs/balloc.c sector_t end = beg + n; n 189 fs/ufs/dir.c static struct page *ufs_get_page(struct inode *dir, unsigned long n) n 192 fs/ufs/dir.c struct page *page = read_mapping_page(mapping, n, NULL); n 257 fs/ufs/dir.c unsigned long start, n; n 275 fs/ufs/dir.c n = start; n 278 fs/ufs/dir.c page = ufs_get_page(dir, n); n 282 fs/ufs/dir.c kaddr += ufs_last_byte(dir, n) - reclen; n 290 fs/ufs/dir.c if (++n >= npages) n 291 fs/ufs/dir.c n = 0; n 292 fs/ufs/dir.c } while (n != start); n 298 fs/ufs/dir.c ui->i_dir_start_lookup = n; n 317 fs/ufs/dir.c unsigned long n; n 329 fs/ufs/dir.c for (n = 0; n <= npages; n++) { n 332 fs/ufs/dir.c page = ufs_get_page(dir, n); n 338 fs/ufs/dir.c dir_end = kaddr + ufs_last_byte(dir, n); n 429 fs/ufs/dir.c unsigned long n = pos >> PAGE_SHIFT; n 440 fs/ufs/dir.c for ( ; n < npages; n++, offset = 0) { n 444 fs/ufs/dir.c struct page *page = ufs_get_page(inode, n); n 457 fs/ufs/dir.c ctx->pos = (n<<PAGE_SHIFT) + offset; n 463 fs/ufs/dir.c limit = kaddr + ufs_last_byte(inode, n) - UFS_DIR_REC_LEN(1); n 54 fs/ufs/inode.c int n = 0; n 59 fs/ufs/inode.c offsets[n++] = i_block; n 61 fs/ufs/inode.c offsets[n++] = UFS_IND_BLOCK; n 62 fs/ufs/inode.c offsets[n++] = i_block; n 64 fs/ufs/inode.c offsets[n++] = UFS_DIND_BLOCK; n 65 fs/ufs/inode.c offsets[n++] = i_block >> ptrs_bits; n 66 fs/ufs/inode.c offsets[n++] = i_block & (ptrs - 1); n 68 fs/ufs/inode.c offsets[n++] = UFS_TIND_BLOCK; n 69 fs/ufs/inode.c offsets[n++] = i_block >> (ptrs_bits * 2); n 70 fs/ufs/inode.c offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1); n 71 fs/ufs/inode.c offsets[n++] = i_block & (ptrs - 1); n 75 fs/ufs/inode.c return n; n 156 fs/ufs/inode.c unsigned n = *p++; n 159 fs/ufs/inode.c fs32_to_cpu(sb, q->key32) + (n>>shift)); n 162 fs/ufs/inode.c ptr = (__fs32 *)bh->b_data + (n & mask); n 180 fs/ufs/inode.c unsigned n = *p++; n 183 fs/ufs/inode.c 
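The fs/ubifs/tnc_misc.c entries above (*n = mid, *n = end - 1, and the assertion that *n >= -1) describe a binary search over a znode's sorted zbranch array: it reports whether the key matched exactly, and otherwise leaves *n at the last slot whose key is below the target, or -1 if there is none. A plain-int sketch of that contract, with hypothetical names:

#include <stdio.h>

static int demo_search(const int *keys, int child_cnt, int key, int *n)
{
	int beg = 0, end = child_cnt, mid;

	while (beg < end) {
		mid = (beg + end) / 2;
		if (key > keys[mid]) {
			beg = mid + 1;
		} else if (key < keys[mid]) {
			end = mid;
		} else {
			*n = mid;
			return 1;	/* exact match */
		}
	}
	*n = end - 1;			/* last key below target, or -1 */
	return 0;
}

int main(void)
{
	int keys[] = { 10, 20, 30, 40 };
	int n, exact;

	exact = demo_search(keys, 4, 25, &n);
	printf("exact=%d n=%d\n", exact, n);	/* exact=0 n=1 (key 20) */
	exact = demo_search(keys, 4, 5, &n);
	printf("exact=%d n=%d\n", exact, n);	/* exact=0 n=-1 */
	return 0;
}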
fs64_to_cpu(sb, q->key64) + (n>>shift)); n 186 fs/ufs/inode.c ptr = (__fs64 *)bh->b_data + (n & mask); n 27 fs/ufs/swab.h fs64_to_cpu(struct super_block *sbp, __fs64 n) n 30 fs/ufs/swab.h return le64_to_cpu((__force __le64)n); n 32 fs/ufs/swab.h return be64_to_cpu((__force __be64)n); n 36 fs/ufs/swab.h cpu_to_fs64(struct super_block *sbp, u64 n) n 39 fs/ufs/swab.h return (__force __fs64)cpu_to_le64(n); n 41 fs/ufs/swab.h return (__force __fs64)cpu_to_be64(n); n 45 fs/ufs/swab.h fs32_to_cpu(struct super_block *sbp, __fs32 n) n 48 fs/ufs/swab.h return le32_to_cpu((__force __le32)n); n 50 fs/ufs/swab.h return be32_to_cpu((__force __be32)n); n 54 fs/ufs/swab.h cpu_to_fs32(struct super_block *sbp, u32 n) n 57 fs/ufs/swab.h return (__force __fs32)cpu_to_le32(n); n 59 fs/ufs/swab.h return (__force __fs32)cpu_to_be32(n); n 63 fs/ufs/swab.h fs32_add(struct super_block *sbp, __fs32 *n, int d) n 66 fs/ufs/swab.h le32_add_cpu((__le32 *)n, d); n 68 fs/ufs/swab.h be32_add_cpu((__be32 *)n, d); n 72 fs/ufs/swab.h fs32_sub(struct super_block *sbp, __fs32 *n, int d) n 75 fs/ufs/swab.h le32_add_cpu((__le32 *)n, -d); n 77 fs/ufs/swab.h be32_add_cpu((__be32 *)n, -d); n 81 fs/ufs/swab.h fs16_to_cpu(struct super_block *sbp, __fs16 n) n 84 fs/ufs/swab.h return le16_to_cpu((__force __le16)n); n 86 fs/ufs/swab.h return be16_to_cpu((__force __be16)n); n 90 fs/ufs/swab.h cpu_to_fs16(struct super_block *sbp, u16 n) n 93 fs/ufs/swab.h return (__force __fs16)cpu_to_le16(n); n 95 fs/ufs/swab.h return (__force __fs16)cpu_to_be16(n); n 99 fs/ufs/swab.h fs16_add(struct super_block *sbp, __fs16 *n, int d) n 102 fs/ufs/swab.h le16_add_cpu((__le16 *)n, d); n 104 fs/ufs/swab.h be16_add_cpu((__be16 *)n, d); n 108 fs/ufs/swab.h fs16_sub(struct super_block *sbp, __fs16 *n, int d) n 111 fs/ufs/swab.h le16_add_cpu((__le16 *)n, -d); n 113 fs/ufs/swab.h be16_add_cpu((__be16 *)n, -d); n 818 fs/unicode/mkutf8data.c struct node *n; n 840 fs/unicode/mkutf8data.c n = node; n 841 fs/unicode/mkutf8data.c while (n && !n->mark) { n 843 fs/unicode/mkutf8data.c n->mark = 1; n 844 fs/unicode/mkutf8data.c n = n->parent; n 858 fs/unicode/mkutf8data.c n = node; n 859 fs/unicode/mkutf8data.c while (n && !n->mark) { n 861 fs/unicode/mkutf8data.c n->mark = 1; n 862 fs/unicode/mkutf8data.c n = n->parent; n 888 fs/unicode/mkutf8data.c n = node; n 889 fs/unicode/mkutf8data.c while (n && !n->mark) { n 891 fs/unicode/mkutf8data.c n->mark = 1; n 892 fs/unicode/mkutf8data.c n = n->parent; n 910 fs/unicode/mkutf8data.c n = node; n 911 fs/unicode/mkutf8data.c while (n && !n->mark) { n 913 fs/unicode/mkutf8data.c n->mark = 1; n 914 fs/unicode/mkutf8data.c n = n->parent; n 1053 fs/unicode/mkutf8data.c struct node *n; n 1097 fs/unicode/mkutf8data.c n = next->root; n 1098 fs/unicode/mkutf8data.c while (n->bitnum != node->bitnum) { n 1099 fs/unicode/mkutf8data.c nbit = 1 << n->bitnum; n 1103 fs/unicode/mkutf8data.c if (n->rightnode == LEAF) n 1105 fs/unicode/mkutf8data.c n = n->right; n 1107 fs/unicode/mkutf8data.c if (n->leftnode == LEAF) n 1109 fs/unicode/mkutf8data.c n = n->left; n 1112 fs/unicode/mkutf8data.c if (n->bitnum != node->bitnum) n 1114 fs/unicode/mkutf8data.c n = n->right; n 1115 fs/unicode/mkutf8data.c right = n; n 735 fs/userfaultfd.c struct userfaultfd_fork_ctx *fctx, *n; n 737 fs/userfaultfd.c list_for_each_entry_safe(fctx, n, fcs, list) { n 858 fs/userfaultfd.c struct userfaultfd_unmap_ctx *ctx, *n; n 861 fs/userfaultfd.c list_for_each_entry_safe(ctx, n, uf, list) { n 65 fs/xattr.c const char *n; n 67 fs/xattr.c n = strcmp_prefix(*name, 
xattr_prefix(handler)); n 68 fs/xattr.c if (n) { n 69 fs/xattr.c if (!handler->prefix ^ !*n) { n 70 fs/xattr.c if (*n) n 74 fs/xattr.c *name = n; n 16 fs/xfs/libxfs/xfs_bit.h static inline uint64_t xfs_mask64hi(int n) n 18 fs/xfs/libxfs/xfs_bit.h return (uint64_t)-1 << (64 - (n)); n 20 fs/xfs/libxfs/xfs_bit.h static inline uint32_t xfs_mask32lo(int n) n 22 fs/xfs/libxfs/xfs_bit.h return ((uint32_t)1 << (n)) - 1; n 24 fs/xfs/libxfs/xfs_bit.h static inline uint64_t xfs_mask64lo(int n) n 26 fs/xfs/libxfs/xfs_bit.h return ((uint64_t)1 << (n)) - 1; n 51 fs/xfs/libxfs/xfs_bit.h int n = 0; n 54 fs/xfs/libxfs/xfs_bit.h n = ffs(w); n 58 fs/xfs/libxfs/xfs_bit.h n = ffs(w); n 59 fs/xfs/libxfs/xfs_bit.h if (n) n 60 fs/xfs/libxfs/xfs_bit.h n += 32; n 63 fs/xfs/libxfs/xfs_bit.h return n - 1; n 3701 fs/xfs/libxfs/xfs_bmap.c int n, n 3714 fs/xfs/libxfs/xfs_bmap.c ASSERT((*bno >= obno) || (n == 0)); n 3746 fs/xfs/libxfs/xfs_bmap.c int *n, n 3758 fs/xfs/libxfs/xfs_bmap.c if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) { n 3765 fs/xfs/libxfs/xfs_bmap.c } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK && n 3774 fs/xfs/libxfs/xfs_bmap.c } else if (*n > 0 && n 3781 fs/xfs/libxfs/xfs_bmap.c } else if (!((*n == 0) && n 3785 fs/xfs/libxfs/xfs_bmap.c (*n)++; n 3810 fs/xfs/libxfs/xfs_bmap.c int n = 0; n 3867 fs/xfs/libxfs/xfs_bmap.c while (bno < end && n < *nmap) { n 3881 fs/xfs/libxfs/xfs_bmap.c n++; n 3886 fs/xfs/libxfs/xfs_bmap.c xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags); n 3887 fs/xfs/libxfs/xfs_bmap.c xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags); n 3890 fs/xfs/libxfs/xfs_bmap.c if (bno >= end || n >= *nmap) n 3897 fs/xfs/libxfs/xfs_bmap.c *nmap = n; n 4276 fs/xfs/libxfs/xfs_bmap.c int n; /* current extent index */ n 4342 fs/xfs/libxfs/xfs_bmap.c n = 0; n 4345 fs/xfs/libxfs/xfs_bmap.c while (bno < end && n < *nmap) { n 4405 fs/xfs/libxfs/xfs_bmap.c end, n, flags); n 4415 fs/xfs/libxfs/xfs_bmap.c xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags); n 4422 fs/xfs/libxfs/xfs_bmap.c if (bno >= end || n >= *nmap || bma.nallocs >= *nmap) n 4430 fs/xfs/libxfs/xfs_bmap.c *nmap = n; n 547 fs/xfs/libxfs/xfs_btree.c int n) n 550 fs/xfs/libxfs/xfs_btree.c (n - 1) * cur->bc_ops->rec_len; n 559 fs/xfs/libxfs/xfs_btree.c int n) n 562 fs/xfs/libxfs/xfs_btree.c (n - 1) * cur->bc_ops->key_len; n 571 fs/xfs/libxfs/xfs_btree.c int n) n 574 fs/xfs/libxfs/xfs_btree.c (n - 1) * cur->bc_ops->key_len + (cur->bc_ops->key_len / 2); n 583 fs/xfs/libxfs/xfs_btree.c int n, n 588 fs/xfs/libxfs/xfs_btree.c (n - 1) * xfs_btree_ptr_len(cur); n 597 fs/xfs/libxfs/xfs_btree.c int n, n 601 fs/xfs/libxfs/xfs_btree.c ((char *)block + xfs_btree_rec_offset(cur, n)); n 610 fs/xfs/libxfs/xfs_btree.c int n, n 614 fs/xfs/libxfs/xfs_btree.c ((char *)block + xfs_btree_key_offset(cur, n)); n 623 fs/xfs/libxfs/xfs_btree.c int n, n 627 fs/xfs/libxfs/xfs_btree.c ((char *)block + xfs_btree_high_key_offset(cur, n)); n 636 fs/xfs/libxfs/xfs_btree.c int n, n 644 fs/xfs/libxfs/xfs_btree.c ((char *)block + xfs_btree_ptr_offset(cur, n, level)); n 2033 fs/xfs/libxfs/xfs_btree.c int n; n 2041 fs/xfs/libxfs/xfs_btree.c for (n = 2; n <= xfs_btree_get_numrecs(block); n++) { n 2042 fs/xfs/libxfs/xfs_btree.c rec = xfs_btree_rec_addr(cur, n, block); n 2064 fs/xfs/libxfs/xfs_btree.c int n; n 2071 fs/xfs/libxfs/xfs_btree.c for (n = 2; n <= xfs_btree_get_numrecs(block); n++) { n 2072 fs/xfs/libxfs/xfs_btree.c hkey = xfs_btree_high_key_addr(cur, n, block); n 490 fs/xfs/libxfs/xfs_btree.h union xfs_btree_rec 
*xfs_btree_rec_addr(struct xfs_btree_cur *cur, int n, n 492 fs/xfs/libxfs/xfs_btree.h union xfs_btree_key *xfs_btree_key_addr(struct xfs_btree_cur *cur, int n, n 494 fs/xfs/libxfs/xfs_btree.h union xfs_btree_key *xfs_btree_high_key_addr(struct xfs_btree_cur *cur, int n, n 496 fs/xfs/libxfs/xfs_btree.h union xfs_btree_ptr *xfs_btree_ptr_addr(struct xfs_btree_cur *cur, int n, n 207 fs/xfs/libxfs/xfs_da_format.c #define XFS_DIR2_DATA_ENTSIZE(n) \ n 208 fs/xfs/libxfs/xfs_da_format.c round_up((offsetof(struct xfs_dir2_data_entry, name[0]) + (n) + \ n 211 fs/xfs/libxfs/xfs_da_format.c #define XFS_DIR3_DATA_ENTSIZE(n) \ n 212 fs/xfs/libxfs/xfs_da_format.c round_up((offsetof(struct xfs_dir2_data_entry, name[0]) + (n) + \ n 218 fs/xfs/libxfs/xfs_da_format.c int n) n 220 fs/xfs/libxfs/xfs_da_format.c return XFS_DIR2_DATA_ENTSIZE(n); n 225 fs/xfs/libxfs/xfs_da_format.c int n) n 227 fs/xfs/libxfs/xfs_da_format.c return XFS_DIR3_DATA_ENTSIZE(n); n 327 fs/xfs/libxfs/xfs_defer.c struct list_head *n; n 338 fs/xfs/libxfs/xfs_defer.c list_for_each_safe(pwi, n, &dfp->dfp_work) { n 362 fs/xfs/libxfs/xfs_defer.c struct list_head *n; n 395 fs/xfs/libxfs/xfs_defer.c list_for_each_safe(li, n, &dfp->dfp_work) { n 347 fs/xfs/libxfs/xfs_dir2_node.c int n; /* count of live freespc ents */ n 385 fs/xfs/libxfs/xfs_dir2_node.c for (i = n = 0; i < be32_to_cpu(ltp->bestcount); i++, from++, to++) { n 387 fs/xfs/libxfs/xfs_dir2_node.c n++; n 394 fs/xfs/libxfs/xfs_dir2_node.c freehdr.nused = n; n 1282 fs/xfs/libxfs/xfs_format.h static inline xfs_inofree_t xfs_inobt_maskn(int i, int n) n 1284 fs/xfs/libxfs/xfs_format.h return ((n >= XFS_INODES_PER_CHUNK ? 0 : XFS_INOBT_MASK(n)) - 1) << i; n 278 fs/xfs/libxfs/xfs_iext_tree.c int n, n 281 fs/xfs/libxfs/xfs_iext_tree.c if (node->keys[n] > offset) n 283 fs/xfs/libxfs/xfs_iext_tree.c if (node->keys[n] < offset) n 392 fs/xfs/libxfs/xfs_iext_tree.c int n) n 394 fs/xfs/libxfs/xfs_iext_tree.c return leaf->recs[n].lo & XFS_IEXT_STARTOFF_MASK; n 69 fs/xfs/libxfs/xfs_inode_fork.h #define XFS_IFORK_FMT_SET(ip,w,n) \ n 71 fs/xfs/libxfs/xfs_inode_fork.h ((ip)->i_d.di_format = (n)) : \ n 73 fs/xfs/libxfs/xfs_inode_fork.h ((ip)->i_d.di_aformat = (n)) : \ n 74 fs/xfs/libxfs/xfs_inode_fork.h ((ip)->i_cformat = (n)))) n 81 fs/xfs/libxfs/xfs_inode_fork.h #define XFS_IFORK_NEXT_SET(ip,w,n) \ n 83 fs/xfs/libxfs/xfs_inode_fork.h ((ip)->i_d.di_nextents = (n)) : \ n 85 fs/xfs/libxfs/xfs_inode_fork.h ((ip)->i_d.di_anextents = (n)) : \ n 86 fs/xfs/libxfs/xfs_inode_fork.h ((ip)->i_cnextents = (n)))) n 1613 fs/xfs/libxfs/xfs_refcount.c struct xfs_refcount_recovery *rr, *n; n 1663 fs/xfs/libxfs/xfs_refcount.c list_for_each_entry_safe(rr, n, &debris, rr_list) { n 1693 fs/xfs/libxfs/xfs_refcount.c list_for_each_entry_safe(rr, n, &debris, rr_list) { n 26 fs/xfs/mrlock.h #define mrlock_init(mrp, t,n,s) mrinit(mrp, n) n 486 fs/xfs/scrub/agheader_repair.c struct xfs_bitmap_range *n; n 531 fs/xfs/scrub/agheader_repair.c for_each_xfs_bitmap_extent(br, n, agfl_extents) { n 582 fs/xfs/scrub/agheader_repair.c struct xfs_bitmap_range *n; n 606 fs/xfs/scrub/agheader_repair.c for_each_xfs_bitmap_extent(br, n, agfl_extents) { n 46 fs/xfs/scrub/bitmap.c struct xfs_bitmap_range *n; n 48 fs/xfs/scrub/bitmap.c for_each_xfs_bitmap_extent(bmr, n, bitmap) { n 22 fs/xfs/scrub/bitmap.h #define for_each_xfs_bitmap_extent(bex, n, bitmap) \ n 23 fs/xfs/scrub/bitmap.h list_for_each_entry_safe((bex), (n), &(bitmap)->list, list) n 25 fs/xfs/scrub/bitmap.h #define for_each_xfs_bitmap_block(b, bex, n, bitmap) \ n 26 fs/xfs/scrub/bitmap.h 
list_for_each_entry_safe((bex), (n), &(bitmap)->list, list) \ n 609 fs/xfs/scrub/btree.c struct check_owner *n; n 696 fs/xfs/scrub/btree.c list_for_each_entry_safe(co, n, &bs.to_check, list) { n 152 fs/xfs/scrub/refcount.c struct xchk_refcnt_frag *n; n 187 fs/xfs/scrub/refcount.c list_for_each_entry_safe(frag, n, &refchk->fragments, list) { n 210 fs/xfs/scrub/refcount.c list_for_each_entry_safe(frag, n, &worklist, list) { n 223 fs/xfs/scrub/refcount.c list_for_each_entry_safe(frag, n, &refchk->fragments, list) { n 258 fs/xfs/scrub/refcount.c list_for_each_entry_safe(frag, n, &worklist, list) { n 262 fs/xfs/scrub/refcount.c list_for_each_entry_safe(frag, n, &refchk->fragments, list) { n 286 fs/xfs/scrub/refcount.c struct xchk_refcnt_frag *n; n 309 fs/xfs/scrub/refcount.c list_for_each_entry_safe(frag, n, &refchk.fragments, list) { n 436 fs/xfs/scrub/repair.c struct xfs_bitmap_range *n; n 448 fs/xfs/scrub/repair.c for_each_xfs_bitmap_block(fsbno, bmr, n, bitmap) { n 601 fs/xfs/scrub/repair.c struct xfs_bitmap_range *n; n 607 fs/xfs/scrub/repair.c for_each_xfs_bitmap_block(fsbno, bmr, n, bitmap) { n 1893 fs/xfs/xfs_buf.c struct xfs_buf *bp, *n; n 1900 fs/xfs/xfs_buf.c list_for_each_entry_safe(bp, n, buffer_list, b_list) { n 564 fs/xfs/xfs_extent_busy.c struct xfs_extent_busy *busyp, *n; n 569 fs/xfs/xfs_extent_busy.c list_for_each_entry_safe(busyp, n, list, list) { n 691 fs/xfs/xfs_inode_item.c struct xfs_log_item *blip, *n; n 703 fs/xfs/xfs_inode_item.c list_for_each_entry_safe(blip, n, &bp->b_li_list, li_bio_list) { n 764 fs/xfs/xfs_inode_item.c list_for_each_entry_safe(blip, n, &tmp, li_bio_list) { n 205 fs/xfs/xfs_linux.h #define xfs_sort(a,n,s,fn) sort(a,n,s,fn,NULL) n 1850 fs/xfs/xfs_log_recover.c xlog_recover_item_t *item, *n; n 1859 fs/xfs/xfs_log_recover.c list_for_each_entry_safe(item, n, &sort_list, ri_list) { n 4314 fs/xfs/xfs_log_recover.c xlog_recover_item_t *item, *n; n 4319 fs/xfs/xfs_log_recover.c list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) { n 435 fs/xfs/xfs_rtalloc.c xfs_rtblock_t n; /* next block to try */ n 491 fs/xfs/xfs_rtalloc.c bbno + i, minlen, maxlen, len, &n, rbpp, n 537 fs/xfs/xfs_rtalloc.c len, &n, rbpp, rsb, prod, &r); n 558 fs/xfs/xfs_rtalloc.c bbno + i, minlen, maxlen, len, &n, rbpp, n 626 fs/xfs/xfs_rtalloc.c xfs_rtblock_t n; /* next block to be tried */ n 662 fs/xfs/xfs_rtalloc.c maxlen, len, &n, rbpp, rsb, prod, &r); n 678 fs/xfs/xfs_rtalloc.c if (XFS_BITTOBLOCK(mp, n) > i + 1) n 679 fs/xfs/xfs_rtalloc.c i = XFS_BITTOBLOCK(mp, n) - 1; n 725 fs/xfs/xfs_rtalloc.c len, &n, rbpp, rsb, prod, &r); n 741 fs/xfs/xfs_rtalloc.c if (XFS_BITTOBLOCK(mp, n) > i + 1) n 742 fs/xfs/xfs_rtalloc.c i = XFS_BITTOBLOCK(mp, n) - 1; n 38 fs/xfs/xfs_symlink.c int n; n 51 fs/xfs/xfs_symlink.c for (n = 0; n < nmaps; n++) { n 52 fs/xfs/xfs_symlink.c d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock); n 53 fs/xfs/xfs_symlink.c byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount); n 165 fs/xfs/xfs_symlink.c int n; n 287 fs/xfs/xfs_symlink.c for (n = 0; n < nmaps; n++) { n 290 fs/xfs/xfs_symlink.c d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock); n 291 fs/xfs/xfs_symlink.c byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount); n 51 include/asm-generic/atomic64.h extern s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n); n 96 include/asm-generic/cmpxchg.h #define cmpxchg_local(ptr, o, n) ({ \ n 98 include/asm-generic/cmpxchg.h (unsigned long)(n), sizeof(*(ptr)))); \ n 103 include/asm-generic/cmpxchg.h #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) n 
106 include/asm-generic/cmpxchg.h #define cmpxchg(ptr, o, n) cmpxchg_local((ptr), (o), (n)) n 107 include/asm-generic/cmpxchg.h #define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) n 20 include/asm-generic/delay.h #define udelay(n) \ n 22 include/asm-generic/delay.h if (__builtin_constant_p(n)) { \ n 23 include/asm-generic/delay.h if ((n) / 20000 >= 1) \ n 26 include/asm-generic/delay.h __const_udelay((n) * 0x10c7ul); \ n 28 include/asm-generic/delay.h __udelay(n); \ n 33 include/asm-generic/delay.h #define ndelay(n) \ n 35 include/asm-generic/delay.h if (__builtin_constant_p(n)) { \ n 36 include/asm-generic/delay.h if ((n) / 20000 >= 1) \ n 39 include/asm-generic/delay.h __const_udelay((n) * 5ul); \ n 41 include/asm-generic/delay.h __ndelay(n); \ n 43 include/asm-generic/div64.h # define do_div(n,base) ({ \ n 46 include/asm-generic/div64.h __rem = ((uint64_t)(n)) % __base; \ n 47 include/asm-generic/div64.h (n) = ((uint64_t)(n)) / __base; \ n 69 include/asm-generic/div64.h #define __div64_const32(n, ___b) \ n 80 include/asm-generic/div64.h uint64_t ___res, ___x, ___t, ___m, ___n = (n); \ n 175 include/asm-generic/div64.h static inline uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias) n 179 include/asm-generic/div64.h uint32_t n_lo = n; n 180 include/asm-generic/div64.h uint32_t n_hi = n >> 32; n 223 include/asm-generic/div64.h # define do_div(n,base) ({ \ n 226 include/asm-generic/div64.h (void)(((typeof((n)) *)0) == ((uint64_t *)0)); \ n 229 include/asm-generic/div64.h __rem = (n) & (__base - 1); \ n 230 include/asm-generic/div64.h (n) >>= ilog2(__base); \ n 234 include/asm-generic/div64.h uint32_t __res_lo, __n_lo = (n); \ n 235 include/asm-generic/div64.h (n) = __div64_const32(n, __base); \ n 237 include/asm-generic/div64.h __res_lo = (n); \ n 239 include/asm-generic/div64.h } else if (likely(((n) >> 32) == 0)) { \ n 240 include/asm-generic/div64.h __rem = (uint32_t)(n) % __base; \ n 241 include/asm-generic/div64.h (n) = (uint32_t)(n) / __base; \ n 243 include/asm-generic/div64.h __rem = __div64_32(&(n), __base); \ n 44 include/asm-generic/local.h #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n)) n 45 include/asm-generic/local.h #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n)) n 45 include/asm-generic/local64.h #define local64_cmpxchg(l, o, n) local_cmpxchg((&(l)->a), (o), (n)) n 46 include/asm-generic/local64.h #define local64_xchg(l, n) local_xchg((&(l)->a), (n)) n 83 include/asm-generic/local64.h #define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n)) n 84 include/asm-generic/local64.h #define local64_xchg(l, n) atomic64_xchg((&(l)->a), (n)) n 14 include/asm-generic/uaccess.h raw_copy_from_user(void *to, const void __user * from, unsigned long n) n 16 include/asm-generic/uaccess.h if (__builtin_constant_p(n)) { n 17 include/asm-generic/uaccess.h switch(n) { n 35 include/asm-generic/uaccess.h memcpy(to, (const void __force *)from, n); n 40 include/asm-generic/uaccess.h raw_copy_to_user(void __user *to, const void *from, unsigned long n) n 42 include/asm-generic/uaccess.h if (__builtin_constant_p(n)) { n 43 include/asm-generic/uaccess.h switch(n) { n 63 include/asm-generic/uaccess.h memcpy((void __force *)to, from, n); n 244 include/asm-generic/uaccess.h #define __strnlen_user(s, n) (strnlen((s), (n)) + 1) n 252 include/asm-generic/uaccess.h static inline long strnlen_user(const char __user *src, long n) n 256 include/asm-generic/uaccess.h return __strnlen_user(src, n); n 264 include/asm-generic/uaccess.h 
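The include/asm-generic/div64.h entries above define do_div(n, base), which divides a 64-bit value in place by a 32-bit base and evaluates to the remainder. A stripped-down sketch of just that calling convention; it drops the constant-base and 32-bit fast paths, and like the kernel macro it relies on the GCC statement-expression extension.

#include <inttypes.h>
#include <stdio.h>

/* Divide in place and hand back the remainder, mirroring do_div(). */
#define demo_do_div(n, base) ({				\
	uint32_t __base = (base);			\
	uint32_t __rem = (uint64_t)(n) % __base;	\
	(n) = (uint64_t)(n) / __base;			\
	__rem;						\
})

int main(void)
{
	uint64_t ns = 10000000123ULL;		/* nanoseconds */
	uint32_t rem = demo_do_div(ns, 1000000000U);

	/* ns now holds the quotient (seconds), rem the leftover ns. */
	printf("%" PRIu64 " s + %u ns\n", ns, rem);
	return 0;
}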
__clear_user(void __user *to, unsigned long n) n 266 include/asm-generic/uaccess.h memset((void __force *)to, 0, n); n 272 include/asm-generic/uaccess.h clear_user(void __user *to, unsigned long n) n 275 include/asm-generic/uaccess.h if (!access_ok(to, n)) n 276 include/asm-generic/uaccess.h return n; n 278 include/asm-generic/uaccess.h return __clear_user(to, n); n 32 include/crypto/internal/rsa.h const u8 *n; n 70 include/drm/drm_ioctl.h #define DRM_IOCTL_NR(n) _IOC_NR(n) n 13 include/dt-bindings/bus/moxtet.h #define MOXTET_IRQ_PERIDOT(n) (8 + (n)) n 16 include/dt-bindings/gpio/uniphier-gpio.h #define UNIPHIER_GPIO_IRQ(n) ((UNIPHIER_GPIO_IRQ_OFFSET) + (n)) n 457 include/linux/acpi.h int acpi_check_region(resource_size_t start, resource_size_t n, n 777 include/linux/acpi.h static inline int acpi_check_region(resource_size_t start, resource_size_t n, n 51 include/linux/amba/sp810.h #define SCCTRL_TIMERENnSEL_SHIFT(n) (15 + ((n) * 2)) n 570 include/linux/ata.h #define ata_id_u32(id,n) \ n 571 include/linux/ata.h (((u32) (id)[(n) + 1] << 16) | ((u32) (id)[(n)])) n 572 include/linux/ata.h #define ata_id_u64(id,n) \ n 573 include/linux/ata.h ( ((u64) (id)[(n) + 3] << 48) | \ n 574 include/linux/ata.h ((u64) (id)[(n) + 2] << 32) | \ n 575 include/linux/ata.h ((u64) (id)[(n) + 1] << 16) | \ n 576 include/linux/ata.h ((u64) (id)[(n) + 0]) ) n 148 include/linux/audit.h size_t n); n 151 include/linux/audit.h size_t n); n 206 include/linux/audit.h const char *buf, size_t n) n 209 include/linux/audit.h const char *string, size_t n) n 146 include/linux/avf/virtchnl.h #define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \ n 147 include/linux/avf/virtchnl.h { virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } n 148 include/linux/avf/virtchnl.h #define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_asset_enum_##X \ n 149 include/linux/avf/virtchnl.h { virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 
1 : 0) } n 39 include/linux/bch.h unsigned int n; n 468 include/linux/bitmap.h #define BITMAP_FROM_U64(n) (n) n 470 include/linux/bitmap.h #define BITMAP_FROM_U64(n) ((unsigned long) ((u64)(n) & ULONG_MAX)), \ n 471 include/linux/bitmap.h ((unsigned long) ((u64)(n) >> 32)) n 9 include/linux/bitops.h # define aligned_byte_mask(n) ((1UL << 8*(n))-1) n 11 include/linux/bitops.h # define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n))) n 20 include/linux/build_bug.h #define __BUILD_BUG_ON_NOT_POWER_OF_2(n) \ n 21 include/linux/build_bug.h BUILD_BUG_ON(((n) & ((n) - 1)) != 0) n 22 include/linux/build_bug.h #define BUILD_BUG_ON_NOT_POWER_OF_2(n) \ n 23 include/linux/build_bug.h BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0)) n 43 include/linux/ceph/decode.h static inline void ceph_decode_copy(void **p, void *pv, size_t n) n 45 include/linux/ceph/decode.h memcpy(pv, *p, n); n 46 include/linux/ceph/decode.h *p += n; n 52 include/linux/ceph/decode.h static inline bool ceph_has_room(void **p, void *end, size_t n) n 54 include/linux/ceph/decode.h return end >= *p && n <= end - *p; n 57 include/linux/ceph/decode.h #define ceph_decode_need(p, end, n, bad) \ n 59 include/linux/ceph/decode.h if (!likely(ceph_has_room(p, end, n))) \ n 84 include/linux/ceph/decode.h #define ceph_decode_copy_safe(p, end, pv, n, bad) \ n 86 include/linux/ceph/decode.h ceph_decode_need(p, end, n, bad); \ n 87 include/linux/ceph/decode.h ceph_decode_copy(p, pv, n); \ n 139 include/linux/ceph/decode.h #define ceph_decode_skip_n(p, end, n, bad) \ n 141 include/linux/ceph/decode.h ceph_decode_need(p, end, n, bad); \ n 142 include/linux/ceph/decode.h *p += n; \ n 351 include/linux/ceph/decode.h #define ceph_encode_need(p, end, n, bad) \ n 353 include/linux/ceph/decode.h if (!likely(ceph_has_room(p, end, n))) \ n 378 include/linux/ceph/decode.h #define ceph_encode_copy_safe(p, end, pv, n, bad) \ n 380 include/linux/ceph/decode.h ceph_encode_need(p, end, n, bad); \ n 381 include/linux/ceph/decode.h ceph_encode_copy(p, pv, n); \ n 383 include/linux/ceph/decode.h #define ceph_encode_string_safe(p, end, s, n, bad) \ n 385 include/linux/ceph/decode.h ceph_encode_need(p, end, n, bad); \ n 386 include/linux/ceph/decode.h ceph_encode_string(p, end, s, n); \ n 193 include/linux/ceph/libceph.h struct rb_node **n = &root->rb_node; \ n 198 include/linux/ceph/libceph.h while (*n) { \ n 199 include/linux/ceph/libceph.h type *cur = rb_entry(*n, type, nodefld); \ n 202 include/linux/ceph/libceph.h parent = *n; \ n 205 include/linux/ceph/libceph.h n = &(*n)->rb_left; \ n 207 include/linux/ceph/libceph.h n = &(*n)->rb_right; \ n 212 include/linux/ceph/libceph.h rb_link_node(&t->nodefld, parent, n); \ n 230 include/linux/ceph/libceph.h struct rb_node *n = root->rb_node; \ n 232 include/linux/ceph/libceph.h while (n) { \ n 233 include/linux/ceph/libceph.h type *cur = rb_entry(n, type, nodefld); \ n 238 include/linux/ceph/libceph.h n = n->rb_left; \ n 240 include/linux/ceph/libceph.h n = n->rb_right; \ n 58 include/linux/ceph/messenger.h #define ENTITY_NAME(n) ceph_entity_type_name((n).type), le64_to_cpu((n).num) n 92 include/linux/ceph/messenger.h #define __ceph_bio_iter_advance_step(it, n, STEP) do { \ n 93 include/linux/ceph/messenger.h unsigned int __n = (n), __cur_n; \ n 112 include/linux/ceph/messenger.h #define ceph_bio_iter_advance(it, n) \ n 113 include/linux/ceph/messenger.h __ceph_bio_iter_advance_step(it, n, 0) n 118 include/linux/ceph/messenger.h #define ceph_bio_iter_advance_step(it, n, BVEC_STEP) \ n 119 
include/linux/ceph/messenger.h __ceph_bio_iter_advance_step(it, n, ({ \ n 136 include/linux/ceph/messenger.h #define __ceph_bvec_iter_advance_step(it, n, STEP) do { \ n 137 include/linux/ceph/messenger.h BUG_ON((n) > (it)->iter.bi_size); \ n 139 include/linux/ceph/messenger.h bvec_iter_advance((it)->bvecs, &(it)->iter, (n)); \ n 145 include/linux/ceph/messenger.h #define ceph_bvec_iter_advance(it, n) \ n 146 include/linux/ceph/messenger.h __ceph_bvec_iter_advance_step(it, n, 0) n 151 include/linux/ceph/messenger.h #define ceph_bvec_iter_advance_step(it, n, BVEC_STEP) \ n 152 include/linux/ceph/messenger.h __ceph_bvec_iter_advance_step(it, n, ({ \ n 157 include/linux/ceph/messenger.h __cur_iter.bi_size = (n); \ n 162 include/linux/ceph/messenger.h #define ceph_bvec_iter_shorten(it, n) do { \ n 163 include/linux/ceph/messenger.h BUG_ON((n) > (it)->iter.bi_size); \ n 164 include/linux/ceph/messenger.h (it)->iter.bi_size = (n); \ n 328 include/linux/cgroup.h static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n) n 331 include/linux/cgroup.h percpu_ref_get_many(&css->refcnt, n); n 408 include/linux/cgroup.h static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n) n 411 include/linux/cgroup.h percpu_ref_put_many(&css->refcnt, n); n 28 include/linux/circ_buf.h int n = ((head) + end) & ((size)-1); \ n 29 include/linux/circ_buf.h n < end ? n : end;}) n 34 include/linux/circ_buf.h int n = (end + (tail)) & ((size)-1); \ n 35 include/linux/circ_buf.h n <= end ? n : end+1;}) n 653 include/linux/clk-provider.h unsigned long *m, unsigned long *n); n 64 include/linux/clk/at91_pmc.h #define AT91_PMC_MUL_GET(n) ((n) >> 16 & 0x7ff) n 66 include/linux/clk/at91_pmc.h #define AT91_PMC3_MUL_GET(n) ((n) >> 18 & 0x7f) n 130 include/linux/clk/at91_pmc.h #define AT91_PMC_SMDDIV(n) (((n) << 8) & AT91_PMC_SMD_DIV) n 132 include/linux/clk/at91_pmc.h #define AT91_PMC_PCKR(n) (0x40 + ((n) * 4)) /* Programmable Clock 0-N Registers */ n 159 include/linux/clk/at91_pmc.h #define AT91_PMC_FSTT(n) BIT(n) n 201 include/linux/clk/at91_pmc.h #define AT91_PMC_AUDIO_PLL_ND(n) ((n) << AT91_PMC_AUDIO_PLL_ND_OFFSET) n 204 include/linux/clk/at91_pmc.h #define AT91_PMC_AUDIO_PLL_QDPMC(n) ((n) << AT91_PMC_AUDIO_PLL_QDPMC_OFFSET) n 210 include/linux/clk/at91_pmc.h #define AT91_PMC_AUDIO_PLL_QDPAD(n) ((n) << AT91_PMC_AUDIO_PLL_QDPAD_OFFSET) n 213 include/linux/clk/at91_pmc.h #define AT91_PMC_AUDIO_PLL_QDPAD_DIV(n) ((n) << AT91_PMC_AUDIO_PLL_QDPAD_DIV_OFFSET) n 217 include/linux/clk/at91_pmc.h #define AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV(n) ((n) << AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_OFFSET) n 26 include/linux/clkdev.h #define CLKDEV_INIT(d, n, c) \ n 29 include/linux/clkdev.h .con_id = n, \ n 612 include/linux/compat.h asmlinkage long compat_sys_pselect6_time32(int n, compat_ulong_t __user *inp, n 617 include/linux/compat.h asmlinkage long compat_sys_pselect6_time64(int n, compat_ulong_t __user *inp, n 880 include/linux/compat.h asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, n 161 include/linux/cpumask.h static inline unsigned int cpumask_next(int n, const struct cpumask *srcp) n 163 include/linux/cpumask.h return n+1; n 166 include/linux/cpumask.h static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) n 168 include/linux/cpumask.h return n+1; n 171 include/linux/cpumask.h static inline unsigned int cpumask_next_and(int n, n 175 include/linux/cpumask.h return n+1; n 178 include/linux/cpumask.h static inline unsigned int cpumask_next_wrap(int n, const 
struct cpumask *mask, n 182 include/linux/cpumask.h return (wrap && n == 0); n 228 include/linux/cpumask.h unsigned int cpumask_next(int n, const struct cpumask *srcp); n 237 include/linux/cpumask.h static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) n 240 include/linux/cpumask.h if (n != -1) n 241 include/linux/cpumask.h cpumask_check(n); n 242 include/linux/cpumask.h return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1); n 245 include/linux/cpumask.h int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *); n 273 include/linux/cpumask.h extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap); n 566 include/linux/cpumask.h const struct cpumask *srcp, int n) n 568 include/linux/cpumask.h bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n, n 579 include/linux/cpumask.h const struct cpumask *srcp, int n) n 581 include/linux/cpumask.h bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n, n 57 include/linux/dcache.h #define QSTR_INIT(n,l) { { { .len = l } }, .name = n } n 43 include/linux/delay.h #define mdelay(n) (\ n 44 include/linux/delay.h (__builtin_constant_p(n) && (n)<=MAX_UDELAY_MS) ? udelay((n)*1000) : \ n 45 include/linux/delay.h ({unsigned long __ms=(n); while (__ms--) udelay(1000);})) n 582 include/linux/device-mapper.h #define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz)) n 584 include/linux/device-mapper.h #define dm_sector_div_up(n, sz) ( \ n 586 include/linux/device-mapper.h sector_t _r = ((n) + (sz) - 1); \ n 595 include/linux/device-mapper.h #define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz)) n 606 include/linux/device-mapper.h static inline sector_t to_sector(unsigned long long n) n 608 include/linux/device-mapper.h return (n >> SECTOR_SHIFT); n 611 include/linux/device-mapper.h static inline unsigned long to_bytes(sector_t n) n 613 include/linux/device-mapper.h return (n << SECTOR_SHIFT); n 922 include/linux/device.h size_t n, size_t size, gfp_t flags) n 926 include/linux/device.h if (unlikely(check_mul_overflow(n, size, &bytes))) n 932 include/linux/device.h size_t n, size_t size, gfp_t flags) n 934 include/linux/device.h return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO); n 56 include/linux/dio.h #define to_dio_dev(n) container_of(n, struct dio_dev, dev) n 137 include/linux/dm-bufio.h void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n); n 142 include/linux/dma-mapping.h #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) n 617 include/linux/dma-mapping.h #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0) n 618 include/linux/dma-mapping.h #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0) n 52 include/linux/dynamic_debug.h int ddebug_add_module(struct _ddebug *tab, unsigned int n, n 180 include/linux/dynamic_debug.h static inline int ddebug_add_module(struct _ddebug *tab, unsigned int n, n 1132 include/linux/efi.h #define efi_early_memdesc_ptr(map, desc_size, n) \ n 1133 include/linux/efi.h (efi_memory_desc_t *)((void *)(map) + ((n) * (desc_size))) n 48 include/linux/eisa.h #define to_eisa_device(n) container_of(n, struct eisa_device, dev) n 41 include/linux/eventfd.h __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n); n 64 include/linux/eventfd.h static inline int eventfd_signal(struct eventfd_ctx *ctx, int n) n 185 include/linux/f2fs_fs.h #define GET_ORPHAN_BLOCKS(n) (((n) + F2FS_ORPHANS_PER_BLOCK - 1) / \ n 446 include/linux/filter.h #define __BPF_MAP(n, ...) 
__BPF_MAP_##n(__VA_ARGS__) n 447 include/linux/filter.h #define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__) n 460 include/linux/filter.h #define __BPF_PAD(n) \ n 461 include/linux/filter.h __BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \ n 3458 include/linux/fs.h void simple_transaction_set(struct file *file, size_t n); n 95 include/linux/fs_parser.h #define lookup_constant(t, n, nf) __lookup_constant(t, ARRAY_SIZE(t), (n), (nf)) n 40 include/linux/fscrypt.h #define FSTR_INIT(n, l) { .name = n, .len = l } n 230 include/linux/fscrypt.h const struct fscrypt_digested_name *n = n 237 include/linux/fscrypt.h n->digest, FSCRYPT_FNAME_DIGEST_SIZE); n 68 include/linux/fsl_ifc.h #define IFC_AMASK(n) (IFC_AMASK_MASK << \ n 69 include/linux/fsl_ifc.h (__ilog2(n) - IFC_AMASK_SHIFT)) n 110 include/linux/fsl_ifc.h #define CSOR_NAND_PB(n) ((__ilog2(n) - 5) << CSOR_NAND_PB_SHIFT) n 134 include/linux/fsl_ifc.h #define CSOR_NOR_ADM_SHIFT(n) ((n) << CSOR_NOR_ADM_SHIFT_SHIFT) n 163 include/linux/fsl_ifc.h #define CSOR_GPCM_GPTO(n) ((__ilog2(n) - 8) << CSOR_GPCM_GPTO_SHIFT) n 171 include/linux/fsl_ifc.h #define CSOR_GPCM_ADM_SHIFT(n) ((n) << CSOR_GPCM_ADM_SHIFT_SHIFT) n 175 include/linux/fsl_ifc.h #define CSOR_GPCM_GAPERRD(n) (((n) - 1) << CSOR_GPCM_GAPERRD_SHIFT) n 240 include/linux/fsl_ifc.h #define IFC_CCR_CLK_DIV(n) ((n-1) << IFC_CCR_CLK_DIV_SHIFT) n 244 include/linux/fsl_ifc.h #define IFC_CCR_CLK_DLY(n) ((n) << IFC_CCR_CLK_DLY_SHIFT) n 273 include/linux/fsl_ifc.h #define IFC_NAND_NCFGR_NUM_LOOP(n) ((n) << IFC_NAND_NCFGR_NUM_LOOP_SHIFT) n 439 include/linux/fsl_ifc.h #define PGRDCMPL_EVT_STAT_SECTION_SP(n) (1 << (31 - (n))) n 441 include/linux/fsl_ifc.h #define PGRDCMPL_EVT_STAT_LP_2K(n) (0xF << (28 - (n)*4)) n 443 include/linux/fsl_ifc.h #define PGRDCMPL_EVT_STAT_LP_4K(n) (0xFF << (24 - (n)*8)) n 535 include/linux/fsl_ifc.h #define IFC_NAND_NCR_FTOCNT(n) ((_ilog2(n) - 8) << IFC_NAND_NCR_FTOCNT_SHIFT) n 614 include/linux/fsl_ifc.h #define IFC_NORCR_NUM_PHASE(n) ((n-1) << IFC_NORCR_NUM_PHASE_SHIFT) n 618 include/linux/fsl_ifc.h #define IFC_NORCR_STOCNT(n) ((__ilog2(n) - 8) << IFC_NORCR_STOCNT_SHIFT) n 672 include/linux/ftrace.h # define ftrace_return_address(n) __builtin_return_address(n) n 674 include/linux/ftrace.h # define ftrace_return_address(n) 0UL n 97 include/linux/gpio/machine.h void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n); n 104 include/linux/gpio/machine.h void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n) {} n 911 include/linux/hid.h s32 hid_snto32(__u32 value, unsigned n); n 913 include/linux/hid.h unsigned offset, unsigned n); n 167 include/linux/hugetlb.h #define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; }) n 173 include/linux/hugetlb.h #define hugetlb_report_node_meminfo(n, buf) 0 n 490 include/linux/i2c.h unsigned n); n 494 include/linux/i2c.h unsigned n) n 2028 include/linux/ieee80211.h u8 n; n 2034 include/linux/ieee80211.h n = hweight8(ppe_thres_hdr & n 2036 include/linux/ieee80211.h n *= (1 + ((ppe_thres_hdr & IEEE80211_PPE_THRES_NSS_MASK) >> n 2043 include/linux/ieee80211.h n = (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) + 7; n 2044 include/linux/ieee80211.h n = DIV_ROUND_UP(n, 8); n 2046 include/linux/ieee80211.h return n; n 123 include/linux/iio/adc/ad_sigma_delta.h const struct ad_sd_calib_data *cd, unsigned int n); n 139 include/linux/iio/buffer-dma.h int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n, n 49 include/linux/iio/buffer_impl.h size_t n, n 229 include/linux/ioport.h #define 
request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), 0) n 230 include/linux/ioport.h #define request_muxed_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), IORESOURCE_MUXED) n 231 include/linux/ioport.h #define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl) n 232 include/linux/ioport.h #define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0) n 233 include/linux/ioport.h #define request_mem_region_exclusive(start,n,name) \ n 234 include/linux/ioport.h __request_region(&iomem_resource, (start), (n), (name), IORESOURCE_EXCLUSIVE) n 239 include/linux/ioport.h resource_size_t n, n 243 include/linux/ioport.h #define release_region(start,n) __release_region(&ioport_resource, (start), (n)) n 244 include/linux/ioport.h #define release_mem_region(start,n) __release_region(&iomem_resource, (start), (n)) n 260 include/linux/ioport.h #define devm_request_region(dev,start,n,name) \ n 261 include/linux/ioport.h __devm_request_region(dev, &ioport_resource, (start), (n), (name)) n 262 include/linux/ioport.h #define devm_request_mem_region(dev,start,n,name) \ n 263 include/linux/ioport.h __devm_request_region(dev, &iomem_resource, (start), (n), (name)) n 267 include/linux/ioport.h resource_size_t n, const char *name); n 269 include/linux/ioport.h #define devm_release_region(dev, start, n) \ n 270 include/linux/ioport.h __devm_release_region(dev, &ioport_resource, (start), (n)) n 271 include/linux/ioport.h #define devm_release_mem_region(dev, start, n) \ n 272 include/linux/ioport.h __devm_release_region(dev, &iomem_resource, (start), (n)) n 275 include/linux/ioport.h resource_size_t start, resource_size_t n); n 1133 include/linux/irq.h #define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX) n 164 include/linux/irqchip/arm-gic-v3.h #define GIC_ENCODE_SZ(n, w) (((unsigned long)(n) - 1) & GENMASK_ULL(((w) - 1), 0)) n 60 include/linux/jbd2.h #define jbd_debug(n, fmt, a...) \ n 61 include/linux/jbd2.h __jbd2_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a) n 63 include/linux/jbd2.h #define jbd_debug(n, fmt, a...) 
/**/ n 30 include/linux/jhash.h #define jhash_size(n) ((u32)1<<(n)) n 32 include/linux/jhash.h #define jhash_mask(n) (jhash_size(n)-1) n 459 include/linux/jiffies.h extern u64 nsecs_to_jiffies64(u64 n); n 460 include/linux/jiffies.h extern unsigned long nsecs_to_jiffies(u64 n); n 192 include/linux/kernel.h #define upper_32_bits(n) ((u32)(((n) >> 16) >> 16)) n 198 include/linux/kernel.h #define lower_32_bits(n) ((u32)(n)) n 488 include/linux/kfifo.h #define kfifo_in(fifo, buf, n) \ n 492 include/linux/kfifo.h unsigned long __n = (n); \ n 510 include/linux/kfifo.h #define kfifo_in_spinlocked(fifo, buf, n, lock) \ n 515 include/linux/kfifo.h __ret = kfifo_in(fifo, buf, n); \ n 521 include/linux/kfifo.h #define kfifo_in_locked(fifo, buf, n, lock) \ n 522 include/linux/kfifo.h kfifo_in_spinlocked(fifo, buf, n, lock) n 536 include/linux/kfifo.h #define kfifo_out(fifo, buf, n) \ n 541 include/linux/kfifo.h unsigned long __n = (n); \ n 560 include/linux/kfifo.h #define kfifo_out_spinlocked(fifo, buf, n, lock) \ n 566 include/linux/kfifo.h __ret = kfifo_out(fifo, buf, n); \ n 573 include/linux/kfifo.h #define kfifo_out_locked(fifo, buf, n, lock) \ n 574 include/linux/kfifo.h kfifo_out_spinlocked(fifo, buf, n, lock) n 744 include/linux/kfifo.h #define kfifo_out_peek(fifo, buf, n) \ n 749 include/linux/kfifo.h unsigned long __n = (n); \ n 43 include/linux/klist.h extern void klist_add_tail(struct klist_node *n, struct klist *k); n 44 include/linux/klist.h extern void klist_add_head(struct klist_node *n, struct klist *k); n 45 include/linux/klist.h extern void klist_add_behind(struct klist_node *n, struct klist_node *pos); n 46 include/linux/klist.h extern void klist_add_before(struct klist_node *n, struct klist_node *pos); n 48 include/linux/klist.h extern void klist_del(struct klist_node *n); n 49 include/linux/klist.h extern void klist_remove(struct klist_node *n); n 51 include/linux/klist.h extern int klist_node_attached(struct klist_node *n); n 62 include/linux/klist.h struct klist_node *n); n 23 include/linux/kref.h #define KREF_INIT(n) { .refcount = REFCOUNT_INIT(n), } n 67 include/linux/linkage.h # define asmlinkage_protect(n, ret, args...) 
do { } while (0) n 52 include/linux/linux_logo.h unsigned int n); n 55 include/linux/linux_logo.h unsigned int n) n 555 include/linux/list.h #define list_for_each_safe(pos, n, head) \ n 556 include/linux/list.h for (pos = (head)->next, n = pos->next; pos != (head); \ n 557 include/linux/list.h pos = n, n = pos->next) n 565 include/linux/list.h #define list_for_each_prev_safe(pos, n, head) \ n 566 include/linux/list.h for (pos = (head)->prev, n = pos->prev; \ n 568 include/linux/list.h pos = n, n = pos->prev) n 663 include/linux/list.h #define list_for_each_entry_safe(pos, n, head, member) \ n 665 include/linux/list.h n = list_next_entry(pos, member); \ n 667 include/linux/list.h pos = n, n = list_next_entry(n, member)) n 679 include/linux/list.h #define list_for_each_entry_safe_continue(pos, n, head, member) \ n 681 include/linux/list.h n = list_next_entry(pos, member); \ n 683 include/linux/list.h pos = n, n = list_next_entry(n, member)) n 695 include/linux/list.h #define list_for_each_entry_safe_from(pos, n, head, member) \ n 696 include/linux/list.h for (n = list_next_entry(pos, member); \ n 698 include/linux/list.h pos = n, n = list_next_entry(n, member)) n 710 include/linux/list.h #define list_for_each_entry_safe_reverse(pos, n, head, member) \ n 712 include/linux/list.h n = list_prev_entry(pos, member); \ n 714 include/linux/list.h pos = n, n = list_prev_entry(n, member)) n 728 include/linux/list.h #define list_safe_reset_next(pos, n, member) \ n 729 include/linux/list.h n = list_next_entry(pos, member) n 757 include/linux/list.h static inline void __hlist_del(struct hlist_node *n) n 759 include/linux/list.h struct hlist_node *next = n->next; n 760 include/linux/list.h struct hlist_node **pprev = n->pprev; n 767 include/linux/list.h static inline void hlist_del(struct hlist_node *n) n 769 include/linux/list.h __hlist_del(n); n 770 include/linux/list.h n->next = LIST_POISON1; n 771 include/linux/list.h n->pprev = LIST_POISON2; n 774 include/linux/list.h static inline void hlist_del_init(struct hlist_node *n) n 776 include/linux/list.h if (!hlist_unhashed(n)) { n 777 include/linux/list.h __hlist_del(n); n 778 include/linux/list.h INIT_HLIST_NODE(n); n 782 include/linux/list.h static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) n 785 include/linux/list.h n->next = first; n 787 include/linux/list.h first->pprev = &n->next; n 788 include/linux/list.h WRITE_ONCE(h->first, n); n 789 include/linux/list.h n->pprev = &h->first; n 793 include/linux/list.h static inline void hlist_add_before(struct hlist_node *n, n 796 include/linux/list.h n->pprev = next->pprev; n 797 include/linux/list.h n->next = next; n 798 include/linux/list.h next->pprev = &n->next; n 799 include/linux/list.h WRITE_ONCE(*(n->pprev), n); n 802 include/linux/list.h static inline void hlist_add_behind(struct hlist_node *n, n 805 include/linux/list.h n->next = prev->next; n 806 include/linux/list.h prev->next = n; n 807 include/linux/list.h n->pprev = &prev->next; n 809 include/linux/list.h if (n->next) n 810 include/linux/list.h n->next->pprev = &n->next; n 814 include/linux/list.h static inline void hlist_add_fake(struct hlist_node *n) n 816 include/linux/list.h n->pprev = &n->next; n 829 include/linux/list.h hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h) n 831 include/linux/list.h return !n->next && n->pprev == &h->first; n 852 include/linux/list.h #define hlist_for_each_safe(pos, n, head) \ n 853 include/linux/list.h for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ n 
854 include/linux/list.h pos = n) n 898 include/linux/list.h #define hlist_for_each_entry_safe(pos, n, head, member) \ n 900 include/linux/list.h pos && ({ n = pos->member.next; 1; }); \ n 901 include/linux/list.h pos = hlist_entry_safe(n, typeof(*pos), member)) n 64 include/linux/list_bl.h struct hlist_bl_node *n) n 66 include/linux/list_bl.h LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK); n 69 include/linux/list_bl.h h->first = (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK); n 77 include/linux/list_bl.h static inline void hlist_bl_add_head(struct hlist_bl_node *n, n 82 include/linux/list_bl.h n->next = first; n 84 include/linux/list_bl.h first->pprev = &n->next; n 85 include/linux/list_bl.h n->pprev = &h->first; n 86 include/linux/list_bl.h hlist_bl_set_first(h, n); n 89 include/linux/list_bl.h static inline void hlist_bl_add_before(struct hlist_bl_node *n, n 94 include/linux/list_bl.h n->pprev = pprev; n 95 include/linux/list_bl.h n->next = next; n 96 include/linux/list_bl.h next->pprev = &n->next; n 101 include/linux/list_bl.h ((uintptr_t)n | ((uintptr_t)*pprev & LIST_BL_LOCKMASK))); n 104 include/linux/list_bl.h static inline void hlist_bl_add_behind(struct hlist_bl_node *n, n 107 include/linux/list_bl.h n->next = prev->next; n 108 include/linux/list_bl.h n->pprev = &prev->next; n 109 include/linux/list_bl.h prev->next = n; n 111 include/linux/list_bl.h if (n->next) n 112 include/linux/list_bl.h n->next->pprev = &n->next; n 115 include/linux/list_bl.h static inline void __hlist_bl_del(struct hlist_bl_node *n) n 117 include/linux/list_bl.h struct hlist_bl_node *next = n->next; n 118 include/linux/list_bl.h struct hlist_bl_node **pprev = n->pprev; n 120 include/linux/list_bl.h LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK); n 131 include/linux/list_bl.h static inline void hlist_bl_del(struct hlist_bl_node *n) n 133 include/linux/list_bl.h __hlist_bl_del(n); n 134 include/linux/list_bl.h n->next = LIST_POISON1; n 135 include/linux/list_bl.h n->pprev = LIST_POISON2; n 138 include/linux/list_bl.h static inline void hlist_bl_del_init(struct hlist_bl_node *n) n 140 include/linux/list_bl.h if (!hlist_bl_unhashed(n)) { n 141 include/linux/list_bl.h __hlist_bl_del(n); n 142 include/linux/list_bl.h INIT_HLIST_BL_NODE(n); n 183 include/linux/list_bl.h #define hlist_bl_for_each_entry_safe(tpos, pos, n, head, member) \ n 185 include/linux/list_bl.h pos && ({ n = pos->next; 1; }) && \ n 187 include/linux/list_bl.h pos = n) n 69 include/linux/list_nulls.h static inline void hlist_nulls_add_head(struct hlist_nulls_node *n, n 74 include/linux/list_nulls.h n->next = first; n 75 include/linux/list_nulls.h WRITE_ONCE(n->pprev, &h->first); n 76 include/linux/list_nulls.h h->first = n; n 78 include/linux/list_nulls.h WRITE_ONCE(first->pprev, &n->next); n 81 include/linux/list_nulls.h static inline void __hlist_nulls_del(struct hlist_nulls_node *n) n 83 include/linux/list_nulls.h struct hlist_nulls_node *next = n->next; n 84 include/linux/list_nulls.h struct hlist_nulls_node **pprev = n->pprev; n 91 include/linux/list_nulls.h static inline void hlist_nulls_del(struct hlist_nulls_node *n) n 93 include/linux/list_nulls.h __hlist_nulls_del(n); n 94 include/linux/list_nulls.h WRITE_ONCE(n->pprev, LIST_POISON2); n 133 include/linux/llist.h #define llist_for_each_safe(pos, n, node) \ n 134 include/linux/llist.h for ((pos) = (node); (pos) && ((n) = (pos)->next, true); (pos) = (n)) n 173 include/linux/llist.h #define llist_for_each_entry_safe(pos, n, node, member) \ n 176 
include/linux/llist.h (n = llist_entry(pos->member.next, typeof(*n), member), true); \ n 177 include/linux/llist.h pos = n) n 430 include/linux/lockdep.h # define lock_acquire(l, s, t, r, c, n, i) do { } while (0) n 431 include/linux/lockdep.h # define lock_release(l, n, i) do { } while (0) n 433 include/linux/lockdep.h # define lock_set_class(l, n, k, s, i) do { } while (0) n 501 include/linux/lockdep.h #define lockdep_init_map_crosslock(m, n, k, s) do {} while (0) n 588 include/linux/lockdep.h #define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i) n 589 include/linux/lockdep.h #define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 1, n, i) n 590 include/linux/lockdep.h #define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i) n 593 include/linux/lockdep.h #define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) n 594 include/linux/lockdep.h #define spin_release(l, n, i) lock_release(l, n, i) n 598 include/linux/lockdep.h #define rwlock_release(l, n, i) lock_release(l, n, i) n 602 include/linux/lockdep.h #define seqcount_release(l, n, i) lock_release(l, n, i) n 605 include/linux/lockdep.h #define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) n 606 include/linux/lockdep.h #define mutex_release(l, n, i) lock_release(l, n, i) n 609 include/linux/lockdep.h #define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) n 611 include/linux/lockdep.h #define rwsem_release(l, n, i) lock_release(l, n, i) n 22 include/linux/log2.h int __ilog2_u32(u32 n) n 24 include/linux/log2.h return fls(n) - 1; n 30 include/linux/log2.h int __ilog2_u64(u64 n) n 32 include/linux/log2.h return fls64(n) - 1; n 45 include/linux/log2.h bool is_power_of_2(unsigned long n) n 47 include/linux/log2.h return (n != 0 && ((n & (n - 1)) == 0)); n 55 include/linux/log2.h unsigned long __roundup_pow_of_two(unsigned long n) n 57 include/linux/log2.h return 1UL << fls_long(n - 1); n 65 include/linux/log2.h unsigned long __rounddown_pow_of_two(unsigned long n) n 67 include/linux/log2.h return 1UL << (fls_long(n) - 1); n 77 include/linux/log2.h #define const_ilog2(n) \ n 79 include/linux/log2.h __builtin_constant_p(n) ? ( \ n 80 include/linux/log2.h (n) < 2 ? 0 : \ n 81 include/linux/log2.h (n) & (1ULL << 63) ? 63 : \ n 82 include/linux/log2.h (n) & (1ULL << 62) ? 62 : \ n 83 include/linux/log2.h (n) & (1ULL << 61) ? 61 : \ n 84 include/linux/log2.h (n) & (1ULL << 60) ? 60 : \ n 85 include/linux/log2.h (n) & (1ULL << 59) ? 59 : \ n 86 include/linux/log2.h (n) & (1ULL << 58) ? 58 : \ n 87 include/linux/log2.h (n) & (1ULL << 57) ? 57 : \ n 88 include/linux/log2.h (n) & (1ULL << 56) ? 56 : \ n 89 include/linux/log2.h (n) & (1ULL << 55) ? 55 : \ n 90 include/linux/log2.h (n) & (1ULL << 54) ? 54 : \ n 91 include/linux/log2.h (n) & (1ULL << 53) ? 53 : \ n 92 include/linux/log2.h (n) & (1ULL << 52) ? 52 : \ n 93 include/linux/log2.h (n) & (1ULL << 51) ? 51 : \ n 94 include/linux/log2.h (n) & (1ULL << 50) ? 50 : \ n 95 include/linux/log2.h (n) & (1ULL << 49) ? 49 : \ n 96 include/linux/log2.h (n) & (1ULL << 48) ? 48 : \ n 97 include/linux/log2.h (n) & (1ULL << 47) ? 47 : \ n 98 include/linux/log2.h (n) & (1ULL << 46) ? 46 : \ n 99 include/linux/log2.h (n) & (1ULL << 45) ? 45 : \ n 100 include/linux/log2.h (n) & (1ULL << 44) ? 44 : \ n 101 include/linux/log2.h (n) & (1ULL << 43) ? 43 : \ n 102 include/linux/log2.h (n) & (1ULL << 42) ? 42 : \ n 103 include/linux/log2.h (n) & (1ULL << 41) ? 
41 : \ n 104 include/linux/log2.h (n) & (1ULL << 40) ? 40 : \ n 105 include/linux/log2.h (n) & (1ULL << 39) ? 39 : \ n 106 include/linux/log2.h (n) & (1ULL << 38) ? 38 : \ n 107 include/linux/log2.h (n) & (1ULL << 37) ? 37 : \ n 108 include/linux/log2.h (n) & (1ULL << 36) ? 36 : \ n 109 include/linux/log2.h (n) & (1ULL << 35) ? 35 : \ n 110 include/linux/log2.h (n) & (1ULL << 34) ? 34 : \ n 111 include/linux/log2.h (n) & (1ULL << 33) ? 33 : \ n 112 include/linux/log2.h (n) & (1ULL << 32) ? 32 : \ n 113 include/linux/log2.h (n) & (1ULL << 31) ? 31 : \ n 114 include/linux/log2.h (n) & (1ULL << 30) ? 30 : \ n 115 include/linux/log2.h (n) & (1ULL << 29) ? 29 : \ n 116 include/linux/log2.h (n) & (1ULL << 28) ? 28 : \ n 117 include/linux/log2.h (n) & (1ULL << 27) ? 27 : \ n 118 include/linux/log2.h (n) & (1ULL << 26) ? 26 : \ n 119 include/linux/log2.h (n) & (1ULL << 25) ? 25 : \ n 120 include/linux/log2.h (n) & (1ULL << 24) ? 24 : \ n 121 include/linux/log2.h (n) & (1ULL << 23) ? 23 : \ n 122 include/linux/log2.h (n) & (1ULL << 22) ? 22 : \ n 123 include/linux/log2.h (n) & (1ULL << 21) ? 21 : \ n 124 include/linux/log2.h (n) & (1ULL << 20) ? 20 : \ n 125 include/linux/log2.h (n) & (1ULL << 19) ? 19 : \ n 126 include/linux/log2.h (n) & (1ULL << 18) ? 18 : \ n 127 include/linux/log2.h (n) & (1ULL << 17) ? 17 : \ n 128 include/linux/log2.h (n) & (1ULL << 16) ? 16 : \ n 129 include/linux/log2.h (n) & (1ULL << 15) ? 15 : \ n 130 include/linux/log2.h (n) & (1ULL << 14) ? 14 : \ n 131 include/linux/log2.h (n) & (1ULL << 13) ? 13 : \ n 132 include/linux/log2.h (n) & (1ULL << 12) ? 12 : \ n 133 include/linux/log2.h (n) & (1ULL << 11) ? 11 : \ n 134 include/linux/log2.h (n) & (1ULL << 10) ? 10 : \ n 135 include/linux/log2.h (n) & (1ULL << 9) ? 9 : \ n 136 include/linux/log2.h (n) & (1ULL << 8) ? 8 : \ n 137 include/linux/log2.h (n) & (1ULL << 7) ? 7 : \ n 138 include/linux/log2.h (n) & (1ULL << 6) ? 6 : \ n 139 include/linux/log2.h (n) & (1ULL << 5) ? 5 : \ n 140 include/linux/log2.h (n) & (1ULL << 4) ? 4 : \ n 141 include/linux/log2.h (n) & (1ULL << 3) ? 3 : \ n 142 include/linux/log2.h (n) & (1ULL << 2) ? 2 : \ n 156 include/linux/log2.h #define ilog2(n) \ n 158 include/linux/log2.h __builtin_constant_p(n) ? \ n 159 include/linux/log2.h const_ilog2(n) : \ n 160 include/linux/log2.h (sizeof(n) <= 4) ? \ n 161 include/linux/log2.h __ilog2_u32(n) : \ n 162 include/linux/log2.h __ilog2_u64(n) \ n 173 include/linux/log2.h #define roundup_pow_of_two(n) \ n 175 include/linux/log2.h __builtin_constant_p(n) ? ( \ n 176 include/linux/log2.h (n == 1) ? 1 : \ n 177 include/linux/log2.h (1UL << (ilog2((n) - 1) + 1)) \ n 179 include/linux/log2.h __roundup_pow_of_two(n) \ n 190 include/linux/log2.h #define rounddown_pow_of_two(n) \ n 192 include/linux/log2.h __builtin_constant_p(n) ? ( \ n 193 include/linux/log2.h (1UL << ilog2(n))) : \ n 194 include/linux/log2.h __rounddown_pow_of_two(n) \ n 198 include/linux/log2.h int __order_base_2(unsigned long n) n 200 include/linux/log2.h return n > 1 ? ilog2(n - 1) + 1 : 0; n 216 include/linux/log2.h #define order_base_2(n) \ n 218 include/linux/log2.h __builtin_constant_p(n) ? ( \ n 219 include/linux/log2.h ((n) == 0 || (n) == 1) ? 
0 : \ n 220 include/linux/log2.h ilog2((n) - 1) + 1) : \ n 221 include/linux/log2.h __order_base_2(n) \ n 225 include/linux/log2.h int __bits_per(unsigned long n) n 227 include/linux/log2.h if (n < 2) n 229 include/linux/log2.h if (is_power_of_2(n)) n 230 include/linux/log2.h return order_base_2(n) + 1; n 231 include/linux/log2.h return order_base_2(n); n 249 include/linux/log2.h #define bits_per(n) \ n 251 include/linux/log2.h __builtin_constant_p(n) ? ( \ n 252 include/linux/log2.h ((n) == 0 || (n) == 1) \ n 253 include/linux/log2.h ? 1 : ilog2(n) + 1 \ n 255 include/linux/log2.h __bits_per(n) \ n 100 include/linux/maple.h #define to_maple_dev(n) container_of(n, struct maple_device, dev) n 101 include/linux/maple.h #define to_maple_driver(n) container_of(n, struct maple_driver, drv) n 206 include/linux/mfd/as3722.h #define AS3722_LDO3_MODE_VAL(n) (((n) & 0x3) << 6) n 219 include/linux/mfd/as3722.h #define AS3722_SDn_CTRL(n) BIT(n) n 297 include/linux/mfd/as3722.h #define AS3722_GPIO_MODE_VAL(n) ((n) & AS3722_GPIO_MODE_MASK) n 301 include/linux/mfd/as3722.h #define AS3722_GPIO_IOSF_VAL(n) (((n) & 0xF) << 3) n 317 include/linux/mfd/as3722.h #define AS3722_GPIOn_SIGNAL(n) BIT(n) n 318 include/linux/mfd/as3722.h #define AS3722_GPIOn_CONTROL_REG(n) (AS3722_GPIO0_CONTROL_REG + n) n 50 include/linux/mfd/bd9571mwv.h #define BD9571MWV_AVS_VD09_VID(n) (0x32 + (n)) n 51 include/linux/mfd/bd9571mwv.h #define BD9571MWV_AVS_DVFS_VID(n) (0x36 + (n)) n 72 include/linux/mfd/bd9571mwv.h #define BD9571MWV_REG_KEEP(n) (0x70 + (n)) n 14 include/linux/mfd/da8xx-cfgchip.h #define CFGCHIP(n) ((n) * 4) n 18 include/linux/mfd/da8xx-cfgchip.h #define CFGCHIP0_EDMA30TC1DBS(n) ((n) << 2) n 23 include/linux/mfd/da8xx-cfgchip.h #define CFGCHIP0_EDMA30TC0DBS(n) ((n) << 0) n 30 include/linux/mfd/da8xx-cfgchip.h #define CFGCHIP1_CAP2SRC(n) ((n) << 27) n 47 include/linux/mfd/da8xx-cfgchip.h #define CFGCHIP1_CAP1SRC(n) ((n) << 22) n 64 include/linux/mfd/da8xx-cfgchip.h #define CFGCHIP1_CAP0SRC(n) ((n) << 17) n 83 include/linux/mfd/da8xx-cfgchip.h #define CFGCHIP0_EDMA31TC0DBS(n) ((n) << 13) n 89 include/linux/mfd/da8xx-cfgchip.h #define CFGCHIP1_AMUTESEL0(n) ((n) << 0) n 105 include/linux/mfd/da8xx-cfgchip.h #define CFGCHIP2_OTGMODE(n) ((n) << 13) n 120 include/linux/mfd/da8xx-cfgchip.h #define CFGCHIP2_REFFREQ(n) ((n) << 0) n 608 include/linux/mfd/dbx500-prcmu.h static inline void prcmu_qos_set_cpufreq_opp_delay(unsigned long n) {} n 25 include/linux/mfd/imx25-tsadc.h #define MX25_ADCQ_CFG(n) (0x40 + ((n) * 0x4)) n 161 include/linux/mfd/max77620.h #define MAX77620_CID5_DIDM(n) (((n) >> 4) & 0xF) n 163 include/linux/mfd/max77620.h #define MAX77620_CID5_DIDO(n) ((n) & 0xF) n 53 include/linux/mfd/mxs-lradc.h #define LRADC_CTRL1_LRADC_IRQ_EN(n) (1 << ((n) + 16)) n 58 include/linux/mfd/mxs-lradc.h #define LRADC_CTRL1_LRADC_IRQ(n) BIT(n) n 70 include/linux/mfd/mxs-lradc.h #define LRADC_CH(n) (0x50 + (0x10 * (n))) n 79 include/linux/mfd/mxs-lradc.h #define LRADC_DELAY(n) (0xd0 + (0x10 * (n))) n 103 include/linux/mfd/mxs-lradc.h #define LRADC_CTRL4_LRADCSELECT_MASK(n) (0xf << ((n) * 4)) n 104 include/linux/mfd/mxs-lradc.h #define LRADC_CTRL4_LRADCSELECT_OFFSET(n) ((n) * 4) n 105 include/linux/mfd/mxs-lradc.h #define LRADC_CTRL4_LRADCSELECT(n, x) \ n 106 include/linux/mfd/mxs-lradc.h (((x) << LRADC_CTRL4_LRADCSELECT_OFFSET(n)) & \ n 107 include/linux/mfd/mxs-lradc.h LRADC_CTRL4_LRADCSELECT_MASK(n)) n 29 include/linux/mfd/syscon/atmel-mc.h #define AT91_MC_MST(n) BIT(16 + (n)) n 30 include/linux/mfd/syscon/atmel-mc.h #define 
AT91_MC_SVMST(n) BIT(24 + (n)) n 35 include/linux/mfd/syscon/atmel-mc.h #define AT91_MPR_MSTP(n) GENMASK(2 + ((x) * 4), ((x) * 4)) n 39 include/linux/mfd/syscon/atmel-mc.h #define AT91_MC_EBI_CS(n) BIT(x) n 46 include/linux/mfd/syscon/atmel-mc.h #define AT91_MC_SMC_CSR(n) (0x70 + ((n) * 4)) n 35 include/linux/mfd/ti_am335x_tscadc.h #define REG_STEPCONFIG(n) (0x64 + ((n) * 8)) n 36 include/linux/mfd/ti_am335x_tscadc.h #define REG_STEPDELAY(n) (0x68 + ((n) * 8)) n 267 include/linux/mlx5/driver.h u64 n; n 138 include/linux/mlx5/fs.h mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev, int n); n 213 include/linux/mm.h #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) n 681 include/linux/mm.h static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags) n 685 include/linux/mm.h if (unlikely(check_mul_overflow(n, size, &bytes))) n 691 include/linux/mm.h static inline void *kvcalloc(size_t n, size_t size, gfp_t flags) n 693 include/linux/mm.h return kvmalloc_array(n, size, flags | __GFP_ZERO); n 439 include/linux/mmc/mmc.h #define mmc_driver_type_mask(n) (1 << (n)) n 310 include/linux/moduleparam.h extern bool parameqn(const char *name1, const char *name2, size_t n); n 23 include/linux/mtd/spear_smi.h #define DEFINE_PARTS(n, of, s) \ n 25 include/linux/mtd/spear_smi.h .name = n, \ n 425 include/linux/netdevice.h void __napi_schedule(struct napi_struct *n); n 426 include/linux/netdevice.h void __napi_schedule_irqoff(struct napi_struct *n); n 428 include/linux/netdevice.h static inline bool napi_disable_pending(struct napi_struct *n) n 430 include/linux/netdevice.h return test_bit(NAPI_STATE_DISABLE, &n->state); n 433 include/linux/netdevice.h bool napi_schedule_prep(struct napi_struct *n); n 442 include/linux/netdevice.h static inline void napi_schedule(struct napi_struct *n) n 444 include/linux/netdevice.h if (napi_schedule_prep(n)) n 445 include/linux/netdevice.h __napi_schedule(n); n 454 include/linux/netdevice.h static inline void napi_schedule_irqoff(struct napi_struct *n) n 456 include/linux/netdevice.h if (napi_schedule_prep(n)) n 457 include/linux/netdevice.h __napi_schedule_irqoff(n); n 470 include/linux/netdevice.h bool napi_complete_done(struct napi_struct *n, int work_done); n 479 include/linux/netdevice.h static inline bool napi_complete(struct napi_struct *n) n 481 include/linux/netdevice.h return napi_complete_done(n, 0); n 505 include/linux/netdevice.h void napi_disable(struct napi_struct *n); n 514 include/linux/netdevice.h static inline void napi_enable(struct napi_struct *n) n 516 include/linux/netdevice.h BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); n 518 include/linux/netdevice.h clear_bit(NAPI_STATE_SCHED, &n->state); n 519 include/linux/netdevice.h clear_bit(NAPI_STATE_NPSVC, &n->state); n 530 include/linux/netdevice.h static inline void napi_synchronize(const struct napi_struct *n) n 533 include/linux/netdevice.h while (test_bit(NAPI_STATE_SCHED, &n->state)) n 547 include/linux/netdevice.h static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n) n 552 include/linux/netdevice.h val = READ_ONCE(n->state); n 560 include/linux/netdevice.h } while (cmpxchg(&n->state, val, new) != val); n 1371 include/linux/netdevice.h struct neighbour *n); n 1373 include/linux/netdevice.h struct neighbour *n); n 1439 include/linux/netdevice.h int (*ndo_xdp_xmit)(struct net_device *dev, int n, n 2563 include/linux/netdevice.h #define for_each_netdev_safe(net, d, n) \ n 2564 include/linux/netdevice.h list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list) 
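The include/linux/netdevice.h entries above show the napi_schedule_prep()/__napi_schedule() pair and napi_complete_done(). The usual driver-side handshake looks roughly like the sketch below; demo_irq()/demo_poll() are hypothetical names, and a real driver would also mask its own device interrupt before scheduling and refill/process packets in the poll loop:

#include <linux/netdevice.h>
#include <linux/interrupt.h>

static irqreturn_t demo_irq(int irq, void *dev_id)
{
	struct napi_struct *napi = dev_id;

	/* Only queues the poll if NAPI_STATE_SCHED was not already set. */
	napi_schedule(napi);
	return IRQ_HANDLED;
}

static int demo_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;	/* a real poll loop consumes up to @budget packets */

	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}
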
n 3481 include/linux/netdevice.h static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp, n 3485 include/linux/netdevice.h if (n != -1) n 3486 include/linux/netdevice.h cpu_max_bits_warn(n, nr_bits); n 3489 include/linux/netdevice.h return find_next_bit(srcp, nr_bits, n + 1); n 3491 include/linux/netdevice.h return n + 1; n 3503 include/linux/netdevice.h static inline int netif_attrmask_next_and(int n, const unsigned long *src1p, n 3508 include/linux/netdevice.h if (n != -1) n 3509 include/linux/netdevice.h cpu_max_bits_warn(n, nr_bits); n 3512 include/linux/netdevice.h return find_next_and_bit(src1p, src2p, nr_bits, n + 1); n 3514 include/linux/netdevice.h return find_next_bit(src1p, nr_bits, n + 1); n 3516 include/linux/netdevice.h return find_next_bit(src2p, nr_bits, n + 1); n 3518 include/linux/netdevice.h return n + 1; n 123 include/linux/netfilter.h unsigned int n = e->num_hook_entries; n 126 include/linux/netfilter.h hook_end = &e->hooks[n]; /* this is *past* ->hooks[]! */ n 186 include/linux/netfilter.h unsigned int n); n 188 include/linux/netfilter.h unsigned int n); n 39 include/linux/netfilter/nfnetlink.h int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n); n 40 include/linux/netfilter/nfnetlink.h int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n); n 272 include/linux/netfilter/x_tables.h int xt_register_targets(struct xt_target *target, unsigned int n); n 273 include/linux/netfilter/x_tables.h void xt_unregister_targets(struct xt_target *target, unsigned int n); n 277 include/linux/netfilter/x_tables.h int xt_register_matches(struct xt_match *match, unsigned int n); n 278 include/linux/netfilter/x_tables.h void xt_unregister_matches(struct xt_match *match, unsigned int n); n 76 include/linux/netfilter_bridge/ebtables.h unsigned int n; /* n'th entry */ n 243 include/linux/nodemask.h #define nodes_shift_right(dst, src, n) \ n 244 include/linux/nodemask.h __nodes_shift_right(&(dst), &(src), (n), MAX_NUMNODES) n 246 include/linux/nodemask.h const nodemask_t *srcp, int n, int nbits) n 248 include/linux/nodemask.h bitmap_shift_right(dstp->bits, srcp->bits, n, nbits); n 251 include/linux/nodemask.h #define nodes_shift_left(dst, src, n) \ n 252 include/linux/nodemask.h __nodes_shift_left(&(dst), &(src), (n), MAX_NUMNODES) n 254 include/linux/nodemask.h const nodemask_t *srcp, int n, int nbits) n 256 include/linux/nodemask.h bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); n 268 include/linux/nodemask.h #define next_node(n, src) __next_node((n), &(src)) n 269 include/linux/nodemask.h static inline int __next_node(int n, const nodemask_t *srcp) n 271 include/linux/nodemask.h return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1)); n 278 include/linux/nodemask.h #define next_node_in(n, src) __next_node_in((n), &(src)) n 115 include/linux/of.h #define of_node_kobj(n) (&(n)->kobj) n 117 include/linux/of.h #define of_node_kobj(n) NULL n 188 include/linux/of.h static inline int of_node_check_flag(struct device_node *n, unsigned long flag) n 190 include/linux/of.h return test_bit(flag, &n->_flags); n 193 include/linux/of.h static inline int of_node_test_and_set_flag(struct device_node *n, n 196 include/linux/of.h return test_and_set_bit(flag, &n->_flags); n 199 include/linux/of.h static inline void of_node_set_flag(struct device_node *n, unsigned long flag) n 201 include/linux/of.h set_bit(flag, &n->_flags); n 204 include/linux/of.h static inline void of_node_clear_flag(struct device_node *n, unsigned long flag) n 
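The include/linux/of.h entries above wrap test_bit()/set_bit() around a device node's _flags word. A common use is marking a node so a later bus scan does not create its device twice; a rough sketch, assuming the standard OF_POPULATED flag and a hypothetical demo_claim_node() helper:

#include <linux/of.h>

/* Claim @np for this caller unless an earlier scan already populated it. */
static bool demo_claim_node(struct device_node *np)
{
	if (of_node_test_and_set_flag(np, OF_POPULATED))
		return false;	/* someone else got there first */
	return true;
}
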
206 include/linux/of.h clear_bit(flag, &n->_flags); n 936 include/linux/of.h static inline int of_node_check_flag(struct device_node *n, unsigned long flag) n 941 include/linux/of.h static inline int of_node_test_and_set_flag(struct device_node *n, n 947 include/linux/of.h static inline void of_node_set_flag(struct device_node *n, unsigned long flag) n 951 include/linux/of.h static inline void of_node_clear_flag(struct device_node *n, unsigned long flag) n 85 include/linux/omap-dma.h #define DMA_SYSCONFIG_MIDLEMODE(n) ((n) << 12) n 86 include/linux/omap-dma.h #define DMA_SYSCONFIG_SIDLEMODE(n) ((n) << 3) n 314 include/linux/overflow.h #define struct_size(p, member, n) \ n 315 include/linux/overflow.h __ab_c_size(n, \ n 162 include/linux/parport.h #define to_pardevice(n) container_of(n, struct pardevice, dev) n 254 include/linux/parport.h #define to_parport_dev(n) container_of(n, struct parport, bus_dev) n 269 include/linux/parport.h #define to_parport_driver(n) container_of(n, struct parport_driver, driver) n 484 include/linux/pci.h #define to_pci_dev(n) container_of(n, struct pci_dev, dev) n 523 include/linux/pci.h #define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev) n 606 include/linux/pci.h #define to_pci_bus(n) container_of(n, struct pci_bus, dev) n 1300 include/linux/pci.h struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n); n 1142 include/linux/phy.h void phy_drivers_unregister(struct phy_driver *drv, int n); n 1144 include/linux/phy.h int phy_drivers_register(struct phy_driver *new_driver, int n, n 1257 include/linux/phy.h unsigned int n); n 1260 include/linux/phy.h unsigned int n) n 17 include/linux/phy/omap_usb.h u8 n; n 5709 include/linux/platform_data/cros_ec_commands.h #define EC_CMD_PASSTHRU_OFFSET(n) (0x4000 * (n)) n 5710 include/linux/platform_data/cros_ec_commands.h #define EC_CMD_PASSTHRU_MAX(n) (EC_CMD_PASSTHRU_OFFSET(n) + 0x3fff) n 161 include/linux/platform_data/video-pxafb.h uint16_t *cmds, int n) n 170 include/linux/plist.h #define plist_for_each_safe(pos, n, head) \ n 171 include/linux/plist.h list_for_each_entry_safe(pos, n, &(head)->node_list, node_list) n 203 include/linux/plist.h #define plist_for_each_entry_safe(pos, n, head, m) \ n 204 include/linux/plist.h list_for_each_entry_safe(pos, n, &(head)->node_list, m.node_list) n 289 include/linux/pm_domain.h struct genpd_power_state **states, int *n); n 326 include/linux/pm_domain.h struct genpd_power_state **states, int *n) n 220 include/linux/pnp.h #define global_to_pnp_card(n) list_entry(n, struct pnp_card, global_list) n 221 include/linux/pnp.h #define protocol_to_pnp_card(n) list_entry(n, struct pnp_card, protocol_list) n 222 include/linux/pnp.h #define to_pnp_card(n) container_of(n, struct pnp_card, dev) n 273 include/linux/pnp.h #define global_to_pnp_dev(n) list_entry(n, struct pnp_dev, global_list) n 274 include/linux/pnp.h #define card_to_pnp_dev(n) list_entry(n, struct pnp_dev, card_list) n 275 include/linux/pnp.h #define protocol_to_pnp_dev(n) list_entry(n, struct pnp_dev, protocol_list) n 276 include/linux/pnp.h #define to_pnp_dev(n) container_of(n, struct pnp_dev, dev) n 432 include/linux/pnp.h #define to_pnp_protocol(n) list_entry(n, struct pnp_protocol, protocol_list) n 119 include/linux/poll.h extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, n 81 include/linux/profile.h int task_handoff_register(struct notifier_block * n); n 82 include/linux/profile.h int task_handoff_unregister(struct notifier_block * n); n 84 include/linux/profile.h 
int profile_event_register(enum profile_type, struct notifier_block * n); n 85 include/linux/profile.h int profile_event_unregister(enum profile_type, struct notifier_block * n); n 113 include/linux/profile.h static inline int task_handoff_register(struct notifier_block * n) n 118 include/linux/profile.h static inline int task_handoff_unregister(struct notifier_block * n) n 123 include/linux/profile.h static inline int profile_event_register(enum profile_type t, struct notifier_block * n) n 128 include/linux/profile.h static inline int profile_event_unregister(enum profile_type t, struct notifier_block * n) n 306 include/linux/ptr_ring.h void **array, int n) n 311 include/linux/ptr_ring.h for (i = 0; i < n; i++) { n 372 include/linux/ptr_ring.h void **array, int n) n 377 include/linux/ptr_ring.h ret = __ptr_ring_consume_batched(r, array, n); n 384 include/linux/ptr_ring.h void **array, int n) n 389 include/linux/ptr_ring.h ret = __ptr_ring_consume_batched(r, array, n); n 396 include/linux/ptr_ring.h void **array, int n) n 402 include/linux/ptr_ring.h ret = __ptr_ring_consume_batched(r, array, n); n 409 include/linux/ptr_ring.h void **array, int n) n 414 include/linux/ptr_ring.h ret = __ptr_ring_consume_batched(r, array, n); n 510 include/linux/ptr_ring.h static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n, n 535 include/linux/ptr_ring.h while (n) { n 543 include/linux/ptr_ring.h r->queue[head] = batch[--n]; n 551 include/linux/ptr_ring.h while (n) n 552 include/linux/ptr_ring.h destroy(batch[--n]); n 258 include/linux/pxa2xx_ssp.h static inline struct ssp_device *pxa_ssp_request_of(const struct device_node *n, n 109 include/linux/rbtree.h #define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \ n 111 include/linux/rbtree.h pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \ n 113 include/linux/rbtree.h pos = n) n 28 include/linux/rcu_segcblist.h #define RCU_CBLIST_INITIALIZER(n) { .head = NULL, .tail = &n.head } n 81 include/linux/rcu_segcblist.h #define RCU_SEGCBLIST_INITIALIZER(n) \ n 84 include/linux/rcu_segcblist.h .tails[RCU_DONE_TAIL] = &n.head, \ n 85 include/linux/rcu_segcblist.h .tails[RCU_WAIT_TAIL] = &n.head, \ n 86 include/linux/rcu_segcblist.h .tails[RCU_NEXT_READY_TAIL] = &n.head, \ n 87 include/linux/rcu_segcblist.h .tails[RCU_NEXT_TAIL] = &n.head, \ n 172 include/linux/rculist.h static inline void hlist_del_init_rcu(struct hlist_node *n) n 174 include/linux/rculist.h if (!hlist_unhashed(n)) { n 175 include/linux/rculist.h __hlist_del(n); n 176 include/linux/rculist.h n->pprev = NULL; n 473 include/linux/rculist.h static inline void hlist_del_rcu(struct hlist_node *n) n 475 include/linux/rculist.h __hlist_del(n); n 476 include/linux/rculist.h n->pprev = LIST_POISON2; n 525 include/linux/rculist.h static inline void hlist_add_head_rcu(struct hlist_node *n, n 530 include/linux/rculist.h n->next = first; n 531 include/linux/rculist.h n->pprev = &h->first; n 532 include/linux/rculist.h rcu_assign_pointer(hlist_first_rcu(h), n); n 534 include/linux/rculist.h first->pprev = &n->next; n 556 include/linux/rculist.h static inline void hlist_add_tail_rcu(struct hlist_node *n, n 566 include/linux/rculist.h n->next = last->next; n 567 include/linux/rculist.h n->pprev = &last->next; n 568 include/linux/rculist.h rcu_assign_pointer(hlist_next_rcu(last), n); n 570 include/linux/rculist.h hlist_add_head_rcu(n, h); n 592 include/linux/rculist.h static inline void hlist_add_before_rcu(struct hlist_node *n, n 595 include/linux/rculist.h n->pprev = 
next->pprev; n 596 include/linux/rculist.h n->next = next; n 597 include/linux/rculist.h rcu_assign_pointer(hlist_pprev_rcu(n), n); n 598 include/linux/rculist.h next->pprev = &n->next; n 619 include/linux/rculist.h static inline void hlist_add_behind_rcu(struct hlist_node *n, n 622 include/linux/rculist.h n->next = prev->next; n 623 include/linux/rculist.h n->pprev = &prev->next; n 624 include/linux/rculist.h rcu_assign_pointer(hlist_next_rcu(prev), n); n 625 include/linux/rculist.h if (n->next) n 626 include/linux/rculist.h n->next->pprev = &n->next; n 12 include/linux/rculist_bl.h struct hlist_bl_node *n) n 14 include/linux/rculist_bl.h LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK); n 18 include/linux/rculist_bl.h (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK)); n 47 include/linux/rculist_bl.h static inline void hlist_bl_del_init_rcu(struct hlist_bl_node *n) n 49 include/linux/rculist_bl.h if (!hlist_bl_unhashed(n)) { n 50 include/linux/rculist_bl.h __hlist_bl_del(n); n 51 include/linux/rculist_bl.h n->pprev = NULL; n 74 include/linux/rculist_bl.h static inline void hlist_bl_del_rcu(struct hlist_bl_node *n) n 76 include/linux/rculist_bl.h __hlist_bl_del(n); n 77 include/linux/rculist_bl.h n->pprev = LIST_POISON2; n 99 include/linux/rculist_bl.h static inline void hlist_bl_add_head_rcu(struct hlist_bl_node *n, n 107 include/linux/rculist_bl.h n->next = first; n 109 include/linux/rculist_bl.h first->pprev = &n->next; n 110 include/linux/rculist_bl.h n->pprev = &h->first; n 113 include/linux/rculist_bl.h hlist_bl_set_first_rcu(h, n); n 33 include/linux/rculist_nulls.h static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n) n 35 include/linux/rculist_nulls.h if (!hlist_nulls_unhashed(n)) { n 36 include/linux/rculist_nulls.h __hlist_nulls_del(n); n 37 include/linux/rculist_nulls.h WRITE_ONCE(n->pprev, NULL); n 66 include/linux/rculist_nulls.h static inline void hlist_nulls_del_rcu(struct hlist_nulls_node *n) n 68 include/linux/rculist_nulls.h __hlist_nulls_del(n); n 69 include/linux/rculist_nulls.h WRITE_ONCE(n->pprev, LIST_POISON2); n 91 include/linux/rculist_nulls.h static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, n 96 include/linux/rculist_nulls.h n->next = first; n 97 include/linux/rculist_nulls.h WRITE_ONCE(n->pprev, &h->first); n 98 include/linux/rculist_nulls.h rcu_assign_pointer(hlist_nulls_first_rcu(h), n); n 100 include/linux/rculist_nulls.h WRITE_ONCE(first->pprev, &n->next); n 122 include/linux/rculist_nulls.h static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n, n 132 include/linux/rculist_nulls.h n->next = last->next; n 133 include/linux/rculist_nulls.h n->pprev = &last->next; n 134 include/linux/rculist_nulls.h rcu_assign_pointer(hlist_next_rcu(last), n); n 136 include/linux/rculist_nulls.h hlist_nulls_add_head_rcu(n, h); n 21 include/linux/rcupdate_wait.h void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, n 23 include/linux/refcount.h #define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), } n 30 include/linux/refcount.h static inline void refcount_set(refcount_t *r, unsigned int n) n 32 include/linux/refcount.h atomic_set(&r->refs, n); n 193 include/linux/regset.h unsigned int n; n 221 include/linux/regset.h unsigned int n; n 420 include/linux/regset.h return regset->n * regset->size; n 207 include/linux/rio.h #define rio_dev_g(n) list_entry(n, struct rio_dev, global_list) n 208 include/linux/rio.h #define rio_dev_f(n) list_entry(n, struct rio_dev, net_list) n 209 include/linux/rio.h 
#define to_rio_dev(n) container_of(n, struct rio_dev, dev) n 210 include/linux/rio.h #define sw_to_rio_dev(n) container_of(n, struct rio_dev, rswitch[0]) n 211 include/linux/rio.h #define to_rio_mport(n) container_of(n, struct rio_mport, dev) n 212 include/linux/rio.h #define to_rio_net(n) container_of(n, struct rio_net, dev) n 224 include/linux/rio_regs.h #define RIO_PORT_N_MNT_REQ_CSR(n, m) (0x40 + (n) * (0x20 * (m))) n 227 include/linux/rio_regs.h #define RIO_PORT_N_MNT_RSP_CSR(n, m) (0x44 + (n) * (0x20 * (m))) n 231 include/linux/rio_regs.h #define RIO_PORT_N_ACK_STS_CSR(n) (0x48 + (n) * 0x20) /* Only in RM-I */ n 236 include/linux/rio_regs.h #define RIO_PORT_N_CTL2_CSR(n, m) (0x54 + (n) * (0x20 * (m))) n 238 include/linux/rio_regs.h #define RIO_PORT_N_ERR_STS_CSR(n, m) (0x58 + (n) * (0x20 * (m))) n 246 include/linux/rio_regs.h #define RIO_PORT_N_CTL_CSR(n, m) (0x5c + (n) * (0x20 * (m))) n 255 include/linux/rio_regs.h #define RIO_PORT_N_OB_ACK_CSR(n) (0x60 + (n) * 0x40) /* Only in RM-II */ n 259 include/linux/rio_regs.h #define RIO_PORT_N_IB_ACK_CSR(n) (0x64 + (n) * 0x40) /* Only in RM-II */ n 267 include/linux/rio_regs.h #define RIO_DEV_PORT_N_MNT_REQ_CSR(d, n) \ n 268 include/linux/rio_regs.h (d->phys_efptr + RIO_PORT_N_MNT_REQ_CSR(n, d->phys_rmap)) n 270 include/linux/rio_regs.h #define RIO_DEV_PORT_N_MNT_RSP_CSR(d, n) \ n 271 include/linux/rio_regs.h (d->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(n, d->phys_rmap)) n 273 include/linux/rio_regs.h #define RIO_DEV_PORT_N_ACK_STS_CSR(d, n) \ n 274 include/linux/rio_regs.h (d->phys_efptr + RIO_PORT_N_ACK_STS_CSR(n)) n 276 include/linux/rio_regs.h #define RIO_DEV_PORT_N_CTL2_CSR(d, n) \ n 277 include/linux/rio_regs.h (d->phys_efptr + RIO_PORT_N_CTL2_CSR(n, d->phys_rmap)) n 279 include/linux/rio_regs.h #define RIO_DEV_PORT_N_ERR_STS_CSR(d, n) \ n 280 include/linux/rio_regs.h (d->phys_efptr + RIO_PORT_N_ERR_STS_CSR(n, d->phys_rmap)) n 282 include/linux/rio_regs.h #define RIO_DEV_PORT_N_CTL_CSR(d, n) \ n 283 include/linux/rio_regs.h (d->phys_efptr + RIO_PORT_N_CTL_CSR(n, d->phys_rmap)) n 285 include/linux/rio_regs.h #define RIO_DEV_PORT_N_OB_ACK_CSR(d, n) \ n 286 include/linux/rio_regs.h (d->phys_efptr + RIO_PORT_N_OB_ACK_CSR(n)) n 288 include/linux/rio_regs.h #define RIO_DEV_PORT_N_IB_ACK_CSR(d, n) \ n 289 include/linux/rio_regs.h (d->phys_efptr + RIO_PORT_N_IB_ACK_CSR(n)) n 41 include/linux/sched/loadavg.h unsigned long active, unsigned int n); n 93 include/linux/sched/task_stack.h unsigned long *n = end_of_stack(p); n 97 include/linux/sched/task_stack.h n--; n 99 include/linux/sched/task_stack.h n++; n 101 include/linux/sched/task_stack.h } while (!*n); n 104 include/linux/sched/task_stack.h return (unsigned long)end_of_stack(p) - (unsigned long)n; n 106 include/linux/sched/task_stack.h return (unsigned long)n - (unsigned long)end_of_stack(p); n 104 include/linux/seccomp.h unsigned long n, void __user *data) n 21 include/linux/semaphore.h #define __SEMAPHORE_INITIALIZER(name, n) \ n 24 include/linux/semaphore.h .count = n, \ n 107 include/linux/skb_array.h struct sk_buff **array, int n) n 109 include/linux/skb_array.h return ptr_ring_consume_batched(&a->ring, (void **)array, n); n 118 include/linux/skb_array.h struct sk_buff **array, int n) n 120 include/linux/skb_array.h return ptr_ring_consume_batched_irq(&a->ring, (void **)array, n); n 129 include/linux/skb_array.h struct sk_buff **array, int n) n 131 include/linux/skb_array.h return ptr_ring_consume_batched_any(&a->ring, (void **)array, n); n 141 include/linux/skb_array.h struct sk_buff 
**array, int n) n 143 include/linux/skb_array.h return ptr_ring_consume_batched_bh(&a->ring, (void **)array, n); n 191 include/linux/skb_array.h struct sk_buff **skbs, int n) n 193 include/linux/skb_array.h ptr_ring_unconsume(&a->ring, (void **)skbs, n, __skb_array_destroy_skb); n 195 include/linux/slab.h void __check_heap_object(const void *ptr, unsigned long n, struct page *page, n 198 include/linux/slab.h static inline void __check_heap_object(const void *ptr, unsigned long n, n 569 include/linux/slab.h static __always_inline unsigned int kmalloc_size(unsigned int n) n 572 include/linux/slab.h if (n > 2) n 573 include/linux/slab.h return 1U << n; n 575 include/linux/slab.h if (n == 1 && KMALLOC_MIN_SIZE <= 32) n 578 include/linux/slab.h if (n == 2 && KMALLOC_MIN_SIZE <= 64) n 610 include/linux/slab.h static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) n 614 include/linux/slab.h if (unlikely(check_mul_overflow(n, size, &bytes))) n 616 include/linux/slab.h if (__builtin_constant_p(n) && __builtin_constant_p(size)) n 627 include/linux/slab.h static inline void *kcalloc(size_t n, size_t size, gfp_t flags) n 629 include/linux/slab.h return kmalloc_array(n, size, flags | __GFP_ZERO); n 644 include/linux/slab.h static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags, n 649 include/linux/slab.h if (unlikely(check_mul_overflow(n, size, &bytes))) n 651 include/linux/slab.h if (__builtin_constant_p(n) && __builtin_constant_p(size)) n 656 include/linux/slab.h static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node) n 658 include/linux/slab.h return kmalloc_array_node(n, size, flags | __GFP_ZERO, node); n 147 include/linux/slub_def.h #define slub_set_cpu_partial(s, n) \ n 149 include/linux/slub_def.h slub_cpu_partial(s) = (n); \ n 153 include/linux/slub_def.h #define slub_set_cpu_partial(s, n) n 40 include/linux/soc/samsung/exynos-regs-pmu.h #define EXYNOS_CORE_PO_RESET(n) ((1 << 4) << n) n 52 include/linux/soc/samsung/exynos-regs-pmu.h #define EXYNOS4_MIPI_PHY_CONTROL(n) (0x0710 + (n) * 4) n 510 include/linux/soc/samsung/exynos-regs-pmu.h #define EXYNOS5420_MIPI_PHY_CONTROL(n) (0x0714 + (n) * 4) n 72 include/linux/sock_diag.h const struct net *n = sock_net(sk); n 75 include/linux/sock_diag.h return group != SKNLGRP_NONE && n->diag_nlsk && n 76 include/linux/sock_diag.h netlink_has_listeners(n->diag_nlsk, group); n 41 include/linux/soundwire/sdw.h #define SDW_VALID_PORT_RANGE(n) ((n) <= 14 && (n) >= 1) n 12 include/linux/soundwire/sdw_registers.h #define SDW_REG_SHIFT(n) (ffs(n) - 1) n 124 include/linux/soundwire/sdw_registers.h #define SDW_DPN_INT(n) (0x0 + SDW_DPN_SIZE * (n)) n 125 include/linux/soundwire/sdw_registers.h #define SDW_DPN_INTMASK(n) (0x1 + SDW_DPN_SIZE * (n)) n 126 include/linux/soundwire/sdw_registers.h #define SDW_DPN_PORTCTRL(n) (0x2 + SDW_DPN_SIZE * (n)) n 127 include/linux/soundwire/sdw_registers.h #define SDW_DPN_BLOCKCTRL1(n) (0x3 + SDW_DPN_SIZE * (n)) n 128 include/linux/soundwire/sdw_registers.h #define SDW_DPN_PREPARESTATUS(n) (0x4 + SDW_DPN_SIZE * (n)) n 129 include/linux/soundwire/sdw_registers.h #define SDW_DPN_PREPARECTRL(n) (0x5 + SDW_DPN_SIZE * (n)) n 145 include/linux/soundwire/sdw_registers.h #define SDW_DPN_CHANNELEN_B0(n) (0x20 + SDW_DPN_SIZE * (n)) n 146 include/linux/soundwire/sdw_registers.h #define SDW_DPN_CHANNELEN_B1(n) (0x30 + SDW_DPN_SIZE * (n)) n 148 include/linux/soundwire/sdw_registers.h #define SDW_DPN_BLOCKCTRL2_B0(n) (0x21 + SDW_DPN_SIZE * (n)) n 149 include/linux/soundwire/sdw_registers.h 
#define SDW_DPN_BLOCKCTRL2_B1(n) (0x31 + SDW_DPN_SIZE * (n)) n 151 include/linux/soundwire/sdw_registers.h #define SDW_DPN_SAMPLECTRL1_B0(n) (0x22 + SDW_DPN_SIZE * (n)) n 152 include/linux/soundwire/sdw_registers.h #define SDW_DPN_SAMPLECTRL1_B1(n) (0x32 + SDW_DPN_SIZE * (n)) n 154 include/linux/soundwire/sdw_registers.h #define SDW_DPN_SAMPLECTRL2_B0(n) (0x23 + SDW_DPN_SIZE * (n)) n 155 include/linux/soundwire/sdw_registers.h #define SDW_DPN_SAMPLECTRL2_B1(n) (0x33 + SDW_DPN_SIZE * (n)) n 157 include/linux/soundwire/sdw_registers.h #define SDW_DPN_OFFSETCTRL1_B0(n) (0x24 + SDW_DPN_SIZE * (n)) n 158 include/linux/soundwire/sdw_registers.h #define SDW_DPN_OFFSETCTRL1_B1(n) (0x34 + SDW_DPN_SIZE * (n)) n 160 include/linux/soundwire/sdw_registers.h #define SDW_DPN_OFFSETCTRL2_B0(n) (0x25 + SDW_DPN_SIZE * (n)) n 161 include/linux/soundwire/sdw_registers.h #define SDW_DPN_OFFSETCTRL2_B1(n) (0x35 + SDW_DPN_SIZE * (n)) n 163 include/linux/soundwire/sdw_registers.h #define SDW_DPN_HCTRL_B0(n) (0x26 + SDW_DPN_SIZE * (n)) n 164 include/linux/soundwire/sdw_registers.h #define SDW_DPN_HCTRL_B1(n) (0x36 + SDW_DPN_SIZE * (n)) n 166 include/linux/soundwire/sdw_registers.h #define SDW_DPN_BLOCKCTRL3_B0(n) (0x27 + SDW_DPN_SIZE * (n)) n 167 include/linux/soundwire/sdw_registers.h #define SDW_DPN_BLOCKCTRL3_B1(n) (0x37 + SDW_DPN_SIZE * (n)) n 169 include/linux/soundwire/sdw_registers.h #define SDW_DPN_LANECTRL_B0(n) (0x28 + SDW_DPN_SIZE * (n)) n 170 include/linux/soundwire/sdw_registers.h #define SDW_DPN_LANECTRL_B1(n) (0x38 + SDW_DPN_SIZE * (n)) n 1342 include/linux/spi/spi.h spi_register_board_info(struct spi_board_info const *info, unsigned n); n 1346 include/linux/spi/spi.h spi_register_board_info(struct spi_board_info const *info, unsigned n) n 621 include/linux/ssb/ssb_driver_chipcommon.h u32 *plltype, u32 *n, u32 *m); n 623 include/linux/ssb/ssb_driver_chipcommon.h u32 *plltype, u32 *n, u32 *m); n 172 include/linux/ssb/ssb_driver_extif.h u32 *plltype, u32 *n, u32 *m); n 205 include/linux/ssb/ssb_driver_extif.h u32 *plltype, u32 *n, u32 *m) n 57 include/linux/string.h extern int strncasecmp(const char *s1, const char *s2, size_t n); n 122 include/linux/string.h __kernel_size_t n) n 125 include/linux/string.h return memset32((uint32_t *)p, v, n); n 127 include/linux/string.h return memset64((uint64_t *)p, v, n); n 130 include/linux/string.h static inline void *memset_p(void **p, void *v, __kernel_size_t n) n 133 include/linux/string.h return memset32((uint32_t *)p, (uintptr_t)v, n); n 135 include/linux/string.h return memset64((uint64_t *)p, (uintptr_t)v, n); n 177 include/linux/string.h void *memchr_inv(const void *s, int c, size_t n); n 198 include/linux/string.h int match_string(const char * const *array, size_t n, const char *string); n 199 include/linux/string.h int __sysfs_match_string(const char * const *array, size_t n, const char *s); n 432 include/linux/string.h void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv); n 299 include/linux/sunrpc/xdr.h xdr_align_size(size_t n) n 303 include/linux/sunrpc/xdr.h return (n + mask) & ~mask; n 316 include/linux/sunrpc/xdr.h xdr_stream_encode_u32(struct xdr_stream *xdr, __u32 n) n 318 include/linux/sunrpc/xdr.h const size_t len = sizeof(n); n 323 include/linux/sunrpc/xdr.h *p = cpu_to_be32(n); n 337 include/linux/sunrpc/xdr.h xdr_stream_encode_u64(struct xdr_stream *xdr, __u64 n) n 339 include/linux/sunrpc/xdr.h const size_t len = sizeof(n); n 344 include/linux/sunrpc/xdr.h xdr_encode_hyper(p, n); n 118 include/linux/svga.h int 
svga_compute_pll(const struct svga_pll *pll, u32 f_wanted, u16 *m, u16 *n, u16 *r, int node); n 452 include/linux/swap.h extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size); n 458 include/linux/swap.h extern void swapcache_free_entries(swp_entry_t *entries, int n); n 114 include/linux/syscalls.h #define __MAP(n,...) __MAP##n(__VA_ARGS__) n 1100 include/linux/syscalls.h asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp, n 95 include/linux/tc.h #define to_tc_dev(n) container_of(n, struct tc_dev, dev) n 112 include/linux/thread_info.h extern void __check_object_size(const void *ptr, unsigned long n, n 115 include/linux/thread_info.h static __always_inline void check_object_size(const void *ptr, unsigned long n, n 118 include/linux/thread_info.h if (!__builtin_constant_p(n)) n 119 include/linux/thread_info.h __check_object_size(ptr, n, to_user); n 122 include/linux/thread_info.h static inline void check_object_size(const void *ptr, unsigned long n, n 83 include/linux/torture.h #define torture_create_kthread(n, arg, tp) \ n 84 include/linux/torture.h _torture_create_kthread(n, (arg), #n, "Creating " #n " task", \ n 85 include/linux/torture.h "Failed to create " #n, &(tp)) n 86 include/linux/torture.h #define torture_stop_kthread(n, tp) \ n 87 include/linux/torture.h _torture_stop_kthread("Stopping " #n " task", &(tp)) n 59 include/linux/uaccess.h __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) n 61 include/linux/uaccess.h kasan_check_write(to, n); n 62 include/linux/uaccess.h check_object_size(to, n, false); n 63 include/linux/uaccess.h return raw_copy_from_user(to, from, n); n 67 include/linux/uaccess.h __copy_from_user(void *to, const void __user *from, unsigned long n) n 70 include/linux/uaccess.h kasan_check_write(to, n); n 71 include/linux/uaccess.h check_object_size(to, n, false); n 72 include/linux/uaccess.h return raw_copy_from_user(to, from, n); n 89 include/linux/uaccess.h __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) n 91 include/linux/uaccess.h kasan_check_read(from, n); n 92 include/linux/uaccess.h check_object_size(from, n, true); n 93 include/linux/uaccess.h return raw_copy_to_user(to, from, n); n 97 include/linux/uaccess.h __copy_to_user(void __user *to, const void *from, unsigned long n) n 100 include/linux/uaccess.h kasan_check_read(from, n); n 101 include/linux/uaccess.h check_object_size(from, n, true); n 102 include/linux/uaccess.h return raw_copy_to_user(to, from, n); n 107 include/linux/uaccess.h _copy_from_user(void *to, const void __user *from, unsigned long n) n 109 include/linux/uaccess.h unsigned long res = n; n 111 include/linux/uaccess.h if (likely(access_ok(from, n))) { n 112 include/linux/uaccess.h kasan_check_write(to, n); n 113 include/linux/uaccess.h res = raw_copy_from_user(to, from, n); n 116 include/linux/uaccess.h memset(to + (n - res), 0, res); n 126 include/linux/uaccess.h _copy_to_user(void __user *to, const void *from, unsigned long n) n 129 include/linux/uaccess.h if (access_ok(to, n)) { n 130 include/linux/uaccess.h kasan_check_read(from, n); n 131 include/linux/uaccess.h n = raw_copy_to_user(to, from, n); n 133 include/linux/uaccess.h return n; n 141 include/linux/uaccess.h copy_from_user(void *to, const void __user *from, unsigned long n) n 143 include/linux/uaccess.h if (likely(check_copy_size(to, n, false))) n 144 include/linux/uaccess.h n = _copy_from_user(to, from, n); n 145 include/linux/uaccess.h return n; n 149 include/linux/uaccess.h 
copy_to_user(void __user *to, const void *from, unsigned long n) n 151 include/linux/uaccess.h if (likely(check_copy_size(from, n, true))) n 152 include/linux/uaccess.h n = _copy_to_user(to, from, n); n 153 include/linux/uaccess.h return n; n 157 include/linux/uaccess.h copy_in_user(void __user *to, const void __user *from, unsigned long n) n 160 include/linux/uaccess.h if (access_ok(to, n) && access_ok(from, n)) n 161 include/linux/uaccess.h n = raw_copy_in_user(to, from, n); n 162 include/linux/uaccess.h return n; n 227 include/linux/uaccess.h unsigned long n) n 229 include/linux/uaccess.h return __copy_from_user_inatomic(to, from, n); n 184 include/linux/usb/audio-v3.h #define DECLARE_UAC3_POWER_DOMAIN_DESCRIPTOR(n) \ n 185 include/linux/usb/audio-v3.h struct uac3_power_domain_descriptor_##n { \ n 193 include/linux/usb/audio-v3.h __u8 baEntityID[n]; \ n 32 include/linux/usb/c67x00.h #define c67x00_sie_config(config, n) (((config)>>(4*(n)))&0x3) n 522 include/linux/usb/composite.h extern int usb_string_ids_n(struct usb_composite_dev *c, unsigned n); n 111 include/linux/usb/typec_altmode.h size_t n, u16 svid, u8 mode); n 179 include/linux/virtio_config.h vq_callback_t *c, const char *n) n 182 include/linux/virtio_config.h const char *names[] = { n }; n 131 include/linux/vt_kern.h int vt_waitactive(int n); n 178 include/linux/workqueue.h #define __WORK_INIT_LOCKDEP_MAP(n, k) \ n 179 include/linux/workqueue.h .lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k), n 181 include/linux/workqueue.h #define __WORK_INIT_LOCKDEP_MAP(n, k) n 184 include/linux/workqueue.h #define __WORK_INITIALIZER(n, f) { \ n 186 include/linux/workqueue.h .entry = { &(n).entry, &(n).entry }, \ n 188 include/linux/workqueue.h __WORK_INIT_LOCKDEP_MAP(#n, &(n)) \ n 191 include/linux/workqueue.h #define __DELAYED_WORK_INITIALIZER(n, f, tflags) { \ n 192 include/linux/workqueue.h .work = __WORK_INITIALIZER((n).work, (f)), \ n 197 include/linux/workqueue.h #define DECLARE_WORK(n, f) \ n 198 include/linux/workqueue.h struct work_struct n = __WORK_INITIALIZER(n, f) n 200 include/linux/workqueue.h #define DECLARE_DELAYED_WORK(n, f) \ n 201 include/linux/workqueue.h struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0) n 203 include/linux/workqueue.h #define DECLARE_DEFERRABLE_WORK(n, f) \ n 204 include/linux/workqueue.h struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE) n 360 include/linux/xarray.h unsigned long max, unsigned int n, xa_mark_t); n 40 include/linux/zorro.h #define to_zorro_dev(n) container_of(n, struct zorro_dev, dev) n 877 include/math-emu/op-common.h #define _FP_DIV_HELP_imm(q, r, n, d) \ n 879 include/math-emu/op-common.h q = n / d, r = n % d; \ n 43 include/media/cec-notifier.h void cec_notifier_put(struct cec_notifier *n); n 70 include/media/cec-notifier.h void cec_notifier_conn_unregister(struct cec_notifier *n); n 97 include/media/cec-notifier.h void cec_notifier_cec_adap_unregister(struct cec_notifier *n); n 107 include/media/cec-notifier.h void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa); n 117 include/media/cec-notifier.h void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n, n 139 include/media/cec-notifier.h static inline void cec_notifier_put(struct cec_notifier *n) n 151 include/media/cec-notifier.h static inline void cec_notifier_conn_unregister(struct cec_notifier *n) n 163 include/media/cec-notifier.h static inline void cec_notifier_cec_adap_unregister(struct cec_notifier *n) n 167 include/media/cec-notifier.h static inline void 
cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa) n 171 include/media/cec-notifier.h static inline void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n, n 208 include/media/cec-notifier.h static inline void cec_notifier_phys_addr_invalidate(struct cec_notifier *n) n 210 include/media/cec-notifier.h cec_notifier_set_phys_addr(n, CEC_PHYS_ADDR_INVALID); n 428 include/media/cec.h void cec_notifier_register(struct cec_notifier *n, n 436 include/media/cec.h void cec_notifier_unregister(struct cec_notifier *n); n 449 include/media/cec.h cec_notifier_register(struct cec_notifier *n, n 455 include/media/cec.h static inline void cec_notifier_unregister(struct cec_notifier *n) n 96 include/media/drv-intf/soc_mediabus.h int n); n 12 include/media/i2c/mt9t112.h u8 m, n; n 219 include/media/rc-core.h int (*tx_ir)(struct rc_dev *dev, unsigned *txbuf, unsigned n); n 508 include/media/v4l2-mem2mem.h #define v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, b, n) \ n 509 include/media/v4l2-mem2mem.h list_for_each_entry_safe(b, n, &m2m_ctx->cap_q_ctx.rdy_queue, list) n 519 include/media/v4l2-mem2mem.h #define v4l2_m2m_for_each_src_buf_safe(m2m_ctx, b, n) \ n 520 include/media/v4l2-mem2mem.h list_for_each_entry_safe(b, n, &m2m_ctx->out_q_ctx.rdy_queue, list) n 190 include/media/v4l2-subdev.h int (*s_io_pin_config)(struct v4l2_subdev *sd, size_t n, n 39 include/net/arp.h struct neighbour *n; n 42 include/net/arp.h n = __ipv4_neigh_lookup_noref(dev, key); n 43 include/net/arp.h if (n && !refcount_inc_not_zero(&n->refcnt)) n 44 include/net/arp.h n = NULL; n 47 include/net/arp.h return n; n 52 include/net/arp.h struct neighbour *n; n 55 include/net/arp.h n = __ipv4_neigh_lookup_noref(dev, key); n 56 include/net/arp.h if (n) { n 60 include/net/arp.h if (READ_ONCE(n->confirmed) != now) n 61 include/net/arp.h WRITE_ONCE(n->confirmed, now); n 107 include/net/checksum.h static inline __wsum csum_unfold(__sum16 n) n 109 include/net/checksum.h return (__force __wsum)n; n 80 include/net/dn_fib.h u32 n; n 83 include/net/dn_fib.h struct nlattr *attrs[], struct nlmsghdr *n, n 86 include/net/dn_fib.h struct nlattr *attrs[], struct nlmsghdr *n, n 116 include/net/dn_fib.h struct dn_fib_table *dn_fib_get_table(u32 n, int creat); n 160 include/net/dn_fib.h static inline __le16 dnet_make_mask(int n) n 162 include/net/dn_fib.h if (n) n 163 include/net/dn_fib.h return cpu_to_le16(~((1 << (16 - n)) - 1)); n 10 include/net/dn_neigh.h struct neighbour n; n 25 include/net/dn_neigh.h int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n); n 66 include/net/dn_route.h struct neighbour *n; n 573 include/net/dsa.h struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n); n 397 include/net/dst.h struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr); n 398 include/net/dst.h return IS_ERR(n) ? NULL : n; n 404 include/net/dst.h struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL); n 405 include/net/dst.h return IS_ERR(n) ? 
NULL : n; n 128 include/net/inetpeer.h int i, n; n 131 include/net/inetpeer.h n = sizeof(a->a4) / sizeof(u32); n 133 include/net/inetpeer.h n = sizeof(a->a6) / sizeof(u32); n 135 include/net/inetpeer.h for (i = 0; i < n; i++) { n 136 include/net/iucv/af_iucv.h enum iucv_tx_notify n); n 206 include/net/ndisc.h void (*update)(const struct net_device *dev, struct neighbour *n, n 246 include/net/ndisc.h struct neighbour *n, u32 flags, n 251 include/net/ndisc.h dev->ndisc_ops->update(dev, n, flags, icmp6_type, ndopts); n 395 include/net/ndisc.h struct neighbour *n; n 398 include/net/ndisc.h n = __ipv6_neigh_lookup_noref(dev, pkey); n 399 include/net/ndisc.h if (n && !refcount_inc_not_zero(&n->refcnt)) n 400 include/net/ndisc.h n = NULL; n 403 include/net/ndisc.h return n; n 409 include/net/ndisc.h struct neighbour *n; n 412 include/net/ndisc.h n = __ipv6_neigh_lookup_noref(dev, pkey); n 413 include/net/ndisc.h if (n) { n 417 include/net/ndisc.h if (READ_ONCE(n->confirmed) != now) n 418 include/net/ndisc.h WRITE_ONCE(n->confirmed, now); n 426 include/net/ndisc.h struct neighbour *n; n 429 include/net/ndisc.h n = __ipv6_neigh_lookup_noref_stub(dev, pkey); n 430 include/net/ndisc.h if (n) { n 434 include/net/ndisc.h if (READ_ONCE(n->confirmed) != now) n 435 include/net/ndisc.h WRITE_ONCE(n->confirmed, now); n 246 include/net/neighbour.h static inline void *neighbour_priv(const struct neighbour *n) n 248 include/net/neighbour.h return (char *)n + n->tbl->entry_size; n 261 include/net/neighbour.h static inline bool neigh_key_eq16(const struct neighbour *n, const void *pkey) n 263 include/net/neighbour.h return *(const u16 *)n->primary_key == *(const u16 *)pkey; n 266 include/net/neighbour.h static inline bool neigh_key_eq32(const struct neighbour *n, const void *pkey) n 268 include/net/neighbour.h return *(const u32 *)n->primary_key == *(const u32 *)pkey; n 271 include/net/neighbour.h static inline bool neigh_key_eq128(const struct neighbour *n, const void *pkey) n 273 include/net/neighbour.h const u32 *n32 = (const u32 *)n->primary_key; n 282 include/net/neighbour.h bool (*key_eq)(const struct neighbour *n, const void *pkey), n 290 include/net/neighbour.h struct neighbour *n; n 294 include/net/neighbour.h for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]); n 295 include/net/neighbour.h n != NULL; n 296 include/net/neighbour.h n = rcu_dereference_bh(n->next)) { n 297 include/net/neighbour.h if (n->dev == dev && key_eq(n, pkey)) n 298 include/net/neighbour.h return n; n 368 include/net/neighbour.h void neigh_app_ns(struct neighbour *n); n 382 include/net/neighbour.h struct neighbour *n, loff_t *pos); n 435 include/net/neighbour.h #define neigh_hold(n) refcount_inc(&(n)->refcnt) n 502 include/net/neighbour.h static inline int neigh_output(struct neighbour *n, struct sk_buff *skb, n 505 include/net/neighbour.h const struct hh_cache *hh = &n->hh; n 507 include/net/neighbour.h if ((n->nud_state & NUD_CONNECTED) && hh->hh_len && !skip_cache) n 510 include/net/neighbour.h return n->output(n, skb); n 516 include/net/neighbour.h struct neighbour *n = neigh_lookup(tbl, pkey, dev); n 518 include/net/neighbour.h if (n || !creat) n 519 include/net/neighbour.h return n; n 521 include/net/neighbour.h n = neigh_create(tbl, pkey, dev); n 522 include/net/neighbour.h return IS_ERR(n) ? 
NULL : n; n 529 include/net/neighbour.h struct neighbour *n = neigh_lookup(tbl, pkey, dev); n 531 include/net/neighbour.h if (n) n 532 include/net/neighbour.h return n; n 546 include/net/neighbour.h static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n, n 552 include/net/neighbour.h seq = read_seqbegin(&n->ha_lock); n 553 include/net/neighbour.h memcpy(dst, n->ha, dev->addr_len); n 554 include/net/neighbour.h } while (read_seqretry(&n->ha_lock, seq)); n 153 include/net/netfilter/nf_conntrack_helper.h void nf_ct_helper_expectfn_register(struct nf_ct_helper_expectfn *n); n 154 include/net/netfilter/nf_conntrack_helper.h void nf_ct_helper_expectfn_unregister(struct nf_ct_helper_expectfn *n); n 107 include/net/netfilter/nf_nat.h int nf_xfrm_me_harder(struct net *n, struct sk_buff *s, unsigned int family); n 643 include/net/sch_generic.h void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len); n 1952 include/net/sock.h static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n) n 1959 include/net/sock.h if (READ_ONCE(n->confirmed) != now) n 1960 include/net/sock.h WRITE_ONCE(n->confirmed, now); n 386 include/net/udp.h int n; n 388 include/net/udp.h n = copy_to_iter(skb->data + off, len, to); n 389 include/net/udp.h if (n == len) n 392 include/net/udp.h iov_iter_revert(to, n); n 1504 include/net/xfrm.h void xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n, n 1506 include/net/xfrm.h void xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n, n 1510 include/net/xfrm.h int n, unsigned short family) n 1515 include/net/xfrm.h int n, unsigned short family) n 1807 include/net/xfrm.h static inline void xfrm_states_put(struct xfrm_state **states, int n) n 1810 include/net/xfrm.h for (i = 0; i < n; i++) n 1814 include/net/xfrm.h static inline void xfrm_states_delete(struct xfrm_state **states, int n) n 1817 include/net/xfrm.h for (i = 0; i < n; i++) n 151 include/pcmcia/ds.h #define to_pcmcia_dev(n) container_of(n, struct pcmcia_device, dev) n 152 include/pcmcia/ds.h #define to_pcmcia_drv(n) container_of(n, struct pcmcia_driver, drv) n 188 include/rdma/ib_mad.h #define IB_SA_COMP_MASK(n) ((__force ib_sa_comp_mask) cpu_to_be64(1ull << (n))) n 4344 include/rdma/ib_verbs.h int n; n 4346 include/rdma/ib_verbs.h n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size); n 4349 include/rdma/ib_verbs.h return n; n 115 include/rdma/rdmavt_mr.h u16 n; /* current index: mr->map[m]->segs[n] */ n 170 include/rdma/rdmavt_mr.h if (++sge->n >= RVT_SEGSZ) { n 173 include/rdma/rdmavt_mr.h sge->n = 0; n 175 include/rdma/rdmavt_mr.h sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; n 176 include/rdma/rdmavt_mr.h sge->length = sge->mr->map[sge->m]->segs[sge->n].length; n 528 include/rdma/rdmavt_qp.h unsigned n) n 533 include/rdma/rdmavt_qp.h sizeof(struct rvt_sge)) * n); n 540 include/rdma/rdmavt_qp.h static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n) n 545 include/rdma/rdmavt_qp.h rq->max_sge * sizeof(struct ib_sge)) * n); n 723 include/rdma/rdmavt_qp.h u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits); n 725 include/rdma/rdmavt_qp.h for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp; n 926 include/rdma/rdmavt_qp.h int n; n 53 include/scsi/fc/fc_encaps.h #define FC_SOF_ENCODE(n) FC_XYNN(n, n) n 54 include/scsi/fc/fc_encaps.h #define FC_EOF_ENCODE(n) FC_XYNN(n, n) n 32 include/scsi/fc/fc_fcoe.h #define FC_FCOE_ENCAPS_ID(n) (((u64) FC_FCOE_OUI << 24) | (n)) n 33 include/scsi/fc/fc_fcoe.h #define 
FC_FCOE_DECAPS_ID(n) ((n) >> 24) n 112 include/soc/at91/at91sam9_ddrsdr.h #define AT91_DDRSDRC_DELAY(n) (0x30 + (0x4 * (n))) /* Delay I/O Register n */ n 42 include/soc/fsl/qe/qe_tdm.h #define SIMR_TFSD(n) (n) n 43 include/soc/fsl/qe/qe_tdm.h #define SIMR_RFSD(n) ((n) << 8) n 61 include/soc/fsl/qman.h #define QM_SDQCR_CHANNELS_POOL(n) (0x00008000 >> (n)) n 573 include/soc/fsl/qman.h #define QM_CGR_TARG_PORTAL(n) (0x80000000 >> (n)) /* s/w portal, 0-9 */ n 808 include/soc/fsl/qman.h #define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24) n 809 include/soc/fsl/qman.h #define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f) n 15 include/soc/qcom/rpmh.h const struct tcs_cmd *cmd, u32 n); n 18 include/soc/qcom/rpmh.h const struct tcs_cmd *cmd, u32 n); n 21 include/soc/qcom/rpmh.h const struct tcs_cmd *cmd, u32 *n); n 30 include/soc/qcom/rpmh.h const struct tcs_cmd *cmd, u32 n) n 35 include/soc/qcom/rpmh.h const struct tcs_cmd *cmd, u32 n) n 40 include/soc/qcom/rpmh.h const struct tcs_cmd *cmd, u32 *n) n 71 include/sound/control.h #define snd_kcontrol(n) list_entry(n, struct snd_kcontrol, list) n 79 include/sound/control.h #define snd_kctl_event(n) list_entry(n, struct snd_kctl_event, list) n 101 include/sound/control.h #define snd_ctl_file(n) list_entry(n, struct snd_ctl_file, list) n 75 include/sound/core.h #define snd_device(n) list_entry(n, struct snd_device, list) n 293 include/sound/hda_codec.h #define list_for_each_codec_safe(c, n, bus) \ n 294 include/sound/hda_codec.h list_for_each_entry_safe(c, n, &(bus)->core.codec_list, core.list) n 21 include/sound/i2c.h #define snd_i2c_device(n) list_entry(n, struct snd_i2c_device, list) n 60 include/sound/i2c.h #define snd_i2c_slave_bus(n) list_entry(n, struct snd_i2c_bus, buses) n 180 include/sound/seq_midi_emul.h struct snd_midi_channel_set *snd_midi_channel_alloc_set(int n); n 392 include/trace/events/fscache.h void *val, int n), n 394 include/trace/events/fscache.h TP_ARGS(cookie, page, val, n), n 400 include/trace/events/fscache.h __field(int, n ) n 407 include/trace/events/fscache.h __entry->n = n; n 411 include/trace/events/fscache.h __entry->cookie, __entry->page, __entry->val, __entry->n) n 505 include/trace/events/fscache.h void **results, int n, pgoff_t store_limit), n 507 include/trace/events/fscache.h TP_ARGS(cookie, op, results, n, store_limit), n 513 include/trace/events/fscache.h __field(int, n ) n 521 include/trace/events/fscache.h __entry->n = n; n 526 include/trace/events/fscache.h __entry->cookie, __entry->op, __entry->results0, __entry->n, n 26 include/trace/events/neigh.h const void *pkey, const struct neighbour *n, n 29 include/trace/events/neigh.h TP_ARGS(tbl, dev, pkey, n, exempt_from_gc), n 48 include/trace/events/neigh.h __entry->created = n != NULL; n 74 include/trace/events/neigh.h TP_PROTO(struct neighbour *n, const u8 *lladdr, u8 new, n 77 include/trace/events/neigh.h TP_ARGS(n, lladdr, new, flags, nlmsg_pid), n 81 include/trace/events/neigh.h __string(dev, (n->dev ? n->dev->name : "NULL")) n 101 include/trace/events/neigh.h int lladdr_len = (n->dev ? n->dev->addr_len : MAX_ADDR_LEN); n 105 include/trace/events/neigh.h __entry->family = n->tbl->family; n 106 include/trace/events/neigh.h __assign_str(dev, (n->dev ? 
n->dev->name : "NULL")); n 108 include/trace/events/neigh.h memcpy(__entry->lladdr, n->ha, lladdr_len); n 109 include/trace/events/neigh.h __entry->flags = n->flags; n 110 include/trace/events/neigh.h __entry->nud_state = n->nud_state; n 111 include/trace/events/neigh.h __entry->type = n->type; n 112 include/trace/events/neigh.h __entry->dead = n->dead; n 113 include/trace/events/neigh.h __entry->refcnt = refcount_read(&n->refcnt); n 117 include/trace/events/neigh.h if (n->tbl->family == AF_INET) n 118 include/trace/events/neigh.h *p32 = *(__be32 *)n->primary_key; n 123 include/trace/events/neigh.h if (n->tbl->family == AF_INET6) { n 125 include/trace/events/neigh.h *pin6 = *(struct in6_addr *)n->primary_key; n 131 include/trace/events/neigh.h __entry->confirmed = n->confirmed; n 132 include/trace/events/neigh.h __entry->updated = n->updated; n 133 include/trace/events/neigh.h __entry->used = n->used; n 157 include/trace/events/neigh.h TP_PROTO(struct neighbour *n, int err), n 158 include/trace/events/neigh.h TP_ARGS(n, err), n 161 include/trace/events/neigh.h __string(dev, (n->dev ? n->dev->name : "NULL")) n 178 include/trace/events/neigh.h int lladdr_len = (n->dev ? n->dev->addr_len : MAX_ADDR_LEN); n 182 include/trace/events/neigh.h __entry->family = n->tbl->family; n 183 include/trace/events/neigh.h __assign_str(dev, (n->dev ? n->dev->name : "NULL")); n 185 include/trace/events/neigh.h memcpy(__entry->lladdr, n->ha, lladdr_len); n 186 include/trace/events/neigh.h __entry->flags = n->flags; n 187 include/trace/events/neigh.h __entry->nud_state = n->nud_state; n 188 include/trace/events/neigh.h __entry->type = n->type; n 189 include/trace/events/neigh.h __entry->dead = n->dead; n 190 include/trace/events/neigh.h __entry->refcnt = refcount_read(&n->refcnt); n 194 include/trace/events/neigh.h if (n->tbl->family == AF_INET) n 195 include/trace/events/neigh.h *p32 = *(__be32 *)n->primary_key; n 200 include/trace/events/neigh.h if (n->tbl->family == AF_INET6) { n 202 include/trace/events/neigh.h *pin6 = *(struct in6_addr *)n->primary_key; n 209 include/trace/events/neigh.h __entry->confirmed = n->confirmed; n 210 include/trace/events/neigh.h __entry->updated = n->updated; n 211 include/trace/events/neigh.h __entry->used = n->used; n 408 include/uapi/drm/drm_mode.h #define DRM_MODE_PROP_TYPE(n) ((n) << 6) n 297 include/uapi/drm/r128_drm.h int n; n 8 include/uapi/linux/arm_sdei.h #define SDEI_1_0_FN(n) (SDEI_1_0_FN_BASE + (n)) n 58 include/uapi/linux/atm.h #define __SO_ENCODE(l,n,t) ((((l) & 0x1FF) << 22) | ((n) << 16) | \ n 136 include/uapi/linux/atmdev.h #define __ATM_LM_MKLOC(n) ((n)) /* Local (i.e. loop TX to RX) */ n 137 include/uapi/linux/atmdev.h #define __ATM_LM_MKRMT(n) ((n) << 8) /* Remote (i.e. loop RX to TX) */ n 139 include/uapi/linux/atmdev.h #define __ATM_LM_XTLOC(n) ((n) & 0xff) n 140 include/uapi/linux/atmdev.h #define __ATM_LM_XTRMT(n) (((n) >> 8) & 0xff) n 20 include/uapi/linux/batadv_packet.h #define batadv_tp_is_error(n) ((__u8)(n) > 127 ? 
1 : 0) n 151 include/uapi/linux/genwqe/genwqe_card.h #define IO_PF_SLC_JOBPEND(n) (0x00061000 + 8*(n)) n 152 include/uapi/linux/genwqe/genwqe_card.h #define IO_SLC_JOBPEND(n) IO_PF_SLC_JOBPEND(n) n 155 include/uapi/linux/genwqe/genwqe_card.h #define IO_SLU_SLC_PARSE_TRAP(n) (0x00011000 + 8*(n)) n 158 include/uapi/linux/genwqe/genwqe_card.h #define IO_SLU_SLC_DISP_TRAP(n) (0x00011200 + 8*(n)) n 69 include/uapi/linux/if_addr.h #define IFA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifaddrmsg)) n 179 include/uapi/linux/if_link.h #define IFLA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifinfomsg)) n 32 include/uapi/linux/kcov.h #define KCOV_CMP_SIZE(n) ((n) << 1) n 13 include/uapi/linux/kernel.h #define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) n 1164 include/uapi/linux/kvm.h __u64 n; /* number of regs */ n 51 include/uapi/linux/mroute.h #define VIFM_SET(n,m) ((m)|=(1<<(n))) n 52 include/uapi/linux/mroute.h #define VIFM_CLR(n,m) ((m)&=~(1<<(n))) n 53 include/uapi/linux/mroute.h #define VIFM_ISSET(n,m) ((m)&(1<<(n))) n 63 include/uapi/linux/mroute6.h #define IF_SET(n, p) ((p)->ifs_bits[(n)/NIFBITS] |= (1 << ((n) % NIFBITS))) n 64 include/uapi/linux/mroute6.h #define IF_CLR(n, p) ((p)->ifs_bits[(n)/NIFBITS] &= ~(1 << ((n) % NIFBITS))) n 65 include/uapi/linux/mroute6.h #define IF_ISSET(n, p) ((p)->ifs_bits[(n)/NIFBITS] & (1 << ((n) % NIFBITS))) n 53 include/uapi/linux/msdos_fs.h #define IS_FREE(n) (!*(n) || *(n) == DELETED_FLAG) n 59 include/uapi/linux/netfilter/nfnetlink_compat.h #define NFM_NFA(n) ((struct nfattr *)(((char *)(n)) \ n 61 include/uapi/linux/netfilter/nfnetlink_compat.h #define NFM_PAYLOAD(n) NLMSG_PAYLOAD(n, sizeof(struct nfgenmsg)) n 148 include/uapi/linux/netfilter/x_tables.h #define XT_ENTRY_ITERATE_CONTINUE(type, entries, size, n, fn, args...) 
\ n 157 include/uapi/linux/netfilter/x_tables.h if (__n < n) \ n 71 include/uapi/linux/netfilter/xt_sctp.h __sctp_chunkmap_is_clear(const __u32 *chunkmap, unsigned int n) n 74 include/uapi/linux/netfilter/xt_sctp.h for (i = 0; i < n; ++i) n 83 include/uapi/linux/netfilter/xt_sctp.h __sctp_chunkmap_is_all_set(const __u32 *chunkmap, unsigned int n) n 86 include/uapi/linux/netfilter/xt_sctp.h for (i = 0; i < n; ++i) n 208 include/uapi/linux/omap3isp.h #define OMAP3ISP_HIST_MEM_SIZE_BINS(n) ((1 << ((n)+5))*4*4) n 56 include/uapi/linux/ppp-comp.h #define BSD_MAKE_OPT(v, n) (((v) << 5) | (n)) n 27 include/uapi/linux/psci.h #define PSCI_0_2_FN(n) (PSCI_0_2_FN_BASE + (n)) n 31 include/uapi/linux/psci.h #define PSCI_0_2_FN64(n) (PSCI_0_2_FN64_BASE + (n)) n 359 include/uapi/linux/rtnetlink.h #define RTM_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct rtmsg)) n 602 include/uapi/linux/rtnetlink.h #define TCA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct tcmsg)) n 741 include/uapi/linux/rtnetlink.h #define TA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct tcamsg)) n 166 include/uapi/linux/usb/audio.h #define UAC_DT_AC_HEADER_SIZE(n) (8 + (n)) n 169 include/uapi/linux/usb/audio.h #define DECLARE_UAC_AC_HEADER_DESCRIPTOR(n) \ n 170 include/uapi/linux/usb/audio.h struct uac1_ac_header_descriptor_##n { \ n 177 include/uapi/linux/usb/audio.h __u8 baInterfaceNr[n]; \ n 537 include/uapi/linux/usb/audio.h #define DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(n) \ n 538 include/uapi/linux/usb/audio.h struct uac_format_type_i_discrete_descriptor_##n { \ n 547 include/uapi/linux/usb/audio.h __u8 tSamFreq[n][3]; \ n 550 include/uapi/linux/usb/audio.h #define UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(n) (8 + (n * 3)) n 101 include/uapi/linux/usb/midi.h #define USB_DT_MS_ENDPOINT_SIZE(n) (4 + (n)) n 104 include/uapi/linux/usb/midi.h #define DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(n) \ n 105 include/uapi/linux/usb/midi.h struct usb_ms_endpoint_descriptor_##n { \ n 110 include/uapi/linux/usb/midi.h __u8 baAssocJackID[n]; \ n 205 include/uapi/linux/usb/video.h #define UVC_DT_HEADER_SIZE(n) (12+(n)) n 207 include/uapi/linux/usb/video.h #define UVC_HEADER_DESCRIPTOR(n) \ n 208 include/uapi/linux/usb/video.h uvc_header_descriptor_##n n 210 include/uapi/linux/usb/video.h #define DECLARE_UVC_HEADER_DESCRIPTOR(n) \ n 211 include/uapi/linux/usb/video.h struct UVC_HEADER_DESCRIPTOR(n) { \ n 219 include/uapi/linux/usb/video.h __u8 baInterfaceNr[n]; \ n 265 include/uapi/linux/usb/video.h #define UVC_DT_CAMERA_TERMINAL_SIZE(n) (15+(n)) n 278 include/uapi/linux/usb/video.h #define UVC_DT_SELECTOR_UNIT_SIZE(n) (6+(n)) n 280 include/uapi/linux/usb/video.h #define UVC_SELECTOR_UNIT_DESCRIPTOR(n) \ n 281 include/uapi/linux/usb/video.h uvc_selector_unit_descriptor_##n n 283 include/uapi/linux/usb/video.h #define DECLARE_UVC_SELECTOR_UNIT_DESCRIPTOR(n) \ n 284 include/uapi/linux/usb/video.h struct UVC_SELECTOR_UNIT_DESCRIPTOR(n) { \ n 290 include/uapi/linux/usb/video.h __u8 baSourceID[n]; \ n 307 include/uapi/linux/usb/video.h #define UVC_DT_PROCESSING_UNIT_SIZE(n) (9+(n)) n 324 include/uapi/linux/usb/video.h #define UVC_DT_EXTENSION_UNIT_SIZE(p, n) (24+(p)+(n)) n 326 include/uapi/linux/usb/video.h #define UVC_EXTENSION_UNIT_DESCRIPTOR(p, n) \ n 327 include/uapi/linux/usb/video.h uvc_extension_unit_descriptor_##p_##n n 329 include/uapi/linux/usb/video.h #define DECLARE_UVC_EXTENSION_UNIT_DESCRIPTOR(p, n) \ n 330 include/uapi/linux/usb/video.h struct UVC_EXTENSION_UNIT_DESCRIPTOR(p, n) { \ n 340 include/uapi/linux/usb/video.h __u8 bmControls[n]; \ n 371 
include/uapi/linux/usb/video.h #define UVC_DT_INPUT_HEADER_SIZE(n, p) (13+(n*p)) n 373 include/uapi/linux/usb/video.h #define UVC_INPUT_HEADER_DESCRIPTOR(n, p) \ n 376 include/uapi/linux/usb/video.h #define DECLARE_UVC_INPUT_HEADER_DESCRIPTOR(n, p) \ n 377 include/uapi/linux/usb/video.h struct UVC_INPUT_HEADER_DESCRIPTOR(n, p) { \ n 390 include/uapi/linux/usb/video.h __u8 bmaControls[p][n]; \ n 406 include/uapi/linux/usb/video.h #define UVC_DT_OUTPUT_HEADER_SIZE(n, p) (9+(n*p)) n 408 include/uapi/linux/usb/video.h #define UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) \ n 411 include/uapi/linux/usb/video.h #define DECLARE_UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) \ n 412 include/uapi/linux/usb/video.h struct UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) { \ n 421 include/uapi/linux/usb/video.h __u8 bmaControls[p][n]; \ n 491 include/uapi/linux/usb/video.h #define UVC_DT_FRAME_UNCOMPRESSED_SIZE(n) (26+4*(n)) n 493 include/uapi/linux/usb/video.h #define UVC_FRAME_UNCOMPRESSED(n) \ n 494 include/uapi/linux/usb/video.h uvc_frame_uncompressed_##n n 496 include/uapi/linux/usb/video.h #define DECLARE_UVC_FRAME_UNCOMPRESSED(n) \ n 497 include/uapi/linux/usb/video.h struct UVC_FRAME_UNCOMPRESSED(n) { \ n 510 include/uapi/linux/usb/video.h __le32 dwFrameInterval[n]; \ n 547 include/uapi/linux/usb/video.h #define UVC_DT_FRAME_MJPEG_SIZE(n) (26+4*(n)) n 549 include/uapi/linux/usb/video.h #define UVC_FRAME_MJPEG(n) \ n 550 include/uapi/linux/usb/video.h uvc_frame_mjpeg_##n n 552 include/uapi/linux/usb/video.h #define DECLARE_UVC_FRAME_MJPEG(n) \ n 553 include/uapi/linux/usb/video.h struct UVC_FRAME_MJPEG(n) { \ n 566 include/uapi/linux/usb/video.h __le32 dwFrameInterval[n]; \ n 171 include/uapi/misc/xilinx_sdfec.h __u32 n; n 187 include/video/atmel_lcdc.h #define ATMEL_LCDC_LUT(n) (0x0c00 + ((n)*4)) n 320 include/video/sstfb.h unsigned int n; n 311 include/xen/interface/memory.h #define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n)) n 35 init/do_mounts.h int __init rd_load_disk(int n); n 40 init/do_mounts.h static inline int rd_load_disk(int n) { return 0; } n 68 init/do_mounts_rd.c unsigned long n; n 157 init/do_mounts_rd.c n = ext2_image_size(buf); n 158 init/do_mounts_rd.c if (n) { n 162 init/do_mounts_rd.c nblocks = n; n 285 init/do_mounts_rd.c int __init rd_load_disk(int n) n 290 init/do_mounts_rd.c create_dev("/dev/ram", MKDEV(RAMDISK_MAJOR, n)); n 195 init/initramfs.c static inline void __init eat(unsigned n) n 197 init/initramfs.c victim += n; n 198 init/initramfs.c this_header += n; n 199 init/initramfs.c byte_count -= n; n 231 init/initramfs.c unsigned long n = remains; n 232 init/initramfs.c if (byte_count < n) n 233 init/initramfs.c n = byte_count; n 234 init/initramfs.c memcpy(collect, victim, n); n 235 init/initramfs.c eat(n); n 236 init/initramfs.c collect += n; n 237 init/initramfs.c if ((remains -= n) != 0) n 185 init/main.c int n = strlen(p->str); n 186 init/main.c if (parameqn(line, p->str, n)) { n 192 init/main.c if (line[n] == '\0' || line[n] == '=') n 198 init/main.c } else if (p->setup_func(line + n)) n 1359 ipc/mqueue.c struct sigevent n, *p = NULL; n 1361 ipc/mqueue.c if (copy_from_user(&n, u_notification, sizeof(struct sigevent))) n 1363 ipc/mqueue.c p = &n; n 1494 ipc/mqueue.c struct sigevent n, *p = NULL; n 1496 ipc/mqueue.c if (get_compat_sigevent(&n, u_notification)) n 1498 ipc/mqueue.c if (n.sigev_notify == SIGEV_THREAD) n 1499 ipc/mqueue.c n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int); n 1500 ipc/mqueue.c p = &n; n 386 ipc/shm.c struct shmid_kernel *shp, *n; n 415 ipc/shm.c 
list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) { n 309 kernel/audit.h #define audit_dupe_exe(n, o) (-EINVAL) n 223 kernel/audit_tree.c unsigned long n = key / L1_CACHE_BYTES; n 224 kernel/audit_tree.c return chunk_hash_heads + n % HASH_SIZE; n 265 kernel/audit_tree.c int n; n 266 kernel/audit_tree.c for (n = 0; n < chunk->count; n++) n 267 kernel/audit_tree.c if (chunk->owners[n].owner == tree) n 465 kernel/audit_tree.c int n; n 480 kernel/audit_tree.c for (n = 0; n < old->count; n++) { n 481 kernel/audit_tree.c if (old->owners[n].owner == tree) { n 1011 kernel/audit_tree.c int n; n 1032 kernel/audit_tree.c for (n = 0; n < chunk->count; n++) n 1033 kernel/audit_tree.c list_del_init(&chunk->owners[n].list); n 171 kernel/auditfilter.c unsigned n = *list++; n 172 kernel/auditfilter.c if (n >= AUDIT_BITMASK_SIZE * 32 - AUDIT_SYSCALL_CLASSES) { n 176 kernel/auditfilter.c p[AUDIT_WORD(n)] |= AUDIT_BIT(n); n 316 kernel/auditfilter.c u32 n; n 317 kernel/auditfilter.c for (n = Audit_equal; n < Audit_bad && audit_ops[n] != op; n++) n 319 kernel/auditfilter.c return n; n 1432 kernel/auditfilter.c struct audit_krule *r, *n; n 1439 kernel/auditfilter.c list_for_each_entry_safe(r, n, &audit_rules_list[i], list) { n 135 kernel/auditsc.c unsigned n; n 138 kernel/auditsc.c n = ctx->major; n 140 kernel/auditsc.c switch (audit_classify_syscall(ctx->arch, n)) { n 143 kernel/auditsc.c audit_match_class(AUDIT_CLASS_WRITE, n)) n 146 kernel/auditsc.c audit_match_class(AUDIT_CLASS_READ, n)) n 149 kernel/auditsc.c audit_match_class(AUDIT_CLASS_CHATTR, n)) n 154 kernel/auditsc.c audit_match_class(AUDIT_CLASS_WRITE_32, n)) n 157 kernel/auditsc.c audit_match_class(AUDIT_CLASS_READ_32, n)) n 160 kernel/auditsc.c audit_match_class(AUDIT_CLASS_CHATTR_32, n)) n 178 kernel/auditsc.c struct audit_names *n; n 184 kernel/auditsc.c list_for_each_entry(n, &ctx->names_list, list) { n 185 kernel/auditsc.c if ((n->ino != AUDIT_INO_UNSET) && n 186 kernel/auditsc.c ((n->mode & S_IFMT) == mode)) n 252 kernel/auditsc.c int n; n 261 kernel/auditsc.c n = count; n 262 kernel/auditsc.c for (q = p; q != ctx->trees; q = q->next, n = 31) { n 263 kernel/auditsc.c while (n--) { n 264 kernel/auditsc.c audit_put_chunk(q->c[n]); n 265 kernel/auditsc.c q->c[n] = NULL; n 268 kernel/auditsc.c while (n-- > ctx->tree_count) { n 269 kernel/auditsc.c audit_put_chunk(q->c[n]); n 270 kernel/auditsc.c q->c[n] = NULL; n 288 kernel/auditsc.c int n; n 293 kernel/auditsc.c for (n = 0; n < 31; n++) n 294 kernel/auditsc.c if (audit_tree_match(p->c[n], tree)) n 299 kernel/auditsc.c for (n = ctx->tree_count; n < 31; n++) n 300 kernel/auditsc.c if (audit_tree_match(p->c[n], tree)) n 311 kernel/auditsc.c struct audit_names *n; n 321 kernel/auditsc.c list_for_each_entry(n, &ctx->names_list, list) { n 322 kernel/auditsc.c rc = audit_uid_comparator(uid, f->op, n->uid); n 335 kernel/auditsc.c struct audit_names *n; n 345 kernel/auditsc.c list_for_each_entry(n, &ctx->names_list, list) { n 346 kernel/auditsc.c rc = audit_gid_comparator(gid, f->op, n->gid); n 454 kernel/auditsc.c struct audit_names *n; n 543 kernel/auditsc.c list_for_each_entry(n, &ctx->names_list, list) { n 544 kernel/auditsc.c if (audit_comparator(MAJOR(n->dev), f->op, f->val) || n 545 kernel/auditsc.c audit_comparator(MAJOR(n->rdev), f->op, f->val)) { n 558 kernel/auditsc.c list_for_each_entry(n, &ctx->names_list, list) { n 559 kernel/auditsc.c if (audit_comparator(MINOR(n->dev), f->op, f->val) || n 560 kernel/auditsc.c audit_comparator(MINOR(n->rdev), f->op, f->val)) { n 571 
kernel/auditsc.c list_for_each_entry(n, &ctx->names_list, list) { n 572 kernel/auditsc.c if (audit_comparator(n->ino, f->op, f->val)) { n 583 kernel/auditsc.c list_for_each_entry(n, &ctx->names_list, list) { n 584 kernel/auditsc.c if (audit_uid_comparator(n->uid, f->op, f->uid)) { n 595 kernel/auditsc.c list_for_each_entry(n, &ctx->names_list, list) { n 596 kernel/auditsc.c if (audit_gid_comparator(n->gid, f->op, f->gid)) { n 667 kernel/auditsc.c list_for_each_entry(n, &ctx->names_list, list) { n 669 kernel/auditsc.c n->osid, n 809 kernel/auditsc.c struct audit_names *n, n 811 kernel/auditsc.c int h = audit_hash_ino((u32)n->ino); n 818 kernel/auditsc.c audit_filter_rules(tsk, &e->rule, ctx, n, &state, false)) { n 833 kernel/auditsc.c struct audit_names *n; n 840 kernel/auditsc.c list_for_each_entry(n, &ctx->names_list, list) { n 841 kernel/auditsc.c if (audit_filter_inode_name(tsk, n, ctx)) n 863 kernel/auditsc.c struct audit_names *n, *next; n 865 kernel/auditsc.c list_for_each_entry_safe(n, next, &context->names_list, list) { n 866 kernel/auditsc.c list_del(&n->list); n 867 kernel/auditsc.c if (n->name) n 868 kernel/auditsc.c putname(n->name); n 869 kernel/auditsc.c if (n->should_free) n 870 kernel/auditsc.c kfree(n); n 1317 kernel/auditsc.c static void audit_log_name(struct audit_context *context, struct audit_names *n, n 1330 kernel/auditsc.c else if (n->name) { n 1331 kernel/auditsc.c switch (n->name_len) { n 1335 kernel/auditsc.c audit_log_untrustedstring(ab, n->name->name); n 1346 kernel/auditsc.c audit_log_n_untrustedstring(ab, n->name->name, n 1347 kernel/auditsc.c n->name_len); n 1352 kernel/auditsc.c if (n->ino != AUDIT_INO_UNSET) n 1354 kernel/auditsc.c n->ino, n 1355 kernel/auditsc.c MAJOR(n->dev), n 1356 kernel/auditsc.c MINOR(n->dev), n 1357 kernel/auditsc.c n->mode, n 1358 kernel/auditsc.c from_kuid(&init_user_ns, n->uid), n 1359 kernel/auditsc.c from_kgid(&init_user_ns, n->gid), n 1360 kernel/auditsc.c MAJOR(n->rdev), n 1361 kernel/auditsc.c MINOR(n->rdev)); n 1362 kernel/auditsc.c if (n->osid != 0) { n 1367 kernel/auditsc.c n->osid, &ctx, &len)) { n 1368 kernel/auditsc.c audit_log_format(ab, " osid=%u", n->osid); n 1378 kernel/auditsc.c switch (n->type) { n 1396 kernel/auditsc.c audit_log_fcaps(ab, n); n 1450 kernel/auditsc.c struct audit_names *n; n 1560 kernel/auditsc.c list_for_each_entry(n, &context->names_list, list) { n 1561 kernel/auditsc.c if (n->hidden) n 1563 kernel/auditsc.c audit_log_name(context, n, NULL, i++, &call_panic); n 1855 kernel/auditsc.c struct audit_names *n; n 1857 kernel/auditsc.c list_for_each_entry(n, &context->names_list, list) { n 1858 kernel/auditsc.c if (!n->name) n 1860 kernel/auditsc.c if (n->name->uptr == uptr) { n 1861 kernel/auditsc.c n->name->refcnt++; n 1862 kernel/auditsc.c return n->name; n 1878 kernel/auditsc.c struct audit_names *n; n 1883 kernel/auditsc.c n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN); n 1884 kernel/auditsc.c if (!n) n 1887 kernel/auditsc.c n->name = name; n 1888 kernel/auditsc.c n->name_len = AUDIT_NAME_FULL; n 1889 kernel/auditsc.c name->aname = n; n 1949 kernel/auditsc.c struct audit_names *n; n 1981 kernel/auditsc.c n = name->aname; n 1982 kernel/auditsc.c if (n) { n 1984 kernel/auditsc.c if (n->type == AUDIT_TYPE_PARENT || n 1985 kernel/auditsc.c n->type == AUDIT_TYPE_UNKNOWN) n 1988 kernel/auditsc.c if (n->type != AUDIT_TYPE_PARENT) n 1993 kernel/auditsc.c list_for_each_entry_reverse(n, &context->names_list, list) { n 1994 kernel/auditsc.c if (n->ino) { n 1996 kernel/auditsc.c if (n->ino != inode->i_ino 
|| n 1997 kernel/auditsc.c n->dev != inode->i_sb->s_dev) n 1999 kernel/auditsc.c } else if (n->name) { n 2001 kernel/auditsc.c if (strcmp(n->name->name, name->name)) n 2009 kernel/auditsc.c if (n->type == AUDIT_TYPE_PARENT || n 2010 kernel/auditsc.c n->type == AUDIT_TYPE_UNKNOWN) n 2013 kernel/auditsc.c if (n->type != AUDIT_TYPE_PARENT) n 2020 kernel/auditsc.c n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN); n 2021 kernel/auditsc.c if (!n) n 2024 kernel/auditsc.c n->name = name; n 2030 kernel/auditsc.c n->name_len = n->name ? parent_len(n->name->name) : AUDIT_NAME_FULL; n 2031 kernel/auditsc.c n->type = AUDIT_TYPE_PARENT; n 2033 kernel/auditsc.c n->hidden = true; n 2035 kernel/auditsc.c n->name_len = AUDIT_NAME_FULL; n 2036 kernel/auditsc.c n->type = AUDIT_TYPE_NORMAL; n 2039 kernel/auditsc.c audit_copy_inode(n, dentry, inode, flags & AUDIT_INODE_NOEVAL); n 2068 kernel/auditsc.c struct audit_names *n, *found_parent = NULL, *found_child = NULL; n 2096 kernel/auditsc.c list_for_each_entry(n, &context->names_list, list) { n 2097 kernel/auditsc.c if (!n->name || n 2098 kernel/auditsc.c (n->type != AUDIT_TYPE_PARENT && n 2099 kernel/auditsc.c n->type != AUDIT_TYPE_UNKNOWN)) n 2102 kernel/auditsc.c if (n->ino == parent->i_ino && n->dev == parent->i_sb->s_dev && n 2104 kernel/auditsc.c n->name->name, n->name_len)) { n 2105 kernel/auditsc.c if (n->type == AUDIT_TYPE_UNKNOWN) n 2106 kernel/auditsc.c n->type = AUDIT_TYPE_PARENT; n 2107 kernel/auditsc.c found_parent = n; n 2113 kernel/auditsc.c list_for_each_entry(n, &context->names_list, list) { n 2115 kernel/auditsc.c if (!n->name || n 2116 kernel/auditsc.c (n->type != type && n->type != AUDIT_TYPE_UNKNOWN)) n 2119 kernel/auditsc.c if (!strcmp(dname->name, n->name->name) || n 2120 kernel/auditsc.c !audit_compare_dname_path(dname, n->name->name, n 2124 kernel/auditsc.c if (n->type == AUDIT_TYPE_UNKNOWN) n 2125 kernel/auditsc.c n->type = type; n 2126 kernel/auditsc.c found_child = n; n 2133 kernel/auditsc.c n = audit_alloc_name(context, AUDIT_TYPE_PARENT); n 2134 kernel/auditsc.c if (!n) n 2136 kernel/auditsc.c audit_copy_inode(n, NULL, parent, 0); n 576 kernel/bpf/core.c bpf_get_prog_addr_start(struct latch_tree_node *n) n 581 kernel/bpf/core.c aux = container_of(n, struct bpf_prog_aux, ksym_tnode); n 593 kernel/bpf/core.c static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n) n 599 kernel/bpf/core.c aux = container_of(n, struct bpf_prog_aux, ksym_tnode); n 669 kernel/bpf/core.c struct latch_tree_node *n; n 674 kernel/bpf/core.c n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops); n 675 kernel/bpf/core.c return n ? 
n 676 kernel/bpf/core.c container_of(n, struct bpf_prog_aux, ksym_tnode)->prog : n 265 kernel/bpf/cpumap.c int i, n, m; n 286 kernel/bpf/cpumap.c n = ptr_ring_consume_batched(rcpu->queue, frames, CPUMAP_BATCH); n 288 kernel/bpf/cpumap.c for (i = 0; i < n; i++) { n 299 kernel/bpf/cpumap.c m = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, n, skbs); n 301 kernel/bpf/cpumap.c for (i = 0; i < n; i++) n 303 kernel/bpf/cpumap.c drops = n; n 307 kernel/bpf/cpumap.c for (i = 0; i < n; i++) { n 324 kernel/bpf/cpumap.c trace_xdp_cpumap_kthread(rcpu->map_id, n, drops, sched); n 424 kernel/bpf/hashtab.c struct hlist_nulls_node *n; n 427 kernel/bpf/hashtab.c hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) n 442 kernel/bpf/hashtab.c struct hlist_nulls_node *n; n 446 kernel/bpf/hashtab.c hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) n 450 kernel/bpf/hashtab.c if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1)))) n 575 kernel/bpf/hashtab.c struct hlist_nulls_node *n; n 585 kernel/bpf/hashtab.c hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) n 1178 kernel/bpf/hashtab.c struct hlist_nulls_node *n; n 1181 kernel/bpf/hashtab.c hlist_nulls_for_each_entry_safe(l, n, head, hash_node) { n 1394 kernel/bpf/hashtab.c struct hlist_nulls_node *n; n 1402 kernel/bpf/hashtab.c hlist_nulls_for_each_entry_safe(l, n, head, hash_node) { n 180 kernel/bpf/tnum.c size_t n; n 182 kernel/bpf/tnum.c for (n = 64; n; n--) { n 183 kernel/bpf/tnum.c if (n < size) { n 185 kernel/bpf/tnum.c str[n - 1] = 'x'; n 187 kernel/bpf/tnum.c str[n - 1] = '1'; n 189 kernel/bpf/tnum.c str[n - 1] = '0'; n 235 kernel/bpf/verifier.c unsigned int n; n 237 kernel/bpf/verifier.c n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args); n 239 kernel/bpf/verifier.c WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1, n 242 kernel/bpf/verifier.c n = min(log->len_total - log->len_used - 1, n); n 243 kernel/bpf/verifier.c log->kbuf[n] = '\0'; n 245 kernel/bpf/verifier.c if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1)) n 246 kernel/bpf/verifier.c log->len_used += n; n 69 kernel/bpf/xskmap.c struct xsk_map_node *n, *tmp; n 72 kernel/bpf/xskmap.c list_for_each_entry_safe(n, tmp, &xs->map_list, node) { n 73 kernel/bpf/xskmap.c if (map_entry == n->map_entry) { n 74 kernel/bpf/xskmap.c list_del(&n->node); n 75 kernel/bpf/xskmap.c xsk_map_node_free(n); n 335 kernel/cgroup/cgroup-v1.c int pid, n = 0; /* used for populating the array */ n 355 kernel/cgroup/cgroup-v1.c if (unlikely(n == length)) n 363 kernel/cgroup/cgroup-v1.c array[n++] = pid; n 366 kernel/cgroup/cgroup-v1.c length = n; n 65 kernel/cred.c static inline void set_cred_subscribers(struct cred *cred, int n) n 68 kernel/cred.c atomic_set(&cred->subscribers, n); n 81 kernel/cred.c static inline void alter_cred_subscribers(const struct cred *_cred, int n) n 86 kernel/cred.c atomic_add(n, &cred->subscribers); n 1712 kernel/debug/kdb/kdb_main.c int n, z, num = (symbolic ? 
1 : (16 / bytesperword)); n 1724 kernel/debug/kdb/kdb_main.c n = min(num, repeat); n 1727 kernel/debug/kdb/kdb_main.c addr += bytesperword * n; n 1728 kernel/debug/kdb/kdb_main.c repeat -= n; n 2098 kernel/debug/kdb/kdb_main.c int n = 0; n 2127 kernel/debug/kdb/kdb_main.c n++; n 2130 kernel/debug/kdb/kdb_main.c if (adjust >= n) n 2132 kernel/debug/kdb/kdb_main.c "printed\n", n); n 2133 kernel/debug/kdb/kdb_main.c else if (adjust - lines >= n) n 2135 kernel/debug/kdb/kdb_main.c "lines printed\n", n, n - adjust); n 2139 kernel/debug/kdb/kdb_main.c skip = n - lines - adjust; n 2141 kernel/debug/kdb/kdb_main.c if (adjust >= n) { n 2143 kernel/debug/kdb/kdb_main.c "nothing printed\n", n); n 2144 kernel/debug/kdb/kdb_main.c skip = n; n 2149 kernel/debug/kdb/kdb_main.c "%d lines printed\n", n, lines); n 2152 kernel/debug/kdb/kdb_main.c lines = n; n 2155 kernel/debug/kdb/kdb_main.c if (skip >= n || skip < 0) n 309 kernel/debug/kdb/kdb_support.c int n = strlen(str)+1; n 310 kernel/debug/kdb/kdb_support.c char *s = kmalloc(n, type); n 4843 kernel/events/core.c int n = 1; /* skip @nr */ n 4858 kernel/events/core.c values[n++] += leader->total_time_enabled + n 4863 kernel/events/core.c values[n++] += leader->total_time_running + n 4870 kernel/events/core.c values[n++] += perf_event_count(leader); n 4872 kernel/events/core.c values[n++] = primary_event_id(leader); n 4875 kernel/events/core.c values[n++] += perf_event_count(sub); n 4877 kernel/events/core.c values[n++] = primary_event_id(sub); n 4935 kernel/events/core.c int n = 0; n 4937 kernel/events/core.c values[n++] = __perf_event_read_value(event, &enabled, &running); n 4939 kernel/events/core.c values[n++] = enabled; n 4941 kernel/events/core.c values[n++] = running; n 4943 kernel/events/core.c values[n++] = primary_event_id(event); n 4945 kernel/events/core.c if (copy_to_user(buf, values, n * sizeof(u64))) n 4948 kernel/events/core.c return n * sizeof(u64); n 6266 kernel/events/core.c int n = 0; n 6268 kernel/events/core.c values[n++] = perf_event_count(event); n 6270 kernel/events/core.c values[n++] = enabled + n 6274 kernel/events/core.c values[n++] = running + n 6278 kernel/events/core.c values[n++] = primary_event_id(event); n 6280 kernel/events/core.c __output_copy(handle, values, n * sizeof(u64)); n 6290 kernel/events/core.c int n = 0; n 6292 kernel/events/core.c values[n++] = 1 + leader->nr_siblings; n 6295 kernel/events/core.c values[n++] = enabled; n 6298 kernel/events/core.c values[n++] = running; n 6304 kernel/events/core.c values[n++] = perf_event_count(leader); n 6306 kernel/events/core.c values[n++] = primary_event_id(leader); n 6308 kernel/events/core.c __output_copy(handle, values, n * sizeof(u64)); n 6311 kernel/events/core.c n = 0; n 6317 kernel/events/core.c values[n++] = perf_event_count(sub); n 6319 kernel/events/core.c values[n++] = primary_event_id(sub); n 6321 kernel/events/core.c __output_copy(handle, values, n * sizeof(u64)); n 171 kernel/events/internal.h memcpy_common(void *dst, const void *src, unsigned long n) n 173 kernel/events/internal.h memcpy(dst, src, n); n 180 kernel/events/internal.h memcpy_skip(void *dst, const void *src, unsigned long n) n 191 kernel/events/internal.h arch_perf_out_copy_user(void *dst, const void *src, unsigned long n) n 196 kernel/events/internal.h ret = __copy_from_user_inatomic(dst, src, n); n 642 kernel/events/uprobes.c struct rb_node *n = uprobes_tree.rb_node; n 646 kernel/events/uprobes.c while (n) { n 647 kernel/events/uprobes.c uprobe = rb_entry(n, struct uprobe, rb_node); n 653 
kernel/events/uprobes.c n = n->rb_left; n 655 kernel/events/uprobes.c n = n->rb_right; n 1274 kernel/events/uprobes.c struct rb_node *n = uprobes_tree.rb_node; n 1276 kernel/events/uprobes.c while (n) { n 1277 kernel/events/uprobes.c struct uprobe *u = rb_entry(n, struct uprobe, rb_node); n 1280 kernel/events/uprobes.c n = n->rb_left; n 1282 kernel/events/uprobes.c n = n->rb_right; n 1285 kernel/events/uprobes.c n = n->rb_left; n 1287 kernel/events/uprobes.c n = n->rb_right; n 1293 kernel/events/uprobes.c return n; n 1305 kernel/events/uprobes.c struct rb_node *n, *t; n 1313 kernel/events/uprobes.c n = find_node_in_range(inode, min, max); n 1314 kernel/events/uprobes.c if (n) { n 1315 kernel/events/uprobes.c for (t = n; t; t = rb_prev(t)) { n 1322 kernel/events/uprobes.c for (t = n; (t = rb_next(t)); ) { n 1414 kernel/events/uprobes.c struct rb_node *n; n 1422 kernel/events/uprobes.c n = find_node_in_range(inode, min, max); n 1425 kernel/events/uprobes.c return !!n; n 1761 kernel/events/uprobes.c struct return_instance **p, *o, *n; n 1770 kernel/events/uprobes.c n = kmalloc(sizeof(struct return_instance), GFP_KERNEL); n 1771 kernel/events/uprobes.c if (!n) n 1774 kernel/events/uprobes.c *n = *o; n 1775 kernel/events/uprobes.c get_uprobe(n->uprobe); n 1776 kernel/events/uprobes.c n->next = NULL; n 1778 kernel/events/uprobes.c *p = n; n 1779 kernel/events/uprobes.c p = &n->next; n 508 kernel/exit.c struct task_struct *p, *n; n 521 kernel/exit.c list_for_each_entry_safe(p, n, dead, ptrace_entry) { n 648 kernel/exit.c struct task_struct *p, *n; n 681 kernel/exit.c list_for_each_entry_safe(p, n, &dead, ptrace_entry) { n 232 kernel/fail_function.c struct fei_attr *attr, *n; n 234 kernel/fail_function.c list_for_each_entry_safe(attr, n, &fei_attr_list, list) { n 86 kernel/irq/affinity.c int n, nodes = 0; n 89 kernel/irq/affinity.c for_each_node(n) { n 90 kernel/irq/affinity.c if (cpumask_intersects(mask, node_to_cpumask[n])) { n 91 kernel/irq/affinity.c node_set(n, *nodemsk); n 135 kernel/irq/affinity.c unsigned n, remaining_ncpus = 0; n 137 kernel/irq/affinity.c for (n = 0; n < nr_node_ids; n++) { n 138 kernel/irq/affinity.c node_vectors[n].id = n; n 139 kernel/irq/affinity.c node_vectors[n].ncpus = UINT_MAX; n 142 kernel/irq/affinity.c for_each_node_mask(n, nodemsk) { n 145 kernel/irq/affinity.c cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]); n 151 kernel/irq/affinity.c node_vectors[n].ncpus = ncpus; n 227 kernel/irq/affinity.c for (n = 0; n < nr_node_ids; n++) { n 230 kernel/irq/affinity.c if (node_vectors[n].ncpus == UINT_MAX) n 235 kernel/irq/affinity.c ncpus = node_vectors[n].ncpus; n 240 kernel/irq/affinity.c node_vectors[n].nvectors = nvectors; n 255 kernel/irq/affinity.c unsigned int i, n, nodes, cpus_per_vec, extra_vecs, done = 0; n 271 kernel/irq/affinity.c for_each_node_mask(n, nodemsk) { n 273 kernel/irq/affinity.c node_to_cpumask[n]); n 69 kernel/irq/irqdomain.c char *n; n 75 kernel/irq/irqdomain.c n = kasprintf(GFP_KERNEL, "%s", name); n 78 kernel/irq/irqdomain.c n = kasprintf(GFP_KERNEL, "%s-%d", name, id); n 81 kernel/irq/irqdomain.c n = kasprintf(GFP_KERNEL, "irqchip@%pa", pa); n 85 kernel/irq/irqdomain.c if (!fwid || !n) { n 87 kernel/irq/irqdomain.c kfree(n); n 92 kernel/irq/irqdomain.c fwid->name = n; n 110 kernel/jump_label.c int n = atomic_read(&key->enabled); n 112 kernel/jump_label.c return n >= 0 ? 
n : 1; n 144 kernel/locking/rtmutex.c # define rt_mutex_cmpxchg_relaxed(l,c,n) (cmpxchg_relaxed(&l->owner, c, n) == c) n 145 kernel/locking/rtmutex.c # define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c) n 146 kernel/locking/rtmutex.c # define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c) n 205 kernel/locking/rtmutex.c # define rt_mutex_cmpxchg_relaxed(l,c,n) (0) n 206 kernel/locking/rtmutex.c # define rt_mutex_cmpxchg_acquire(l,c,n) (0) n 207 kernel/locking/rtmutex.c # define rt_mutex_cmpxchg_release(l,c,n) (0) n 21 kernel/locking/rtmutex.h #define debug_rt_mutex_init(m, n, k) do { } while (0) n 278 kernel/locking/test-ww_mutex.c unsigned int n, last = nthreads - 1; n 285 kernel/locking/test-ww_mutex.c for (n = 0; n < nthreads; n++) { n 286 kernel/locking/test-ww_mutex.c struct test_cycle *cycle = &cycles[n]; n 289 kernel/locking/test-ww_mutex.c if (n == last) n 292 kernel/locking/test-ww_mutex.c cycle->b_mutex = &cycles[n + 1].a_mutex; n 294 kernel/locking/test-ww_mutex.c if (n == 0) n 297 kernel/locking/test-ww_mutex.c cycle->a_signal = &cycles[n - 1].b_signal; n 304 kernel/locking/test-ww_mutex.c for (n = 0; n < nthreads; n++) n 305 kernel/locking/test-ww_mutex.c queue_work(wq, &cycles[n].work); n 310 kernel/locking/test-ww_mutex.c for (n = 0; n < nthreads; n++) { n 311 kernel/locking/test-ww_mutex.c struct test_cycle *cycle = &cycles[n]; n 317 kernel/locking/test-ww_mutex.c n, nthreads, cycle->result); n 322 kernel/locking/test-ww_mutex.c for (n = 0; n < nthreads; n++) n 323 kernel/locking/test-ww_mutex.c ww_mutex_destroy(&cycles[n].a_mutex); n 330 kernel/locking/test-ww_mutex.c unsigned int n; n 333 kernel/locking/test-ww_mutex.c for (n = 2; n <= ncpus + 1; n++) { n 334 kernel/locking/test-ww_mutex.c ret = __test_cycle(n); n 352 kernel/locking/test-ww_mutex.c int n, r, tmp; n 358 kernel/locking/test-ww_mutex.c for (n = 0; n < count; n++) n 359 kernel/locking/test-ww_mutex.c order[n] = n; n 361 kernel/locking/test-ww_mutex.c for (n = count - 1; n > 1; n--) { n 362 kernel/locking/test-ww_mutex.c r = get_random_int() % (n + 1); n 363 kernel/locking/test-ww_mutex.c if (r != n) { n 364 kernel/locking/test-ww_mutex.c tmp = order[n]; n 365 kernel/locking/test-ww_mutex.c order[n] = order[r]; n 392 kernel/locking/test-ww_mutex.c int n, err; n 397 kernel/locking/test-ww_mutex.c for (n = 0; n < nlocks; n++) { n 398 kernel/locking/test-ww_mutex.c if (n == contended) n 401 kernel/locking/test-ww_mutex.c err = ww_mutex_lock(&locks[order[n]], &ctx); n 408 kernel/locking/test-ww_mutex.c if (contended > n) n 410 kernel/locking/test-ww_mutex.c contended = n; n 411 kernel/locking/test-ww_mutex.c while (n--) n 412 kernel/locking/test-ww_mutex.c ww_mutex_unlock(&locks[order[n]]); n 444 kernel/locking/test-ww_mutex.c int n, err; n 450 kernel/locking/test-ww_mutex.c for (n = 0; n < stress->nlocks; n++) { n 455 kernel/locking/test-ww_mutex.c ll->lock = &stress->locks[order[n]]; n 527 kernel/locking/test-ww_mutex.c int n; n 533 kernel/locking/test-ww_mutex.c for (n = 0; n < nlocks; n++) n 534 kernel/locking/test-ww_mutex.c ww_mutex_init(&locks[n], &ww_class); n 536 kernel/locking/test-ww_mutex.c for (n = 0; nthreads; n++) { n 541 kernel/locking/test-ww_mutex.c switch (n & 3) { n 574 kernel/locking/test-ww_mutex.c for (n = 0; n < nlocks; n++) n 575 kernel/locking/test-ww_mutex.c ww_mutex_destroy(&locks[n]); n 105 kernel/module.c static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n) n 107 kernel/module.c struct module_layout *layout 
= container_of(n, struct module_layout, mtn.node); n 112 kernel/module.c static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n) n 114 kernel/module.c struct module_layout *layout = container_of(n, struct module_layout, mtn.node); n 126 kernel/module.c mod_tree_comp(void *key, struct latch_tree_node *n) n 131 kernel/module.c start = __mod_tree_val(n); n 135 kernel/module.c end = start + __mod_tree_size(n); n 2931 kernel/module.c unsigned long n = min(len, COPY_CHUNK_SIZE); n 2933 kernel/module.c if (copy_from_user(dst, usrc, n) != 0) n 2936 kernel/module.c dst += n; n 2937 kernel/module.c usrc += n; n 2938 kernel/module.c len -= n; n 3533 kernel/module.c struct llist_node *pos, *n, *list; n 3540 kernel/module.c llist_for_each_safe(pos, n, list) { n 23 kernel/notifier.c struct notifier_block *n) n 26 kernel/notifier.c WARN_ONCE(((*nl) == n), "double register detected"); n 27 kernel/notifier.c if (n->priority > (*nl)->priority) n 31 kernel/notifier.c n->next = *nl; n 32 kernel/notifier.c rcu_assign_pointer(*nl, n); n 37 kernel/notifier.c struct notifier_block *n) n 40 kernel/notifier.c if ((*nl) == n) n 42 kernel/notifier.c if (n->priority > (*nl)->priority) n 46 kernel/notifier.c n->next = *nl; n 47 kernel/notifier.c rcu_assign_pointer(*nl, n); n 52 kernel/notifier.c struct notifier_block *n) n 55 kernel/notifier.c if ((*nl) == n) { n 56 kernel/notifier.c rcu_assign_pointer(*nl, n->next); n 124 kernel/notifier.c struct notifier_block *n) n 130 kernel/notifier.c ret = notifier_chain_register(&nh->head, n); n 146 kernel/notifier.c struct notifier_block *n) n 152 kernel/notifier.c ret = notifier_chain_unregister(&nh->head, n); n 216 kernel/notifier.c struct notifier_block *n) n 226 kernel/notifier.c return notifier_chain_register(&nh->head, n); n 229 kernel/notifier.c ret = notifier_chain_register(&nh->head, n); n 247 kernel/notifier.c struct notifier_block *n) n 252 kernel/notifier.c ret = notifier_chain_cond_register(&nh->head, n); n 269 kernel/notifier.c struct notifier_block *n) n 279 kernel/notifier.c return notifier_chain_unregister(&nh->head, n); n 282 kernel/notifier.c ret = notifier_chain_unregister(&nh->head, n); n 350 kernel/notifier.c struct notifier_block *n) n 352 kernel/notifier.c return notifier_chain_register(&nh->head, n); n 367 kernel/notifier.c struct notifier_block *n) n 369 kernel/notifier.c return notifier_chain_unregister(&nh->head, n); n 424 kernel/notifier.c struct notifier_block *n) n 434 kernel/notifier.c return notifier_chain_register(&nh->head, n); n 437 kernel/notifier.c ret = notifier_chain_register(&nh->head, n); n 454 kernel/notifier.c struct notifier_block *n) n 464 kernel/notifier.c return notifier_chain_unregister(&nh->head, n); n 467 kernel/notifier.c ret = notifier_chain_unregister(&nh->head, n); n 84 kernel/params.c bool parameqn(const char *a, const char *b, size_t n) n 88 kernel/params.c for (i = 0; i < n; i++) { n 519 kernel/params.c #define to_module_attr(n) container_of(n, struct module_attribute, attr) n 520 kernel/params.c #define to_module_kobject(n) container_of(n, struct module_kobject, kobj) n 536 kernel/params.c #define to_param_attr(n) container_of(n, struct param_attribute, mattr) n 1000 kernel/power/hibernate.c const char *buf, size_t n) n 1011 kernel/power/hibernate.c p = memchr(buf, '\n', n); n 1012 kernel/power/hibernate.c len = p ? p - buf : n; n 1045 kernel/power/hibernate.c return error ? 
error : n; n 1058 kernel/power/hibernate.c const char *buf, size_t n) n 1061 kernel/power/hibernate.c int len = n; n 1081 kernel/power/hibernate.c return n; n 1094 kernel/power/hibernate.c size_t n) n 1104 kernel/power/hibernate.c return n; n 1116 kernel/power/hibernate.c const char *buf, size_t n) n 1122 kernel/power/hibernate.c return n; n 1138 kernel/power/hibernate.c const char *buf, size_t n) n 1144 kernel/power/hibernate.c return n; n 107 kernel/power/main.c const char *buf, size_t n) n 118 kernel/power/main.c return n; n 147 kernel/power/main.c static suspend_state_t decode_suspend_state(const char *buf, size_t n) n 153 kernel/power/main.c p = memchr(buf, '\n', n); n 154 kernel/power/main.c len = p ? p - buf : n; n 167 kernel/power/main.c const char *buf, size_t n) n 181 kernel/power/main.c state = decode_suspend_state(buf, n); n 189 kernel/power/main.c return error ? error : n; n 229 kernel/power/main.c const char *buf, size_t n) n 237 kernel/power/main.c p = memchr(buf, '\n', n); n 238 kernel/power/main.c len = p ? p - buf : n; n 252 kernel/power/main.c return error ? error : n; n 450 kernel/power/main.c const char *buf, size_t n) n 461 kernel/power/main.c return n; n 490 kernel/power/main.c const char *buf, size_t n) n 501 kernel/power/main.c return n; n 572 kernel/power/main.c static suspend_state_t decode_state(const char *buf, size_t n) n 580 kernel/power/main.c p = memchr(buf, '\n', n); n 581 kernel/power/main.c len = p ? p - buf : n; n 600 kernel/power/main.c const char *buf, size_t n) n 614 kernel/power/main.c state = decode_state(buf, n); n 628 kernel/power/main.c return error ? error : n; n 674 kernel/power/main.c const char *buf, size_t n) n 691 kernel/power/main.c error = n; n 727 kernel/power/main.c const char *buf, size_t n) n 729 kernel/power/main.c suspend_state_t state = decode_state(buf, n); n 740 kernel/power/main.c return error ? error : n; n 756 kernel/power/main.c const char *buf, size_t n) n 759 kernel/power/main.c return error ? error : n; n 773 kernel/power/main.c const char *buf, size_t n) n 776 kernel/power/main.c return error ? 
error : n; n 795 kernel/power/main.c const char *buf, size_t n) n 805 kernel/power/main.c return n; n 832 kernel/power/main.c const char *buf, size_t n) n 840 kernel/power/main.c return n; n 1250 kernel/power/snapshot.c unsigned int n = 0; n 1262 kernel/power/snapshot.c n++; n 1264 kernel/power/snapshot.c return n; n 1319 kernel/power/snapshot.c unsigned int n = 0; n 1329 kernel/power/snapshot.c n++; n 1331 kernel/power/snapshot.c return n; n 1340 kernel/power/snapshot.c int n; n 1342 kernel/power/snapshot.c for (n = PAGE_SIZE / sizeof(long); n; n--) n 1931 kernel/power/snapshot.c unsigned int n) { return 0; } n 1389 kernel/printk/printk.c size_t n; n 1413 kernel/printk/printk.c n = msg_print_text(msg, true, syslog_time, text, n 1415 kernel/printk/printk.c if (n - syslog_partial <= size) { n 1419 kernel/printk/printk.c n -= syslog_partial; n 1423 kernel/printk/printk.c n = size; n 1424 kernel/printk/printk.c syslog_partial += n; n 1426 kernel/printk/printk.c n = 0; n 1429 kernel/printk/printk.c if (!n) n 1432 kernel/printk/printk.c if (copy_to_user(buf, text + skip, n)) { n 1438 kernel/printk/printk.c len += n; n 1439 kernel/printk/printk.c size -= n; n 1440 kernel/printk/printk.c buf += n; n 2132 kernel/printk/printk.c int n; n 2138 kernel/printk/printk.c n = vscnprintf(buf, sizeof(buf), fmt, ap); n 2141 kernel/printk/printk.c early_console->write(early_console, buf, n); n 158 kernel/profile.c int task_handoff_register(struct notifier_block *n) n 160 kernel/profile.c return atomic_notifier_chain_register(&task_free_notifier, n); n 164 kernel/profile.c int task_handoff_unregister(struct notifier_block *n) n 166 kernel/profile.c return atomic_notifier_chain_unregister(&task_free_notifier, n); n 170 kernel/profile.c int profile_event_register(enum profile_type type, struct notifier_block *n) n 177 kernel/profile.c &task_exit_notifier, n); n 181 kernel/profile.c &munmap_notifier, n); n 189 kernel/profile.c int profile_event_unregister(enum profile_type type, struct notifier_block *n) n 196 kernel/profile.c &task_exit_notifier, n); n 200 kernel/profile.c &munmap_notifier, n); n 582 kernel/ptrace.c struct task_struct *p, *n; n 584 kernel/ptrace.c list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) { n 868 kernel/ptrace.c int n; n 870 kernel/ptrace.c for (n = 0; n < view->n; ++n) { n 871 kernel/ptrace.c regset = view->regsets + n; n 891 kernel/ptrace.c (__kernel_size_t) (regset->n * regset->size)); n 571 kernel/rcu/rcuperf.c static int compute_real(int n) n 575 kernel/rcu/rcuperf.c if (n >= 0) { n 576 kernel/rcu/rcuperf.c nr = n; n 578 kernel/rcu/rcuperf.c nr = num_online_cpus() + 1 + n; n 2317 kernel/rcu/rcutorture.c static unsigned long n; n 2319 kernel/rcu/rcutorture.c if (cur_ops->sync && !(++n & 0xfff)) n 343 kernel/rcu/update.c void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, n 350 kernel/rcu/update.c for (i = 0; i < n; i++) { n 366 kernel/rcu/update.c for (i = 0; i < n; i++) { n 1124 kernel/resource.c resource_size_t start, resource_size_t n, n 1135 kernel/resource.c res->end = start + n - 1; n 1192 kernel/resource.c resource_size_t n) n 1198 kernel/resource.c end = start + n - 1; n 1427 kernel/resource.c resource_size_t n; n 1434 kernel/resource.c __release_region(this->parent, this->start, this->n); n 1442 kernel/resource.c this->start == match->start && this->n == match->n; n 1447 kernel/resource.c resource_size_t start, resource_size_t n, const char *name) n 1459 kernel/resource.c dr->n = n; n 1461 kernel/resource.c res = __request_region(parent, 
start, n, name, 0); n 1472 kernel/resource.c resource_size_t start, resource_size_t n) n 1474 kernel/resource.c struct region_devres match_data = { parent, start, n }; n 1476 kernel/resource.c __release_region(parent, start, n); n 207 kernel/sched/debug.c static struct ctl_table *sd_alloc_ctl_entry(int n) n 210 kernel/sched/debug.c kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL); n 653 kernel/sched/debug.c #define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n); n 659 kernel/sched/debug.c #define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, schedstat_val(rq->n)); n 768 kernel/sched/debug.c unsigned long n = *offset; n 770 kernel/sched/debug.c if (n == 0) n 773 kernel/sched/debug.c n--; n 775 kernel/sched/debug.c if (n > 0) n 776 kernel/sched/debug.c n = cpumask_next(n - 1, cpu_online_mask); n 778 kernel/sched/debug.c n = cpumask_first(cpu_online_mask); n 780 kernel/sched/debug.c *offset = n + 1; n 782 kernel/sched/debug.c if (n < nr_cpu_ids) n 783 kernel/sched/debug.c return (void *)(unsigned long)(n + 2); n 110 kernel/sched/loadavg.c fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n) n 114 kernel/sched/loadavg.c if (n) { n 116 kernel/sched/loadavg.c if (n & 1) { n 121 kernel/sched/loadavg.c n >>= 1; n 122 kernel/sched/loadavg.c if (!n) n 158 kernel/sched/loadavg.c unsigned long active, unsigned int n) n 160 kernel/sched/loadavg.c return calc_load(load, fixed_power_int(exp, FSHIFT, n), active); n 307 kernel/sched/loadavg.c long delta, active, n; n 315 kernel/sched/loadavg.c n = 1 + (delta / LOAD_FREQ); n 320 kernel/sched/loadavg.c avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n); n 321 kernel/sched/loadavg.c avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n); n 322 kernel/sched/loadavg.c avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n); n 324 kernel/sched/loadavg.c WRITE_ONCE(calc_load_update, sample_window + n * LOAD_FREQ); n 37 kernel/sched/pelt.c static u64 decay_load(u64 val, u64 n) n 41 kernel/sched/pelt.c if (unlikely(n > LOAD_AVG_PERIOD * 63)) n 45 kernel/sched/pelt.c local_n = n; n 85 kernel/sched/stats.c unsigned long n = *offset; n 87 kernel/sched/stats.c if (n == 0) n 90 kernel/sched/stats.c n--; n 92 kernel/sched/stats.c if (n > 0) n 93 kernel/sched/stats.c n = cpumask_next(n - 1, cpu_online_mask); n 95 kernel/sched/stats.c n = cpumask_first(cpu_online_mask); n 97 kernel/sched/stats.c *offset = n + 1; n 99 kernel/sched/stats.c if (n < nr_cpu_ids) n 100 kernel/sched/stats.c return (void *)(unsigned long)(n + 2); n 1524 kernel/sched/topology.c int a, b, c, n; n 1526 kernel/sched/topology.c n = sched_max_numa_distance; n 1536 kernel/sched/topology.c if (node_distance(a, b) < n) n 1541 kernel/sched/topology.c if (node_distance(a, c) < n && n 1542 kernel/sched/topology.c node_distance(b, c) < n) { n 2225 kernel/sched/topology.c int i, j, n; n 2238 kernel/sched/topology.c n = 0; n 2241 kernel/sched/topology.c n = 1; n 2246 kernel/sched/topology.c n = ndoms_new; n 2251 kernel/sched/topology.c for (j = 0; j < n && !new_topology; j++) { n 2273 kernel/sched/topology.c n = ndoms_cur; n 2275 kernel/sched/topology.c n = 0; n 2283 kernel/sched/topology.c for (j = 0; j < n && !new_topology; j++) { n 2297 kernel/sched/topology.c for (j = 0; j < n && !sched_energy_update; j++) { n 741 kernel/seccomp.c struct seccomp_knotif n = {}; n 748 kernel/seccomp.c n.task = current; n 749 kernel/seccomp.c n.state = SECCOMP_NOTIFY_INIT; n 750 kernel/seccomp.c n.data = sd; n 751 kernel/seccomp.c n.id = seccomp_next_notify_id(match); n 752 kernel/seccomp.c 
init_completion(&n.ready); n 753 kernel/seccomp.c list_add(&n.list, &match->notif->notifications); n 762 kernel/seccomp.c err = wait_for_completion_interruptible(&n.ready); n 765 kernel/seccomp.c ret = n.val; n 766 kernel/seccomp.c err = n.error; n 780 kernel/seccomp.c list_del(&n.list); n 491 kernel/signal.c struct sigqueue *q, *n; n 496 kernel/signal.c list_for_each_entry_safe(q, n, &pending->list, list) { n 781 kernel/signal.c struct sigqueue *q, *n; n 789 kernel/signal.c list_for_each_entry_safe(q, n, &s->list, list) { n 813 kernel/time/time.c u64 nsecs_to_jiffies64(u64 n) n 817 kernel/time/time.c return div_u64(n, NSEC_PER_SEC / HZ); n 820 kernel/time/time.c return div_u64(n * HZ / 512, NSEC_PER_SEC / 512); n 826 kernel/time/time.c return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ); n 844 kernel/time/time.c unsigned long nsecs_to_jiffies(u64 n) n 846 kernel/time/time.c return (unsigned long)nsecs_to_jiffies64(n); n 154 kernel/time/timer.c #define LVL_SHIFT(n) ((n) * LVL_CLK_SHIFT) n 155 kernel/time/timer.c #define LVL_GRAN(n) (1UL << LVL_SHIFT(n)) n 161 kernel/time/timer.c #define LVL_START(n) ((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT)) n 167 kernel/time/timer.c #define LVL_OFFS(n) ((n) * LVL_SIZE) n 149 kernel/trace/blktrace.c int n; n 168 kernel/trace/blktrace.c n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args); n 174 kernel/trace/blktrace.c trace_note(bt, 0, BLK_TN_MESSAGE, buf, n, n 177 kernel/trace/blktrace.c trace_note(bt, 0, BLK_TN_MESSAGE, buf, n, NULL); n 1233 kernel/trace/ftrace.c struct ftrace_mod_load *p, *n; n 1240 kernel/trace/ftrace.c list_for_each_entry_safe(p, n, head, list) n 3915 kernel/trace/ftrace.c int n; n 3917 kernel/trace/ftrace.c n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod); n 3919 kernel/trace/ftrace.c if (n > sizeof(modname) - 1) n 3929 kernel/trace/ftrace.c struct ftrace_mod_load *ftrace_mod, *n; n 3941 kernel/trace/ftrace.c list_for_each_entry_safe(ftrace_mod, n, head, list) { n 3978 kernel/trace/ftrace.c struct ftrace_mod_load *ftrace_mod, *n; n 3998 kernel/trace/ftrace.c list_for_each_entry_safe(ftrace_mod, n, head, list) { n 4021 kernel/trace/ftrace.c list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) { n 4583 kernel/trace/ftrace.c struct ftrace_func_probe *probe, *n; n 4585 kernel/trace/ftrace.c list_for_each_entry_safe(probe, n, &tr->func_probes, list) n 4621 kernel/trace/ftrace.c struct ftrace_func_command *p, *n; n 4625 kernel/trace/ftrace.c list_for_each_entry_safe(p, n, &ftrace_commands, list) { n 5753 kernel/trace/ftrace.c struct ftrace_mod_func *n; n 5756 kernel/trace/ftrace.c list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) { n 5768 kernel/trace/ftrace.c struct ftrace_mod_map *n; n 5780 kernel/trace/ftrace.c list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) { n 1733 kernel/trace/trace.c struct trace_selftests *p, *n; n 1747 kernel/trace/trace.c list_for_each_entry_safe(p, n, &postponed_selftests, list) { n 36 kernel/trace/trace_dynevent.c struct dyn_event *pos, *n; n 62 kernel/trace/trace_dynevent.c for_each_dyn_event_safe(pos, n) { n 117 kernel/trace/trace_dynevent.h #define for_each_dyn_event_safe(pos, n) \ n 118 kernel/trace/trace_dynevent.h list_for_each_entry_safe(pos, n, &dyn_event_list, list) n 3209 kernel/trace/trace_events_hist.c unsigned int n, i, j; n 3217 kernel/trace/trace_events_hist.c for (n = 0; n < n_keys; n++) { n 3218 kernel/trace/trace_events_hist.c hist_field = hist_data->fields[i + n]; n 3219 kernel/trace/trace_events_hist.c target_hist_field = 
target_hist_data->fields[j + n]; n 3321 kernel/trace/trace_events_hist.c unsigned int i, n, first = true; n 3427 kernel/trace/trace_events_hist.c n = target_hist_data->n_field_var_hists; n 3428 kernel/trace/trace_events_hist.c target_hist_data->field_var_hists[n] = var_hist; n 5563 kernel/trace/trace_events_hist.c struct event_trigger_data *data, int n) n 5568 kernel/trace/trace_events_hist.c if (n > 0) n 5591 kernel/trace/trace_events_hist.c int n = 0, ret = 0; n 5603 kernel/trace/trace_events_hist.c hist_trigger_show(m, data, n++); n 5704 kernel/trace/trace_events_hist.c unsigned int n = 0; n 5712 kernel/trace/trace_events_hist.c if (n++) n 6176 kernel/trace/trace_events_hist.c struct event_trigger_data *test, *n; n 6186 kernel/trace/trace_events_hist.c list_for_each_entry_safe(test, n, &file->triggers, list) { n 6478 kernel/trace/trace_events_hist.c struct event_trigger_data *test, *n; n 6480 kernel/trace/trace_events_hist.c list_for_each_entry_safe(test, n, &file->triggers, list) { n 348 kernel/trace/trace_events_trigger.c struct event_command *p, *n; n 352 kernel/trace/trace_events_trigger.c list_for_each_entry_safe(p, n, &trigger_commands, list) { n 480 kernel/trace/trace_events_trigger.c struct event_trigger_data *data, *n; n 481 kernel/trace/trace_events_trigger.c list_for_each_entry_safe(data, n, &file->triggers, list) { n 1323 kernel/trace/trace_functions_graph.c int n; n 1325 kernel/trace/trace_functions_graph.c n = sprintf(buf, "%d\n", fgraph_max_depth); n 1327 kernel/trace/trace_functions_graph.c return simple_read_from_buffer(ubuf, cnt, ppos, buf, n); n 83 kernel/trace/trace_kprobe.c #define SIZEOF_TRACE_KPROBE(n) \ n 85 kernel/trace/trace_kprobe.c (sizeof(struct probe_arg) * (n))) n 139 kernel/trace/trace_mmiotrace.c unsigned long n; n 141 kernel/trace/trace_mmiotrace.c n = count_overruns(iter); n 142 kernel/trace/trace_mmiotrace.c if (n) { n 144 kernel/trace/trace_mmiotrace.c trace_seq_printf(s, "MARK 0.000000 Lost %lu events.\n", n); n 379 kernel/trace/trace_stack.c long n = *pos - 1; n 381 kernel/trace/trace_stack.c if (n >= stack_trace_nr_entries) n 384 kernel/trace/trace_stack.c m->private = (void *)n; n 49 kernel/trace/trace_stat.c struct stat_node *snode, *n; n 51 kernel/trace/trace_stat.c rbtree_postorder_for_each_entry_safe(snode, n, &session->stat_root, node) { n 177 kernel/trace/trace_stat.c int n = *pos; n 185 kernel/trace/trace_stat.c if (n == 0) n 187 kernel/trace/trace_stat.c n--; n 191 kernel/trace/trace_stat.c for (i = 0; node && i < n; i++) n 86 kernel/trace/trace_uprobe.c #define SIZEOF_TRACE_UPROBE(n) \ n 88 kernel/trace/trace_uprobe.c (sizeof(struct probe_arg) * (n))) n 103 kernel/trace/trace_uprobe.c static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n) n 105 kernel/trace/trace_uprobe.c return addr - (n * sizeof(long)); n 108 kernel/trace/trace_uprobe.c static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n) n 110 kernel/trace/trace_uprobe.c return addr + (n * sizeof(long)); n 114 kernel/trace/trace_uprobe.c static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n) n 119 kernel/trace/trace_uprobe.c addr = adjust_stack_addr(addr, n); n 38 kernel/trace/tracing_map.c void tracing_map_update_sum(struct tracing_map_elt *elt, unsigned int i, u64 n) n 40 kernel/trace/tracing_map.c atomic64_add(n, &elt->fields[i].sum); n 70 kernel/trace/tracing_map.c void tracing_map_set_var(struct tracing_map_elt *elt, unsigned int i, u64 n) n 72 kernel/trace/tracing_map.c atomic64_set(&elt->vars[i], n); n 267 
kernel/trace/tracing_map.h unsigned int i, u64 n); n 269 kernel/trace/tracing_map.h unsigned int i, u64 n); n 214 kernel/user.c int n; n 219 kernel/user.c for(n = 0; n < UIDHASH_SZ; ++n) n 220 kernel/user.c INIT_HLIST_HEAD(uidhash_table + n); n 1062 kernel/workqueue.c struct work_struct *n; n 1068 kernel/workqueue.c list_for_each_entry_safe_from(work, n, NULL, entry) { n 1080 kernel/workqueue.c *nextp = n; n 2498 kernel/workqueue.c struct work_struct *work, *n; n 2515 kernel/workqueue.c list_for_each_entry_safe(work, n, &pool->worklist, entry) { n 2519 kernel/workqueue.c move_linked_works(work, scheduled, &n); n 5246 kernel/workqueue.c struct apply_wqattrs_ctx *ctx, *n; n 5266 kernel/workqueue.c list_for_each_entry_safe(ctx, n, &ctxs, list) { n 122 lib/842/842_compress.c #define find_index(p, b, n) ({ \ n 124 lib/842/842_compress.c p->index##b[n] = INDEX_NOT_FOUND; \ n 125 lib/842/842_compress.c hash_for_each_possible(p->htable##b, _n, node, p->data##b[n]) { \ n 126 lib/842/842_compress.c if (p->data##b[n] == _n->data) { \ n 127 lib/842/842_compress.c p->index##b[n] = _n->index; \ n 131 lib/842/842_compress.c p->index##b[n] >= 0; \ n 134 lib/842/842_compress.c #define check_index(p, b, n) \ n 135 lib/842/842_compress.c ((p)->index##b[n] == INDEX_NOT_CHECKED \ n 136 lib/842/842_compress.c ? find_index(p, b, n) \ n 137 lib/842/842_compress.c : (p)->index##b[n] >= 0) n 152 lib/842/842_compress.c static int add_bits(struct sw842_param *p, u64 d, u8 n); n 154 lib/842/842_compress.c static int __split_add_bits(struct sw842_param *p, u64 d, u8 n, u8 s) n 158 lib/842/842_compress.c if (n <= s) n 161 lib/842/842_compress.c ret = add_bits(p, d >> s, n - s); n 167 lib/842/842_compress.c static int add_bits(struct sw842_param *p, u64 d, u8 n) n 169 lib/842/842_compress.c int b = p->bit, bits = b + n, s = round_up(bits, 8) - bits; n 173 lib/842/842_compress.c pr_debug("add %u bits %lx\n", (unsigned char)n, (unsigned long)d); n 175 lib/842/842_compress.c if (n > 64) n 182 lib/842/842_compress.c return __split_add_bits(p, d, n, 32); n 184 lib/842/842_compress.c return __split_add_bits(p, d, n, 16); n 186 lib/842/842_compress.c return __split_add_bits(p, d, n, 8); n 211 lib/842/842_compress.c p->bit += n; n 65 lib/842/842_decompress.c static int next_bits(struct sw842_param *p, u64 *d, u8 n); n 67 lib/842/842_decompress.c static int __split_next_bits(struct sw842_param *p, u64 *d, u8 n, u8 s) n 72 lib/842/842_decompress.c if (n <= s) { n 73 lib/842/842_decompress.c pr_debug("split_next_bits invalid n %u s %u\n", n, s); n 77 lib/842/842_decompress.c ret = next_bits(p, &tmp, n - s); n 87 lib/842/842_decompress.c static int next_bits(struct sw842_param *p, u64 *d, u8 n) n 89 lib/842/842_decompress.c u8 *in = p->in, b = p->bit, bits = b + n; n 91 lib/842/842_decompress.c if (n > 64) { n 92 lib/842/842_decompress.c pr_debug("next_bits invalid n %u\n", n); n 100 lib/842/842_decompress.c return __split_next_bits(p, d, n, 32); n 102 lib/842/842_decompress.c return __split_next_bits(p, d, n, 16); n 104 lib/842/842_decompress.c return __split_next_bits(p, d, n, 8); n 118 lib/842/842_decompress.c *d &= GENMASK_ULL(n - 1, 0); n 120 lib/842/842_decompress.c p->bit += n; n 131 lib/842/842_decompress.c static int do_data(struct sw842_param *p, u8 n) n 136 lib/842/842_decompress.c if (n > p->olen) n 139 lib/842/842_decompress.c ret = next_bits(p, &v, n * 8); n 143 lib/842/842_decompress.c switch (n) { n 157 lib/842/842_decompress.c p->out += n; n 158 lib/842/842_decompress.c p->olen -= n; n 212 lib/842/842_decompress.c 
static int do_index(struct sw842_param *p, u8 n) n 214 lib/842/842_decompress.c switch (n) { n 62 lib/asn1_decoder.c size_t dp = *_dp, len, n; n 107 lib/asn1_decoder.c n = len - 0x80; n 108 lib/asn1_decoder.c if (unlikely(n > sizeof(len) - 1)) n 110 lib/asn1_decoder.c if (unlikely(n > datalen - dp)) n 113 lib/asn1_decoder.c for (; n > 0; n--) { n 270 lib/asn1_decoder.c int n = len - 0x80; n 271 lib/asn1_decoder.c if (unlikely(n > 2)) n 273 lib/asn1_decoder.c if (unlikely(n > datalen - dp)) n 275 lib/asn1_decoder.c hdr += n; n 276 lib/asn1_decoder.c for (len = 0; n > 0; n--) { n 1318 lib/assoc_array.c struct assoc_array_node *n = n 1320 lib/assoc_array.c n->back_pointer = NULL; n 1673 lib/assoc_array.c struct assoc_array_node *n; n 1684 lib/assoc_array.c n = assoc_array_ptr_to_node(new_parent); n 1685 lib/assoc_array.c n->slots[slot] = assoc_array_node_to_ptr(new_n); n 148 lib/atomic64.c s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n) n 157 lib/atomic64.c v->counter = n; n 86 lib/bch.c #define GF_N(_p) ((_p)->n) n 268 lib/bch.c const unsigned int n = GF_N(bch); n 269 lib/bch.c while (v >= n) { n 270 lib/bch.c v -= n; n 271 lib/bch.c v = (v & n) + (v >> GF_M(bch)); n 281 lib/bch.c const unsigned int n = GF_N(bch); n 282 lib/bch.c return (v < n) ? v : v-n; n 390 lib/bch.c const unsigned int n = GF_N(bch); n 411 lib/bch.c tmp = a_log(bch, d)+n-a_log(bch, pd); n 558 lib/bch.c int n = 0; n 562 lib/bch.c roots[n++] = mod_s(bch, GF_N(bch)-bch->a_log_tab[poly->c[0]]+ n 564 lib/bch.c return n; n 573 lib/bch.c int n = 0, i, l0, l1, l2; n 600 lib/bch.c roots[n++] = modulo(bch, 2*GF_N(bch)-l1- n 602 lib/bch.c roots[n++] = modulo(bch, 2*GF_N(bch)-l1- n 606 lib/bch.c return n; n 615 lib/bch.c int i, n = 0; n 635 lib/bch.c roots[n++] = a_ilog(bch, tmp[i]); n 639 lib/bch.c return n; n 648 lib/bch.c int i, l, n = 0; n 701 lib/bch.c n = 4; n 703 lib/bch.c return n; n 1006 lib/bch.c if (8*len > (bch->n-bch->ecc_bits)) n 1178 lib/bch.c int n, err = 0; n 1184 lib/bch.c roots = bch_alloc((bch->n+1)*sizeof(*roots), &err); n 1194 lib/bch.c memset(roots , 0, (bch->n+1)*sizeof(*roots)); n 1217 lib/bch.c n = g->deg+1; n 1220 lib/bch.c while (n > 0) { n 1221 lib/bch.c nbits = (n > 32) ? 
32 : n; n 1223 lib/bch.c if (g->c[n-1-j]) n 1227 lib/bch.c n -= nbits; n 1312 lib/bch.c bch->n = (1 << m)-1; n 1315 lib/bch.c bch->a_pow_tab = bch_alloc((1+bch->n)*sizeof(*bch->a_pow_tab), &err); n 1316 lib/bch.c bch->a_log_tab = bch_alloc((1+bch->n)*sizeof(*bch->a_log_tab), &err); n 538 lib/bitmap.c unsigned long long n; n 541 lib/bitmap.c len = _parse_integer(str, 10, &n); n 544 lib/bitmap.c if (len & KSTRTOX_OVERFLOW || n != (unsigned int)n) n 547 lib/bitmap.c *num = n; n 803 lib/bitmap.c int n = bitmap_pos_to_ord(old, oldbit, nbits); n 805 lib/bitmap.c if (n < 0 || w == 0) n 808 lib/bitmap.c set_bit(bitmap_ord_to_pos(new, n % w, nbits), dst); n 842 lib/bitmap.c int n = bitmap_pos_to_ord(old, oldbit, bits); n 843 lib/bitmap.c if (n < 0 || w == 0) n 846 lib/bitmap.c return bitmap_ord_to_pos(new, n % w, bits); n 958 lib/bitmap.c unsigned int n, m; /* same meaning as in above comment */ n 975 lib/bitmap.c for_each_set_bit(n, relmap, bits) { n 978 lib/bitmap.c set_bit(n, dst); n 103 lib/btree.c static int longcmp(const unsigned long *l1, const unsigned long *l2, size_t n) n 107 lib/btree.c for (i = 0; i < n; i++) { n 117 lib/btree.c size_t n) n 121 lib/btree.c for (i = 0; i < n; i++) n 126 lib/btree.c static unsigned long *longset(unsigned long *s, unsigned long c, size_t n) n 130 lib/btree.c for (i = 0; i < n; i++) n 148 lib/btree.c static unsigned long *bkey(struct btree_geo *geo, unsigned long *node, int n) n 150 lib/btree.c return &node[n * geo->keylen]; n 153 lib/btree.c static void *bval(struct btree_geo *geo, unsigned long *node, int n) n 155 lib/btree.c return (void *)node[geo->no_longs + n]; n 158 lib/btree.c static void setkey(struct btree_geo *geo, unsigned long *node, int n, n 161 lib/btree.c longcpy(bkey(geo, node, n), key, geo->keylen); n 164 lib/btree.c static void setval(struct btree_geo *geo, unsigned long *node, int n, n 167 lib/btree.c node[geo->no_longs + n] = (unsigned long) val; n 170 lib/btree.c static void clearpair(struct btree_geo *geo, unsigned long *node, int n) n 172 lib/btree.c longset(bkey(geo, node, n), 0, geo->keylen); n 173 lib/btree.c node[geo->no_longs + n] = 0; n 23 lib/cmdline.c static int get_range(char **str, int *pint, int n) n 30 lib/cmdline.c for (x = *pint; n && x < upper_range; x++, n--) n 17 lib/cpumask.c unsigned int cpumask_next(int n, const struct cpumask *srcp) n 20 lib/cpumask.c if (n != -1) n 21 lib/cpumask.c cpumask_check(n); n 22 lib/cpumask.c return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1); n 34 lib/cpumask.c int cpumask_next_and(int n, const struct cpumask *src1p, n 38 lib/cpumask.c if (n != -1) n 39 lib/cpumask.c cpumask_check(n); n 41 lib/cpumask.c nr_cpumask_bits, n + 1); n 77 lib/cpumask.c int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap) n 82 lib/cpumask.c next = cpumask_next(n, mask); n 84 lib/cpumask.c if (wrap && n < start && next >= start) { n 89 lib/cpumask.c n = -1; n 772 lib/dynamic_debug.c int n = *pos; n 778 lib/dynamic_debug.c if (!n) n 780 lib/dynamic_debug.c if (n < 0) n 783 lib/dynamic_debug.c while (dp != NULL && --n > 0) n 883 lib/dynamic_debug.c int ddebug_add_module(struct _ddebug *tab, unsigned int n, n 900 lib/dynamic_debug.c dt->num_ddebugs = n; n 907 lib/dynamic_debug.c vpr_info("%u debug prints in module %s\n", n, dt->mod_name); n 1013 lib/dynamic_debug.c int n = 0, entries = 0, modct = 0; n 1030 lib/dynamic_debug.c ret = ddebug_add_module(iter_start, n, modname); n 1033 lib/dynamic_debug.c n = 0; n 1037 lib/dynamic_debug.c n++; n 1039 lib/dynamic_debug.c ret = 
ddebug_add_module(iter_start, n, modname); n 112 lib/error-inject.c struct ei_entry *ent, *n; n 118 lib/error-inject.c list_for_each_entry_safe(ent, n, &error_injection_list, list) { n 71 lib/fault-inject.c int n, nr_entries; n 78 lib/fault-inject.c for (n = 0; n < nr_entries; n++) { n 79 lib/fault-inject.c if (attr->reject_start <= entries[n] && n 80 lib/fault-inject.c entries[n] < attr->reject_end) n 82 lib/fault-inject.c if (attr->require_start <= entries[n] && n 83 lib/fault-inject.c entries[n] < attr->require_end) n 57 lib/generic-radix-tree.c struct genradix_node *n = genradix_root_to_node(r); n 64 lib/generic-radix-tree.c if (!n) n 71 lib/generic-radix-tree.c n = n->children[offset >> genradix_depth_shift(level)]; n 75 lib/generic-radix-tree.c return &n->data[offset]; n 108 lib/generic-radix-tree.c struct genradix_node *n, *new_node = NULL; n 115 lib/generic-radix-tree.c n = genradix_root_to_node(r); n 118 lib/generic-radix-tree.c if (n && ilog2(offset) < genradix_depth_shift(level)) n 127 lib/generic-radix-tree.c new_node->children[0] = n; n 129 lib/generic-radix-tree.c ((unsigned long) new_node | (n ? level + 1 : 0))); n 139 lib/generic-radix-tree.c &n->children[offset >> genradix_depth_shift(level)]; n 142 lib/generic-radix-tree.c n = READ_ONCE(*p); n 143 lib/generic-radix-tree.c if (!n) { n 150 lib/generic-radix-tree.c if (!(n = cmpxchg_release(p, NULL, new_node))) n 151 lib/generic-radix-tree.c swap(n, new_node); n 158 lib/generic-radix-tree.c return &n->data[offset]; n 167 lib/generic-radix-tree.c struct genradix_node *n; n 174 lib/generic-radix-tree.c n = genradix_root_to_node(r); n 186 lib/generic-radix-tree.c while (!n->children[i]) { n 197 lib/generic-radix-tree.c n = n->children[i]; n 200 lib/generic-radix-tree.c return &n->data[iter->offset & (PAGE_SIZE - 1)]; n 204 lib/generic-radix-tree.c static void genradix_free_recurse(struct genradix_node *n, unsigned level) n 210 lib/generic-radix-tree.c if (n->children[i]) n 211 lib/generic-radix-tree.c genradix_free_recurse(n->children[i], level - 1); n 214 lib/generic-radix-tree.c genradix_free_node(n); n 132 lib/globtest.c unsigned n = 0; n 150 lib/globtest.c n++; n 153 lib/globtest.c n -= successes; n 154 lib/globtest.c printk(message, successes, n); n 157 lib/globtest.c return n ? -ECANCELED : 0; n 143 lib/inflate.c ush n; /* literal, length base, or distance base */ n 234 lib/inflate.c #define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}} n 235 lib/inflate.c #define DUMPBITS(n) {b>>=(n);k-=(n);} n 324 lib/inflate.c unsigned n, /* number of codes (assumed <= N_MAX) */ n 375 lib/inflate.c p = b; i = n; n 377 lib/inflate.c Tracecv(*p, (stderr, (n-i >= ' ' && n-i <= '~' ? "%c %d\n" : "0x%x %d\n"), n 378 lib/inflate.c n-i, *p)); n 382 lib/inflate.c if (c[0] == n) /* null input--all zero length codes */ n 438 lib/inflate.c } while (++i < n); n 439 lib/inflate.c n = x[g]; /* set n to length of v */ n 519 lib/inflate.c if (p >= v + n) n 524 lib/inflate.c r.v.n = (ush)(*p); /* simple code is just the value */ n 530 lib/inflate.c r.v.n = d[*p++ - s]; n 599 lib/inflate.c unsigned n, d; /* length and index for copy */ n 629 lib/inflate.c slide[w++] = (uch)t->v.n; n 645 lib/inflate.c n = t->v.n + ((unsigned)b & mask_bits[e]); n 660 lib/inflate.c d = w - t->v.n - ((unsigned)b & mask_bits[e]); n 662 lib/inflate.c Tracevv((stderr,"\\[%d,%d]", w-d, n)); n 666 lib/inflate.c n -= (e = (e = WSIZE - ((d &= WSIZE-1) > w ? d : w)) > n ? 
n : e); n 685 lib/inflate.c } while (n); n 707 lib/inflate.c unsigned n; /* number of bytes in block */ n 721 lib/inflate.c n = k & 7; n 722 lib/inflate.c DUMPBITS(n); n 727 lib/inflate.c n = ((unsigned)b & 0xffff); n 730 lib/inflate.c if (n != (unsigned)((~b) & 0xffff)) n 736 lib/inflate.c while (n--) n 836 lib/inflate.c unsigned n; /* number of lengths to get */ n 912 lib/inflate.c n = nl + nd; n 915 lib/inflate.c while ((unsigned)i < n) n 920 lib/inflate.c j = td->v.n; n 928 lib/inflate.c if ((unsigned)i + j > n) { n 940 lib/inflate.c if ((unsigned)i + j > n) { n 953 lib/inflate.c if ((unsigned)i + j > n) { n 14 lib/iov_iter.c #define iterate_iovec(i, n, __v, __p, skip, STEP) { \ n 16 lib/iov_iter.c size_t wanted = n; \ n 18 lib/iov_iter.c __v.iov_len = min(n, __p->iov_len - skip); \ n 24 lib/iov_iter.c n -= __v.iov_len; \ n 28 lib/iov_iter.c while (unlikely(!left && n)) { \ n 30 lib/iov_iter.c __v.iov_len = min(n, __p->iov_len); \ n 37 lib/iov_iter.c n -= __v.iov_len; \ n 39 lib/iov_iter.c n = wanted - n; \ n 42 lib/iov_iter.c #define iterate_kvec(i, n, __v, __p, skip, STEP) { \ n 43 lib/iov_iter.c size_t wanted = n; \ n 45 lib/iov_iter.c __v.iov_len = min(n, __p->iov_len - skip); \ n 50 lib/iov_iter.c n -= __v.iov_len; \ n 52 lib/iov_iter.c while (unlikely(n)) { \ n 54 lib/iov_iter.c __v.iov_len = min(n, __p->iov_len); \ n 60 lib/iov_iter.c n -= __v.iov_len; \ n 62 lib/iov_iter.c n = wanted; \ n 65 lib/iov_iter.c #define iterate_bvec(i, n, __v, __bi, skip, STEP) { \ n 67 lib/iov_iter.c __start.bi_size = n; \ n 77 lib/iov_iter.c #define iterate_all_kinds(i, n, v, I, B, K) { \ n 78 lib/iov_iter.c if (likely(n)) { \ n 83 lib/iov_iter.c iterate_bvec(i, n, v, __bi, skip, (B)) \ n 87 lib/iov_iter.c iterate_kvec(i, n, v, kvec, skip, (K)) \ n 92 lib/iov_iter.c iterate_iovec(i, n, v, iov, skip, (I)) \ n 97 lib/iov_iter.c #define iterate_and_advance(i, n, v, I, B, K) { \ n 98 lib/iov_iter.c if (unlikely(i->count < n)) \ n 99 lib/iov_iter.c n = i->count; \ n 106 lib/iov_iter.c iterate_bvec(i, n, v, __bi, skip, (B)) \ n 113 lib/iov_iter.c iterate_kvec(i, n, v, kvec, skip, (K)) \ n 121 lib/iov_iter.c skip += n; \ n 125 lib/iov_iter.c iterate_iovec(i, n, v, iov, skip, (I)) \ n 133 lib/iov_iter.c i->count -= n; \ n 138 lib/iov_iter.c static int copyout(void __user *to, const void *from, size_t n) n 140 lib/iov_iter.c if (access_ok(to, n)) { n 141 lib/iov_iter.c kasan_check_read(from, n); n 142 lib/iov_iter.c n = raw_copy_to_user(to, from, n); n 144 lib/iov_iter.c return n; n 147 lib/iov_iter.c static int copyin(void *to, const void __user *from, size_t n) n 149 lib/iov_iter.c if (access_ok(from, n)) { n 150 lib/iov_iter.c kasan_check_write(to, n); n 151 lib/iov_iter.c n = raw_copy_from_user(to, from, n); n 153 lib/iov_iter.c return n; n 544 lib/iov_iter.c size_t n, off; n 550 lib/iov_iter.c bytes = n = push_pipe(i, bytes, &idx, &off); n 551 lib/iov_iter.c if (unlikely(!n)) n 553 lib/iov_iter.c for ( ; n; idx = next_idx(idx, pipe), off = 0) { n 554 lib/iov_iter.c size_t chunk = min_t(size_t, n, PAGE_SIZE - off); n 558 lib/iov_iter.c n -= chunk; n 576 lib/iov_iter.c size_t n, r; n 584 lib/iov_iter.c bytes = n = push_pipe(i, bytes, &idx, &r); n 585 lib/iov_iter.c if (unlikely(!n)) n 587 lib/iov_iter.c for ( ; n; idx = next_idx(idx, pipe), r = 0) { n 588 lib/iov_iter.c size_t chunk = min_t(size_t, n, PAGE_SIZE - r); n 594 lib/iov_iter.c n -= chunk; n 622 lib/iov_iter.c static int copyout_mcsafe(void __user *to, const void *from, size_t n) n 624 lib/iov_iter.c if (access_ok(to, n)) { n 625 lib/iov_iter.c 
kasan_check_read(from, n); n 626 lib/iov_iter.c n = copy_to_user_mcsafe((__force void *) to, from, n); n 628 lib/iov_iter.c return n; n 648 lib/iov_iter.c size_t n, off, xfer = 0; n 654 lib/iov_iter.c bytes = n = push_pipe(i, bytes, &idx, &off); n 655 lib/iov_iter.c if (unlikely(!n)) n 657 lib/iov_iter.c for ( ; n; idx = next_idx(idx, pipe), off = 0) { n 658 lib/iov_iter.c size_t chunk = min_t(size_t, n, PAGE_SIZE - off); n 668 lib/iov_iter.c n -= chunk; n 863 lib/iov_iter.c static inline bool page_copy_sane(struct page *page, size_t offset, size_t n) n 866 lib/iov_iter.c size_t v = n + offset; n 875 lib/iov_iter.c if (n <= v && v <= PAGE_SIZE) n 881 lib/iov_iter.c if (likely(n <= v && v <= (page_size(head)))) n 928 lib/iov_iter.c size_t n, off; n 934 lib/iov_iter.c bytes = n = push_pipe(i, bytes, &idx, &off); n 935 lib/iov_iter.c if (unlikely(!n)) n 938 lib/iov_iter.c for ( ; n; idx = next_idx(idx, pipe), off = 0) { n 939 lib/iov_iter.c size_t chunk = min_t(size_t, n, PAGE_SIZE - off); n 943 lib/iov_iter.c n -= chunk; n 1059 lib/iov_iter.c size_t n = off - pipe->bufs[idx].offset; n 1060 lib/iov_iter.c if (unroll < n) { n 1064 lib/iov_iter.c unroll -= n; n 1088 lib/iov_iter.c size_t n = (--bvec)->bv_len; n 1090 lib/iov_iter.c if (unroll <= n) { n 1092 lib/iov_iter.c i->iov_offset = n - unroll; n 1095 lib/iov_iter.c unroll -= n; n 1100 lib/iov_iter.c size_t n = (--iov)->iov_len; n 1102 lib/iov_iter.c if (unroll <= n) { n 1104 lib/iov_iter.c i->iov_offset = n - unroll; n 1107 lib/iov_iter.c unroll -= n; n 1238 lib/iov_iter.c ssize_t n = push_pipe(i, maxsize, &idx, start); n 1239 lib/iov_iter.c if (!n) n 1242 lib/iov_iter.c maxsize = n; n 1243 lib/iov_iter.c n += *start; n 1244 lib/iov_iter.c while (n > 0) { n 1247 lib/iov_iter.c n -= PAGE_SIZE; n 1290 lib/iov_iter.c int n; n 1296 lib/iov_iter.c n = DIV_ROUND_UP(len, PAGE_SIZE); n 1297 lib/iov_iter.c res = get_user_pages_fast(addr, n, n 1302 lib/iov_iter.c return (res == n ? len : res * PAGE_SIZE) - *start; n 1316 lib/iov_iter.c static struct page **get_pages_array(size_t n) n 1318 lib/iov_iter.c return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL); n 1326 lib/iov_iter.c ssize_t n; n 1339 lib/iov_iter.c n = npages * PAGE_SIZE - *start; n 1340 lib/iov_iter.c if (maxsize > n) n 1341 lib/iov_iter.c maxsize = n; n 1347 lib/iov_iter.c n = __pipe_get_pages(i, maxsize, p, idx, start); n 1348 lib/iov_iter.c if (n > 0) n 1352 lib/iov_iter.c return n; n 1372 lib/iov_iter.c int n; n 1376 lib/iov_iter.c n = DIV_ROUND_UP(len, PAGE_SIZE); n 1377 lib/iov_iter.c p = get_pages_array(n); n 1380 lib/iov_iter.c res = get_user_pages_fast(addr, n, n 1387 lib/iov_iter.c return (res == n ? 
len : res * PAGE_SIZE) - *start; n 1643 lib/iov_iter.c ssize_t n; n 1645 lib/iov_iter.c n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs, n 1647 lib/iov_iter.c if (n < 0) { n 1651 lib/iov_iter.c return n; n 1653 lib/iov_iter.c iov_iter_init(i, type, p, nr_segs, n); n 1655 lib/iov_iter.c return n; n 1667 lib/iov_iter.c ssize_t n; n 1669 lib/iov_iter.c n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs, n 1671 lib/iov_iter.c if (n < 0) { n 1675 lib/iov_iter.c return n; n 1677 lib/iov_iter.c iov_iter_init(i, type, p, nr_segs, n); n 1679 lib/iov_iter.c return n; n 298 lib/kfifo.c int n; n 309 lib/kfifo.c n = 0; n 323 lib/kfifo.c if (++n == nents || sgl == NULL) n 324 lib/kfifo.c return n; n 331 lib/kfifo.c return n + 1; n 340 lib/kfifo.c unsigned int n; n 350 lib/kfifo.c n = setup_sgl_buf(sgl, fifo->data + off, nents, l); n 351 lib/kfifo.c n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l); n 353 lib/kfifo.c return n; n 421 lib/kfifo.c static void __kfifo_poke_n(struct __kfifo *fifo, unsigned int n, size_t recsize) n 426 lib/kfifo.c __KFIFO_POKE(data, fifo->in, mask, n); n 429 lib/kfifo.c __KFIFO_POKE(data, fifo->in + 1, mask, n >> 8); n 453 lib/kfifo.c void *buf, unsigned int len, size_t recsize, unsigned int *n) n 455 lib/kfifo.c *n = __kfifo_peek_n(fifo, recsize); n 457 lib/kfifo.c if (len > *n) n 458 lib/kfifo.c len = *n; n 467 lib/kfifo.c unsigned int n; n 472 lib/kfifo.c return kfifo_out_copy_r(fifo, buf, len, recsize, &n); n 479 lib/kfifo.c unsigned int n; n 484 lib/kfifo.c len = kfifo_out_copy_r(fifo, buf, len, recsize, &n); n 485 lib/kfifo.c fifo->out += n + recsize; n 492 lib/kfifo.c unsigned int n; n 494 lib/kfifo.c n = __kfifo_peek_n(fifo, recsize); n 495 lib/kfifo.c fifo->out += n + recsize; n 527 lib/kfifo.c unsigned int n; n 534 lib/kfifo.c n = __kfifo_peek_n(fifo, recsize); n 535 lib/kfifo.c if (len > n) n 536 lib/kfifo.c len = n; n 543 lib/kfifo.c fifo->out += n + recsize; n 94 lib/klist.c static void add_head(struct klist *k, struct klist_node *n) n 97 lib/klist.c list_add(&n->n_node, &k->k_list); n 101 lib/klist.c static void add_tail(struct klist *k, struct klist_node *n) n 104 lib/klist.c list_add_tail(&n->n_node, &k->k_list); n 108 lib/klist.c static void klist_node_init(struct klist *k, struct klist_node *n) n 110 lib/klist.c INIT_LIST_HEAD(&n->n_node); n 111 lib/klist.c kref_init(&n->n_ref); n 112 lib/klist.c knode_set_klist(n, k); n 114 lib/klist.c k->get(n); n 122 lib/klist.c void klist_add_head(struct klist_node *n, struct klist *k) n 124 lib/klist.c klist_node_init(k, n); n 125 lib/klist.c add_head(k, n); n 134 lib/klist.c void klist_add_tail(struct klist_node *n, struct klist *k) n 136 lib/klist.c klist_node_init(k, n); n 137 lib/klist.c add_tail(k, n); n 146 lib/klist.c void klist_add_behind(struct klist_node *n, struct klist_node *pos) n 150 lib/klist.c klist_node_init(k, n); n 152 lib/klist.c list_add(&n->n_node, &pos->n_node); n 162 lib/klist.c void klist_add_before(struct klist_node *n, struct klist_node *pos) n 166 lib/klist.c klist_node_init(k, n); n 168 lib/klist.c list_add_tail(&n->n_node, &pos->n_node); n 186 lib/klist.c struct klist_node *n = container_of(kref, struct klist_node, n_ref); n 188 lib/klist.c WARN_ON(!knode_dead(n)); n 189 lib/klist.c list_del(&n->n_node); n 192 lib/klist.c if (waiter->node != n) n 201 lib/klist.c knode_set_klist(n, NULL); n 204 lib/klist.c static int klist_dec_and_del(struct klist_node *n) n 206 lib/klist.c return kref_put(&n->n_ref, klist_release); n 209 lib/klist.c static void 
klist_put(struct klist_node *n, bool kill) n 211 lib/klist.c struct klist *k = knode_klist(n); n 216 lib/klist.c knode_kill(n); n 217 lib/klist.c if (!klist_dec_and_del(n)) n 221 lib/klist.c put(n); n 228 lib/klist.c void klist_del(struct klist_node *n) n 230 lib/klist.c klist_put(n, true); n 238 lib/klist.c void klist_remove(struct klist_node *n) n 242 lib/klist.c waiter.node = n; n 249 lib/klist.c klist_del(n); n 265 lib/klist.c int klist_node_attached(struct klist_node *n) n 267 lib/klist.c return (n->n_klist != NULL); n 281 lib/klist.c struct klist_node *n) n 285 lib/klist.c if (n && kref_get_unless_zero(&n->n_ref)) n 286 lib/klist.c i->i_cur = n; n 320 lib/klist.c static struct klist_node *to_klist_node(struct list_head *n) n 322 lib/klist.c return container_of(n, struct klist_node, n_node); n 323 lib/lru_cache.c struct list_head *n; n 327 lib/lru_cache.c n = lc->free.next; n 329 lib/lru_cache.c n = lc->lru.prev; n 333 lib/lru_cache.c e = list_entry(n, struct lc_element, list); n 29 lib/math/div64.c uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base) n 31 lib/math/div64.c uint64_t rem = *n; n 58 lib/math/div64.c *n = res; n 112 lib/math/div64.c int n = fls(high); n 113 lib/math/div64.c quot = div_u64(dividend >> n, divisor >> n); n 150 lib/math/div64.c int n = fls(high); n 151 lib/math/div64.c quot = div_u64(dividend >> n, divisor >> n); n 36 lib/math/rational.c unsigned long n, d, n0, d0, n1, d1; n 37 lib/math/rational.c n = given_numerator; n 51 lib/math/rational.c a = n / d; n 52 lib/math/rational.c d = n % d; n 53 lib/math/rational.c n = t; n 41 lib/mpi/mpi-bit.c unsigned n; n 48 lib/mpi/mpi-bit.c n = count_leading_zeros(alimb); n 50 lib/mpi/mpi-bit.c n = BITS_PER_MPI_LIMB; n 51 lib/mpi/mpi-bit.c n = BITS_PER_MPI_LIMB - n + (a->nlimbs - 1) * BITS_PER_MPI_LIMB; n 53 lib/mpi/mpi-bit.c n = 0; n 54 lib/mpi/mpi-bit.c return n; n 56 lib/mpi/mpi-internal.h #define MPN_COPY(d, s, n) \ n 59 lib/mpi/mpi-internal.h for (_i = 0; _i < (n); _i++) \ n 63 lib/mpi/mpi-internal.h #define MPN_COPY_DECR(d, s, n) \ n 66 lib/mpi/mpi-internal.h for (_i = (n)-1; _i >= 0; _i--) \ n 71 lib/mpi/mpi-internal.h #define MPN_ZERO(d, n) \ n 74 lib/mpi/mpi-internal.h for (_i = 0; _i < (n); _i++) \ n 78 lib/mpi/mpi-internal.h #define MPN_NORMALIZE(d, n) \ n 80 lib/mpi/mpi-internal.h while ((n) > 0) { \ n 81 lib/mpi/mpi-internal.h if ((d)[(n)-1]) \ n 83 lib/mpi/mpi-internal.h (n)--; \ n 154 lib/mpi/mpicoder.c unsigned int n = mpi_get_size(a); n 165 lib/mpi/mpicoder.c if (buf_len < n - lzeros) { n 166 lib/mpi/mpicoder.c *nbytes = n - lzeros; n 171 lib/mpi/mpicoder.c *nbytes = n - lzeros; n 206 lib/mpi/mpicoder.c unsigned int n; n 212 lib/mpi/mpicoder.c n = mpi_get_size(a); n 214 lib/mpi/mpicoder.c if (!n) n 215 lib/mpi/mpicoder.c n++; n 217 lib/mpi/mpicoder.c buf = kmalloc(n, GFP_KERNEL); n 222 lib/mpi/mpicoder.c ret = mpi_read_buffer(a, buf, n, nbytes, sign); n 258 lib/mpi/mpicoder.c unsigned int n = mpi_get_size(a); n 266 lib/mpi/mpicoder.c if (nbytes < n) n 278 lib/mpi/mpicoder.c while (nbytes > n) { n 279 lib/mpi/mpicoder.c i = min_t(unsigned, nbytes - n, buf_len); n 440 lib/nlattr.c nla_policy_len(const struct nla_policy *p, int n) n 444 lib/nlattr.c for (i = 0; i < n; i++, p++) { n 110 lib/oid_registry.c unsigned char n; n 117 lib/oid_registry.c n = *v++; n 118 lib/oid_registry.c ret = count = snprintf(buffer, bufsize, "%u.%u", n / 40, n % 40); n 126 lib/oid_registry.c n = *v++; n 127 lib/oid_registry.c if (!(n & 0x80)) { n 128 lib/oid_registry.c num = n; n 130 lib/oid_registry.c num = n & 
0x7f; n 134 lib/oid_registry.c n = *v++; n 136 lib/oid_registry.c num |= n & 0x7f; n 137 lib/oid_registry.c } while (n & 0x80); n 33 lib/plist.c struct list_head *n) n 35 lib/plist.c WARN(n->prev != p || p->next != n, n 41 lib/plist.c n, n->next, n->prev); n 468 lib/rbtree.c struct rb_node *n; n 470 lib/rbtree.c n = root->rb_node; n 471 lib/rbtree.c if (!n) n 473 lib/rbtree.c while (n->rb_left) n 474 lib/rbtree.c n = n->rb_left; n 475 lib/rbtree.c return n; n 481 lib/rbtree.c struct rb_node *n; n 483 lib/rbtree.c n = root->rb_node; n 484 lib/rbtree.c if (!n) n 486 lib/rbtree.c while (n->rb_right) n 487 lib/rbtree.c n = n->rb_right; n 488 lib/rbtree.c return n; n 173 lib/rbtree_test.c struct test_node *cur, *n; n 175 lib/rbtree_test.c rbtree_postorder_for_each_entry_safe(cur, n, &root.rb_root, rb) n 58 lib/sort.c static void swap_words_32(void *a, void *b, size_t n) n 61 lib/sort.c u32 t = *(u32 *)(a + (n -= 4)); n 62 lib/sort.c *(u32 *)(a + n) = *(u32 *)(b + n); n 63 lib/sort.c *(u32 *)(b + n) = t; n 64 lib/sort.c } while (n); n 83 lib/sort.c static void swap_words_64(void *a, void *b, size_t n) n 87 lib/sort.c u64 t = *(u64 *)(a + (n -= 8)); n 88 lib/sort.c *(u64 *)(a + n) = *(u64 *)(b + n); n 89 lib/sort.c *(u64 *)(b + n) = t; n 92 lib/sort.c u32 t = *(u32 *)(a + (n -= 4)); n 93 lib/sort.c *(u32 *)(a + n) = *(u32 *)(b + n); n 94 lib/sort.c *(u32 *)(b + n) = t; n 96 lib/sort.c t = *(u32 *)(a + (n -= 4)); n 97 lib/sort.c *(u32 *)(a + n) = *(u32 *)(b + n); n 98 lib/sort.c *(u32 *)(b + n) = t; n 100 lib/sort.c } while (n); n 111 lib/sort.c static void swap_bytes(void *a, void *b, size_t n) n 114 lib/sort.c char t = ((char *)a)[--n]; n 115 lib/sort.c ((char *)a)[n] = ((char *)b)[n]; n 116 lib/sort.c ((char *)b)[n] = t; n 117 lib/sort.c } while (n); n 210 lib/sort.c size_t n = num * size, a = (num/2) * size; n 237 lib/sort.c else if (n -= size) /* Sorting: Extract root to --n */ n 238 lib/sort.c do_swap(base, base + n, size, swap_func); n 254 lib/sort.c for (b = a; c = 2*b + size, (d = c + size) < n;) n 256 lib/sort.c if (d == n) /* Special case last leaf with no sibling */ n 167 lib/stackdepot.c unsigned int n) n 169 lib/stackdepot.c for ( ; n-- ; u1++, u2++) { n 688 lib/string.c int match_string(const char * const *array, size_t n, const char *string) n 693 lib/string.c for (index = 0; index < n; index++) { n 714 lib/string.c int __sysfs_match_string(const char * const *array, size_t n, const char *str) n 719 lib/string.c for (index = 0; index < n; index++) { n 997 lib/string.c void *memchr(const void *s, int c, size_t n) n 1000 lib/string.c while (n-- != 0) { n 160 lib/test_vmalloc.c unsigned int n; n 165 lib/test_vmalloc.c get_random_bytes(&n, sizeof(i)); n 166 lib/test_vmalloc.c n = (n % 100) + 1; n 168 lib/test_vmalloc.c p = vmalloc(n * PAGE_SIZE); n 365 lib/test_vmalloc.c static void shuffle_array(int *arr, int n) n 370 lib/test_vmalloc.c for (i = n - 1; i > 0; i--) { n 550 lib/test_xarray.c int n = 0; n 557 lib/test_xarray.c n++; n 559 lib/test_xarray.c XA_BUG_ON(xa, n != 1); n 563 lib/test_xarray.c n++; n 565 lib/test_xarray.c XA_BUG_ON(xa, n != 2); n 8 lib/usercopy.c unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n) n 10 lib/usercopy.c unsigned long res = n; n 12 lib/usercopy.c if (likely(access_ok(from, n))) { n 13 lib/usercopy.c kasan_check_write(to, n); n 14 lib/usercopy.c res = raw_copy_from_user(to, from, n); n 17 lib/usercopy.c memset(to + (n - res), 0, res); n 24 lib/usercopy.c unsigned long _copy_to_user(void __user *to, const void *from, 
unsigned long n) n 27 lib/usercopy.c if (likely(access_ok(to, n))) { n 28 lib/usercopy.c kasan_check_read(from, n); n 29 lib/usercopy.c n = raw_copy_to_user(to, from, n); n 31 lib/usercopy.c return n; n 253 lib/vsprintf.c char *put_dec(char *buf, unsigned long long n) n 255 lib/vsprintf.c if (n >= 100*1000*1000) n 256 lib/vsprintf.c buf = put_dec_full8(buf, do_div(n, 100*1000*1000)); n 258 lib/vsprintf.c if (n >= 100*1000*1000) n 259 lib/vsprintf.c buf = put_dec_full8(buf, do_div(n, 100*1000*1000)); n 261 lib/vsprintf.c return put_dec_trunc8(buf, n); n 301 lib/vsprintf.c char *put_dec(char *buf, unsigned long long n) n 305 lib/vsprintf.c if (n < 100*1000*1000) n 306 lib/vsprintf.c return put_dec_trunc8(buf, n); n 308 lib/vsprintf.c d1 = ((uint32_t)n >> 16); /* implicit "& 0xffff" */ n 309 lib/vsprintf.c h = (n >> 32); n 315 lib/vsprintf.c q = 656 * d3 + 7296 * d2 + 5536 * d1 + ((uint32_t)n & 0xffff); n 577 lib/vsprintf.c char *widen_string(char *buf, int n, char *end, struct printf_spec spec) n 581 lib/vsprintf.c if (likely(n >= spec.field_width)) n 584 lib/vsprintf.c spaces = spec.field_width - n; n 586 lib/vsprintf.c move_right(buf - n, end, n, spaces); n 837 lib/vsprintf.c int i, n; n 864 lib/vsprintf.c for (n = 0; n != spec.precision; n++, buf++) { n 876 lib/vsprintf.c return widen_string(buf, n, end, spec); n 1897 lib/xarray.c unsigned long max, unsigned int n) n 1907 lib/xarray.c if (i == n) n 1916 lib/xarray.c unsigned long max, unsigned int n, xa_mark_t mark) n 1926 lib/xarray.c if (i == n) n 1963 lib/xarray.c unsigned long max, unsigned int n, xa_mark_t filter) n 1967 lib/xarray.c if (!n) n 1971 lib/xarray.c return xas_extract_marked(&xas, dst, max, n, filter); n 1972 lib/xarray.c return xas_extract_present(&xas, dst, max, n); n 737 lib/zlib_deflate/deflate.c register unsigned n, m; n 772 lib/zlib_deflate/deflate.c n = s->hash_size; n 773 lib/zlib_deflate/deflate.c p = &s->head[n]; n 777 lib/zlib_deflate/deflate.c } while (--n); n 779 lib/zlib_deflate/deflate.c n = wsize; n 780 lib/zlib_deflate/deflate.c p = &s->prev[n]; n 787 lib/zlib_deflate/deflate.c } while (--n); n 805 lib/zlib_deflate/deflate.c n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more); n 806 lib/zlib_deflate/deflate.c s->lookahead += n; n 228 lib/zlib_deflate/deftree.c int n; /* iterates over tree elements */ n 242 lib/zlib_deflate/deftree.c for (n = 0; n < (1<<extra_lbits[code]); n++) { n 257 lib/zlib_deflate/deftree.c for (n = 0; n < (1<<extra_dbits[code]); n++) { n 265 lib/zlib_deflate/deftree.c for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) { n 273 lib/zlib_deflate/deftree.c n = 0; n 274 lib/zlib_deflate/deftree.c while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++; n 275 lib/zlib_deflate/deftree.c while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++; n 276 lib/zlib_deflate/deftree.c while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++; n 277 lib/zlib_deflate/deftree.c while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++; n 285 lib/zlib_deflate/deftree.c for (n = 0; n < D_CODES; n++) { n 286 lib/zlib_deflate/deftree.c static_dtree[n].Len = 5; n 287 lib/zlib_deflate/deftree.c static_dtree[n].Code = bitrev32((u32)n) >> (32 - 5); n 330 lib/zlib_deflate/deftree.c int n; /* iterates over tree elements */ n 333 lib/zlib_deflate/deftree.c for (n = 0; n < L_CODES; n++) s->dyn_ltree[n].Freq = 0; n 334 lib/zlib_deflate/deftree.c for (n = 0; n < D_CODES; n++) s->dyn_dtree[n].Freq = 0; n 335 lib/zlib_deflate/deftree.c for (n = 0; n < BL_CODES; n++) s->bl_tree[n].Freq = 0; n 361 
lib/zlib_deflate/deftree.c #define smaller(tree, n, m, depth) \ n 362 lib/zlib_deflate/deftree.c (tree[n].Freq < tree[m].Freq || \ n 363 lib/zlib_deflate/deftree.c (tree[n].Freq == tree[m].Freq && depth[n] <= depth[m])) n 419 lib/zlib_deflate/deftree.c int n, m; /* iterate over the tree elements */ n 433 lib/zlib_deflate/deftree.c n = s->heap[h]; n 434 lib/zlib_deflate/deftree.c bits = tree[tree[n].Dad].Len + 1; n 436 lib/zlib_deflate/deftree.c tree[n].Len = (ush)bits; n 439 lib/zlib_deflate/deftree.c if (n > max_code) continue; /* not a leaf node */ n 443 lib/zlib_deflate/deftree.c if (n >= base) xbits = extra[n-base]; n 444 lib/zlib_deflate/deftree.c f = tree[n].Freq; n 446 lib/zlib_deflate/deftree.c if (stree) s->static_len += (ulg)f * (stree[n].Len + xbits); n 472 lib/zlib_deflate/deftree.c n = s->bl_count[bits]; n 473 lib/zlib_deflate/deftree.c while (n != 0) { n 482 lib/zlib_deflate/deftree.c n--; n 504 lib/zlib_deflate/deftree.c int n; /* code index */ n 519 lib/zlib_deflate/deftree.c for (n = 0; n <= max_code; n++) { n 520 lib/zlib_deflate/deftree.c int len = tree[n].Len; n 523 lib/zlib_deflate/deftree.c tree[n].Code = bitrev32((u32)(next_code[len]++)) >> (32 - len); n 526 lib/zlib_deflate/deftree.c n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1)); n 546 lib/zlib_deflate/deftree.c int n, m; /* iterate over heap elements */ n 556 lib/zlib_deflate/deftree.c for (n = 0; n < elems; n++) { n 557 lib/zlib_deflate/deftree.c if (tree[n].Freq != 0) { n 558 lib/zlib_deflate/deftree.c s->heap[++(s->heap_len)] = max_code = n; n 559 lib/zlib_deflate/deftree.c s->depth[n] = 0; n 561 lib/zlib_deflate/deftree.c tree[n].Len = 0; n 582 lib/zlib_deflate/deftree.c for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n); n 589 lib/zlib_deflate/deftree.c pqremove(s, tree, n); /* n = node of least frequency */ n 592 lib/zlib_deflate/deftree.c s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */ n 596 lib/zlib_deflate/deftree.c tree[node].Freq = tree[n].Freq + tree[m].Freq; n 597 lib/zlib_deflate/deftree.c s->depth[node] = (uch) (max(s->depth[n], s->depth[m]) + 1); n 598 lib/zlib_deflate/deftree.c tree[n].Dad = tree[m].Dad = (ush)node; n 602 lib/zlib_deflate/deftree.c node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq); n 632 lib/zlib_deflate/deftree.c int n; /* iterates over all tree elements */ n 643 lib/zlib_deflate/deftree.c for (n = 0; n <= max_code; n++) { n 644 lib/zlib_deflate/deftree.c curlen = nextlen; nextlen = tree[n+1].Len; n 678 lib/zlib_deflate/deftree.c int n; /* iterates over all tree elements */ n 689 lib/zlib_deflate/deftree.c for (n = 0; n <= max_code; n++) { n 690 lib/zlib_deflate/deftree.c curlen = nextlen; nextlen = tree[n+1].Len; n 1076 lib/zlib_deflate/deftree.c int n = 0; n 1079 lib/zlib_deflate/deftree.c while (n < 7) bin_freq += s->dyn_ltree[n++].Freq; n 1080 lib/zlib_deflate/deftree.c while (n < 128) ascii_freq += s->dyn_ltree[n++].Freq; n 1081 lib/zlib_deflate/deftree.c while (n < LITERALS) bin_freq += s->dyn_ltree[n++].Freq; n 206 lib/zlib_inflate/inflate.c #define NEEDBITS(n) \ n 208 lib/zlib_inflate/inflate.c while (bits < (unsigned)(n)) \ n 213 lib/zlib_inflate/inflate.c #define BITS(n) \ n 214 lib/zlib_inflate/inflate.c ((unsigned)hold & ((1U << (n)) - 1)) n 217 lib/zlib_inflate/inflate.c #define DROPBITS(n) \ n 219 lib/zlib_inflate/inflate.c hold >>= (n); \ n 220 lib/zlib_inflate/inflate.c bits -= (unsigned)(n); \ n 782 lib/zstd/compress.c size_t n; n 783 lib/zstd/compress.c for (n = nbSeq - 2; n < nbSeq; n--) { /* intentional 
underflow */ n 784 lib/zstd/compress.c BYTE const llCode = llCodeTable[n]; n 785 lib/zstd/compress.c BYTE const ofCode = ofCodeTable[n]; n 786 lib/zstd/compress.c BYTE const mlCode = mlCodeTable[n]; n 798 lib/zstd/compress.c BIT_addBits(&blockStream, sequences[n].litLength, llBits); n 801 lib/zstd/compress.c BIT_addBits(&blockStream, sequences[n].matchLength, mlBits); n 807 lib/zstd/compress.c BIT_addBits(&blockStream, sequences[n].offset, extraBits); n 810 lib/zstd/compress.c BIT_addBits(&blockStream, sequences[n].offset >> extraBits, ofBits - extraBits); /* 31 */ n 812 lib/zstd/compress.c BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */ n 188 lib/zstd/entropy_common.c U32 n; n 189 lib/zstd/entropy_common.c for (n = 0; n < oSize; n += 2) { n 190 lib/zstd/entropy_common.c huffWeight[n] = ip[n / 2] >> 4; n 191 lib/zstd/entropy_common.c huffWeight[n + 1] = ip[n / 2] & 15; n 206 lib/zstd/entropy_common.c U32 n; n 207 lib/zstd/entropy_common.c for (n = 0; n < oSize; n++) { n 208 lib/zstd/entropy_common.c if (huffWeight[n] >= HUF_TABLELOG_MAX) n 210 lib/zstd/entropy_common.c rankStats[huffWeight[n]]++; n 211 lib/zstd/entropy_common.c weightTotal += (1 << huffWeight[n]) >> 1; n 155 lib/zstd/huf_compress.c U32 n; n 177 lib/zstd/huf_compress.c for (n = 1; n < huffLog + 1; n++) n 178 lib/zstd/huf_compress.c bitsToWeight[n] = (BYTE)(huffLog + 1 - n); n 179 lib/zstd/huf_compress.c for (n = 0; n < maxSymbolValue; n++) n 180 lib/zstd/huf_compress.c huffWeight[n] = bitsToWeight[CTable[n].nbBits]; n 198 lib/zstd/huf_compress.c for (n = 0; n < maxSymbolValue; n += 2) n 199 lib/zstd/huf_compress.c op[(n / 2) + 1] = (BYTE)((huffWeight[n] << 4) + huffWeight[n + 1]); n 235 lib/zstd/huf_compress.c U32 n, nextRankStart = 0; n 236 lib/zstd/huf_compress.c for (n = 1; n <= tableLog; n++) { n 238 lib/zstd/huf_compress.c nextRankStart += (rankVal[n] << (n - 1)); n 239 lib/zstd/huf_compress.c rankVal[n] = curr; n 245 lib/zstd/huf_compress.c U32 n; n 246 lib/zstd/huf_compress.c for (n = 0; n < nbSymbols; n++) { n 247 lib/zstd/huf_compress.c const U32 w = huffWeight[n]; n 248 lib/zstd/huf_compress.c CTable[n].nbBits = (BYTE)(tableLog + 1 - w); n 257 lib/zstd/huf_compress.c U32 n; n 258 lib/zstd/huf_compress.c for (n = 0; n < nbSymbols; n++) n 259 lib/zstd/huf_compress.c nbPerRank[CTable[n].nbBits]++; n 265 lib/zstd/huf_compress.c U32 n; n 266 lib/zstd/huf_compress.c for (n = tableLog; n > 0; n--) { /* start at n=tablelog <-> w=1 */ n 267 lib/zstd/huf_compress.c valPerRank[n] = min; /* get starting value within each rank */ n 268 lib/zstd/huf_compress.c min += nbPerRank[n]; n 274 lib/zstd/huf_compress.c U32 n; n 275 lib/zstd/huf_compress.c for (n = 0; n <= maxSymbolValue; n++) n 276 lib/zstd/huf_compress.c CTable[n].val = valPerRank[CTable[n].nbBits]++; n 300 lib/zstd/huf_compress.c U32 n = lastNonNull; n 302 lib/zstd/huf_compress.c while (huffNode[n].nbBits > maxNbBits) { n 303 lib/zstd/huf_compress.c totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)); n 304 lib/zstd/huf_compress.c huffNode[n].nbBits = (BYTE)maxNbBits; n 305 lib/zstd/huf_compress.c n--; n 307 lib/zstd/huf_compress.c while (huffNode[n].nbBits == maxNbBits) n 308 lib/zstd/huf_compress.c n--; /* n end at index of smallest symbol using < maxNbBits */ n 323 lib/zstd/huf_compress.c for (pos = n; pos >= 0; pos--) { n 367 lib/zstd/huf_compress.c while (huffNode[n].nbBits == maxNbBits) n 368 lib/zstd/huf_compress.c n--; n 369 lib/zstd/huf_compress.c huffNode[n + 1].nbBits--; n 370 lib/zstd/huf_compress.c rankLast[1] = n + 1; n 392 
lib/zstd/huf_compress.c U32 n; n 395 lib/zstd/huf_compress.c for (n = 0; n <= maxSymbolValue; n++) { n 396 lib/zstd/huf_compress.c U32 r = BIT_highbit32(count[n] + 1); n 399 lib/zstd/huf_compress.c for (n = 30; n > 0; n--) n 400 lib/zstd/huf_compress.c rank[n - 1].base += rank[n].base; n 401 lib/zstd/huf_compress.c for (n = 0; n < 32; n++) n 402 lib/zstd/huf_compress.c rank[n].curr = rank[n].base; n 403 lib/zstd/huf_compress.c for (n = 0; n <= maxSymbolValue; n++) { n 404 lib/zstd/huf_compress.c U32 const c = count[n]; n 410 lib/zstd/huf_compress.c huffNode[pos].byte = (BYTE)n; n 424 lib/zstd/huf_compress.c U32 n, nonNullRank; n 452 lib/zstd/huf_compress.c for (n = nodeNb; n <= nodeRoot; n++) n 453 lib/zstd/huf_compress.c huffNode[n].count = (U32)(1U << 30); n 467 lib/zstd/huf_compress.c for (n = nodeRoot - 1; n >= STARTNODE; n--) n 468 lib/zstd/huf_compress.c huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1; n 469 lib/zstd/huf_compress.c for (n = 0; n <= nonNullRank; n++) n 470 lib/zstd/huf_compress.c huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1; n 481 lib/zstd/huf_compress.c for (n = 0; n <= nonNullRank; n++) n 482 lib/zstd/huf_compress.c nbPerRank[huffNode[n].nbBits]++; n 486 lib/zstd/huf_compress.c for (n = maxNbBits; n > 0; n--) { n 487 lib/zstd/huf_compress.c valPerRank[n] = min; /* get starting value within each rank */ n 488 lib/zstd/huf_compress.c min += nbPerRank[n]; n 492 lib/zstd/huf_compress.c for (n = 0; n <= maxSymbolValue; n++) n 493 lib/zstd/huf_compress.c tree[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */ n 494 lib/zstd/huf_compress.c for (n = 0; n <= maxSymbolValue; n++) n 495 lib/zstd/huf_compress.c tree[n].val = valPerRank[tree[n].nbBits]++; /* assign value within rank, symbol order */ n 544 lib/zstd/huf_compress.c size_t n; n 556 lib/zstd/huf_compress.c n = srcSize & ~3; /* join to mod 4 */ n 558 lib/zstd/huf_compress.c case 3: HUF_encodeSymbol(&bitC, ip[n + 2], CTable); HUF_FLUSHBITS_2(&bitC); n 560 lib/zstd/huf_compress.c case 2: HUF_encodeSymbol(&bitC, ip[n + 1], CTable); HUF_FLUSHBITS_1(&bitC); n 562 lib/zstd/huf_compress.c case 1: HUF_encodeSymbol(&bitC, ip[n + 0], CTable); HUF_FLUSHBITS(&bitC); n 567 lib/zstd/huf_compress.c for (; n > 0; n -= 4) { /* note : n&3==0 at this stage */ n 568 lib/zstd/huf_compress.c HUF_encodeSymbol(&bitC, ip[n - 1], CTable); n 570 lib/zstd/huf_compress.c HUF_encodeSymbol(&bitC, ip[n - 2], CTable); n 572 lib/zstd/huf_compress.c HUF_encodeSymbol(&bitC, ip[n - 3], CTable); n 574 lib/zstd/huf_compress.c HUF_encodeSymbol(&bitC, ip[n - 4], CTable); n 131 lib/zstd/huf_decompress.c U32 n, nextRankStart = 0; n 132 lib/zstd/huf_decompress.c for (n = 1; n < tableLog + 1; n++) { n 134 lib/zstd/huf_decompress.c nextRankStart += (rankVal[n] << (n - 1)); n 135 lib/zstd/huf_decompress.c rankVal[n] = curr; n 141 lib/zstd/huf_decompress.c U32 n; n 142 lib/zstd/huf_decompress.c for (n = 0; n < nbSymbols; n++) { n 143 lib/zstd/huf_decompress.c U32 const w = huffWeight[n]; n 147 lib/zstd/huf_decompress.c D.byte = (BYTE)n; n 21 mm/cma_debug.c unsigned long n; n 101 mm/cma_debug.c if (mem->n <= count) { n 102 mm/cma_debug.c cma_release(cma, mem->p, mem->n); n 103 mm/cma_debug.c count -= mem->n; n 108 mm/cma_debug.c mem->n -= count; n 147 mm/cma_debug.c mem->n = count; n 968 mm/filemap.c int n; n 975 mm/filemap.c n = cpuset_mem_spread_node(); n 976 mm/filemap.c page = __alloc_pages_node(n, gfp, 0); n 1617 mm/kmemleak.c loff_t n = *pos; n 1626 mm/kmemleak.c if (n-- > 0) n 216 
mm/list_lru.c struct list_head *item, *n; n 221 mm/list_lru.c list_for_each_safe(item, n, &l->list) { n 4991 mm/memcontrol.c static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n) n 4993 mm/memcontrol.c refcount_add(n, &memcg->id.ref); n 4996 mm/memcontrol.c static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) n 4998 mm/memcontrol.c if (refcount_sub_and_test(n, &memcg->id.ref)) { n 4483 mm/memory.c int i, n, base, l; n 4489 mm/memory.c n = (addr_hint - addr) / PAGE_SIZE; n 4490 mm/memory.c if (2 * n <= pages_per_huge_page) { n 4493 mm/memory.c l = n; n 4495 mm/memory.c for (i = pages_per_huge_page - 1; i >= 2 * n; i--) { n 4501 mm/memory.c base = pages_per_huge_page - 2 * (pages_per_huge_page - n); n 4502 mm/memory.c l = pages_per_huge_page - n; n 1888 mm/mempolicy.c static unsigned offset_il_node(struct mempolicy *pol, unsigned long n) n 1897 mm/mempolicy.c target = (unsigned int)n % nnodes; n 2290 mm/mempolicy.c struct rb_node *n = sp->root.rb_node; n 2292 mm/mempolicy.c while (n) { n 2293 mm/mempolicy.c struct sp_node *p = rb_entry(n, struct sp_node, nd); n 2296 mm/mempolicy.c n = n->rb_right; n 2298 mm/mempolicy.c n = n->rb_left; n 2302 mm/mempolicy.c if (!n) n 2306 mm/mempolicy.c struct rb_node *prev = rb_prev(n); n 2312 mm/mempolicy.c n = prev; n 2314 mm/mempolicy.c return rb_entry(n, struct sp_node, nd); n 2362 mm/mempolicy.c static void sp_free(struct sp_node *n) n 2364 mm/mempolicy.c mpol_put(n->policy); n 2365 mm/mempolicy.c kmem_cache_free(sn_cache, n); n 2468 mm/mempolicy.c static void sp_delete(struct shared_policy *sp, struct sp_node *n) n 2470 mm/mempolicy.c pr_debug("deleting %lx-l%lx\n", n->start, n->end); n 2471 mm/mempolicy.c rb_erase(&n->nd, &sp->root); n 2472 mm/mempolicy.c sp_free(n); n 2486 mm/mempolicy.c struct sp_node *n; n 2489 mm/mempolicy.c n = kmem_cache_alloc(sn_cache, GFP_KERNEL); n 2490 mm/mempolicy.c if (!n) n 2495 mm/mempolicy.c kmem_cache_free(sn_cache, n); n 2499 mm/mempolicy.c sp_node_init(n, start, end, newpol); n 2501 mm/mempolicy.c return n; n 2508 mm/mempolicy.c struct sp_node *n; n 2515 mm/mempolicy.c n = sp_lookup(sp, start, end); n 2517 mm/mempolicy.c while (n && n->start < end) { n 2518 mm/mempolicy.c struct rb_node *next = rb_next(&n->nd); n 2519 mm/mempolicy.c if (n->start >= start) { n 2520 mm/mempolicy.c if (n->end <= end) n 2521 mm/mempolicy.c sp_delete(sp, n); n 2523 mm/mempolicy.c n->start = end; n 2526 mm/mempolicy.c if (n->end > end) { n 2530 mm/mempolicy.c *mpol_new = *n->policy; n 2532 mm/mempolicy.c sp_node_init(n_new, end, n->end, mpol_new); n 2533 mm/mempolicy.c n->end = start; n 2539 mm/mempolicy.c n->end = start; n 2543 mm/mempolicy.c n = rb_entry(next, struct sp_node, nd); n 2646 mm/mempolicy.c struct sp_node *n; n 2654 mm/mempolicy.c n = rb_entry(next, struct sp_node, nd); n 2655 mm/mempolicy.c next = rb_next(&n->nd); n 2656 mm/mempolicy.c sp_delete(p, n); n 5537 mm/page_alloc.c int n, val; n 5548 mm/page_alloc.c for_each_node_state(n, N_MEMORY) { n 5551 mm/page_alloc.c if (node_isset(n, *used_node_mask)) n 5555 mm/page_alloc.c val = node_distance(node, n); n 5558 mm/page_alloc.c val += (n < node); n 5561 mm/page_alloc.c tmp = cpumask_of_node(n); n 5567 mm/page_alloc.c val += node_load[n]; n 5571 mm/page_alloc.c best_node = n; n 51 mm/page_poison.c static void poison_pages(struct page *page, int n) n 55 mm/page_poison.c for (i = 0; i < n; i++) n 110 mm/page_poison.c static void unpoison_pages(struct page *page, int n) n 114 mm/page_poison.c for (i = 0; i < n; i++) n 210 mm/slab.c 
struct kmem_cache_node *n, int tofree); n 220 mm/slab.c struct kmem_cache_node *n, struct page *page, n 554 mm/slab.c struct kmem_cache_node *n; n 559 mm/slab.c n = get_node(cachep, page_node); n 561 mm/slab.c spin_lock(&n->list_lock); n 563 mm/slab.c spin_unlock(&n->list_lock); n 594 mm/slab.c #define reap_alien(cachep, n) do { } while (0) n 688 mm/slab.c struct kmem_cache_node *n = get_node(cachep, node); n 691 mm/slab.c spin_lock(&n->list_lock); n 697 mm/slab.c if (n->shared) n 698 mm/slab.c transfer_objects(n->shared, ac, ac->limit); n 702 mm/slab.c spin_unlock(&n->list_lock); n 709 mm/slab.c static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n) n 713 mm/slab.c if (n->alien) { n 714 mm/slab.c struct alien_cache *alc = n->alien[node]; n 755 mm/slab.c struct kmem_cache_node *n; n 760 mm/slab.c n = get_node(cachep, node); n 762 mm/slab.c if (n->alien && n->alien[page_node]) { n 763 mm/slab.c alien = n->alien[page_node]; n 774 mm/slab.c n = get_node(cachep, page_node); n 775 mm/slab.c spin_lock(&n->list_lock); n 777 mm/slab.c spin_unlock(&n->list_lock); n 809 mm/slab.c struct kmem_cache_node *n; n 816 mm/slab.c n = get_node(cachep, node); n 817 mm/slab.c if (n) { n 818 mm/slab.c spin_lock_irq(&n->list_lock); n 819 mm/slab.c n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount + n 821 mm/slab.c spin_unlock_irq(&n->list_lock); n 826 mm/slab.c n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node); n 827 mm/slab.c if (!n) n 830 mm/slab.c kmem_cache_node_init(n); n 831 mm/slab.c n->next_reap = jiffies + REAPTIMEOUT_NODE + n 834 mm/slab.c n->free_limit = n 842 mm/slab.c cachep->node[node] = n; n 876 mm/slab.c struct kmem_cache_node *n; n 899 mm/slab.c n = get_node(cachep, node); n 900 mm/slab.c spin_lock_irq(&n->list_lock); n 901 mm/slab.c if (n->shared && force_change) { n 902 mm/slab.c free_block(cachep, n->shared->entry, n 903 mm/slab.c n->shared->avail, node, &list); n 904 mm/slab.c n->shared->avail = 0; n 907 mm/slab.c if (!n->shared || force_change) { n 908 mm/slab.c old_shared = n->shared; n 909 mm/slab.c n->shared = new_shared; n 913 mm/slab.c if (!n->alien) { n 914 mm/slab.c n->alien = new_alien; n 918 mm/slab.c spin_unlock_irq(&n->list_lock); n 943 mm/slab.c struct kmem_cache_node *n = NULL; n 953 mm/slab.c n = get_node(cachep, node); n 954 mm/slab.c if (!n) n 957 mm/slab.c spin_lock_irq(&n->list_lock); n 960 mm/slab.c n->free_limit -= cachep->batchcount; n 968 mm/slab.c spin_unlock_irq(&n->list_lock); n 972 mm/slab.c shared = n->shared; n 976 mm/slab.c n->shared = NULL; n 979 mm/slab.c alien = n->alien; n 980 mm/slab.c n->alien = NULL; n 982 mm/slab.c spin_unlock_irq(&n->list_lock); n 999 mm/slab.c n = get_node(cachep, node); n 1000 mm/slab.c if (!n) n 1002 mm/slab.c drain_freelist(cachep, n, INT_MAX); n 1101 mm/slab.c struct kmem_cache_node *n; n 1103 mm/slab.c n = get_node(cachep, node); n 1104 mm/slab.c if (!n) n 1107 mm/slab.c drain_freelist(cachep, n, INT_MAX); n 1109 mm/slab.c if (!list_empty(&n->slabs_full) || n 1110 mm/slab.c !list_empty(&n->slabs_partial)) { n 1320 mm/slab.c struct kmem_cache_node *n; n 1334 mm/slab.c for_each_kmem_cache_node(cachep, node, n) { n 1337 mm/slab.c spin_lock_irqsave(&n->list_lock, flags); n 1338 mm/slab.c total_slabs = n->total_slabs; n 1339 mm/slab.c free_slabs = n->free_slabs; n 1340 mm/slab.c free_objs = n->free_objects; n 1341 mm/slab.c spin_unlock_irqrestore(&n->list_lock, flags); n 1631 mm/slab.c struct page *page, *n; n 1633 mm/slab.c list_for_each_entry_safe(page, n, list, slab_list) { n 2141 
mm/slab.c struct kmem_cache_node *n; n 2146 mm/slab.c n = get_node(cachep, node); n 2147 mm/slab.c spin_lock(&n->list_lock); n 2149 mm/slab.c spin_unlock(&n->list_lock); n 2156 mm/slab.c struct kmem_cache_node *n; n 2162 mm/slab.c for_each_kmem_cache_node(cachep, node, n) n 2163 mm/slab.c if (n->alien) n 2164 mm/slab.c drain_alien_cache(cachep, n->alien); n 2166 mm/slab.c for_each_kmem_cache_node(cachep, node, n) { n 2167 mm/slab.c spin_lock_irq(&n->list_lock); n 2168 mm/slab.c drain_array_locked(cachep, n->shared, node, true, &list); n 2169 mm/slab.c spin_unlock_irq(&n->list_lock); n 2182 mm/slab.c struct kmem_cache_node *n, int tofree) n 2189 mm/slab.c while (nr_freed < tofree && !list_empty(&n->slabs_free)) { n 2191 mm/slab.c spin_lock_irq(&n->list_lock); n 2192 mm/slab.c p = n->slabs_free.prev; n 2193 mm/slab.c if (p == &n->slabs_free) { n 2194 mm/slab.c spin_unlock_irq(&n->list_lock); n 2200 mm/slab.c n->free_slabs--; n 2201 mm/slab.c n->total_slabs--; n 2206 mm/slab.c n->free_objects -= cache->num; n 2207 mm/slab.c spin_unlock_irq(&n->list_lock); n 2218 mm/slab.c struct kmem_cache_node *n; n 2220 mm/slab.c for_each_kmem_cache_node(s, node, n) n 2221 mm/slab.c if (!list_empty(&n->slabs_full) || n 2222 mm/slab.c !list_empty(&n->slabs_partial)) n 2231 mm/slab.c struct kmem_cache_node *n; n 2236 mm/slab.c for_each_kmem_cache_node(cachep, node, n) { n 2237 mm/slab.c drain_freelist(cachep, n, INT_MAX); n 2239 mm/slab.c ret += !list_empty(&n->slabs_full) || n 2240 mm/slab.c !list_empty(&n->slabs_partial); n 2264 mm/slab.c struct kmem_cache_node *n; n 2271 mm/slab.c for_each_kmem_cache_node(cachep, i, n) { n 2272 mm/slab.c kfree(n->shared); n 2273 mm/slab.c free_alien_cache(n->alien); n 2274 mm/slab.c kfree(n); n 2574 mm/slab.c struct kmem_cache_node *n; n 2604 mm/slab.c n = get_node(cachep, page_node); n 2607 mm/slab.c n->colour_next++; n 2608 mm/slab.c if (n->colour_next >= cachep->colour) n 2609 mm/slab.c n->colour_next = 0; n 2611 mm/slab.c offset = n->colour_next; n 2649 mm/slab.c struct kmem_cache_node *n; n 2658 mm/slab.c n = get_node(cachep, page_to_nid(page)); n 2660 mm/slab.c spin_lock(&n->list_lock); n 2661 mm/slab.c n->total_slabs++; n 2663 mm/slab.c list_add_tail(&page->slab_list, &n->slabs_free); n 2664 mm/slab.c n->free_slabs++; n 2666 mm/slab.c fixup_slab_list(cachep, n, page, &list); n 2669 mm/slab.c n->free_objects += cachep->num - page->active; n 2670 mm/slab.c spin_unlock(&n->list_lock); n 2766 mm/slab.c struct kmem_cache_node *n, struct page *page, n 2772 mm/slab.c list_add(&page->slab_list, &n->slabs_full); n 2786 mm/slab.c list_add(&page->slab_list, &n->slabs_partial); n 2790 mm/slab.c static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n, n 2803 mm/slab.c if (n->free_objects > n->free_limit) { n 2811 mm/slab.c list_add_tail(&page->slab_list, &n->slabs_free); n 2812 mm/slab.c n->free_slabs++; n 2814 mm/slab.c list_add_tail(&page->slab_list, &n->slabs_partial); n 2816 mm/slab.c list_for_each_entry(page, &n->slabs_partial, slab_list) { n 2821 mm/slab.c n->free_touched = 1; n 2822 mm/slab.c list_for_each_entry(page, &n->slabs_free, slab_list) { n 2824 mm/slab.c n->free_slabs--; n 2832 mm/slab.c static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc) n 2836 mm/slab.c assert_spin_locked(&n->list_lock); n 2837 mm/slab.c page = list_first_entry_or_null(&n->slabs_partial, struct page, n 2840 mm/slab.c n->free_touched = 1; n 2841 mm/slab.c page = list_first_entry_or_null(&n->slabs_free, struct page, n 2844 mm/slab.c n->free_slabs--; 
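The mm/slab.c fragments around this point keep pulling the first page off a per-node slabs_partial or slabs_free list (get_first_slab, drain_freelist) while holding n->list_lock. The following is a minimal userspace sketch of that intrusive-list idiom, assuming simplified stand-in types and helpers (fake_page, first_page_or_null) rather than the kernel's list API; the locking is omitted to keep it short.

/*
 * Illustrative sketch (not kernel code): the intrusive doubly-linked
 * list idiom behind the slabs_free/slabs_partial handling shown above.
 * fake_page and first_page_or_null are hypothetical stand-ins.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->next = entry->prev = NULL;
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

struct fake_page {
	struct list_head slab_list;
	int id;
};

/* "first entry or NULL", in the style of list_first_entry_or_null() */
static struct fake_page *first_page_or_null(struct list_head *head)
{
	return list_empty(head) ? NULL :
		container_of(head->next, struct fake_page, slab_list);
}

int main(void)
{
	struct list_head slabs_free = LIST_HEAD_INIT(slabs_free);
	struct list_head slabs_partial = LIST_HEAD_INIT(slabs_partial);
	struct fake_page pages[3];
	struct fake_page *page;
	int i;

	for (i = 0; i < 3; i++) {
		pages[i].id = i;
		list_add_tail(&pages[i].slab_list, &slabs_free);
	}

	/* prefer a partial slab, fall back to the free list */
	page = first_page_or_null(&slabs_partial);
	if (!page)
		page = first_page_or_null(&slabs_free);
	if (page) {
		list_del(&page->slab_list);
		list_add_tail(&page->slab_list, &slabs_partial);
	}

	/* drain whatever is still on the free list */
	while ((page = first_page_or_null(&slabs_free)) != NULL) {
		list_del(&page->slab_list);
		printf("freed page %d\n", page->id);
	}
	return 0;
}

The container_of() trick is what lets the same list_head be embedded in any structure without per-type list code, which is why the listing shows the identical walk pattern across slab pages, neighbours and sessions.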
n 2848 mm/slab.c page = get_valid_first_slab(n, page, pfmemalloc); n 2854 mm/slab.c struct kmem_cache_node *n, gfp_t flags) n 2863 mm/slab.c spin_lock(&n->list_lock); n 2864 mm/slab.c page = get_first_slab(n, true); n 2866 mm/slab.c spin_unlock(&n->list_lock); n 2871 mm/slab.c n->free_objects--; n 2873 mm/slab.c fixup_slab_list(cachep, n, page, &list); n 2875 mm/slab.c spin_unlock(&n->list_lock); n 2908 mm/slab.c struct kmem_cache_node *n; n 2927 mm/slab.c n = get_node(cachep, node); n 2929 mm/slab.c BUG_ON(ac->avail > 0 || !n); n 2930 mm/slab.c shared = READ_ONCE(n->shared); n 2931 mm/slab.c if (!n->free_objects && (!shared || !shared->avail)) n 2934 mm/slab.c spin_lock(&n->list_lock); n 2935 mm/slab.c shared = READ_ONCE(n->shared); n 2945 mm/slab.c page = get_first_slab(n, false); n 2952 mm/slab.c fixup_slab_list(cachep, n, page, &list); n 2956 mm/slab.c n->free_objects -= ac->avail; n 2958 mm/slab.c spin_unlock(&n->list_lock); n 2965 mm/slab.c void *obj = cache_alloc_pfmemalloc(cachep, n, flags); n 3174 mm/slab.c struct kmem_cache_node *n; n 3179 mm/slab.c n = get_node(cachep, nodeid); n 3180 mm/slab.c BUG_ON(!n); n 3183 mm/slab.c spin_lock(&n->list_lock); n 3184 mm/slab.c page = get_first_slab(n, false); n 3197 mm/slab.c n->free_objects--; n 3199 mm/slab.c fixup_slab_list(cachep, n, page, &list); n 3201 mm/slab.c spin_unlock(&n->list_lock); n 3206 mm/slab.c spin_unlock(&n->list_lock); n 3331 mm/slab.c struct kmem_cache_node *n = get_node(cachep, node); n 3334 mm/slab.c n->free_objects += nr_objects; n 3350 mm/slab.c list_add(&page->slab_list, &n->slabs_free); n 3351 mm/slab.c n->free_slabs++; n 3357 mm/slab.c list_add_tail(&page->slab_list, &n->slabs_partial); n 3361 mm/slab.c while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) { n 3362 mm/slab.c n->free_objects -= cachep->num; n 3364 mm/slab.c page = list_last_entry(&n->slabs_free, struct page, slab_list); n 3366 mm/slab.c n->free_slabs--; n 3367 mm/slab.c n->total_slabs--; n 3374 mm/slab.c struct kmem_cache_node *n; n 3381 mm/slab.c n = get_node(cachep, node); n 3382 mm/slab.c spin_lock(&n->list_lock); n 3383 mm/slab.c if (n->shared) { n 3384 mm/slab.c struct array_cache *shared_array = n->shared; n 3403 mm/slab.c list_for_each_entry(page, &n->slabs_free, slab_list) { n 3411 mm/slab.c spin_unlock(&n->list_lock); n 3768 mm/slab.c struct kmem_cache_node *n; n 3784 mm/slab.c n = get_node(cachep, node); n 3785 mm/slab.c if (n) { n 3786 mm/slab.c kfree(n->shared); n 3787 mm/slab.c free_alien_cache(n->alien); n 3788 mm/slab.c kfree(n); n 3828 mm/slab.c struct kmem_cache_node *n; n 3832 mm/slab.c n = get_node(cachep, node); n 3833 mm/slab.c spin_lock_irq(&n->list_lock); n 3835 mm/slab.c spin_unlock_irq(&n->list_lock); n 3944 mm/slab.c static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n, n 3960 mm/slab.c spin_lock_irq(&n->list_lock); n 3962 mm/slab.c spin_unlock_irq(&n->list_lock); n 3982 mm/slab.c struct kmem_cache_node *n; n 3998 mm/slab.c n = get_node(searchp, node); n 4000 mm/slab.c reap_alien(searchp, n); n 4002 mm/slab.c drain_array(searchp, n, cpu_cache_get(searchp), node); n 4008 mm/slab.c if (time_after(n->next_reap, jiffies)) n 4011 mm/slab.c n->next_reap = jiffies + REAPTIMEOUT_NODE; n 4013 mm/slab.c drain_array(searchp, n, n->shared, node); n 4015 mm/slab.c if (n->free_touched) n 4016 mm/slab.c n->free_touched = 0; n 4020 mm/slab.c freed = drain_freelist(searchp, n, (n->free_limit + n 4042 mm/slab.c struct kmem_cache_node *n; n 4044 mm/slab.c for_each_kmem_cache_node(cachep, node, n) { 
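The cache_reap() fragments above gate their periodic work on time_after(n->next_reap, jiffies) and rearm next_reap by adding a timeout to jiffies. The sketch below shows, assuming a hypothetical tick counter rather than the kernel's jiffies, why comparing the signed difference keeps such deadline checks correct across counter wrap-around.

/*
 * Illustrative sketch: wrap-around-safe deadline checks in the style
 * of time_after().  "ticks" is a hypothetical counter, not jiffies.
 */
#include <stdio.h>

typedef unsigned long tick_t;

/* true if a is after b, even if the counter has wrapped in between */
static int tick_after(tick_t a, tick_t b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	tick_t ticks = (tick_t)-10;	/* counter close to wrap-around */
	tick_t next_reap = ticks + 100;	/* deadline lands past the wrap */

	for (int i = 0; i < 3; i++) {
		if (tick_after(next_reap, ticks))
			printf("tick %lu: deadline not reached yet\n", ticks);
		else {
			printf("tick %lu: run periodic work\n", ticks);
			next_reap = ticks + 100;	/* rearm */
		}
		ticks += 60;	/* simulate time passing */
	}
	return 0;
}

Comparing next_reap > ticks directly would misfire as soon as the counter wraps; the signed-difference form only requires that the two values be less than half the counter range apart.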
n 4046 mm/slab.c spin_lock_irq(&n->list_lock); n 4048 mm/slab.c total_slabs += n->total_slabs; n 4049 mm/slab.c free_slabs += n->free_slabs; n 4050 mm/slab.c free_objs += n->free_objects; n 4052 mm/slab.c if (n->shared) n 4053 mm/slab.c shared_avail += n->shared->avail; n 4055 mm/slab.c spin_unlock_irq(&n->list_lock); n 4167 mm/slab.c void __check_heap_object(const void *ptr, unsigned long n, struct page *page, n 4187 mm/slab.c n <= cachep->useroffset - offset + cachep->usersize) n 4198 mm/slab.c n <= cachep->object_size - offset) { n 4199 mm/slab.c usercopy_warn("SLAB object", cachep->name, to_user, offset, n); n 4203 mm/slab.c usercopy_abort("SLAB object", cachep->name, to_user, offset, n); n 1295 mm/slab_common.c const char *n = kmalloc_cache_name("dma-kmalloc", size); n 1297 mm/slab_common.c BUG_ON(!n); n 1299 mm/slab_common.c n, size, SLAB_CACHE_DMA | flags, 0, 0); n 368 mm/slub.c const char *n) n 396 mm/slub.c pr_info("%s %s: cmpxchg double redo ", n, s->name); n 405 mm/slub.c const char *n) n 437 mm/slub.c pr_info("%s %s: cmpxchg double redo ", n, s->name); n 1012 mm/slub.c struct kmem_cache_node *n, struct page *page) n 1017 mm/slub.c lockdep_assert_held(&n->list_lock); n 1018 mm/slub.c list_add(&page->slab_list, &n->full); n 1021 mm/slub.c static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page) n 1026 mm/slub.c lockdep_assert_held(&n->list_lock); n 1033 mm/slub.c struct kmem_cache_node *n = get_node(s, node); n 1035 mm/slub.c return atomic_long_read(&n->nr_slabs); n 1038 mm/slub.c static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) n 1040 mm/slub.c return atomic_long_read(&n->nr_slabs); n 1045 mm/slub.c struct kmem_cache_node *n = get_node(s, node); n 1053 mm/slub.c if (likely(n)) { n 1054 mm/slub.c atomic_long_inc(&n->nr_slabs); n 1055 mm/slub.c atomic_long_add(objects, &n->total_objects); n 1060 mm/slub.c struct kmem_cache_node *n = get_node(s, node); n 1062 mm/slub.c atomic_long_dec(&n->nr_slabs); n 1063 mm/slub.c atomic_long_sub(objects, &n->total_objects); n 1173 mm/slub.c struct kmem_cache_node *n = get_node(s, page_to_nid(page)); n 1179 mm/slub.c spin_lock_irqsave(&n->list_lock, flags); n 1214 mm/slub.c spin_unlock_irqrestore(&n->list_lock, flags); n 1359 mm/slub.c static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n, n 1361 mm/slub.c static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, n 1375 mm/slub.c static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) n 1759 mm/slub.c __add_partial(struct kmem_cache_node *n, struct page *page, int tail) n 1761 mm/slub.c n->nr_partial++; n 1763 mm/slub.c list_add_tail(&page->slab_list, &n->partial); n 1765 mm/slub.c list_add(&page->slab_list, &n->partial); n 1768 mm/slub.c static inline void add_partial(struct kmem_cache_node *n, n 1771 mm/slub.c lockdep_assert_held(&n->list_lock); n 1772 mm/slub.c __add_partial(n, page, tail); n 1775 mm/slub.c static inline void remove_partial(struct kmem_cache_node *n, n 1778 mm/slub.c lockdep_assert_held(&n->list_lock); n 1780 mm/slub.c n->nr_partial--; n 1790 mm/slub.c struct kmem_cache_node *n, struct page *page, n 1797 mm/slub.c lockdep_assert_held(&n->list_lock); n 1824 mm/slub.c remove_partial(n, page); n 1835 mm/slub.c static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, n 1849 mm/slub.c if (!n || !n->nr_partial) n 1852 mm/slub.c spin_lock(&n->list_lock); n 1853 mm/slub.c list_for_each_entry_safe(page, page2, &n->partial, slab_list) { n 1859 mm/slub.c t 
= acquire_slab(s, n, page, object == NULL, &objects); n 1877 mm/slub.c spin_unlock(&n->list_lock); n 1921 mm/slub.c struct kmem_cache_node *n; n 1923 mm/slub.c n = get_node(s, zone_to_nid(zone)); n 1925 mm/slub.c if (n && cpuset_zone_allowed(zone, flags) && n 1926 mm/slub.c n->nr_partial > s->min_partial) { n 1927 mm/slub.c object = get_partial_node(s, n, c, flags); n 2001 mm/slub.c static inline void note_cmpxchg_failure(const char *n, n 2007 mm/slub.c pr_info("%s %s: cmpxchg redo ", n, s->name); n 2040 mm/slub.c struct kmem_cache_node *n = get_node(s, page_to_nid(page)); n 2112 mm/slub.c if (!new.inuse && n->nr_partial >= s->min_partial) n 2123 mm/slub.c spin_lock(&n->list_lock); n 2134 mm/slub.c spin_lock(&n->list_lock); n 2140 mm/slub.c remove_partial(n, page); n 2142 mm/slub.c remove_full(s, n, page); n 2145 mm/slub.c add_partial(n, page, tail); n 2147 mm/slub.c add_full(s, n, page); n 2158 mm/slub.c spin_unlock(&n->list_lock); n 2185 mm/slub.c struct kmem_cache_node *n = NULL, *n2 = NULL; n 2195 mm/slub.c if (n != n2) { n 2196 mm/slub.c if (n) n 2197 mm/slub.c spin_unlock(&n->list_lock); n 2199 mm/slub.c n = n2; n 2200 mm/slub.c spin_lock(&n->list_lock); n 2219 mm/slub.c if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) { n 2223 mm/slub.c add_partial(n, page, DEACTIVATE_TO_TAIL); n 2228 mm/slub.c if (n) n 2229 mm/slub.c spin_unlock(&n->list_lock); n 2382 mm/slub.c static inline unsigned long node_nr_objs(struct kmem_cache_node *n) n 2384 mm/slub.c return atomic_long_read(&n->total_objects); n 2389 mm/slub.c static unsigned long count_partial(struct kmem_cache_node *n, n 2396 mm/slub.c spin_lock_irqsave(&n->list_lock, flags); n 2397 mm/slub.c list_for_each_entry(page, &n->partial, slab_list) n 2399 mm/slub.c spin_unlock_irqrestore(&n->list_lock, flags); n 2411 mm/slub.c struct kmem_cache_node *n; n 2426 mm/slub.c for_each_kmem_cache_node(s, node, n) { n 2431 mm/slub.c nr_free = count_partial(n, count_free); n 2432 mm/slub.c nr_slabs = node_nr_slabs(n); n 2433 mm/slub.c nr_objs = node_nr_objs(n); n 2845 mm/slub.c struct kmem_cache_node *n = NULL; n 2855 mm/slub.c if (unlikely(n)) { n 2856 mm/slub.c spin_unlock_irqrestore(&n->list_lock, flags); n 2857 mm/slub.c n = NULL; n 2879 mm/slub.c n = get_node(s, page_to_nid(page)); n 2888 mm/slub.c spin_lock_irqsave(&n->list_lock, flags); n 2898 mm/slub.c if (likely(!n)) { n 2917 mm/slub.c if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) n 2925 mm/slub.c remove_full(s, n, page); n 2926 mm/slub.c add_partial(n, page, DEACTIVATE_TO_TAIL); n 2929 mm/slub.c spin_unlock_irqrestore(&n->list_lock, flags); n 2937 mm/slub.c remove_partial(n, page); n 2941 mm/slub.c remove_full(s, n, page); n 2944 mm/slub.c spin_unlock_irqrestore(&n->list_lock, flags); n 3342 mm/slub.c init_kmem_cache_node(struct kmem_cache_node *n) n 3344 mm/slub.c n->nr_partial = 0; n 3345 mm/slub.c spin_lock_init(&n->list_lock); n 3346 mm/slub.c INIT_LIST_HEAD(&n->partial); n 3348 mm/slub.c atomic_long_set(&n->nr_slabs, 0); n 3349 mm/slub.c atomic_long_set(&n->total_objects, 0); n 3350 mm/slub.c INIT_LIST_HEAD(&n->full); n 3388 mm/slub.c struct kmem_cache_node *n; n 3400 mm/slub.c n = page->freelist; n 3401 mm/slub.c BUG_ON(!n); n 3403 mm/slub.c init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); n 3404 mm/slub.c init_tracking(kmem_cache_node, n); n 3406 mm/slub.c n = kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node), n 3408 mm/slub.c page->freelist = get_freepointer(kmem_cache_node, n); n 3411 mm/slub.c kmem_cache_node->node[node] = n; n 3412 
mm/slub.c init_kmem_cache_node(n); n 3419 mm/slub.c __add_partial(n, page, DEACTIVATE_TO_HEAD); n 3425 mm/slub.c struct kmem_cache_node *n; n 3427 mm/slub.c for_each_kmem_cache_node(s, node, n) { n 3429 mm/slub.c kmem_cache_free(kmem_cache_node, n); n 3445 mm/slub.c struct kmem_cache_node *n; n 3451 mm/slub.c n = kmem_cache_alloc_node(kmem_cache_node, n 3454 mm/slub.c if (!n) { n 3459 mm/slub.c init_kmem_cache_node(n); n 3460 mm/slub.c s->node[node] = n; n 3720 mm/slub.c static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) n 3726 mm/slub.c spin_lock_irq(&n->list_lock); n 3727 mm/slub.c list_for_each_entry_safe(page, h, &n->partial, slab_list) { n 3729 mm/slub.c remove_partial(n, page); n 3736 mm/slub.c spin_unlock_irq(&n->list_lock); n 3745 mm/slub.c struct kmem_cache_node *n; n 3747 mm/slub.c for_each_kmem_cache_node(s, node, n) n 3748 mm/slub.c if (n->nr_partial || slabs_node(s, node)) n 3759 mm/slub.c struct kmem_cache_node *n; n 3763 mm/slub.c for_each_kmem_cache_node(s, node, n) { n 3764 mm/slub.c free_partial(s, n); n 3765 mm/slub.c if (n->nr_partial || slabs_node(s, node)) n 3885 mm/slub.c void __check_heap_object(const void *ptr, unsigned long n, struct page *page, n 3900 mm/slub.c to_user, 0, n); n 3909 mm/slub.c s->name, to_user, offset, n); n 3916 mm/slub.c n <= s->useroffset - offset + s->usersize) n 3927 mm/slub.c offset <= object_size && n <= object_size - offset) { n 3928 mm/slub.c usercopy_warn("SLUB object", s->name, to_user, offset, n); n 3932 mm/slub.c usercopy_abort("SLUB object", s->name, to_user, offset, n); n 3994 mm/slub.c struct kmem_cache_node *n; n 4003 mm/slub.c for_each_kmem_cache_node(s, node, n) { n 4008 mm/slub.c spin_lock_irqsave(&n->list_lock, flags); n 4016 mm/slub.c list_for_each_entry_safe(page, t, &n->partial, slab_list) { n 4027 mm/slub.c n->nr_partial--; n 4037 mm/slub.c list_splice(promote + i, &n->partial); n 4039 mm/slub.c spin_unlock_irqrestore(&n->list_lock, flags); n 4096 mm/slub.c struct kmem_cache_node *n; n 4112 mm/slub.c n = get_node(s, offline_node); n 4113 mm/slub.c if (n) { n 4123 mm/slub.c kmem_cache_free(kmem_cache_node, n); n 4131 mm/slub.c struct kmem_cache_node *n; n 4156 mm/slub.c n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL); n 4157 mm/slub.c if (!n) { n 4161 mm/slub.c init_kmem_cache_node(n); n 4162 mm/slub.c s->node[nid] = n; n 4215 mm/slub.c struct kmem_cache_node *n; n 4225 mm/slub.c for_each_kmem_cache_node(s, node, n) { n 4228 mm/slub.c list_for_each_entry(p, &n->partial, slab_list) n 4232 mm/slub.c list_for_each_entry(p, &n->full, slab_list) n 4441 mm/slub.c struct kmem_cache_node *n, unsigned long *map) n 4447 mm/slub.c spin_lock_irqsave(&n->list_lock, flags); n 4449 mm/slub.c list_for_each_entry(page, &n->partial, slab_list) { n 4453 mm/slub.c if (count != n->nr_partial) n 4455 mm/slub.c s->name, count, n->nr_partial); n 4460 mm/slub.c list_for_each_entry(page, &n->full, slab_list) { n 4464 mm/slub.c if (count != atomic_long_read(&n->nr_slabs)) n 4466 mm/slub.c s->name, count, atomic_long_read(&n->nr_slabs)); n 4469 mm/slub.c spin_unlock_irqrestore(&n->list_lock, flags); n 4477 mm/slub.c struct kmem_cache_node *n; n 4484 mm/slub.c for_each_kmem_cache_node(s, node, n) n 4485 mm/slub.c count += validate_slab_node(s, n, map); n 4637 mm/slub.c struct kmem_cache_node *n; n 4648 mm/slub.c for_each_kmem_cache_node(s, node, n) { n 4652 mm/slub.c if (!atomic_long_read(&n->nr_slabs)) n 4655 mm/slub.c spin_lock_irqsave(&n->list_lock, flags); n 4656 mm/slub.c list_for_each_entry(page, &n->partial, slab_list) 
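The __check_heap_object() fragment above and the mm/usercopy.c helpers that follow verify that an n-byte copy stays inside an object's whitelisted region using only subtractions that cannot overflow, plus an explicit pointer wrap-around test. Below is a small standalone sketch of that style of check; obj_size, user_off and user_size are illustrative parameters, not the real struct kmem_cache fields.

/*
 * Illustrative sketch of overflow-aware range checks in the style of
 * the usercopy hardening fragments above.  obj_size/user_off/user_size
 * are hypothetical parameters, not kmem_cache fields.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Does the span [ptr, ptr + n) wrap around the address space? */
static bool wraps(uintptr_t ptr, size_t n)
{
	return n != 0 && ptr + (n - 1) < ptr;
}

/*
 * Is the n-byte span starting at 'offset' inside the object entirely
 * contained in the window [user_off, user_off + user_size)?  Written
 * with guarded subtractions only, so nothing can overflow.
 */
static bool span_in_window(size_t offset, size_t n,
			   size_t obj_size, size_t user_off, size_t user_size)
{
	if (offset > obj_size || n > obj_size - offset)
		return false;		/* escapes the object */
	if (offset < user_off)
		return false;		/* starts before the window */
	return n <= user_size - (offset - user_off);
}

int main(void)
{
	/* window of 16 usable bytes starting 8 bytes into a 64-byte object */
	printf("%d\n", span_in_window(8, 16, 64, 8, 16));	/* 1: exact fit  */
	printf("%d\n", span_in_window(12, 16, 64, 8, 16));	/* 0: runs past  */
	printf("%d\n", span_in_window(4, 4, 64, 8, 16));	/* 0: before win */
	printf("%d\n", wraps(UINTPTR_MAX - 3, 8));		/* 1: wraps      */
	return 0;
}

Rejecting offset > obj_size and offset < user_off before doing any size arithmetic is what keeps the remaining unsigned subtractions from wrapping, which is the same ordering the fragments above follow.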
n 4658 mm/slub.c list_for_each_entry(page, &n->full, slab_list) n 4660 mm/slub.c spin_unlock_irqrestore(&n->list_lock, flags); n 4870 mm/slub.c struct kmem_cache_node *n; n 4872 mm/slub.c for_each_kmem_cache_node(s, node, n) { n 4875 mm/slub.c x = atomic_long_read(&n->total_objects); n 4877 mm/slub.c x = atomic_long_read(&n->total_objects) - n 4878 mm/slub.c count_partial(n, count_free); n 4880 mm/slub.c x = atomic_long_read(&n->nr_slabs); n 4888 mm/slub.c struct kmem_cache_node *n; n 4890 mm/slub.c for_each_kmem_cache_node(s, node, n) { n 4892 mm/slub.c x = count_partial(n, count_total); n 4894 mm/slub.c x = count_partial(n, count_inuse); n 4896 mm/slub.c x = n->nr_partial; n 4916 mm/slub.c struct kmem_cache_node *n; n 4918 mm/slub.c for_each_kmem_cache_node(s, node, n) n 4919 mm/slub.c if (atomic_long_read(&n->total_objects)) n 4926 mm/slub.c #define to_slab_attr(n) container_of(n, struct slab_attribute, attr) n 4927 mm/slub.c #define to_slab(n) container_of(n, struct kmem_cache, kobj) n 5924 mm/slub.c struct kmem_cache_node *n; n 5926 mm/slub.c for_each_kmem_cache_node(s, node, n) { n 5927 mm/slub.c nr_slabs += node_nr_slabs(n); n 5928 mm/slub.c nr_objs += node_nr_objs(n); n 5929 mm/slub.c nr_free += count_partial(n, count_free); n 298 mm/swapfile.c unsigned int n) n 300 mm/swapfile.c info->data = n; n 304 mm/swapfile.c unsigned int n, unsigned int f) n 307 mm/swapfile.c info->data = n; n 1396 mm/swapfile.c void swapcache_free_entries(swp_entry_t *entries, int n) n 1401 mm/swapfile.c if (n <= 0) n 1413 mm/swapfile.c sort(entries, n, sizeof(entries[0]), swp_entry_cmp, NULL); n 1414 mm/swapfile.c for (i = 0; i < n; ++i) { n 1498 mm/swapfile.c int count, tmp_count, n; n 1518 mm/swapfile.c n = SWAP_MAP_MAX + 1; n 1530 mm/swapfile.c count += (tmp_count & ~COUNT_CONTINUED) * n; n 1531 mm/swapfile.c n *= (SWAP_CONT_MAX + 1); n 1824 mm/swapfile.c unsigned int n = 0; n 1832 mm/swapfile.c n = sis->pages; n 1834 mm/swapfile.c n -= sis->inuse_pages; n 1839 mm/swapfile.c return n; n 103 mm/usercopy.c static bool overlaps(const unsigned long ptr, unsigned long n, n 107 mm/usercopy.c unsigned long check_high = check_low + n; n 118 mm/usercopy.c unsigned long n, bool to_user) n 124 mm/usercopy.c if (overlaps(ptr, n, textlow, texthigh)) n 125 mm/usercopy.c usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n); n 142 mm/usercopy.c if (overlaps(ptr, n, textlow_linear, texthigh_linear)) n 144 mm/usercopy.c ptr - textlow_linear, n); n 147 mm/usercopy.c static inline void check_bogus_address(const unsigned long ptr, unsigned long n, n 151 mm/usercopy.c if (ptr + (n - 1) < ptr) n 152 mm/usercopy.c usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n); n 156 mm/usercopy.c usercopy_abort("null address", NULL, to_user, ptr, n); n 160 mm/usercopy.c static inline void check_page_span(const void *ptr, unsigned long n, n 164 mm/usercopy.c const void *end = ptr + n - 1; n 178 mm/usercopy.c usercopy_abort("rodata", NULL, to_user, 0, n); n 209 mm/usercopy.c usercopy_abort("spans multiple pages", NULL, to_user, 0, n); n 215 mm/usercopy.c NULL, to_user, 0, n); n 218 mm/usercopy.c to_user, 0, n); n 223 mm/usercopy.c static inline void check_heap_object(const void *ptr, unsigned long n, n 240 mm/usercopy.c __check_heap_object(ptr, n, page, to_user); n 243 mm/usercopy.c check_page_span(ptr, n, page, to_user); n 256 mm/usercopy.c void __check_object_size(const void *ptr, unsigned long n, bool to_user) n 262 mm/usercopy.c if (!n) n 266 mm/usercopy.c check_bogus_address((const unsigned long)ptr, n, to_user); n 
269 mm/usercopy.c switch (check_stack_object(ptr, n)) { n 282 mm/usercopy.c usercopy_abort("process stack", NULL, to_user, 0, n); n 286 mm/usercopy.c check_heap_object(ptr, n, to_user); n 289 mm/usercopy.c check_kernel_text_object((const unsigned long)ptr, n, to_user); n 218 mm/util.c char *strndup_user(const char __user *s, long n) n 223 mm/util.c length = strnlen_user(s, n); n 228 mm/util.c if (length > n) n 416 mm/vmalloc.c struct rb_node *n = vmap_area_root.rb_node; n 418 mm/vmalloc.c while (n) { n 421 mm/vmalloc.c va = rb_entry(n, struct vmap_area, rb_node); n 423 mm/vmalloc.c n = n->rb_left; n 425 mm/vmalloc.c n = n->rb_right; n 557 mm/vmalloc.c augment_tree_propagate_check(struct rb_node *n) n 564 mm/vmalloc.c if (n == NULL) n 567 mm/vmalloc.c va = rb_entry(n, struct vmap_area, rb_node); n 569 mm/vmalloc.c node = n; n 587 mm/vmalloc.c va = rb_entry(n, struct vmap_area, rb_node); n 592 mm/vmalloc.c augment_tree_propagate_check(n->rb_left); n 593 mm/vmalloc.c augment_tree_propagate_check(n->rb_right); n 2852 mm/vmalloc.c unsigned long n; n 2878 mm/vmalloc.c n = vaddr + get_vm_area_size(vm) - addr; n 2879 mm/vmalloc.c if (n > count) n 2880 mm/vmalloc.c n = count; n 2882 mm/vmalloc.c aligned_vread(buf, addr, n); n 2884 mm/vmalloc.c memset(buf, 0, n); n 2885 mm/vmalloc.c buf += n; n 2886 mm/vmalloc.c addr += n; n 2887 mm/vmalloc.c count -= n; n 2930 mm/vmalloc.c unsigned long n, buflen; n 2957 mm/vmalloc.c n = vaddr + get_vm_area_size(vm) - addr; n 2958 mm/vmalloc.c if (n > count) n 2959 mm/vmalloc.c n = count; n 2961 mm/vmalloc.c aligned_vwrite(buf, addr, n); n 2964 mm/vmalloc.c buf += n; n 2965 mm/vmalloc.c addr += n; n 2966 mm/vmalloc.c count -= n; n 3135 mm/vmalloc.c static struct vmap_area *node_to_va(struct rb_node *n) n 3137 mm/vmalloc.c return rb_entry_safe(n, struct vmap_area, rb_node); n 3153 mm/vmalloc.c struct rb_node *n; n 3155 mm/vmalloc.c n = free_vmap_area_root.rb_node; n 3158 mm/vmalloc.c while (n) { n 3159 mm/vmalloc.c tmp = rb_entry(n, struct vmap_area, rb_node); n 3165 mm/vmalloc.c n = n->rb_right; n 3167 mm/vmalloc.c n = n->rb_left; n 485 mm/vmstat.c long o, n, t, z; n 503 mm/vmstat.c n = delta + o; n 505 mm/vmstat.c if (n > t || n < -t) { n 509 mm/vmstat.c z = n + os; n 510 mm/vmstat.c n = -os; n 512 mm/vmstat.c } while (this_cpu_cmpxchg(*p, o, n) != o); n 542 mm/vmstat.c long o, n, t, z; n 560 mm/vmstat.c n = delta + o; n 562 mm/vmstat.c if (n > t || n < -t) { n 566 mm/vmstat.c z = n + os; n 567 mm/vmstat.c n = -os; n 569 mm/vmstat.c } while (this_cpu_cmpxchg(*p, o, n) != o); n 1205 mm/zswap.c struct zswap_entry *entry, *n; n 1212 mm/zswap.c rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode) n 132 net/6lowpan/debugfs.c int status = count, n, i; n 141 net/6lowpan/debugfs.c n = sscanf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x", n 144 net/6lowpan/debugfs.c if (n != LOWPAN_DEBUGFS_CTX_PFX_NUM_ARGS) { n 56 net/6lowpan/ndisc.c static void lowpan_ndisc_802154_update(struct neighbour *n, u32 flags, n 60 net/6lowpan/ndisc.c struct lowpan_802154_neigh *neigh = lowpan_802154_neigh(neighbour_priv(n)); n 93 net/6lowpan/ndisc.c write_lock_bh(&n->lock); n 99 net/6lowpan/ndisc.c write_unlock_bh(&n->lock); n 103 net/6lowpan/ndisc.c struct neighbour *n, u32 flags, u8 icmp6_type, n 111 net/6lowpan/ndisc.c lowpan_ndisc_802154_update(n, flags, icmp6_type, ndopts); n 118 net/6lowpan/ndisc.c struct lowpan_802154_neigh *n; n 127 net/6lowpan/ndisc.c n = lowpan_802154_neigh(neighbour_priv(neigh)); n 130 net/6lowpan/ndisc.c if 
(lowpan_802154_is_valid_src_short_addr(n->short_addr)) { n 131 net/6lowpan/ndisc.c memcpy(ha_buf, &n->short_addr, n 193 net/8021q/vlan_netlink.c static inline size_t vlan_qos_map_size(unsigned int n) n 195 net/8021q/vlan_netlink.c if (n == 0) n 199 net/8021q/vlan_netlink.c nla_total_size(sizeof(struct ifla_vlan_qos_mapping)) * n; n 1610 net/9p/client.c int n = copy_to_iter(dataptr, count, to); n 1611 net/9p/client.c total += n; n 1612 net/9p/client.c offset += n; n 1613 net/9p/client.c if (n != count) { n 277 net/9p/trans_fd.c __poll_t n; n 386 net/9p/trans_fd.c n = EPOLLIN; n 388 net/9p/trans_fd.c n = p9_fd_poll(m->client, NULL, NULL); n 390 net/9p/trans_fd.c if ((n & EPOLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) { n 438 net/9p/trans_fd.c __poll_t n; n 500 net/9p/trans_fd.c n = EPOLLOUT; n 502 net/9p/trans_fd.c n = p9_fd_poll(m->client, NULL, NULL); n 504 net/9p/trans_fd.c if ((n & EPOLLOUT) && n 577 net/9p/trans_fd.c __poll_t n; n 593 net/9p/trans_fd.c n = p9_fd_poll(client, &m->pt, NULL); n 594 net/9p/trans_fd.c if (n & EPOLLIN) { n 599 net/9p/trans_fd.c if (n & EPOLLOUT) { n 613 net/9p/trans_fd.c __poll_t n; n 619 net/9p/trans_fd.c n = p9_fd_poll(m->client, NULL, &err); n 620 net/9p/trans_fd.c if (n & (EPOLLERR | EPOLLHUP | EPOLLNVAL)) { n 621 net/9p/trans_fd.c p9_debug(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n); n 625 net/9p/trans_fd.c if (n & EPOLLIN) { n 634 net/9p/trans_fd.c if (n & EPOLLOUT) { n 658 net/9p/trans_fd.c __poll_t n; n 673 net/9p/trans_fd.c n = EPOLLOUT; n 675 net/9p/trans_fd.c n = p9_fd_poll(m->client, NULL, NULL); n 677 net/9p/trans_fd.c if (n & EPOLLOUT && !test_and_set_bit(Wworksched, &m->wsched)) n 318 net/9p/trans_virtio.c int n; n 329 net/9p/trans_virtio.c n = iov_iter_get_pages_alloc(data, pages, count, offs); n 330 net/9p/trans_virtio.c if (n < 0) n 331 net/9p/trans_virtio.c return n; n 333 net/9p/trans_virtio.c nr_pages = DIV_ROUND_UP(n + *offs, PAGE_SIZE); n 335 net/9p/trans_virtio.c return n; n 405 net/9p/trans_virtio.c int n = p9_get_mapped_pages(chan, &out_pages, uodata, n 407 net/9p/trans_virtio.c if (n < 0) { n 408 net/9p/trans_virtio.c err = n; n 411 net/9p/trans_virtio.c out_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE); n 412 net/9p/trans_virtio.c if (n != outlen) { n 413 net/9p/trans_virtio.c __le32 v = cpu_to_le32(n); n 415 net/9p/trans_virtio.c outlen = n; n 424 net/9p/trans_virtio.c int n = p9_get_mapped_pages(chan, &in_pages, uidata, n 426 net/9p/trans_virtio.c if (n < 0) { n 427 net/9p/trans_virtio.c err = n; n 430 net/9p/trans_virtio.c in_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE); n 431 net/9p/trans_virtio.c if (n != inlen) { n 432 net/9p/trans_virtio.c __le32 v = cpu_to_le32(n); n 434 net/9p/trans_virtio.c inlen = n; n 252 net/appletalk/aarp.c static void __aarp_expire_timer(struct aarp_entry **n) n 256 net/appletalk/aarp.c while (*n) n 258 net/appletalk/aarp.c if (time_after(jiffies, (*n)->expires_at)) { n 259 net/appletalk/aarp.c t = *n; n 260 net/appletalk/aarp.c *n = (*n)->next; n 263 net/appletalk/aarp.c n = &((*n)->next); n 271 net/appletalk/aarp.c static void __aarp_kick(struct aarp_entry **n) n 275 net/appletalk/aarp.c while (*n) n 277 net/appletalk/aarp.c if ((*n)->xmit_count >= sysctl_aarp_retransmit_limit) { n 278 net/appletalk/aarp.c t = *n; n 279 net/appletalk/aarp.c *n = (*n)->next; n 282 net/appletalk/aarp.c __aarp_send_query(*n); n 283 net/appletalk/aarp.c n = &((*n)->next); n 293 net/appletalk/aarp.c static void __aarp_expire_device(struct aarp_entry **n, struct net_device *dev) n 297 net/appletalk/aarp.c while 
(*n) n 298 net/appletalk/aarp.c if ((*n)->dev == dev) { n 299 net/appletalk/aarp.c t = *n; n 300 net/appletalk/aarp.c *n = (*n)->next; n 303 net/appletalk/aarp.c n = &((*n)->next); n 351 net/appletalk/aarp.c static void __aarp_expire_all(struct aarp_entry **n) n 355 net/appletalk/aarp.c while (*n) { n 356 net/appletalk/aarp.c t = *n; n 357 net/appletalk/aarp.c *n = (*n)->next; n 1114 net/appletalk/ddp.c int n = -EADDRNOTAVAIL; n 1122 net/appletalk/ddp.c n = atalk_pick_and_bind_port(sk, &sat); n 1123 net/appletalk/ddp.c if (!n) n 1126 net/appletalk/ddp.c return n; n 121 net/atm/clip.c static int neigh_check_cb(struct neighbour *n) n 123 net/atm/clip.c struct atmarp_entry *entry = neighbour_priv(n); n 126 net/atm/clip.c if (n->ops != &clip_neigh_ops) n 141 net/atm/clip.c if (refcount_read(&n->refcnt) > 1) { n 145 net/atm/clip.c refcount_read(&n->refcnt)); n 147 net/atm/clip.c while ((skb = skb_dequeue(&n->arp_queue)) != NULL) n 153 net/atm/clip.c pr_debug("expired neigh %p\n", n); n 334 net/atm/clip.c struct neighbour *n; n 353 net/atm/clip.c n = dst_neigh_lookup(dst, daddr); n 354 net/atm/clip.c if (!n) { n 360 net/atm/clip.c entry = neighbour_priv(n); n 365 net/atm/clip.c to_atmarpd(act_need, PRIV(dev)->number, *((__be32 *)n->primary_key)); n 377 net/atm/clip.c pr_debug("using neighbour %p, vcc %p\n", n, vcc); n 411 net/atm/clip.c neigh_release(n); n 736 net/atm/clip.c static void atmarp_info(struct seq_file *seq, struct neighbour *n, n 739 net/atm/clip.c struct net_device *dev = n->dev; n 759 net/atm/clip.c off = scnprintf(buf, sizeof(buf) - 1, "%pI4", n->primary_key); n 826 net/atm/clip.c struct neighbour *n, loff_t * pos) n 830 net/atm/clip.c if (n->dev->type != ARPHRD_ATM) n 833 net/atm/clip.c return clip_seq_vcc_walk(state, neighbour_priv(n), pos); n 853 net/atm/clip.c struct neighbour *n = v; n 855 net/atm/clip.c atmarp_info(seq, n, neighbour_priv(n), vcc); n 320 net/atm/pppoatm.c struct sk_buff *n; n 321 net/atm/pppoatm.c n = skb_realloc_headroom(skb, LLC_LEN); n 322 net/atm/pppoatm.c if (n != NULL && n 323 net/atm/pppoatm.c !pppoatm_may_send(pvcc, n->truesize)) { n 324 net/atm/pppoatm.c kfree_skb(n); n 328 net/atm/pppoatm.c skb = n; n 49 net/ax25/ax25_addr.c int n; n 51 net/ax25/ax25_addr.c for (n = 0, s = buf; n < 6; n++) { n 52 net/ax25/ax25_addr.c c = (a->ax25_call[n] >> 1) & 0x7F; n 59 net/ax25/ax25_addr.c if ((n = ((a->ax25_call[6] >> 1) & 0x0F)) > 9) { n 61 net/ax25/ax25_addr.c n -= 10; n 64 net/ax25/ax25_addr.c *s++ = n + '0'; n 82 net/ax25/ax25_addr.c int n; n 84 net/ax25/ax25_addr.c for (s = callsign, n = 0; n < 6; n++) { n 86 net/ax25/ax25_addr.c addr->ax25_call[n] = *s++; n 88 net/ax25/ax25_addr.c addr->ax25_call[n] = ' '; n 89 net/ax25/ax25_addr.c addr->ax25_call[n] <<= 1; n 90 net/ax25/ax25_addr.c addr->ax25_call[n] &= 0xFE; n 222 net/ax25/ax25_subr.c int n, t = 2; n 233 net/ax25/ax25_subr.c for (n = 0; n < ax25->n2count; n++) n 15 net/batman-adv/bitarray.c static void batadv_bitmap_shift_left(unsigned long *seq_bits, s32 n) n 17 net/batman-adv/bitarray.c if (n <= 0 || n >= BATADV_TQ_LOCAL_WINDOW_SIZE) n 20 net/batman-adv/bitarray.c bitmap_shift_left(seq_bits, seq_bits, n, BATADV_TQ_LOCAL_WINDOW_SIZE); n 44 net/batman-adv/bitarray.h static inline void batadv_set_bit(unsigned long *seq_bits, s32 n) n 47 net/batman-adv/bitarray.h if (n < 0 || n >= BATADV_TQ_LOCAL_WINDOW_SIZE) n 50 net/batman-adv/bitarray.h set_bit(n, seq_bits); /* turn the position on */ n 986 net/bluetooth/6lowpan.c int n; n 988 net/bluetooth/6lowpan.c n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx 
%hhu", n 993 net/bluetooth/6lowpan.c if (n < 7) n 196 net/bluetooth/af_bluetooth.c struct bt_sock *s, *n; n 202 net/bluetooth/af_bluetooth.c list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) { n 436 net/bluetooth/af_bluetooth.c struct bt_sock *s, *n; n 439 net/bluetooth/af_bluetooth.c list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) { n 65 net/bluetooth/amp.c struct amp_ctrl *ctrl, *n; n 70 net/bluetooth/amp.c list_for_each_entry_safe(ctrl, n, &mgr->amp_ctrls, list) { n 105 net/bluetooth/bnep/core.c int n; n 110 net/bluetooth/bnep/core.c n = get_unaligned_be16(data); n 114 net/bluetooth/bnep/core.c if (len < n) n 117 net/bluetooth/bnep/core.c BT_DBG("filter len %d", n); n 120 net/bluetooth/bnep/core.c n /= 4; n 121 net/bluetooth/bnep/core.c if (n <= BNEP_MAX_PROTO_FILTERS) { n 125 net/bluetooth/bnep/core.c for (i = 0; i < n; i++) { n 136 net/bluetooth/bnep/core.c if (n == 0) n 151 net/bluetooth/bnep/core.c int n; n 156 net/bluetooth/bnep/core.c n = get_unaligned_be16(data); n 160 net/bluetooth/bnep/core.c if (len < n) n 163 net/bluetooth/bnep/core.c BT_DBG("filter len %d", n); n 166 net/bluetooth/bnep/core.c n /= (ETH_ALEN * 2); n 168 net/bluetooth/bnep/core.c if (n > 0) { n 177 net/bluetooth/bnep/core.c for (; n > 0; n--) { n 689 net/bluetooth/bnep/core.c int err = 0, n = 0; n 703 net/bluetooth/bnep/core.c if (++n >= req->cnum) n 708 net/bluetooth/bnep/core.c req->cnum = n; n 450 net/bluetooth/cmtp/core.c int err = 0, n = 0; n 466 net/bluetooth/cmtp/core.c if (++n >= req->cnum) n 471 net/bluetooth/cmtp/core.c req->cnum = n; n 1491 net/bluetooth/hci_conn.c struct hci_conn *c, *n; n 1495 net/bluetooth/hci_conn.c list_for_each_entry_safe(c, n, &h->list, list) { n 1547 net/bluetooth/hci_conn.c int n = 0, size, err; n 1571 net/bluetooth/hci_conn.c bacpy(&(ci + n)->bdaddr, &c->dst); n 1572 net/bluetooth/hci_conn.c (ci + n)->handle = c->handle; n 1573 net/bluetooth/hci_conn.c (ci + n)->type = c->type; n 1574 net/bluetooth/hci_conn.c (ci + n)->out = c->out; n 1575 net/bluetooth/hci_conn.c (ci + n)->state = c->state; n 1576 net/bluetooth/hci_conn.c (ci + n)->link_mode = get_link_mode(c); n 1577 net/bluetooth/hci_conn.c if (++n >= req.conn_num) n 1583 net/bluetooth/hci_conn.c cl->conn_num = n; n 1584 net/bluetooth/hci_conn.c size = sizeof(req) + n * sizeof(*ci); n 1689 net/bluetooth/hci_conn.c struct hci_chan *chan, *n; n 1693 net/bluetooth/hci_conn.c list_for_each_entry_safe(chan, n, &conn->chan_list, list) n 1073 net/bluetooth/hci_core.c struct inquiry_entry *p, *n; n 1075 net/bluetooth/hci_core.c list_for_each_entry_safe(p, n, &cache->all, all) { n 2050 net/bluetooth/hci_core.c int n = 0, size, err; n 2078 net/bluetooth/hci_core.c (dr + n)->dev_id = hdev->id; n 2079 net/bluetooth/hci_core.c (dr + n)->dev_opt = flags; n 2081 net/bluetooth/hci_core.c if (++n >= dev_num) n 2086 net/bluetooth/hci_core.c dl->dev_num = n; n 2087 net/bluetooth/hci_core.c size = sizeof(*dl) + n * sizeof(*dr); n 2686 net/bluetooth/hci_core.c struct oob_data *data, *n; n 2688 net/bluetooth/hci_core.c list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) { n 2801 net/bluetooth/hci_core.c struct adv_info *adv_instance, *n; n 2803 net/bluetooth/hci_core.c list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) n 2810 net/bluetooth/hci_core.c struct adv_info *adv_instance, *n; n 2817 net/bluetooth/hci_core.c list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) { n 2924 net/bluetooth/hci_core.c struct bdaddr_list *b, *n; n 2926 net/bluetooth/hci_core.c 
list_for_each_entry_safe(b, n, bdaddr_list, list) { n 1823 net/bluetooth/hci_request.c struct adv_info *adv_instance, *n, *next_instance = NULL; n 1839 net/bluetooth/hci_request.c list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, n 1420 net/bluetooth/hidp/core.c int err = 0, n = 0; n 1436 net/bluetooth/hidp/core.c if (++n >= req->cnum) n 1441 net/bluetooth/hidp/core.c req->cnum = n; n 6492 net/bluetooth/mgmt.c struct adv_info *adv_instance, *n; n 6501 net/bluetooth/mgmt.c list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) { n 696 net/bluetooth/rfcomm/core.c struct rfcomm_session *s, *n; n 698 net/bluetooth/rfcomm/core.c list_for_each_entry_safe(s, n, &session_list, list) { n 711 net/bluetooth/rfcomm/core.c struct rfcomm_dlc *d, *n; n 718 net/bluetooth/rfcomm/core.c list_for_each_entry_safe(d, n, &s->dlcs, list) { n 1771 net/bluetooth/rfcomm/core.c struct rfcomm_dlc *d, *n; n 1775 net/bluetooth/rfcomm/core.c list_for_each_entry_safe(d, n, &s->dlcs, list) { n 1841 net/bluetooth/rfcomm/core.c struct rfcomm_dlc *d, *n; n 1845 net/bluetooth/rfcomm/core.c list_for_each_entry_safe(d, n, &s->dlcs, list) { n 1980 net/bluetooth/rfcomm/core.c struct rfcomm_session *s, *n; n 1984 net/bluetooth/rfcomm/core.c list_for_each_entry_safe(s, n, &session_list, list) { n 2067 net/bluetooth/rfcomm/core.c struct rfcomm_session *s, *n; n 2071 net/bluetooth/rfcomm/core.c list_for_each_entry_safe(s, n, &session_list, list) n 2102 net/bluetooth/rfcomm/core.c struct rfcomm_dlc *d, *n; n 2110 net/bluetooth/rfcomm/core.c list_for_each_entry_safe(d, n, &s->dlcs, list) { n 506 net/bluetooth/rfcomm/tty.c int n = 0, size, err; n 530 net/bluetooth/rfcomm/tty.c (di + n)->id = dev->id; n 531 net/bluetooth/rfcomm/tty.c (di + n)->flags = dev->flags; n 532 net/bluetooth/rfcomm/tty.c (di + n)->state = dev->dlc->state; n 533 net/bluetooth/rfcomm/tty.c (di + n)->channel = dev->channel; n 534 net/bluetooth/rfcomm/tty.c bacpy(&(di + n)->src, &dev->src); n 535 net/bluetooth/rfcomm/tty.c bacpy(&(di + n)->dst, &dev->dst); n 537 net/bluetooth/rfcomm/tty.c if (++n >= dev_num) n 543 net/bluetooth/rfcomm/tty.c dl->dev_num = n; n 544 net/bluetooth/rfcomm/tty.c size = sizeof(*dl) + n * sizeof(*di); n 43 net/bpfilter/bpfilter_kern.c ssize_t n; n 53 net/bpfilter/bpfilter_kern.c n = __kernel_write(bpfilter_ops.info.pipe_to_umh, &req, sizeof(req), n 55 net/bpfilter/bpfilter_kern.c if (n != sizeof(req)) { n 56 net/bpfilter/bpfilter_kern.c pr_err("write fail %zd\n", n); n 62 net/bpfilter/bpfilter_kern.c n = kernel_read(bpfilter_ops.info.pipe_from_umh, &reply, sizeof(reply), n 64 net/bpfilter/bpfilter_kern.c if (n != sizeof(reply)) { n 65 net/bpfilter/bpfilter_kern.c pr_err("read fail %zd\n", n); n 36 net/bpfilter/main.c int n; n 38 net/bpfilter/main.c n = read(0, &req, sizeof(req)); n 39 net/bpfilter/main.c if (n != sizeof(req)) { n 40 net/bpfilter/main.c fprintf(debug_f, "invalid request %d\n", n); n 48 net/bpfilter/main.c n = write(1, &reply, sizeof(reply)); n 49 net/bpfilter/main.c if (n != sizeof(reply)) { n 50 net/bpfilter/main.c fprintf(debug_f, "reply failed %d\n", n); n 125 net/bridge/br_arp_nd_proxy.c struct neighbour *n; n 184 net/bridge/br_arp_nd_proxy.c n = neigh_lookup(&arp_tbl, &tip, vlandev); n 185 net/bridge/br_arp_nd_proxy.c if (n) { n 188 net/bridge/br_arp_nd_proxy.c if (!(n->nud_state & NUD_VALID)) { n 189 net/bridge/br_arp_nd_proxy.c neigh_release(n); n 193 net/bridge/br_arp_nd_proxy.c f = br_fdb_find_rcu(br, n->ha, vid); n 202 net/bridge/br_arp_nd_proxy.c sha, n->ha, sha, 0, 0); n 205 
net/bridge/br_arp_nd_proxy.c sha, n->ha, sha, n 219 net/bridge/br_arp_nd_proxy.c neigh_release(n); n 243 net/bridge/br_arp_nd_proxy.c struct sk_buff *request, struct neighbour *n, n 291 net/bridge/br_arp_nd_proxy.c ether_addr_copy(eth_hdr(reply)->h_source, n->ha); n 307 net/bridge/br_arp_nd_proxy.c pip6->saddr = *(struct in6_addr *)n->primary_key; n 317 net/bridge/br_arp_nd_proxy.c na->icmph.icmp6_router = (n->flags & NTF_ROUTER) ? 1 : 0; n 321 net/bridge/br_arp_nd_proxy.c ether_addr_copy(&na->opt[2], n->ha); n 350 net/bridge/br_arp_nd_proxy.c dev->name, &pip6->daddr, daddr, &pip6->saddr, n->ha); n 394 net/bridge/br_arp_nd_proxy.c struct neighbour *n; n 439 net/bridge/br_arp_nd_proxy.c n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, vlandev); n 440 net/bridge/br_arp_nd_proxy.c if (n) { n 443 net/bridge/br_arp_nd_proxy.c if (!(n->nud_state & NUD_VALID)) { n 444 net/bridge/br_arp_nd_proxy.c neigh_release(n); n 448 net/bridge/br_arp_nd_proxy.c f = br_fdb_find_rcu(br, n->ha, vid); n 454 net/bridge/br_arp_nd_proxy.c br_nd_send(br, p, skb, n, n 458 net/bridge/br_arp_nd_proxy.c br_nd_send(br, p, skb, n, 0, 0, msg); n 470 net/bridge/br_arp_nd_proxy.c neigh_release(n); n 370 net/bridge/br_if.c struct net_bridge_port *p, *n; n 372 net/bridge/br_if.c list_for_each_entry_safe(p, n, &br->port_list, list) { n 850 net/bridge/br_multicast.c struct hlist_node *n; n 854 net/bridge/br_multicast.c hlist_for_each_entry_safe(pg, n, &port->mglist, mglist) n 899 net/bridge/br_multicast.c struct hlist_node *n; n 902 net/bridge/br_multicast.c hlist_for_each_entry_safe(pg, n, &port->mglist, mglist) n 921 net/bridge/br_sysfs_br.c int n; n 927 net/bridge/br_sysfs_br.c n = br_fdb_fillbuf(br, buf, n 931 net/bridge/br_sysfs_br.c if (n > 0) n 932 net/bridge/br_sysfs_br.c n *= sizeof(struct __fdb_entry); n 934 net/bridge/br_sysfs_br.c return n; n 38 net/bridge/netfilter/ebtables.c #define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter))) n 39 net/bridge/netfilter/ebtables.c #define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \ n 40 net/bridge/netfilter/ebtables.c COUNTER_OFFSET(n) * cpu)) n 257 net/bridge/netfilter/ebtables.c i = cs[sp].n; n 274 net/bridge/netfilter/ebtables.c cs[sp].n = i + 1; n 496 net/bridge/netfilter/ebtables.c unsigned int *n, unsigned int *cnt, n 512 net/bridge/netfilter/ebtables.c if (*n != *cnt) n 526 net/bridge/netfilter/ebtables.c *n = ((struct ebt_entries *)e)->nentries; n 556 net/bridge/netfilter/ebtables.c unsigned int *n, struct ebt_cl_stack *udc) n 571 net/bridge/netfilter/ebtables.c udc[*n].cs.chaininfo = (struct ebt_entries *)e; n 573 net/bridge/netfilter/ebtables.c udc[*n].cs.n = 0; n 574 net/bridge/netfilter/ebtables.c udc[*n].hookmask = 0; n 576 net/bridge/netfilter/ebtables.c (*n)++; n 778 net/bridge/netfilter/ebtables.c pos = cl_s[chain_nr].cs.n; n 780 net/bridge/netfilter/ebtables.c cl_s[chain_nr].cs.n = 0; n 803 net/bridge/netfilter/ebtables.c if (cl_s[i].cs.n) n 809 net/bridge/netfilter/ebtables.c cl_s[i].cs.n = pos + 1; n 59 net/caif/chnl_net.c struct list_head *n; n 61 net/caif/chnl_net.c list_for_each_safe(list_node, n, &chnl_net_list) { n 747 net/can/bcm.c struct bcm_op *op, *n; n 749 net/can/bcm.c list_for_each_entry_safe(op, n, ops, list) { n 795 net/can/bcm.c struct bcm_op *op, *n; n 797 net/can/bcm.c list_for_each_entry_safe(op, n, ops, list) { n 169 net/ceph/crush/mapper.c static int height(int n) n 172 net/ceph/crush/mapper.c while ((n & 1) == 0) { n 174 net/ceph/crush/mapper.c n = n >> 1; n 199 net/ceph/crush/mapper.c int n; n 204 
net/ceph/crush/mapper.c n = bucket->num_nodes >> 1; n 206 net/ceph/crush/mapper.c while (!terminal(n)) { n 209 net/ceph/crush/mapper.c w = bucket->node_weights[n]; n 210 net/ceph/crush/mapper.c t = (__u64)crush_hash32_4(bucket->h.hash, x, n, r, n 215 net/ceph/crush/mapper.c l = left(n); n 217 net/ceph/crush/mapper.c n = l; n 219 net/ceph/crush/mapper.c n = right(n); n 222 net/ceph/crush/mapper.c return bucket->h.items[n >> 1]; n 60 net/ceph/debugfs.c struct rb_node *n; n 69 net/ceph/debugfs.c for (n = rb_first(&map->pg_pools); n; n = rb_next(n)) { n 71 net/ceph/debugfs.c rb_entry(n, struct ceph_pg_pool_info, node); n 90 net/ceph/debugfs.c for (n = rb_first(&map->pg_temp); n; n = rb_next(n)) { n 92 net/ceph/debugfs.c rb_entry(n, struct ceph_pg_mapping, node); n 101 net/ceph/debugfs.c for (n = rb_first(&map->primary_temp); n; n = rb_next(n)) { n 103 net/ceph/debugfs.c rb_entry(n, struct ceph_pg_mapping, node); n 108 net/ceph/debugfs.c for (n = rb_first(&map->pg_upmap); n; n = rb_next(n)) { n 110 net/ceph/debugfs.c rb_entry(n, struct ceph_pg_mapping, node); n 119 net/ceph/debugfs.c for (n = rb_first(&map->pg_upmap_items); n; n = rb_next(n)) { n 121 net/ceph/debugfs.c rb_entry(n, struct ceph_pg_mapping, node); n 231 net/ceph/debugfs.c struct rb_node *n; n 234 net/ceph/debugfs.c for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) { n 236 net/ceph/debugfs.c rb_entry(n, struct ceph_osd_request, r_node); n 257 net/ceph/debugfs.c struct rb_node *n; n 260 net/ceph/debugfs.c for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) { n 262 net/ceph/debugfs.c rb_entry(n, struct ceph_osd_linger_request, node); n 318 net/ceph/debugfs.c struct rb_node *n; n 321 net/ceph/debugfs.c for (n = rb_first(&osd->o_backoffs_by_id); n; n = rb_next(n)) { n 323 net/ceph/debugfs.c rb_entry(n, struct ceph_osd_backoff, id_node); n 341 net/ceph/debugfs.c struct rb_node *n; n 347 net/ceph/debugfs.c for (n = rb_first(&osdc->osds); n; n = rb_next(n)) { n 348 net/ceph/debugfs.c struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); n 355 net/ceph/debugfs.c for (n = rb_first(&osdc->osds); n; n = rb_next(n)) { n 356 net/ceph/debugfs.c struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); n 363 net/ceph/debugfs.c for (n = rb_first(&osdc->osds); n; n = rb_next(n)) { n 364 net/ceph/debugfs.c struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); n 149 net/ceph/mon_client.c int n; n 158 net/ceph/mon_client.c n = prandom_u32() % max; n 159 net/ceph/mon_client.c if (o >= 0 && n >= o) n 160 net/ceph/mon_client.c n++; n 162 net/ceph/mon_client.c monc->cur_mon = n; n 1156 net/ceph/osd_client.c struct rb_node *n, *p; n 1158 net/ceph/osd_client.c for (n = rb_first(&osdc->osds); n; n = rb_next(n)) { n 1159 net/ceph/osd_client.c struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); n 1309 net/ceph/osd_client.c struct rb_node *n; n 1316 net/ceph/osd_client.c for (n = rb_first(&osd->o_requests); n; ) { n 1318 net/ceph/osd_client.c rb_entry(n, struct ceph_osd_request, r_node); n 1320 net/ceph/osd_client.c n = rb_next(n); /* unlink_request() */ n 1326 net/ceph/osd_client.c for (n = rb_first(&osd->o_linger_requests); n; ) { n 1328 net/ceph/osd_client.c rb_entry(n, struct ceph_osd_linger_request, node); n 1330 net/ceph/osd_client.c n = rb_next(n); /* unlink_linger() */ n 1362 net/ceph/osd_client.c struct rb_node *n; n 1367 net/ceph/osd_client.c for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) { n 1369 net/ceph/osd_client.c rb_entry(n, struct ceph_osd_request, r_node); n 1457 net/ceph/osd_client.c struct 
rb_node *n; n 1459 net/ceph/osd_client.c for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) { n 1461 net/ceph/osd_client.c rb_entry(n, struct ceph_pg_pool_info, node); n 1841 net/ceph/osd_client.c struct rb_node *n = root->rb_node; n 1843 net/ceph/osd_client.c while (n) { n 1845 net/ceph/osd_client.c rb_entry(n, struct ceph_osd_backoff, spg_node); n 1850 net/ceph/osd_client.c n = n->rb_left; n 1855 net/ceph/osd_client.c n = n->rb_right; n 3278 net/ceph/osd_client.c struct rb_node *n, *p; n 3288 net/ceph/osd_client.c for (n = rb_first(&osdc->osds); n; n = rb_next(n)) { n 3289 net/ceph/osd_client.c struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); n 3706 net/ceph/osd_client.c struct rb_node *n; n 3708 net/ceph/osd_client.c for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) { n 3710 net/ceph/osd_client.c rb_entry(n, struct ceph_pg_pool_info, node); n 3758 net/ceph/osd_client.c struct rb_node *n; n 3761 net/ceph/osd_client.c for (n = rb_first(&osd->o_linger_requests); n; ) { n 3763 net/ceph/osd_client.c rb_entry(n, struct ceph_osd_linger_request, node); n 3766 net/ceph/osd_client.c n = rb_next(n); /* recalc_linger_target() */ n 3797 net/ceph/osd_client.c for (n = rb_first(&osd->o_requests); n; ) { n 3799 net/ceph/osd_client.c rb_entry(n, struct ceph_osd_request, r_node); n 3802 net/ceph/osd_client.c n = rb_next(n); /* unlink_request(), check_pool_dne() */ n 3835 net/ceph/osd_client.c struct rb_node *n; n 3855 net/ceph/osd_client.c for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) { n 3857 net/ceph/osd_client.c rb_entry(n, struct ceph_pg_pool_info, node); n 3881 net/ceph/osd_client.c for (n = rb_first(&osdc->osds); n; ) { n 3882 net/ceph/osd_client.c struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); n 3884 net/ceph/osd_client.c n = rb_next(n); /* close_osd() */ n 3904 net/ceph/osd_client.c struct rb_node *n; n 3907 net/ceph/osd_client.c for (n = rb_first(need_resend); n; ) { n 3909 net/ceph/osd_client.c rb_entry(n, struct ceph_osd_request, r_node); n 3911 net/ceph/osd_client.c n = rb_next(n); n 3922 net/ceph/osd_client.c for (n = rb_first(need_resend); n; ) { n 3924 net/ceph/osd_client.c rb_entry(n, struct ceph_osd_request, r_node); n 3927 net/ceph/osd_client.c n = rb_next(n); n 4070 net/ceph/osd_client.c struct rb_node *n; n 4074 net/ceph/osd_client.c for (n = rb_first(&osd->o_requests); n; ) { n 4076 net/ceph/osd_client.c rb_entry(n, struct ceph_osd_request, r_node); n 4078 net/ceph/osd_client.c n = rb_next(n); /* cancel_linger_request() */ n 4087 net/ceph/osd_client.c for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) { n 4089 net/ceph/osd_client.c rb_entry(n, struct ceph_osd_linger_request, node); n 4280 net/ceph/osd_client.c struct rb_node *n; n 4313 net/ceph/osd_client.c for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) { n 4315 net/ceph/osd_client.c rb_entry(n, struct ceph_osd_request, r_node); n 4539 net/ceph/osd_client.c struct rb_node *n, *p; n 4544 net/ceph/osd_client.c for (n = rb_first(&osdc->osds); n; n = rb_next(n)) { n 4545 net/ceph/osd_client.c struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); n 5117 net/ceph/osd_client.c struct rb_node *n; n 5120 net/ceph/osd_client.c for (n = rb_first(&osdc->osds); n; ) { n 5121 net/ceph/osd_client.c struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); n 5123 net/ceph/osd_client.c n = rb_next(n); n 664 net/ceph/osdmap.c struct rb_node *n = root->rb_node; n 666 net/ceph/osdmap.c while (n) { n 667 net/ceph/osdmap.c pi = rb_entry(n, struct 
ceph_pg_pool_info, node); n 669 net/ceph/osdmap.c n = n->rb_left; n 671 net/ceph/osdmap.c n = n->rb_right; n 1147 net/ceph/osdmap.c u32 n; n 1149 net/ceph/osdmap.c ceph_decode_32_safe(p, end, n, e_inval); n 1150 net/ceph/osdmap.c while (n--) { n 1198 net/ceph/osdmap.c u32 n; n 1202 net/ceph/osdmap.c ceph_decode_32_safe(p, end, n, e_inval); n 1203 net/ceph/osdmap.c while (n--) { n 1376 net/ceph/osdmap.c u32 n; n 1378 net/ceph/osdmap.c ceph_decode_32_safe(p, end, n, e_inval); n 1379 net/ceph/osdmap.c while (n--) { n 2081 net/ceph/osdmap.c int n; n 2087 net/ceph/osdmap.c for (n = 1; ; n++) { n 2088 net/ceph/osdmap.c int next_bit = n << (old_bits - 1); n 520 net/core/bpf_sk_storage.c struct hlist_node *n; n 539 net/core/bpf_sk_storage.c hlist_for_each_entry_safe(selem, n, &sk_storage->list, snode) { n 422 net/core/datagram.c int i, copy = start - offset, start_off = offset, n; n 429 net/core/datagram.c n = INDIRECT_CALL_1(cb, simple_copy_to_iter, n 431 net/core/datagram.c offset += n; n 432 net/core/datagram.c if (n != copy) n 452 net/core/datagram.c n = INDIRECT_CALL_1(cb, simple_copy_to_iter, n 456 net/core/datagram.c offset += n; n 457 net/core/datagram.c if (n != copy) n 633 net/core/datagram.c int n = 0; n 658 net/core/datagram.c skb_fill_page_desc(skb, frag++, pages[n], start, size); n 661 net/core/datagram.c n++; n 5908 net/core/dev.c void __napi_schedule(struct napi_struct *n) n 5913 net/core/dev.c ____napi_schedule(this_cpu_ptr(&softnet_data), n); n 5927 net/core/dev.c bool napi_schedule_prep(struct napi_struct *n) n 5932 net/core/dev.c val = READ_ONCE(n->state); n 5945 net/core/dev.c } while (cmpxchg(&n->state, val, new) != val); n 5957 net/core/dev.c void __napi_schedule_irqoff(struct napi_struct *n) n 5959 net/core/dev.c ____napi_schedule(this_cpu_ptr(&softnet_data), n); n 5963 net/core/dev.c bool napi_complete_done(struct napi_struct *n, int work_done) n 5973 net/core/dev.c if (unlikely(n->state & (NAPIF_STATE_NPSVC | n 5977 net/core/dev.c if (n->gro_bitmask) { n 5981 net/core/dev.c timeout = n->dev->gro_flush_timeout; n 5987 net/core/dev.c napi_gro_flush(n, !!timeout); n 5989 net/core/dev.c hrtimer_start(&n->timer, ns_to_ktime(timeout), n 5993 net/core/dev.c gro_normal_list(n); n 5995 net/core/dev.c if (unlikely(!list_empty(&n->poll_list))) { n 5998 net/core/dev.c list_del_init(&n->poll_list); n 6003 net/core/dev.c val = READ_ONCE(n->state); n 6015 net/core/dev.c } while (cmpxchg(&n->state, val, new) != val); n 6018 net/core/dev.c __napi_schedule(n); n 6245 net/core/dev.c void napi_disable(struct napi_struct *n) n 6248 net/core/dev.c set_bit(NAPI_STATE_DISABLE, &n->state); n 6250 net/core/dev.c while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) n 6252 net/core/dev.c while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state)) n 6255 net/core/dev.c hrtimer_cancel(&n->timer); n 6257 net/core/dev.c clear_bit(NAPI_STATE_DISABLE, &n->state); n 6266 net/core/dev.c struct sk_buff *skb, *n; n 6268 net/core/dev.c list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list) n 6288 net/core/dev.c static int napi_poll(struct napi_struct *n, struct list_head *repoll) n 6293 net/core/dev.c list_del_init(&n->poll_list); n 6295 net/core/dev.c have = netpoll_poll_lock(n); n 6297 net/core/dev.c weight = n->weight; n 6306 net/core/dev.c if (test_bit(NAPI_STATE_SCHED, &n->state)) { n 6307 net/core/dev.c work = n->poll(n, weight); n 6308 net/core/dev.c trace_napi_poll(n, work, weight); n 6321 net/core/dev.c if (unlikely(napi_disable_pending(n))) { n 6322 net/core/dev.c napi_complete(n); n 6326 
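The ceph debugfs and osd_client entries earlier in this run walk rbtrees two ways: a keyed descent (n = n->rb_left / n->rb_right) and a full in-order sweep with rb_first()/rb_next(), where the cursor is advanced before the current node may be unlinked (the "n = rb_next(n); /* unlink_request() */" lines). A minimal sketch of that erase-safe sweep, using a hypothetical struct item that is not part of the listed code:

    #include <linux/rbtree.h>
    #include <linux/types.h>

    struct item {
            struct rb_node node;    /* linked into an rb_root */
            u64 id;
    };

    /*
     * Visit every item in key order.  Advancing the cursor before visit()
     * runs keeps the walk valid even if visit() rb_erase()s and frees the
     * node it was handed (it must not remove any other node, though).
     */
    static void walk_items(struct rb_root *root, void (*visit)(struct item *))
    {
            struct rb_node *n;

            for (n = rb_first(root); n; ) {
                    struct item *it = rb_entry(n, struct item, node);

                    n = rb_next(n);         /* grab the successor first */
                    visit(it);              /* may rb_erase(&it->node, root) */
            }
    }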
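The net/core/dev.c entries just above are the core side of the NAPI contract (napi_schedule_prep(), napi_complete_done(), napi_poll()). The driver side is the poll callback registered with netif_napi_add(); the skeleton below is only a hedged sketch, with struct my_ring and the elided RX processing standing in for real driver code:

    #include <linux/netdevice.h>

    struct my_ring {                        /* hypothetical driver state */
            struct napi_struct napi;
    };

    /*
     * Consume at most 'budget' packets.  Returning the full budget tells
     * net_rx_action() to keep polling; anything less lets us complete and
     * re-enable the device interrupt.
     */
    static int my_poll(struct napi_struct *napi, int budget)
    {
            int work_done = 0;

            /* container_of(napi, struct my_ring, napi) recovers the ring;
             * process up to 'budget' packets here, counting them. */

            if (work_done < budget && napi_complete_done(napi, work_done)) {
                    /* safe to re-enable the RX interrupt here */
            }
            return work_done;
    }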
net/core/dev.c if (n->gro_bitmask) { n 6330 net/core/dev.c napi_gro_flush(n, HZ >= 1000); n 6333 net/core/dev.c gro_normal_list(n); n 6338 net/core/dev.c if (unlikely(!list_empty(&n->poll_list))) { n 6340 net/core/dev.c n->dev ? n->dev->name : "backlog"); n 6344 net/core/dev.c list_add_tail(&n->poll_list, repoll); n 6366 net/core/dev.c struct napi_struct *n; n 6374 net/core/dev.c n = list_first_entry(&list, struct napi_struct, poll_list); n 6375 net/core/dev.c budget -= napi_poll(n, &repoll); n 9376 net/core/dev.c size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long); n 9380 net/core/dev.c BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64)); n 9381 net/core/dev.c for (i = 0; i < n; i++) n 9384 net/core/dev.c memset((char *)stats64 + n * sizeof(u64), 0, n 9385 net/core/dev.c sizeof(*stats64) - n * sizeof(u64)); n 9586 net/core/dev.c struct napi_struct *p, *n; n 9597 net/core/dev.c list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) n 6575 net/core/devlink.c int n = 0; n 6583 net/core/devlink.c n = snprintf(name, len, "p%u", attrs->phys.port_number); n 6585 net/core/devlink.c n = snprintf(name, len, "p%us%u", n 6597 net/core/devlink.c n = snprintf(name, len, "pf%u", attrs->pci_pf.pf); n 6600 net/core/devlink.c n = snprintf(name, len, "pf%uvf%u", n 6605 net/core/devlink.c if (n >= len) n 1861 net/core/ethtool.c int n = rc * 2, i, interval = HZ / n; n 1866 net/core/ethtool.c i = n; n 901 net/core/fib_rules.c struct fib_rule *n; n 903 net/core/fib_rules.c n = list_next_entry(rule, list); n 904 net/core/fib_rules.c if (&n->list == &ops->rules_list || n->pref != rule->pref) n 905 net/core/fib_rules.c n = NULL; n 909 net/core/fib_rules.c rcu_assign_pointer(r->ctarget, n); n 910 net/core/fib_rules.c if (!n) n 55 net/core/neighbour.c static void __neigh_notify(struct neighbour *n, int type, int flags, n 119 net/core/neighbour.c static void neigh_mark_dead(struct neighbour *n) n 121 net/core/neighbour.c n->dead = 1; n 122 net/core/neighbour.c if (!list_empty(&n->gc_list)) { n 123 net/core/neighbour.c list_del_init(&n->gc_list); n 124 net/core/neighbour.c atomic_dec(&n->tbl->gc_entries); n 128 net/core/neighbour.c static void neigh_update_gc_list(struct neighbour *n) n 132 net/core/neighbour.c write_lock_bh(&n->tbl->lock); n 133 net/core/neighbour.c write_lock(&n->lock); n 138 net/core/neighbour.c exempt_from_gc = n->nud_state & NUD_PERMANENT || n 139 net/core/neighbour.c n->flags & NTF_EXT_LEARNED; n 140 net/core/neighbour.c on_gc_list = !list_empty(&n->gc_list); n 143 net/core/neighbour.c list_del_init(&n->gc_list); n 144 net/core/neighbour.c atomic_dec(&n->tbl->gc_entries); n 147 net/core/neighbour.c list_add_tail(&n->gc_list, &n->tbl->gc_list); n 148 net/core/neighbour.c atomic_inc(&n->tbl->gc_entries); n 151 net/core/neighbour.c write_unlock(&n->lock); n 152 net/core/neighbour.c write_unlock_bh(&n->tbl->lock); n 177 net/core/neighbour.c static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np, n 182 net/core/neighbour.c write_lock(&n->lock); n 183 net/core/neighbour.c if (refcount_read(&n->refcnt) == 1) { n 186 net/core/neighbour.c neigh = rcu_dereference_protected(n->next, n 189 net/core/neighbour.c neigh_mark_dead(n); n 192 net/core/neighbour.c write_unlock(&n->lock); n 194 net/core/neighbour.c neigh_cleanup_and_release(n); n 203 net/core/neighbour.c struct neighbour *n; n 212 net/core/neighbour.c while ((n = rcu_dereference_protected(*np, n 214 net/core/neighbour.c if (n == ndel) n 215 net/core/neighbour.c return neigh_del(n, np, tbl); n 216 net/core/neighbour.c np = 
&n->next; n 225 net/core/neighbour.c struct neighbour *n, *tmp; n 232 net/core/neighbour.c list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) { n 233 net/core/neighbour.c if (refcount_read(&n->refcnt) == 1) { n 236 net/core/neighbour.c write_lock(&n->lock); n 237 net/core/neighbour.c if ((n->nud_state == NUD_FAILED) || n 238 net/core/neighbour.c time_after(tref, n->updated)) n 240 net/core/neighbour.c write_unlock(&n->lock); n 242 net/core/neighbour.c if (remove && neigh_remove_one(n, tbl)) n 256 net/core/neighbour.c static void neigh_add_timer(struct neighbour *n, unsigned long when) n 258 net/core/neighbour.c neigh_hold(n); n 259 net/core/neighbour.c if (unlikely(mod_timer(&n->timer, when))) { n 261 net/core/neighbour.c n->nud_state); n 266 net/core/neighbour.c static int neigh_del_timer(struct neighbour *n) n 268 net/core/neighbour.c if ((n->nud_state & NUD_IN_TIMER) && n 269 net/core/neighbour.c del_timer(&n->timer)) { n 270 net/core/neighbour.c neigh_release(n); n 296 net/core/neighbour.c struct neighbour *n; n 299 net/core/neighbour.c while ((n = rcu_dereference_protected(*np, n 301 net/core/neighbour.c if (dev && n->dev != dev) { n 302 net/core/neighbour.c np = &n->next; n 305 net/core/neighbour.c if (skip_perm && n->nud_state & NUD_PERMANENT) { n 306 net/core/neighbour.c np = &n->next; n 310 net/core/neighbour.c rcu_dereference_protected(n->next, n 312 net/core/neighbour.c write_lock(&n->lock); n 313 net/core/neighbour.c neigh_del_timer(n); n 314 net/core/neighbour.c neigh_mark_dead(n); n 315 net/core/neighbour.c if (refcount_read(&n->refcnt) != 1) { n 325 net/core/neighbour.c __skb_queue_purge(&n->arp_queue); n 326 net/core/neighbour.c n->arp_queue_len_bytes = 0; n 327 net/core/neighbour.c n->output = neigh_blackhole; n 328 net/core/neighbour.c if (n->nud_state & NUD_VALID) n 329 net/core/neighbour.c n->nud_state = NUD_NOARP; n 331 net/core/neighbour.c n->nud_state = NUD_NONE; n 332 net/core/neighbour.c neigh_dbg(2, "neigh %p is stray\n", n); n 334 net/core/neighbour.c write_unlock(&n->lock); n 335 net/core/neighbour.c neigh_cleanup_and_release(n); n 378 net/core/neighbour.c struct neighbour *n = NULL; n 399 net/core/neighbour.c n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC); n 400 net/core/neighbour.c if (!n) n 403 net/core/neighbour.c __skb_queue_head_init(&n->arp_queue); n 404 net/core/neighbour.c rwlock_init(&n->lock); n 405 net/core/neighbour.c seqlock_init(&n->ha_lock); n 406 net/core/neighbour.c n->updated = n->used = now; n 407 net/core/neighbour.c n->nud_state = NUD_NONE; n 408 net/core/neighbour.c n->output = neigh_blackhole; n 409 net/core/neighbour.c seqlock_init(&n->hh.hh_lock); n 410 net/core/neighbour.c n->parms = neigh_parms_clone(&tbl->parms); n 411 net/core/neighbour.c timer_setup(&n->timer, neigh_timer_handler, 0); n 414 net/core/neighbour.c n->tbl = tbl; n 415 net/core/neighbour.c refcount_set(&n->refcnt, 1); n 416 net/core/neighbour.c n->dead = 1; n 417 net/core/neighbour.c INIT_LIST_HEAD(&n->gc_list); n 421 net/core/neighbour.c return n; n 495 net/core/neighbour.c struct neighbour *n, *next; n 497 net/core/neighbour.c for (n = rcu_dereference_protected(old_nht->hash_buckets[i], n 499 net/core/neighbour.c n != NULL; n 500 net/core/neighbour.c n = next) { n 501 net/core/neighbour.c hash = tbl->hash(n->primary_key, n->dev, n 505 net/core/neighbour.c next = rcu_dereference_protected(n->next, n 508 net/core/neighbour.c rcu_assign_pointer(n->next, n 512 net/core/neighbour.c rcu_assign_pointer(new_nht->hash_buckets[hash], n); n 524 
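neigh_lookup() and neigh_lookup_nodev() above pair an RCU-protected chain walk with refcount_inc_not_zero(), so an entry already on its way to being freed is never handed out. The same shape with a hypothetical entry type (plain rcu_read_lock() here; the neighbour code uses the _bh flavour):

    #include <linux/rcupdate.h>
    #include <linux/refcount.h>
    #include <linux/types.h>

    struct entry {
            struct entry __rcu *next;       /* hash-chain link */
            refcount_t refcnt;
            u32 key;
    };

    /* Walk one RCU-protected chain; only hand out entries we could pin. */
    static struct entry *entry_lookup(struct entry __rcu **bucket, u32 key)
    {
            struct entry *e;

            rcu_read_lock();
            for (e = rcu_dereference(*bucket); e;
                 e = rcu_dereference(e->next)) {
                    if (e->key != key)
                            continue;
                    if (!refcount_inc_not_zero(&e->refcnt))
                            e = NULL;       /* lost the race with release */
                    break;
            }
            rcu_read_unlock();
            return e;
    }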
net/core/neighbour.c struct neighbour *n; n 529 net/core/neighbour.c n = __neigh_lookup_noref(tbl, pkey, dev); n 530 net/core/neighbour.c if (n) { n 531 net/core/neighbour.c if (!refcount_inc_not_zero(&n->refcnt)) n 532 net/core/neighbour.c n = NULL; n 537 net/core/neighbour.c return n; n 544 net/core/neighbour.c struct neighbour *n; n 555 net/core/neighbour.c for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]); n 556 net/core/neighbour.c n != NULL; n 557 net/core/neighbour.c n = rcu_dereference_bh(n->next)) { n 558 net/core/neighbour.c if (!memcmp(n->primary_key, pkey, key_len) && n 559 net/core/neighbour.c net_eq(dev_net(n->dev), net)) { n 560 net/core/neighbour.c if (!refcount_inc_not_zero(&n->refcnt)) n 561 net/core/neighbour.c n = NULL; n 568 net/core/neighbour.c return n; n 577 net/core/neighbour.c struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev, exempt_from_gc); n 583 net/core/neighbour.c trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc); n 585 net/core/neighbour.c if (!n) { n 590 net/core/neighbour.c memcpy(n->primary_key, pkey, key_len); n 591 net/core/neighbour.c n->dev = dev; n 595 net/core/neighbour.c if (tbl->constructor && (error = tbl->constructor(n)) < 0) { n 601 net/core/neighbour.c error = dev->netdev_ops->ndo_neigh_construct(dev, n); n 609 net/core/neighbour.c if (n->parms->neigh_setup && n 610 net/core/neighbour.c (error = n->parms->neigh_setup(n)) < 0) { n 615 net/core/neighbour.c n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1); n 624 net/core/neighbour.c hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift); n 626 net/core/neighbour.c if (n->parms->dead) { n 636 net/core/neighbour.c if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) { n 644 net/core/neighbour.c n->dead = 0; n 646 net/core/neighbour.c list_add_tail(&n->gc_list, &n->tbl->gc_list); n 649 net/core/neighbour.c neigh_hold(n); n 650 net/core/neighbour.c rcu_assign_pointer(n->next, n 653 net/core/neighbour.c rcu_assign_pointer(nht->hash_buckets[hash_val], n); n 655 net/core/neighbour.c neigh_dbg(2, "neigh %p is created\n", n); n 656 net/core/neighbour.c rc = n; n 664 net/core/neighbour.c neigh_release(n); n 685 net/core/neighbour.c static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n, n 691 net/core/neighbour.c while (n) { n 692 net/core/neighbour.c if (!memcmp(n->key, pkey, key_len) && n 693 net/core/neighbour.c net_eq(pneigh_net(n), net) && n 694 net/core/neighbour.c (n->dev == dev || !n->dev)) n 695 net/core/neighbour.c return n; n 696 net/core/neighbour.c n = n->next; n 716 net/core/neighbour.c struct pneigh_entry *n; n 721 net/core/neighbour.c n = __pneigh_lookup_1(tbl->phash_buckets[hash_val], n 725 net/core/neighbour.c if (n || !creat) n 730 net/core/neighbour.c n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL); n 731 net/core/neighbour.c if (!n) n 734 net/core/neighbour.c n->protocol = 0; n 735 net/core/neighbour.c write_pnet(&n->net, net); n 736 net/core/neighbour.c memcpy(n->key, pkey, key_len); n 737 net/core/neighbour.c n->dev = dev; n 741 net/core/neighbour.c if (tbl->pconstructor && tbl->pconstructor(n)) { n 744 net/core/neighbour.c kfree(n); n 745 net/core/neighbour.c n = NULL; n 750 net/core/neighbour.c n->next = tbl->phash_buckets[hash_val]; n 751 net/core/neighbour.c tbl->phash_buckets[hash_val] = n; n 754 net/core/neighbour.c return n; n 762 net/core/neighbour.c struct pneigh_entry *n, **np; n 767 net/core/neighbour.c for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL; n 768 
net/core/neighbour.c np = &n->next) { n 769 net/core/neighbour.c if (!memcmp(n->key, pkey, key_len) && n->dev == dev && n 770 net/core/neighbour.c net_eq(pneigh_net(n), net)) { n 771 net/core/neighbour.c *np = n->next; n 774 net/core/neighbour.c tbl->pdestructor(n); n 775 net/core/neighbour.c if (n->dev) n 776 net/core/neighbour.c dev_put(n->dev); n 777 net/core/neighbour.c kfree(n); n 788 net/core/neighbour.c struct pneigh_entry *n, **np, *freelist = NULL; n 793 net/core/neighbour.c while ((n = *np) != NULL) { n 794 net/core/neighbour.c if (!dev || n->dev == dev) { n 795 net/core/neighbour.c *np = n->next; n 796 net/core/neighbour.c n->next = freelist; n 797 net/core/neighbour.c freelist = n; n 800 net/core/neighbour.c np = &n->next; n 804 net/core/neighbour.c while ((n = freelist)) { n 805 net/core/neighbour.c freelist = n->next; n 806 net/core/neighbour.c n->next = NULL; n 808 net/core/neighbour.c tbl->pdestructor(n); n 809 net/core/neighbour.c if (n->dev) n 810 net/core/neighbour.c dev_put(n->dev); n 811 net/core/neighbour.c kfree(n); n 888 net/core/neighbour.c struct neighbour *n; n 917 net/core/neighbour.c while ((n = rcu_dereference_protected(*np, n 921 net/core/neighbour.c write_lock(&n->lock); n 923 net/core/neighbour.c state = n->nud_state; n 925 net/core/neighbour.c (n->flags & NTF_EXT_LEARNED)) { n 926 net/core/neighbour.c write_unlock(&n->lock); n 930 net/core/neighbour.c if (time_before(n->used, n->confirmed)) n 931 net/core/neighbour.c n->used = n->confirmed; n 933 net/core/neighbour.c if (refcount_read(&n->refcnt) == 1 && n 935 net/core/neighbour.c time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) { n 936 net/core/neighbour.c *np = n->next; n 937 net/core/neighbour.c neigh_mark_dead(n); n 938 net/core/neighbour.c write_unlock(&n->lock); n 939 net/core/neighbour.c neigh_cleanup_and_release(n); n 942 net/core/neighbour.c write_unlock(&n->lock); n 945 net/core/neighbour.c np = &n->next; n 967 net/core/neighbour.c static __inline__ int neigh_max_probes(struct neighbour *n) n 969 net/core/neighbour.c struct neigh_parms *p = n->parms; n 971 net/core/neighbour.c (n->nud_state & NUD_PROBE ? 
NEIGH_VAR(p, MCAST_REPROBES) : n 1448 net/core/neighbour.c static void neigh_hh_init(struct neighbour *n) n 1450 net/core/neighbour.c struct net_device *dev = n->dev; n 1451 net/core/neighbour.c __be16 prot = n->tbl->protocol; n 1452 net/core/neighbour.c struct hh_cache *hh = &n->hh; n 1454 net/core/neighbour.c write_lock_bh(&n->lock); n 1460 net/core/neighbour.c dev->header_ops->cache(n, hh, prot); n 1462 net/core/neighbour.c write_unlock_bh(&n->lock); n 1536 net/core/neighbour.c struct sk_buff *skb, *n; n 1540 net/core/neighbour.c skb_queue_walk_safe(&tbl->proxy_queue, skb, n) { n 2550 net/core/neighbour.c struct neighbour *n; n 2565 net/core/neighbour.c for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0; n 2566 net/core/neighbour.c n != NULL; n 2567 net/core/neighbour.c n = rcu_dereference_bh(n->next)) { n 2568 net/core/neighbour.c if (idx < s_idx || !net_eq(dev_net(n->dev), net)) n 2570 net/core/neighbour.c if (neigh_ifindex_filtered(n->dev, filter->dev_idx) || n 2571 net/core/neighbour.c neigh_master_filtered(n->dev, filter->master_idx)) n 2573 net/core/neighbour.c if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid, n 2596 net/core/neighbour.c struct pneigh_entry *n; n 2610 net/core/neighbour.c for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) { n 2611 net/core/neighbour.c if (idx < s_idx || pneigh_net(n) != net) n 2613 net/core/neighbour.c if (neigh_ifindex_filtered(n->dev, filter->dev_idx) || n 2614 net/core/neighbour.c neigh_master_filtered(n->dev, filter->master_idx)) n 2616 net/core/neighbour.c if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid, n 2934 net/core/neighbour.c struct neighbour *n; n 2936 net/core/neighbour.c for (n = rcu_dereference_bh(nht->hash_buckets[chain]); n 2937 net/core/neighbour.c n != NULL; n 2938 net/core/neighbour.c n = rcu_dereference_bh(n->next)) n 2939 net/core/neighbour.c cb(n, cookie); n 2956 net/core/neighbour.c struct neighbour *n; n 2960 net/core/neighbour.c while ((n = rcu_dereference_protected(*np, n 2964 net/core/neighbour.c write_lock(&n->lock); n 2965 net/core/neighbour.c release = cb(n); n 2968 net/core/neighbour.c rcu_dereference_protected(n->next, n 2970 net/core/neighbour.c neigh_mark_dead(n); n 2972 net/core/neighbour.c np = &n->next; n 2973 net/core/neighbour.c write_unlock(&n->lock); n 2975 net/core/neighbour.c neigh_cleanup_and_release(n); n 3032 net/core/neighbour.c struct neighbour *n = NULL; n 3037 net/core/neighbour.c n = rcu_dereference_bh(nht->hash_buckets[bucket]); n 3039 net/core/neighbour.c while (n) { n 3040 net/core/neighbour.c if (!net_eq(dev_net(n->dev), net)) n 3046 net/core/neighbour.c v = state->neigh_sub_iter(state, n, &fakep); n 3052 net/core/neighbour.c if (n->nud_state & ~NUD_NOARP) n 3055 net/core/neighbour.c n = rcu_dereference_bh(n->next); n 3058 net/core/neighbour.c if (n) n 3063 net/core/neighbour.c return n; n 3067 net/core/neighbour.c struct neighbour *n, n 3075 net/core/neighbour.c void *v = state->neigh_sub_iter(state, n, pos); n 3077 net/core/neighbour.c return n; n 3079 net/core/neighbour.c n = rcu_dereference_bh(n->next); n 3082 net/core/neighbour.c while (n) { n 3083 net/core/neighbour.c if (!net_eq(dev_net(n->dev), net)) n 3086 net/core/neighbour.c void *v = state->neigh_sub_iter(state, n, pos); n 3088 net/core/neighbour.c return n; n 3094 net/core/neighbour.c if (n->nud_state & ~NUD_NOARP) n 3097 net/core/neighbour.c n = rcu_dereference_bh(n->next); n 3100 net/core/neighbour.c if (n) n 3106 net/core/neighbour.c n = rcu_dereference_bh(nht->hash_buckets[state->bucket]); n 3109 
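neigh_flush_dev(), neigh_periodic_work() and __neigh_for_each_release() above all walk a hash chain through a double pointer (np = &n->next), so unlinking the current entry is a single store to *np with no "previous node" bookkeeping. Distilled into a self-contained user-space sketch (plain pointers, no RCU or locking):

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
            struct node *next;
            int val;
    };

    /* Remove every node whose value matches; *np is always the link that
     * points at the node being examined, so unlinking is one assignment. */
    static void remove_matching(struct node **head, int val)
    {
            struct node **np = head, *n;

            while ((n = *np) != NULL) {
                    if (n->val == val) {
                            *np = n->next;          /* unlink n */
                            free(n);
                    } else {
                            np = &n->next;          /* advance the cursor */
                    }
            }
    }

    int main(void)
    {
            struct node *head = NULL, **tail = &head;
            int vals[] = { 1, 2, 1, 3 }, i;

            for (i = 0; i < 4; i++) {
                    struct node *n = malloc(sizeof(*n));

                    n->val = vals[i];
                    n->next = NULL;
                    *tail = n;
                    tail = &n->next;
            }
            remove_matching(&head, 1);
            for (; head; head = head->next)
                    printf("%d\n", head->val);      /* prints 2 then 3 */
            return 0;
    }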
net/core/neighbour.c if (n && pos) n 3111 net/core/neighbour.c return n; n 3116 net/core/neighbour.c struct neighbour *n = neigh_get_first(seq); n 3118 net/core/neighbour.c if (n) { n 3121 net/core/neighbour.c n = neigh_get_next(seq, n, pos); n 3122 net/core/neighbour.c if (!n) n 3126 net/core/neighbour.c return *pos ? NULL : n; n 3344 net/core/neighbour.c static void __neigh_notify(struct neighbour *n, int type, int flags, n 3347 net/core/neighbour.c struct net *net = dev_net(n->dev); n 3355 net/core/neighbour.c err = neigh_fill_info(skb, n, pid, 0, type, flags); n 3369 net/core/neighbour.c void neigh_app_ns(struct neighbour *n) n 3371 net/core/neighbour.c __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0); n 69 net/core/netclassid_cgroup.c static int update_classid_sock(const void *v, struct file *file, unsigned n) n 82 net/core/netclassid_cgroup.c return n + 1; n 221 net/core/netprio_cgroup.c static int update_netprio(const void *v, struct file *file, unsigned n) n 800 net/core/pktgen.c unsigned int n = 0; n 811 net/core/pktgen.c pkt_dev->labels[n] = htonl(tmp); n 812 net/core/pktgen.c if (pkt_dev->labels[n] & MPLS_STACK_BOTTOM) n 818 net/core/pktgen.c n++; n 819 net/core/pktgen.c if (n >= MAX_MPLS_LABELS) n 823 net/core/pktgen.c pkt_dev->nr_labels = n; n 1521 net/core/pktgen.c unsigned int n, cnt; n 1528 net/core/pktgen.c for (n = 0; n < pkt_dev->nr_labels; n++) n 1530 net/core/pktgen.c "%08x%s", ntohl(pkt_dev->labels[n]), n 1531 net/core/pktgen.c n == pkt_dev->nr_labels-1 ? "" : ","); n 3231 net/core/pktgen.c struct list_head *q, *n; n 3236 net/core/pktgen.c list_for_each_safe(q, n, &t->if_list) { n 3253 net/core/pktgen.c struct list_head *q, *n; n 3260 net/core/pktgen.c list_for_each_safe(q, n, &t->if_list) { n 3736 net/core/pktgen.c struct list_head *q, *n; n 3740 net/core/pktgen.c list_for_each_safe(q, n, &t->if_list) { n 3832 net/core/pktgen.c struct list_head *q, *n; n 3842 net/core/pktgen.c list_for_each_safe(q, n, &list) { n 981 net/core/skbuff.c static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) n 983 net/core/skbuff.c #define C(x) n->x = skb->x n 985 net/core/skbuff.c n->next = n->prev = NULL; n 986 net/core/skbuff.c n->sk = NULL; n 987 net/core/skbuff.c __copy_skb_header(n, skb); n 992 net/core/skbuff.c n->hdr_len = skb->nohdr ? 
skb_headroom(skb) : skb->hdr_len; n 993 net/core/skbuff.c n->cloned = 1; n 994 net/core/skbuff.c n->nohdr = 0; n 995 net/core/skbuff.c n->peeked = 0; n 997 net/core/skbuff.c n->destructor = NULL; n 1004 net/core/skbuff.c refcount_set(&n->users, 1); n 1009 net/core/skbuff.c return n; n 1019 net/core/skbuff.c struct sk_buff *n; n 1021 net/core/skbuff.c n = alloc_skb(0, GFP_ATOMIC); n 1022 net/core/skbuff.c if (!n) n 1025 net/core/skbuff.c n->len = first->len; n 1026 net/core/skbuff.c n->data_len = first->len; n 1027 net/core/skbuff.c n->truesize = first->truesize; n 1029 net/core/skbuff.c skb_shinfo(n)->frag_list = first; n 1031 net/core/skbuff.c __copy_skb_header(n, first); n 1032 net/core/skbuff.c n->destructor = NULL; n 1034 net/core/skbuff.c return n; n 1435 net/core/skbuff.c struct sk_buff *n; n 1442 net/core/skbuff.c n = &fclones->skb2; n 1448 net/core/skbuff.c n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); n 1449 net/core/skbuff.c if (!n) n 1452 net/core/skbuff.c n->fclone = SKB_FCLONE_UNAVAILABLE; n 1455 net/core/skbuff.c return __skb_clone(n, skb); n 1513 net/core/skbuff.c struct sk_buff *n = __alloc_skb(size, gfp_mask, n 1516 net/core/skbuff.c if (!n) n 1520 net/core/skbuff.c skb_reserve(n, headerlen); n 1522 net/core/skbuff.c skb_put(n, skb->len); n 1524 net/core/skbuff.c BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); n 1526 net/core/skbuff.c skb_copy_header(n, skb); n 1527 net/core/skbuff.c return n; n 1553 net/core/skbuff.c struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE); n 1555 net/core/skbuff.c if (!n) n 1559 net/core/skbuff.c skb_reserve(n, headroom); n 1561 net/core/skbuff.c skb_put(n, skb_headlen(skb)); n 1563 net/core/skbuff.c skb_copy_from_linear_data(skb, n->data, n->len); n 1565 net/core/skbuff.c n->truesize += skb->data_len; n 1566 net/core/skbuff.c n->data_len = skb->data_len; n 1567 net/core/skbuff.c n->len = skb->len; n 1573 net/core/skbuff.c skb_zerocopy_clone(n, skb, gfp_mask)) { n 1574 net/core/skbuff.c kfree_skb(n); n 1575 net/core/skbuff.c n = NULL; n 1579 net/core/skbuff.c skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; n 1582 net/core/skbuff.c skb_shinfo(n)->nr_frags = i; n 1586 net/core/skbuff.c skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; n 1587 net/core/skbuff.c skb_clone_fraglist(n); n 1590 net/core/skbuff.c skb_copy_header(n, skb); n 1592 net/core/skbuff.c return n; n 1745 net/core/skbuff.c struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, n 1751 net/core/skbuff.c if (!n) n 1754 net/core/skbuff.c skb_reserve(n, newheadroom); n 1757 net/core/skbuff.c skb_put(n, skb->len); n 1767 net/core/skbuff.c BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, n 1770 net/core/skbuff.c skb_copy_header(n, skb); n 1772 net/core/skbuff.c skb_headers_offset_update(n, newheadroom - oldheadroom); n 1774 net/core/skbuff.c return n; n 1371 net/core/sock.c int ret, n; n 1376 net/core/sock.c n = sk->sk_peer_cred->group_info->ngroups; n 1377 net/core/sock.c if (len < n * sizeof(gid_t)) { n 1378 net/core/sock.c len = n * sizeof(gid_t); n 1381 net/core/sock.c len = n * sizeof(gid_t); n 91 net/dccp/ccids/lib/packet_history.h static inline u8 tfrc_rx_hist_index(const struct tfrc_rx_hist *h, const u8 n) n 93 net/dccp/ccids/lib/packet_history.h return (h->loss_start + n) & TFRC_NDUPACK; n 109 net/dccp/ccids/lib/packet_history.h tfrc_rx_hist_entry(const struct tfrc_rx_hist *h, const u8 n) n 111 net/dccp/ccids/lib/packet_history.h return h->ring[tfrc_rx_hist_index(h, n)]; n 1679 
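__skb_clone(), skb_clone(), skb_copy() and __pskb_copy_fclone() above differ mainly in how much gets duplicated: a clone shares the data area and only replicates the sk_buff metadata, while skb_copy() yields a private, writable copy of both. A hedged usage sketch (error handling trimmed; kfree_skb() is a no-op on NULL):

    #include <linux/skbuff.h>
    #include <linux/string.h>

    /* Illustrative only: one writable original, one clone that shares the
     * payload (treat as read-only), one fully private copy. */
    static void clone_vs_copy_demo(void)
    {
            struct sk_buff *skb, *clone, *copy;

            skb = alloc_skb(128, GFP_KERNEL);
            if (!skb)
                    return;
            skb_reserve(skb, 64);                   /* room for headers */
            memset(skb_put(skb, 32), 0xab, 32);     /* 32 bytes of payload */

            clone = skb_clone(skb, GFP_KERNEL);     /* shares skb->data */
            copy  = skb_copy(skb, GFP_KERNEL);      /* private data area */

            kfree_skb(clone);
            kfree_skb(copy);
            kfree_skb(skb);
    }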
net/decnet/af_decnet.c struct sk_buff *skb, *n; n 1754 net/decnet/af_decnet.c skb_queue_walk_safe(queue, skb, n) { n 2118 net/decnet/af_decnet.c struct sock *n = NULL; n 2123 net/decnet/af_decnet.c n = sk_head(&dn_sk_hash[state->bucket]); n 2124 net/decnet/af_decnet.c if (n) n 2128 net/decnet/af_decnet.c return n; n 2132 net/decnet/af_decnet.c struct sock *n) n 2136 net/decnet/af_decnet.c n = sk_next(n); n 2138 net/decnet/af_decnet.c if (n) n 2142 net/decnet/af_decnet.c n = sk_head(&dn_sk_hash[state->bucket]); n 2145 net/decnet/af_decnet.c return n; n 908 net/decnet/dn_dev.c int n; n 921 net/decnet/dn_dev.c n = mtu2blksize(dev) - 26; n 922 net/decnet/dn_dev.c n /= 7; n 924 net/decnet/dn_dev.c if (n > 32) n 925 net/decnet/dn_dev.c n = 32; n 927 net/decnet/dn_dev.c size = 2 + 26 + 7 * n; n 956 net/decnet/dn_dev.c n = dn_neigh_elist(dev, ptr, n); n 958 net/decnet/dn_dev.c *i2 = 7 * n; n 607 net/decnet/dn_fib.c req.rtm.rtm_table = tb->n; n 114 net/decnet/dn_neigh.c struct dn_neigh *dn = container_of(neigh, struct dn_neigh, n); n 202 net/decnet/dn_neigh.c struct neighbour *neigh = rt->n; n 342 net/decnet/dn_neigh.c struct neighbour *neigh = rt->n; n 343 net/decnet/dn_neigh.c struct dn_neigh *dn = container_of(neigh, struct dn_neigh, n); n 395 net/decnet/dn_neigh.c dn = container_of(neigh, struct dn_neigh, n); n 455 net/decnet/dn_neigh.c dn = container_of(neigh, struct dn_neigh, n); n 503 net/decnet/dn_neigh.c int t, n; n 514 net/decnet/dn_neigh.c dn = container_of(neigh, struct dn_neigh, n); n 518 net/decnet/dn_neigh.c if (s->t == s->n) n 519 net/decnet/dn_neigh.c s->rs = dn_find_slot(s->ptr, s->n, dn->priority); n 532 net/decnet/dn_neigh.c int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n) n 538 net/decnet/dn_neigh.c state.n = n; n 551 net/decnet/dn_neigh.c struct neighbour *n) n 553 net/decnet/dn_neigh.c struct dn_neigh *dn = container_of(n, struct dn_neigh, n); n 556 net/decnet/dn_neigh.c read_lock(&n->lock); n 562 net/decnet/dn_neigh.c dn->n.nud_state, n 563 net/decnet/dn_neigh.c refcount_read(&dn->n.refcnt), n 565 net/decnet/dn_neigh.c (dn->n.dev) ? dn->n.dev->name : "?"); n 566 net/decnet/dn_neigh.c read_unlock(&n->lock); n 373 net/decnet/dn_nsp_out.c struct sk_buff *skb2, *n, *ack = NULL; n 381 net/decnet/dn_nsp_out.c skb_queue_walk_safe(q, skb2, n) { n 151 net/decnet/dn_route.c if (rt->n) n 152 net/decnet/dn_route.c neigh_release(rt->n); n 160 net/decnet/dn_route.c struct neighbour *n = rt->n; n 162 net/decnet/dn_route.c if (n && n->dev == dev) { n 163 net/decnet/dn_route.c n->dev = dev_net(dev)->loopback_dev; n 164 net/decnet/dn_route.c dev_hold(n->dev); n 259 net/decnet/dn_route.c struct neighbour *n = rt->n; n 263 net/decnet/dn_route.c dn = n ? 
rcu_dereference_raw(n->dev->dn_ptr) : NULL; n 742 net/decnet/dn_route.c if (rt->n == NULL) n 867 net/decnet/dn_route.c struct neighbour *n; n 877 net/decnet/dn_route.c if (dev != NULL && rt->n == NULL) { n 878 net/decnet/dn_route.c n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev); n 879 net/decnet/dn_route.c if (IS_ERR(n)) n 880 net/decnet/dn_route.c return PTR_ERR(n); n 881 net/decnet/dn_route.c rt->n = n; n 1196 net/decnet/dn_route.c rt->n = neigh; n 1464 net/decnet/dn_route.c rt->n = neigh; n 145 net/decnet/dn_rules.c rule->table = table->n; n 426 net/decnet/dn_table.c tb->n, n 530 net/decnet/dn_table.c struct nlmsghdr *n, struct netlink_skb_parms *req) n 556 net/decnet/dn_table.c if ((fi = dn_fib_create_info(r, attrs, n, &err)) == NULL) n 591 net/decnet/dn_table.c if (n->nlmsg_flags & NLM_F_EXCL) n 594 net/decnet/dn_table.c if (n->nlmsg_flags & NLM_F_REPLACE) { n 613 net/decnet/dn_table.c if (!(n->nlmsg_flags & NLM_F_APPEND)) { n 621 net/decnet/dn_table.c if (!(n->nlmsg_flags & NLM_F_CREATE)) n 648 net/decnet/dn_table.c dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req); n 657 net/decnet/dn_table.c dn_rtmsg_fib(RTM_NEWROUTE, new_f, z, tb->n, n, req); n 667 net/decnet/dn_table.c struct nlmsghdr *n, struct netlink_skb_parms *req) n 715 net/decnet/dn_table.c dn_fib_nh_match(r, n, attrs, fi) == 0) n 721 net/decnet/dn_table.c dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req); n 836 net/decnet/dn_table.c struct dn_fib_table *dn_fib_get_table(u32 n, int create) n 841 net/decnet/dn_table.c if (n < RT_TABLE_MIN) n 844 net/decnet/dn_table.c if (n > RT_TABLE_MAX) n 847 net/decnet/dn_table.c h = n & (DN_FIB_TABLE_HASHSZ - 1); n 850 net/decnet/dn_table.c if (t->n == n) { n 870 net/decnet/dn_table.c t->n = n; n 843 net/dsa/dsa2.c struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n) n 848 net/dsa/dsa2.c ds = devm_kzalloc(dev, struct_size(ds, ports, n), GFP_KERNEL); n 853 net/dsa/dsa2.c ds->num_ports = n; n 77 net/ieee802154/6lowpan/core.c static int lowpan_neigh_construct(struct net_device *dev, struct neighbour *n) n 79 net/ieee802154/6lowpan/core.c struct lowpan_802154_neigh *neigh = lowpan_802154_neigh(neighbour_priv(n)); n 41 net/ieee802154/6lowpan/tx.c struct neighbour *n; n 62 net/ieee802154/6lowpan/tx.c n = neigh_lookup(&nd_tbl, &hdr->daddr, ldev); n 63 net/ieee802154/6lowpan/tx.c if (n) { n 64 net/ieee802154/6lowpan/tx.c llneigh = lowpan_802154_neigh(neighbour_priv(n)); n 65 net/ieee802154/6lowpan/tx.c read_lock_bh(&n->lock); n 67 net/ieee802154/6lowpan/tx.c read_unlock_bh(&n->lock); n 80 net/ieee802154/6lowpan/tx.c if (n) n 81 net/ieee802154/6lowpan/tx.c neigh_release(n); n 509 net/ieee802154/nl802154.c #define CMD(op, n) \ n 513 net/ieee802154/nl802154.c if (nla_put_u32(msg, i, NL802154_CMD_ ## n)) \ n 123 net/ipv4/arp.c static bool arp_key_eq(const struct neighbour *n, const void *pkey); n 687 net/ipv4/arp.c struct neighbour *n; n 826 net/ipv4/arp.c n = neigh_event_ns(&arp_tbl, sha, &sip, dev); n 827 net/ipv4/arp.c if (n) { n 832 net/ipv4/arp.c neigh_release(n); n 842 net/ipv4/arp.c n = neigh_event_ns(&arp_tbl, sha, &sip, dev); n 843 net/ipv4/arp.c if (n) n 844 net/ipv4/arp.c neigh_release(n); n 865 net/ipv4/arp.c n = __neigh_lookup(&arp_tbl, &sip, dev, 0); n 868 net/ipv4/arp.c if (n || IN_DEV_ARP_ACCEPT(in_dev)) { n 878 net/ipv4/arp.c if (!n && n 886 net/ipv4/arp.c n = __neigh_lookup(&arp_tbl, &sip, dev, 1); n 889 net/ipv4/arp.c if (n) { n 899 net/ipv4/arp.c n->updated + n 900 net/ipv4/arp.c NEIGH_VAR(n->parms, LOCKTIME)) || n 909 net/ipv4/arp.c neigh_update(n, sha, 
state, n 911 net/ipv4/arp.c neigh_release(n); n 1311 net/ipv4/arp.c int n; n 1313 net/ipv4/arp.c for (n = 0, s = buf; n < 6; n++) { n 1314 net/ipv4/arp.c c = (a->ax25_call[n] >> 1) & 0x7F; n 1321 net/ipv4/arp.c n = (a->ax25_call[6] >> 1) & 0x0F; n 1322 net/ipv4/arp.c if (n > 9) { n 1324 net/ipv4/arp.c n -= 10; n 1327 net/ipv4/arp.c *s++ = n + '0'; n 1340 net/ipv4/arp.c struct neighbour *n) n 1345 net/ipv4/arp.c struct net_device *dev = n->dev; n 1348 net/ipv4/arp.c read_lock(&n->lock); n 1352 net/ipv4/arp.c ax2asc2((ax25_address *)n->ha, hbuffer); n 1356 net/ipv4/arp.c hbuffer[k++] = hex_asc_hi(n->ha[j]); n 1357 net/ipv4/arp.c hbuffer[k++] = hex_asc_lo(n->ha[j]); n 1366 net/ipv4/arp.c sprintf(tbuf, "%pI4", n->primary_key); n 1368 net/ipv4/arp.c tbuf, hatype, arp_state_to_flags(n), hbuffer, dev->name); n 1369 net/ipv4/arp.c read_unlock(&n->lock); n 1373 net/ipv4/arp.c struct pneigh_entry *n) n 1375 net/ipv4/arp.c struct net_device *dev = n->dev; n 1379 net/ipv4/arp.c sprintf(tbuf, "%pI4", n->key); n 701 net/ipv4/devinet.c struct hlist_node *n; n 746 net/ipv4/devinet.c hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) { n 537 net/ipv4/fib_semantics.c struct neighbour *n; n 541 net/ipv4/fib_semantics.c n = neigh_lookup(&arp_tbl, &nhc->nhc_gw.ipv4, nhc->nhc_dev); n 543 net/ipv4/fib_semantics.c n = neigh_lookup(ipv6_stub->nd_tbl, &nhc->nhc_gw.ipv6, n 546 net/ipv4/fib_semantics.c n = NULL; n 548 net/ipv4/fib_semantics.c if (n) { n 549 net/ipv4/fib_semantics.c state = n->nud_state; n 550 net/ipv4/fib_semantics.c neigh_release(n); n 1243 net/ipv4/fib_semantics.c struct hlist_node *n; n 1246 net/ipv4/fib_semantics.c hlist_for_each_entry_safe(fi, n, head, fib_hash) { n 1259 net/ipv4/fib_semantics.c struct hlist_node *n; n 1262 net/ipv4/fib_semantics.c hlist_for_each_entry_safe(fi, n, lhead, fib_lhash) { n 2137 net/ipv4/fib_semantics.c struct neighbour *n; n 2142 net/ipv4/fib_semantics.c n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev, n 2145 net/ipv4/fib_semantics.c n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, n 2148 net/ipv4/fib_semantics.c n = NULL; n 2149 net/ipv4/fib_semantics.c if (n) n 2150 net/ipv4/fib_semantics.c state = n->nud_state; n 116 net/ipv4/fib_trie.c #define IS_TRIE(n) ((n)->pos >= KEYLENGTH) n 117 net/ipv4/fib_trie.c #define IS_TNODE(n) ((n)->bits) n 118 net/ipv4/fib_trie.c #define IS_LEAF(n) (!(n)->bits) n 142 net/ipv4/fib_trie.c #define TNODE_SIZE(n) offsetof(struct tnode, kv[0].tnode[n]) n 202 net/ipv4/fib_trie.c static inline void node_set_parent(struct key_vector *n, struct key_vector *tp) n 204 net/ipv4/fib_trie.c if (n) n 205 net/ipv4/fib_trie.c rcu_assign_pointer(tn_info(n)->parent, tp); n 208 net/ipv4/fib_trie.c #define NODE_INIT_PARENT(n, p) RCU_INIT_POINTER(tn_info(n)->parent, p) n 312 net/ipv4/fib_trie.c struct tnode *n = container_of(head, struct tnode, rcu); n 314 net/ipv4/fib_trie.c if (!n->tn_bits) n 315 net/ipv4/fib_trie.c kmem_cache_free(trie_leaf_kmem, n); n 317 net/ipv4/fib_trie.c kvfree(n); n 320 net/ipv4/fib_trie.c #define node_free(n) call_rcu(&tn_info(n)->rcu, __node_free_rcu) n 339 net/ipv4/fib_trie.c static inline void empty_child_inc(struct key_vector *n) n 341 net/ipv4/fib_trie.c tn_info(n)->empty_children++; n 343 net/ipv4/fib_trie.c if (!tn_info(n)->empty_children) n 344 net/ipv4/fib_trie.c tn_info(n)->full_children++; n 347 net/ipv4/fib_trie.c static inline void empty_child_dec(struct key_vector *n) n 349 net/ipv4/fib_trie.c if (!tn_info(n)->empty_children) n 350 net/ipv4/fib_trie.c tn_info(n)->full_children--; n 352 net/ipv4/fib_trie.c 
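__node_free_rcu() and the node_free() macro above defer the actual kfree()/kvfree() of a trie node until after an RCU grace period, so lockless lookups still holding the old pointer never touch freed memory. The shape of that pattern with a hypothetical struct thing (not the fib_trie types):

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct thing {
            struct rcu_head rcu;    /* storage for the deferred callback */
            int payload;
    };

    static void thing_free_rcu(struct rcu_head *head)
    {
            kfree(container_of(head, struct thing, rcu));
    }

    /* Caller must already have unpublished 't' (e.g. rcu_assign_pointer()
     * of the parent slot to its replacement) before calling this. */
    static void thing_free(struct thing *t)
    {
            call_rcu(&t->rcu, thing_free_rcu);
    }

When the callback would only call kfree(), kfree_rcu(t, rcu) is the usual shorthand.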
tn_info(n)->empty_children--; n 411 net/ipv4/fib_trie.c static inline int tnode_full(struct key_vector *tn, struct key_vector *n) n 413 net/ipv4/fib_trie.c return n && ((n->pos + n->bits) == tn->pos) && IS_TNODE(n); n 420 net/ipv4/fib_trie.c struct key_vector *n) n 428 net/ipv4/fib_trie.c if (!n && chi) n 430 net/ipv4/fib_trie.c if (n && !chi) n 435 net/ipv4/fib_trie.c isfull = tnode_full(tn, n); n 442 net/ipv4/fib_trie.c if (n && (tn->slen < n->slen)) n 443 net/ipv4/fib_trie.c tn->slen = n->slen; n 445 net/ipv4/fib_trie.c rcu_assign_pointer(tn->tnode[i], n); n 471 net/ipv4/fib_trie.c struct key_vector *n) n 474 net/ipv4/fib_trie.c rcu_assign_pointer(tp->tnode[0], n); n 476 net/ipv4/fib_trie.c put_child(tp, get_index(key, tp), n); n 485 net/ipv4/fib_trie.c struct key_vector *n) n 487 net/ipv4/fib_trie.c tn_info(n)->rcu.next = tn_info(tn)->rcu.next; n 488 net/ipv4/fib_trie.c tn_info(tn)->rcu.next = &tn_info(n)->rcu; n 692 net/ipv4/fib_trie.c struct key_vector *n, *tp; n 696 net/ipv4/fib_trie.c for (n = NULL, i = child_length(oldtnode); !n && i;) n 697 net/ipv4/fib_trie.c n = get_child(oldtnode, --i); n 701 net/ipv4/fib_trie.c put_child_root(tp, oldtnode->key, n); n 702 net/ipv4/fib_trie.c node_set_parent(n, tp); n 728 net/ipv4/fib_trie.c struct key_vector *n = get_child(tn, i); n 730 net/ipv4/fib_trie.c if (!n || (n->slen <= slen)) n 734 net/ipv4/fib_trie.c stride <<= (n->slen - slen); n 735 net/ipv4/fib_trie.c slen = n->slen; n 940 net/ipv4/fib_trie.c struct key_vector *pn, *n = t->kv; n 944 net/ipv4/fib_trie.c pn = n; n 945 net/ipv4/fib_trie.c n = get_child_rcu(n, index); n 947 net/ipv4/fib_trie.c if (!n) n 950 net/ipv4/fib_trie.c index = get_cindex(key, n); n 966 net/ipv4/fib_trie.c if (index >= (1ul << n->bits)) { n 967 net/ipv4/fib_trie.c n = NULL; n 972 net/ipv4/fib_trie.c } while (IS_TNODE(n)); n 976 net/ipv4/fib_trie.c return n; n 1017 net/ipv4/fib_trie.c struct key_vector *n, *l; n 1024 net/ipv4/fib_trie.c n = get_child(tp, get_index(key, tp)); n 1032 net/ipv4/fib_trie.c if (n) { n 1035 net/ipv4/fib_trie.c tn = tnode_new(key, __fls(key ^ n->key), 1); n 1041 net/ipv4/fib_trie.c put_child(tn, get_index(key, tn) ^ 1, n); n 1045 net/ipv4/fib_trie.c node_set_parent(n, tn); n 1304 net/ipv4/fib_trie.c static inline t_key prefix_mismatch(t_key key, struct key_vector *n) n 1306 net/ipv4/fib_trie.c t_key prefix = n->key; n 1320 net/ipv4/fib_trie.c struct key_vector *n, *pn; n 1328 net/ipv4/fib_trie.c n = get_child_rcu(pn, cindex); n 1329 net/ipv4/fib_trie.c if (!n) { n 1340 net/ipv4/fib_trie.c index = get_cindex(key, n); n 1356 net/ipv4/fib_trie.c if (index >= (1ul << n->bits)) n 1360 net/ipv4/fib_trie.c if (IS_LEAF(n)) n 1366 net/ipv4/fib_trie.c if (n->slen > n->pos) { n 1367 net/ipv4/fib_trie.c pn = n; n 1371 net/ipv4/fib_trie.c n = get_child_rcu(n, index); n 1372 net/ipv4/fib_trie.c if (unlikely(!n)) n 1379 net/ipv4/fib_trie.c struct key_vector __rcu **cptr = n->tnode; n 1385 net/ipv4/fib_trie.c if (unlikely(prefix_mismatch(key, n)) || (n->slen == n->pos)) n 1389 net/ipv4/fib_trie.c if (unlikely(IS_LEAF(n))) n 1397 net/ipv4/fib_trie.c while ((n = rcu_dereference(*cptr)) == NULL) { n 1400 net/ipv4/fib_trie.c if (!n) n 1438 net/ipv4/fib_trie.c index = key ^ n->key; n 1441 net/ipv4/fib_trie.c hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) { n 1491 net/ipv4/fib_trie.c res->prefix = htonl(n->key); n 1499 net/ipv4/fib_trie.c res->fa_head = &n->leaf; n 1620 net/ipv4/fib_trie.c struct key_vector *pn, *n = *tn; n 1626 net/ipv4/fib_trie.c pn = n; n 1633 net/ipv4/fib_trie.c n = get_child_rcu(pn, 
cindex++); n 1634 net/ipv4/fib_trie.c if (!n) n 1638 net/ipv4/fib_trie.c if (IS_LEAF(n) && (n->key >= key)) n 1640 net/ipv4/fib_trie.c } while (IS_TNODE(n)); n 1654 net/ipv4/fib_trie.c n = get_child_rcu(pn, cindex++); n 1655 net/ipv4/fib_trie.c if (!n) n 1659 net/ipv4/fib_trie.c if (IS_LEAF(n)) n 1663 net/ipv4/fib_trie.c pn = n; n 1672 net/ipv4/fib_trie.c return n; n 1685 net/ipv4/fib_trie.c struct key_vector *n; n 1693 net/ipv4/fib_trie.c n = pn; n 1697 net/ipv4/fib_trie.c put_child_root(pn, n->key, NULL); n 1698 net/ipv4/fib_trie.c node_free(n); n 1706 net/ipv4/fib_trie.c n = get_child(pn, cindex); n 1707 net/ipv4/fib_trie.c if (!n) n 1710 net/ipv4/fib_trie.c if (IS_TNODE(n)) { n 1712 net/ipv4/fib_trie.c pn = n; n 1713 net/ipv4/fib_trie.c cindex = 1ul << n->bits; n 1718 net/ipv4/fib_trie.c hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { n 1723 net/ipv4/fib_trie.c put_child_root(pn, n->key, NULL); n 1724 net/ipv4/fib_trie.c node_free(n); n 1803 net/ipv4/fib_trie.c struct key_vector *n; n 1824 net/ipv4/fib_trie.c n = get_child(pn, cindex); n 1825 net/ipv4/fib_trie.c if (!n) n 1828 net/ipv4/fib_trie.c if (IS_TNODE(n)) { n 1830 net/ipv4/fib_trie.c pn = n; n 1831 net/ipv4/fib_trie.c cindex = 1ul << n->bits; n 1836 net/ipv4/fib_trie.c hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { n 1851 net/ipv4/fib_trie.c n->slen = slen; n 1853 net/ipv4/fib_trie.c if (hlist_empty(&n->leaf)) { n 1854 net/ipv4/fib_trie.c put_child_root(pn, n->key, NULL); n 1855 net/ipv4/fib_trie.c node_free(n); n 1873 net/ipv4/fib_trie.c struct key_vector *n; n 1894 net/ipv4/fib_trie.c n = get_child(pn, cindex); n 1895 net/ipv4/fib_trie.c if (!n) n 1898 net/ipv4/fib_trie.c if (IS_TNODE(n)) { n 1900 net/ipv4/fib_trie.c pn = n; n 1901 net/ipv4/fib_trie.c cindex = 1ul << n->bits; n 1906 net/ipv4/fib_trie.c hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { n 1925 net/ipv4/fib_trie.c n->key, n 1935 net/ipv4/fib_trie.c n->slen = slen; n 1937 net/ipv4/fib_trie.c if (hlist_empty(&n->leaf)) { n 1938 net/ipv4/fib_trie.c put_child_root(pn, n->key, NULL); n 1939 net/ipv4/fib_trie.c node_free(n); n 1957 net/ipv4/fib_trie.c struct key_vector *n; n 1971 net/ipv4/fib_trie.c n = get_child(pn, cindex); n 1972 net/ipv4/fib_trie.c if (!n) n 1975 net/ipv4/fib_trie.c if (IS_TNODE(n)) { n 1977 net/ipv4/fib_trie.c pn = n; n 1978 net/ipv4/fib_trie.c cindex = 1ul << n->bits; n 1983 net/ipv4/fib_trie.c hlist_for_each_entry(fa, &n->leaf, fa_list) { n 1989 net/ipv4/fib_trie.c rtmsg_fib(RTM_NEWROUTE, htonl(n->key), fa, n 1998 net/ipv4/fib_trie.c n->key, n 2277 net/ipv4/fib_trie.c struct key_vector *n = get_child_rcu(pn, cindex++); n 2279 net/ipv4/fib_trie.c if (!n) n 2282 net/ipv4/fib_trie.c if (IS_LEAF(n)) { n 2287 net/ipv4/fib_trie.c iter->tnode = n; n 2292 net/ipv4/fib_trie.c return n; n 2312 net/ipv4/fib_trie.c struct key_vector *n, *pn; n 2318 net/ipv4/fib_trie.c n = rcu_dereference(pn->tnode[0]); n 2319 net/ipv4/fib_trie.c if (!n) n 2322 net/ipv4/fib_trie.c if (IS_TNODE(n)) { n 2323 net/ipv4/fib_trie.c iter->tnode = n; n 2332 net/ipv4/fib_trie.c return n; n 2337 net/ipv4/fib_trie.c struct key_vector *n; n 2343 net/ipv4/fib_trie.c for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) { n 2344 net/ipv4/fib_trie.c if (IS_LEAF(n)) { n 2352 net/ipv4/fib_trie.c hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) n 2356 net/ipv4/fib_trie.c if (n->bits < MAX_STAT_DEPTH) n 2357 net/ipv4/fib_trie.c s->nodesizes[n->bits]++; n 2358 net/ipv4/fib_trie.c s->nullpointers += tn_info(n)->empty_children; n 2497 net/ipv4/fib_trie.c 
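The fib_trie lookup and dump loops above keep re-doing the same bit arithmetic: XOR the search key with the node's key, shift away the bits below the node's position, and the result is either a child slot (if it fits in the node's 'bits' field) or proof that the key does not belong under this node (the "index >= (1ul << n->bits)" checks). A standalone restatement with hypothetical names, not the in-tree struct key_vector helpers:

    #include <stdint.h>

    /* A node covering bit positions [pos, pos + bits) of a 32-bit key. */
    struct tnode_view {
            uint32_t key;           /* the node's own prefix */
            unsigned int pos;       /* lowest bit handled by the children */
            unsigned int bits;      /* log2 of the number of child slots */
    };

    /* XOR against the prefix, shifted down to the node's range.  Values
     * below 1 << bits are valid child slots; anything larger means a bit
     * above the node's range differs, i.e. a prefix mismatch. */
    static uint32_t child_index(uint32_t key, const struct tnode_view *n)
    {
            return (key ^ n->key) >> n->pos;
    }

    static int key_under_node(uint32_t key, const struct tnode_view *n)
    {
            return child_index(key, n) < (1u << n->bits);
    }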
struct key_vector *n; n 2499 net/ipv4/fib_trie.c for (n = fib_trie_get_first(iter, n 2501 net/ipv4/fib_trie.c n; n = fib_trie_get_next(iter)) n 2504 net/ipv4/fib_trie.c return n; n 2526 net/ipv4/fib_trie.c struct key_vector *n; n 2530 net/ipv4/fib_trie.c n = fib_trie_get_next(iter); n 2531 net/ipv4/fib_trie.c if (n) n 2532 net/ipv4/fib_trie.c return n; n 2538 net/ipv4/fib_trie.c n = fib_trie_get_first(iter, (struct trie *) tb->tb_data); n 2539 net/ipv4/fib_trie.c if (n) n 2547 net/ipv4/fib_trie.c n = fib_trie_get_first(iter, (struct trie *) tb->tb_data); n 2548 net/ipv4/fib_trie.c if (n) n 2556 net/ipv4/fib_trie.c return n; n 2565 net/ipv4/fib_trie.c static void seq_indent(struct seq_file *seq, int n) n 2567 net/ipv4/fib_trie.c while (n-- > 0) n 2612 net/ipv4/fib_trie.c struct key_vector *n = v; n 2614 net/ipv4/fib_trie.c if (IS_TRIE(node_parent_rcu(n))) n 2617 net/ipv4/fib_trie.c if (IS_TNODE(n)) { n 2618 net/ipv4/fib_trie.c __be32 prf = htonl(n->key); n 2622 net/ipv4/fib_trie.c &prf, KEYLENGTH - n->pos - n->bits, n->bits, n 2623 net/ipv4/fib_trie.c tn_info(n)->full_children, n 2624 net/ipv4/fib_trie.c tn_info(n)->empty_children); n 2626 net/ipv4/fib_trie.c __be32 val = htonl(n->key); n 2632 net/ipv4/fib_trie.c hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) { n 1085 net/ipv4/ip_tunnel.c struct hlist_node *n; n 1088 net/ipv4/ip_tunnel.c hlist_for_each_entry_safe(t, n, thead, hash_node) n 2983 net/ipv4/ipmr.c int n; n 3003 net/ipv4/ipmr.c for (n = mfc->_c.mfc_un.res.minvif; n 3004 net/ipv4/ipmr.c n < mfc->_c.mfc_un.res.maxvif; n++) { n 3005 net/ipv4/ipmr.c if (VIF_EXISTS(mrt, n) && n 3006 net/ipv4/ipmr.c mfc->_c.mfc_un.res.ttls[n] < 255) n 3009 net/ipv4/ipmr.c n, mfc->_c.mfc_un.res.ttls[n]); n 181 net/ipv4/netfilter/ipt_CLUSTERIP.c int n; n 183 net/ipv4/netfilter/ipt_CLUSTERIP.c for (n = 0; n < i->num_local_nodes; n++) n 184 net/ipv4/netfilter/ipt_CLUSTERIP.c set_bit(i->local_nodes[n] - 1, &c->local_nodes); n 460 net/ipv4/nexthop.c struct neighbour *n; n 464 net/ipv4/nexthop.c n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6); n 465 net/ipv4/nexthop.c if (n) n 466 net/ipv4/nexthop.c state = n->nud_state; n 476 net/ipv4/nexthop.c struct neighbour *n; n 480 net/ipv4/nexthop.c n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev, n 482 net/ipv4/nexthop.c if (n) n 483 net/ipv4/nexthop.c state = n->nud_state; n 1073 net/ipv4/nexthop.c struct hlist_node *n; n 1076 net/ipv4/nexthop.c hlist_for_each_entry_safe(nhi, n, head, dev_hash) { n 1770 net/ipv4/nexthop.c struct hlist_node *n; n 1773 net/ipv4/nexthop.c hlist_for_each_entry_safe(nhi, n, head, dev_hash) { n 437 net/ipv4/route.c struct neighbour *n; n 442 net/ipv4/route.c n = ip_neigh_gw4(dev, rt->rt_gw4); n 444 net/ipv4/route.c n = ip_neigh_gw6(dev, &rt->rt_gw6); n 449 net/ipv4/route.c n = ip_neigh_gw4(dev, pkey); n 452 net/ipv4/route.c if (!IS_ERR(n) && !refcount_inc_not_zero(&n->refcnt)) n 453 net/ipv4/route.c n = NULL; n 457 net/ipv4/route.c return n; n 743 net/ipv4/route.c struct neighbour *n; n 780 net/ipv4/route.c n = __ipv4_neigh_lookup(rt->dst.dev, new_gw); n 781 net/ipv4/route.c if (!n) n 782 net/ipv4/route.c n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev); n 783 net/ipv4/route.c if (!IS_ERR(n)) { n 784 net/ipv4/route.c if (!(n->nud_state & NUD_VALID)) { n 785 net/ipv4/route.c neigh_event_send(n, NULL); n 796 net/ipv4/route.c call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n); n 798 net/ipv4/route.c neigh_release(n); n 4916 net/ipv4/tcp_input.c struct sk_buff *skb = head, *n; n 4924 net/ipv4/tcp_input.c for (end_of_skbs = 
true; skb != NULL && skb != tail; skb = n) { n 4925 net/ipv4/tcp_input.c n = tcp_skb_next(skb, list); n 4947 net/ipv4/tcp_input.c if (n && n != tail && n 4948 net/ipv4/tcp_input.c TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(n)->seq) { n 5009 net/ipv4/tcp_input.c skb_queue_walk_safe(&tmp, skb, n) n 1137 net/ipv4/tcp_ipv4.c struct hlist_node *n; n 1142 net/ipv4/tcp_ipv4.c hlist_for_each_entry_safe(key, n, &md5sig->head, node) { n 649 net/ipv4/tcp_metrics.c int n = 0; n 663 net/ipv4/tcp_metrics.c n++; n 670 net/ipv4/tcp_metrics.c n++; n 675 net/ipv4/tcp_metrics.c n++; n 677 net/ipv4/tcp_metrics.c if (n) n 828 net/ipv4/tcp_output.c struct list_head *q, *n; n 836 net/ipv4/tcp_output.c list_for_each_safe(q, n, &list) { n 79 net/ipv4/tcp_recovery.c struct sk_buff *skb, *n; n 84 net/ipv4/tcp_recovery.c list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue, n 207 net/ipv6/addrlabel.c struct hlist_node *n; n 213 net/ipv6/addrlabel.c hlist_for_each_entry_safe(p, n, &net->ipv6.ip6addrlbl_table.head, list) { n 270 net/ipv6/addrlabel.c struct hlist_node *n; n 276 net/ipv6/addrlabel.c hlist_for_each_entry_safe(p, n, &net->ipv6.ip6addrlbl_table.head, list) { n 333 net/ipv6/addrlabel.c struct hlist_node *n; n 337 net/ipv6/addrlabel.c hlist_for_each_entry_safe(p, n, &net->ipv6.ip6addrlbl_table.head, list) { n 482 net/ipv6/exthdrs.c int n, i; n 563 net/ipv6/exthdrs.c n = hdr->hdrlen >> 1; n 565 net/ipv6/exthdrs.c if (hdr->segments_left > n) { n 590 net/ipv6/exthdrs.c i = n - --hdr->segments_left; n 2473 net/ipv6/ip6_fib.c struct fib6_info *n; n 2480 net/ipv6/ip6_fib.c n = rcu_dereference_bh(((struct fib6_info *)v)->fib6_next); n 2481 net/ipv6/ip6_fib.c if (n) { n 2483 net/ipv6/ip6_fib.c return n; n 1004 net/ipv6/ip6_output.c struct neighbour *n; n 1069 net/ipv6/ip6_output.c n = __ipv6_neigh_lookup_noref(rt->dst.dev, n 1071 net/ipv6/ip6_output.c err = n && !(n->nud_state & NUD_VALID) ? 
-EINVAL : 0; n 467 net/ipv6/ip6mr.c int n; n 488 net/ipv6/ip6mr.c for (n = mfc->_c.mfc_un.res.minvif; n 489 net/ipv6/ip6mr.c n < mfc->_c.mfc_un.res.maxvif; n++) { n 490 net/ipv6/ip6mr.c if (VIF_EXISTS(mrt, n) && n 491 net/ipv6/ip6mr.c mfc->_c.mfc_un.res.ttls[n] < 255) n 493 net/ipv6/ip6mr.c " %2d:%-3d", n, n 494 net/ipv6/ip6mr.c mfc->_c.mfc_un.res.ttls[n]); n 26 net/ipv6/mip6.c static inline unsigned int calc_padlen(unsigned int len, unsigned int n) n 28 net/ipv6/mip6.c return (n - len + 16) & 0x7; n 81 net/ipv6/ndisc.c static int pndisc_constructor(struct pneigh_entry *n); n 82 net/ipv6/ndisc.c static void pndisc_destructor(struct pneigh_entry *n); n 317 net/ipv6/ndisc.c static bool ndisc_key_eq(const struct neighbour *n, const void *pkey) n 319 net/ipv6/ndisc.c return neigh_key_eq128(n, pkey); n 370 net/ipv6/ndisc.c static int pndisc_constructor(struct pneigh_entry *n) n 372 net/ipv6/ndisc.c struct in6_addr *addr = (struct in6_addr *)&n->key; n 374 net/ipv6/ndisc.c struct net_device *dev = n->dev; n 383 net/ipv6/ndisc.c static void pndisc_destructor(struct pneigh_entry *n) n 385 net/ipv6/ndisc.c struct in6_addr *addr = (struct in6_addr *)&n->key; n 387 net/ipv6/ndisc.c struct net_device *dev = n->dev; n 746 net/ipv6/ndisc.c struct pneigh_entry *n; n 750 net/ipv6/ndisc.c n = __pneigh_lookup(&nd_tbl, dev_net(dev), pkey, dev); n 751 net/ipv6/ndisc.c if (n) n 752 net/ipv6/ndisc.c ret = !!(n->flags & NTF_ROUTER); n 903 net/ipv6/ndisc.c struct sk_buff *n = skb_clone(skb, GFP_ATOMIC); n 904 net/ipv6/ndisc.c if (n) n 905 net/ipv6/ndisc.c pneigh_enqueue(&nd_tbl, idev->nd_parms, n); n 1486 net/ipv6/ndisc.c __be32 n; n 1489 net/ipv6/ndisc.c memcpy(&n, ((u8 *)(ndopts.nd_opts_mtu+1))+2, sizeof(mtu)); n 1490 net/ipv6/ndisc.c mtu = ntohl(n); n 205 net/ipv6/route.c struct neighbour *n; n 208 net/ipv6/route.c n = __ipv6_neigh_lookup(dev, daddr); n 209 net/ipv6/route.c if (n) n 210 net/ipv6/route.c return n; n 212 net/ipv6/route.c n = neigh_create(&nd_tbl, daddr, dev); n 213 net/ipv6/route.c return IS_ERR(n) ? 
NULL : n; n 732 net/ipv6/route.c int n = rt6_check_neigh(nh); n 733 net/ipv6/route.c if (n < 0) n 734 net/ipv6/route.c return n; n 401 net/ipv6/sit.c struct ip_tunnel_prl_entry *p, *n; n 405 net/ipv6/sit.c n = rcu_dereference_protected(p->next, 1); n 407 net/ipv6/sit.c p = n; n 191 net/ipv6/xfrm6_tunnel.c struct hlist_node *n; n 195 net/ipv6/xfrm6_tunnel.c hlist_for_each_entry_safe(x6spi, n, n 684 net/iucv/af_iucv.c struct iucv_sock *isk, *n; n 687 net/iucv/af_iucv.c list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) { n 1365 net/iucv/af_iucv.c struct sock_msg_q *p, *n; n 1367 net/iucv/af_iucv.c list_for_each_entry_safe(p, n, &iucv->message_q.list, list) { n 1501 net/iucv/af_iucv.c struct iucv_sock *isk, *n; n 1504 net/iucv/af_iucv.c list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) { n 2275 net/iucv/af_iucv.c enum iucv_tx_notify n) n 2300 net/iucv/af_iucv.c switch (n) { n 734 net/iucv/iucv.c struct iucv_irq_list *p, *n; n 747 net/iucv/iucv.c list_for_each_entry_safe(p, n, &iucv_task_queue, list) { n 803 net/iucv/iucv.c struct iucv_path *p, *n; n 810 net/iucv/iucv.c list_for_each_entry_safe(p, n, &handler->paths, list) { n 1743 net/iucv/iucv.c struct iucv_irq_list *p, *n; n 1756 net/iucv/iucv.c list_for_each_entry_safe(p, n, &task_queue, list) { n 1776 net/iucv/iucv.c struct iucv_irq_list *p, *n; n 1787 net/iucv/iucv.c list_for_each_entry_safe(p, n, &work_queue, list) { n 1886 net/iucv/iucv.c struct iucv_irq_list *p, *n; n 1897 net/iucv/iucv.c list_for_each_entry_safe(p, n, &iucv_work_queue, list) { n 2072 net/iucv/iucv.c struct iucv_irq_list *p, *n; n 2075 net/iucv/iucv.c list_for_each_entry_safe(p, n, &iucv_task_queue, list) n 2077 net/iucv/iucv.c list_for_each_entry_safe(p, n, &iucv_work_queue, list) n 588 net/mac80211/debugfs_sta.c #define PFLAG(t, n, a, b) \ n 590 net/mac80211/debugfs_sta.c if (cap[n] & IEEE80211_HE_##t##_CAP##n##_##a) \ n 594 net/mac80211/debugfs_sta.c #define PFLAG_RANGE(t, i, n, s, m, off, fmt) \ n 596 net/mac80211/debugfs_sta.c u8 msk = IEEE80211_HE_##t##_CAP##i##_##n##_MASK; \ n 601 net/mac80211/debugfs_sta.c #define PFLAG_RANGE_DEFAULT(t, i, n, s, m, off, fmt, a, b) \ n 603 net/mac80211/debugfs_sta.c if (cap[i] == IEEE80211_HE_##t ##_CAP##i##_##n##_##a) { \ n 607 net/mac80211/debugfs_sta.c PFLAG_RANGE(t, i, n, s, m, off, fmt); \ n 867 net/mac80211/debugfs_sta.c #define PRINT_NSS_SUPP(f, n) \ n 871 net/mac80211/debugfs_sta.c p += scnprintf(p, buf_sz + buf - p, n ": %#.4x\n", v); \ n 875 net/mac80211/debugfs_sta.c PRINT(n "-%d-SUPPORT-0-7", _i / 2); \ n 878 net/mac80211/debugfs_sta.c PRINT(n "-%d-SUPPORT-0-9", _i / 2); \ n 881 net/mac80211/debugfs_sta.c PRINT(n "-%d-SUPPORT-0-11", _i / 2); \ n 884 net/mac80211/debugfs_sta.c PRINT(n "-%d-NOT-SUPPORTED", _i / 2); \ n 119 net/mac80211/ht.c u8 n = ht_capa->ampdu_params_info & n 121 net/mac80211/ht.c if (n < ht_cap->ampdu_factor) n 122 net/mac80211/ht.c ht_cap->ampdu_factor = n; n 128 net/mac80211/ht.c u8 n = (ht_capa->ampdu_params_info & n 131 net/mac80211/ht.c if (n > ht_cap->ampdu_density) n 132 net/mac80211/ht.c ht_cap->ampdu_density = n; n 2211 net/mac80211/ieee80211_i.h bool ieee80211_cs_list_valid(const struct ieee80211_cipher_scheme *cs, int n); n 180 net/mac80211/mesh.c struct hlist_node *n; n 187 net/mac80211/mesh.c hlist_for_each_entry_safe(p, n, &rmc->bucket[i], list) { n 218 net/mac80211/mesh.c struct hlist_node *n; n 226 net/mac80211/mesh.c hlist_for_each_entry_safe(p, n, &rmc->bucket[idx], list) { n 550 net/mac80211/mesh_pathtbl.c struct hlist_node *n; n 553 
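The mesh RMC and mesh_pathtbl entries above (like the earlier devinet, tcp_ipv4 MD5-key and nexthop loops) rely on hlist_for_each_entry_safe(), whose extra cursor caches the next node so the current one can be unhashed and freed mid-walk. A minimal sketch over a hypothetical cache entry:

    #include <linux/jiffies.h>
    #include <linux/list.h>
    #include <linux/slab.h>

    struct cache_entry {
            struct hlist_node hnode;        /* linked into one hash bucket */
            unsigned long expires;          /* in jiffies */
    };

    /* Drop every expired entry from one bucket.  The 'tmp' cursor keeps
     * the walk valid although hlist_del() unlinks the entry in hand. */
    static void bucket_expire(struct hlist_head *bucket, unsigned long now)
    {
            struct cache_entry *e;
            struct hlist_node *tmp;

            hlist_for_each_entry_safe(e, tmp, bucket, hnode) {
                    if (time_after(now, e->expires)) {
                            hlist_del(&e->hnode);
                            kfree(e);
                    }
            }
    }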
net/mac80211/mesh_pathtbl.c hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { n 565 net/mac80211/mesh_pathtbl.c struct hlist_node *n; n 568 net/mac80211/mesh_pathtbl.c hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { n 578 net/mac80211/mesh_pathtbl.c struct hlist_node *n; n 581 net/mac80211/mesh_pathtbl.c hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { n 795 net/mac80211/mesh_pathtbl.c struct hlist_node *n; n 798 net/mac80211/mesh_pathtbl.c hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { n 482 net/mac80211/rc80211_minstrel.c unsigned int i, n = 0; n 499 net/mac80211/rc80211_minstrel.c struct minstrel_rate *mr = &mi->r[n]; n 500 net/mac80211/rc80211_minstrel.c struct minstrel_rate_stats *mrs = &mi->r[n].stats; n 511 net/mac80211/rc80211_minstrel.c n++; n 553 net/mac80211/rc80211_minstrel.c for (i = n; i < sband->n_bitrates; i++) { n 558 net/mac80211/rc80211_minstrel.c mi->n_rates = n; n 3249 net/mac80211/tx.c int n = 2, nfrags, pad = 0; n 3300 net/mac80211/tx.c n++; n 3303 net/mac80211/tx.c if (max_subframes && n > max_subframes) n 2726 net/mac80211/util.c u8 n; n 2736 net/mac80211/util.c n = ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem); n 2738 net/mac80211/util.c sizeof(he_cap->he_cap_elem) + n + n 2747 net/mac80211/util.c u8 n; n 2759 net/mac80211/util.c n = ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem); n 2761 net/mac80211/util.c sizeof(he_cap->he_cap_elem) + n + n 2776 net/mac80211/util.c memcpy(pos, &he_cap->he_mcs_nss_supp, n); n 2777 net/mac80211/util.c pos += n; n 2788 net/mac80211/util.c n = hweight8(he_cap->ppe_thres[0] & n 2790 net/mac80211/util.c n *= (1 + ((he_cap->ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) >> n 2797 net/mac80211/util.c n = (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) + 7; n 2798 net/mac80211/util.c n = DIV_ROUND_UP(n, 8); n 2801 net/mac80211/util.c memcpy(pos, &he_cap->ppe_thres, n); n 2802 net/mac80211/util.c pos += n; n 3564 net/mac80211/util.c bool ieee80211_cs_list_valid(const struct ieee80211_cipher_scheme *cs, int n) n 3571 net/mac80211/util.c for (i = 0; i < n; i++) n 3583 net/mac80211/util.c int n = local->hw.n_cipher_schemes; n 3587 net/mac80211/util.c for (i = 0; i < n; i++) { n 60 net/mac80211/vht.c u32 cap, n; n 62 net/mac80211/vht.c n = le32_to_cpu(sdata->u.mgd.vht_capa.vht_cap_info) & n 64 net/mac80211/vht.c n >>= IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; n 68 net/mac80211/vht.c if (n < cap) { n 72 net/mac80211/vht.c n << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; n 89 net/mac80211/vht.c u8 m, n, c; n 92 net/mac80211/vht.c n = (rxmcs_n >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED; n 95 net/mac80211/vht.c if (m && ((c != IEEE80211_VHT_MCS_NOT_SUPPORTED && n < c) || n 96 net/mac80211/vht.c n == IEEE80211_VHT_MCS_NOT_SUPPORTED)) { n 102 net/mac80211/vht.c n = (txmcs_n >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED; n 105 net/mac80211/vht.c if (m && ((c != IEEE80211_VHT_MCS_NOT_SUPPORTED && n < c) || n 106 net/mac80211/vht.c n == IEEE80211_VHT_MCS_NOT_SUPPORTED)) { n 238 net/mpls/af_mpls.c int n = 0; n 260 net/mpls/af_mpls.c if (n == nh_index) n 262 net/mpls/af_mpls.c n++; n 1489 net/ncsi/ncsi-manage.c unsigned int n = 0; n 1526 net/ncsi/ncsi-manage.c n++; n 1530 net/ncsi/ncsi-manage.c return n; n 474 net/netfilter/core.c unsigned int n) n 479 net/netfilter/core.c for (i = 0; i < n; i++) { n 53 net/netfilter/ipset/ip_set_hash_gen.h u32 n; n 58 net/netfilter/ipset/ip_set_hash_gen.h n = curr + AHASH_INIT_SIZE; n 62 net/netfilter/ipset/ip_set_hash_gen.h return n > 
curr && n <= AHASH_MAX_TUNED ? n : curr; n 90 net/netfilter/ipset/ip_set_hash_gen.h #define ahash_region(n, htable_bits) \ n 91 net/netfilter/ipset/ip_set_hash_gen.h ((n) % ahash_numof_locks(htable_bits)) n 116 net/netfilter/ipset/ip_set_hash_gen.h #define ext_size(n, dsize) \ n 117 net/netfilter/ipset/ip_set_hash_gen.h (sizeof(struct hbucket) + (n) * (dsize)) n 352 net/netfilter/ipset/ip_set_hash_gen.h mtype_add_cidr(struct ip_set *set, struct htype *h, u8 cidr, u8 n) n 358 net/netfilter/ipset/ip_set_hash_gen.h for (i = 0, j = -1; i < NLEN && h->nets[i].cidr[n]; i++) { n 361 net/netfilter/ipset/ip_set_hash_gen.h } else if (h->nets[i].cidr[n] < cidr) { n 363 net/netfilter/ipset/ip_set_hash_gen.h } else if (h->nets[i].cidr[n] == cidr) { n 364 net/netfilter/ipset/ip_set_hash_gen.h h->nets[CIDR_POS(cidr)].nets[n]++; n 370 net/netfilter/ipset/ip_set_hash_gen.h h->nets[i].cidr[n] = h->nets[i - 1].cidr[n]; n 372 net/netfilter/ipset/ip_set_hash_gen.h h->nets[i].cidr[n] = cidr; n 373 net/netfilter/ipset/ip_set_hash_gen.h h->nets[CIDR_POS(cidr)].nets[n] = 1; n 379 net/netfilter/ipset/ip_set_hash_gen.h mtype_del_cidr(struct ip_set *set, struct htype *h, u8 cidr, u8 n) n 385 net/netfilter/ipset/ip_set_hash_gen.h if (h->nets[i].cidr[n] != cidr) n 387 net/netfilter/ipset/ip_set_hash_gen.h h->nets[CIDR_POS(cidr)].nets[n]--; n 388 net/netfilter/ipset/ip_set_hash_gen.h if (h->nets[CIDR_POS(cidr)].nets[n] > 0) n 390 net/netfilter/ipset/ip_set_hash_gen.h for (j = i; j < net_end && h->nets[j].cidr[n]; j++) n 391 net/netfilter/ipset/ip_set_hash_gen.h h->nets[j].cidr[n] = h->nets[j + 1].cidr[n]; n 392 net/netfilter/ipset/ip_set_hash_gen.h h->nets[j].cidr[n] = 0; n 408 net/netfilter/ipset/ip_set_hash_gen.h #define ahash_data(n, i, dsize) \ n 409 net/netfilter/ipset/ip_set_hash_gen.h ((struct mtype_elem *)((n)->value + ((i) * (dsize)))) n 412 net/netfilter/ipset/ip_set_hash_gen.h mtype_ext_cleanup(struct ip_set *set, struct hbucket *n) n 416 net/netfilter/ipset/ip_set_hash_gen.h for (i = 0; i < n->pos; i++) n 417 net/netfilter/ipset/ip_set_hash_gen.h if (test_bit(i, n->used)) n 418 net/netfilter/ipset/ip_set_hash_gen.h ip_set_ext_destroy(set, ahash_data(n, i, set->dsize)); n 427 net/netfilter/ipset/ip_set_hash_gen.h struct hbucket *n; n 435 net/netfilter/ipset/ip_set_hash_gen.h n = __ipset_dereference(hbucket(t, i)); n 436 net/netfilter/ipset/ip_set_hash_gen.h if (!n) n 439 net/netfilter/ipset/ip_set_hash_gen.h mtype_ext_cleanup(set, n); n 442 net/netfilter/ipset/ip_set_hash_gen.h kfree_rcu(n, rcu); n 457 net/netfilter/ipset/ip_set_hash_gen.h struct hbucket *n; n 461 net/netfilter/ipset/ip_set_hash_gen.h n = __ipset_dereference(hbucket(t, i)); n 462 net/netfilter/ipset/ip_set_hash_gen.h if (!n) n 465 net/netfilter/ipset/ip_set_hash_gen.h mtype_ext_cleanup(set, n); n 467 net/netfilter/ipset/ip_set_hash_gen.h kfree(n); n 515 net/netfilter/ipset/ip_set_hash_gen.h struct hbucket *n, *tmp; n 527 net/netfilter/ipset/ip_set_hash_gen.h n = __ipset_dereference(hbucket(t, i)); n 528 net/netfilter/ipset/ip_set_hash_gen.h if (!n) n 530 net/netfilter/ipset/ip_set_hash_gen.h for (j = 0, d = 0; j < n->pos; j++) { n 531 net/netfilter/ipset/ip_set_hash_gen.h if (!test_bit(j, n->used)) { n 535 net/netfilter/ipset/ip_set_hash_gen.h data = ahash_data(n, j, dsize); n 539 net/netfilter/ipset/ip_set_hash_gen.h clear_bit(j, n->used); n 552 net/netfilter/ipset/ip_set_hash_gen.h if (d >= n->size) { n 554 net/netfilter/ipset/ip_set_hash_gen.h ext_size(n->size, dsize); n 556 net/netfilter/ipset/ip_set_hash_gen.h kfree_rcu(n, rcu); n 560 
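
In the ip_set_hash_gen.h entries above, n selects which of an entry's prefix slots mtype_add_cidr()/mtype_del_cidr() update, and the add/del paths keep a descending-sorted array of prefix lengths with a use count per length. A simplified single-slot sketch of that bookkeeping (the kernel stores the counts in a differently indexed array, so this is an analogue rather than a copy):

    #include <stdio.h>

    #define NLEN 32                    /* max distinct prefix lengths tracked */

    static unsigned char cidr[NLEN];   /* kept sorted, largest prefix first */
    static unsigned int  nets[NLEN];   /* reference count per entry */
    static unsigned int  used;

    /* Record one more set member with this prefix length. */
    static void add_cidr(unsigned char c)
    {
        unsigned int i, j;

        for (i = 0; i < used; i++) {
            if (cidr[i] == c) {        /* already tracked: just bump the count */
                nets[i]++;
                return;
            }
            if (cidr[i] < c)
                break;                 /* insert before the first smaller prefix */
        }
        for (j = used; j > i; j--) {   /* shift the tail right to make room */
            cidr[j] = cidr[j - 1];
            nets[j] = nets[j - 1];
        }
        cidr[i] = c;
        nets[i] = 1;
        used++;
    }

    /* Drop one member with this prefix length; forget it when unused. */
    static void del_cidr(unsigned char c)
    {
        unsigned int i;

        for (i = 0; i < used; i++) {
            if (cidr[i] != c)
                continue;
            if (--nets[i] > 0)
                return;
            for (; i + 1 < used; i++) {    /* close the gap */
                cidr[i] = cidr[i + 1];
                nets[i] = nets[i + 1];
            }
            used--;
            return;
        }
    }

    int main(void)
    {
        add_cidr(24); add_cidr(16); add_cidr(24); del_cidr(24);
        for (unsigned int i = 0; i < used; i++)
            printf("/%u x%u\n", (unsigned)cidr[i], nets[i]);  /* "/24 x1", "/16 x1" */
        return 0;
    }
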
net/netfilter/ipset/ip_set_hash_gen.h (n->size - AHASH_INIT_SIZE) * dsize, n 565 net/netfilter/ipset/ip_set_hash_gen.h tmp->size = n->size - AHASH_INIT_SIZE; n 566 net/netfilter/ipset/ip_set_hash_gen.h for (j = 0, d = 0; j < n->pos; j++) { n 567 net/netfilter/ipset/ip_set_hash_gen.h if (!test_bit(j, n->used)) n 569 net/netfilter/ipset/ip_set_hash_gen.h data = ahash_data(n, j, dsize); n 579 net/netfilter/ipset/ip_set_hash_gen.h kfree_rcu(n, rcu); n 654 net/netfilter/ipset/ip_set_hash_gen.h struct hbucket *n, *m; n 707 net/netfilter/ipset/ip_set_hash_gen.h n = __ipset_dereference(hbucket(orig, i)); n 708 net/netfilter/ipset/ip_set_hash_gen.h if (!n) n 710 net/netfilter/ipset/ip_set_hash_gen.h for (j = 0; j < n->pos; j++) { n 711 net/netfilter/ipset/ip_set_hash_gen.h if (!test_bit(j, n->used)) n 713 net/netfilter/ipset/ip_set_hash_gen.h data = ahash_data(n, j, dsize); n 829 net/netfilter/ipset/ip_set_hash_gen.h struct hbucket *n; n 836 net/netfilter/ipset/ip_set_hash_gen.h n = rcu_dereference_bh(hbucket(t, i)); n 837 net/netfilter/ipset/ip_set_hash_gen.h if (!n) n 839 net/netfilter/ipset/ip_set_hash_gen.h for (j = 0; j < n->pos; j++) { n 840 net/netfilter/ipset/ip_set_hash_gen.h if (!test_bit(j, n->used)) n 842 net/netfilter/ipset/ip_set_hash_gen.h data = ahash_data(n, j, set->dsize); n 862 net/netfilter/ipset/ip_set_hash_gen.h struct hbucket *n, *old = ERR_PTR(-ENOENT); n 892 net/netfilter/ipset/ip_set_hash_gen.h n = rcu_dereference_bh(hbucket(t, key)); n 893 net/netfilter/ipset/ip_set_hash_gen.h if (!n) { n 897 net/netfilter/ipset/ip_set_hash_gen.h n = kzalloc(sizeof(*n) + AHASH_INIT_SIZE * set->dsize, n 899 net/netfilter/ipset/ip_set_hash_gen.h if (!n) { n 903 net/netfilter/ipset/ip_set_hash_gen.h n->size = AHASH_INIT_SIZE; n 908 net/netfilter/ipset/ip_set_hash_gen.h for (i = 0; i < n->pos; i++) { n 909 net/netfilter/ipset/ip_set_hash_gen.h if (!test_bit(i, n->used)) { n 917 net/netfilter/ipset/ip_set_hash_gen.h data = ahash_data(n, i, set->dsize); n 936 net/netfilter/ipset/ip_set_hash_gen.h data = ahash_data(n, j, set->dsize); n 952 net/netfilter/ipset/ip_set_hash_gen.h if (n->pos >= n->size) { n 954 net/netfilter/ipset/ip_set_hash_gen.h if (n->size >= AHASH_MAX(h)) { n 960 net/netfilter/ipset/ip_set_hash_gen.h old = n; n 961 net/netfilter/ipset/ip_set_hash_gen.h n = kzalloc(sizeof(*n) + n 964 net/netfilter/ipset/ip_set_hash_gen.h if (!n) { n 968 net/netfilter/ipset/ip_set_hash_gen.h memcpy(n, old, sizeof(struct hbucket) + n 970 net/netfilter/ipset/ip_set_hash_gen.h n->size = old->size + AHASH_INIT_SIZE; n 976 net/netfilter/ipset/ip_set_hash_gen.h j = n->pos++; n 977 net/netfilter/ipset/ip_set_hash_gen.h data = ahash_data(n, j, set->dsize); n 999 net/netfilter/ipset/ip_set_hash_gen.h set_bit(j, n->used); n 1001 net/netfilter/ipset/ip_set_hash_gen.h rcu_assign_pointer(hbucket(t, key), n); n 1052 net/netfilter/ipset/ip_set_hash_gen.h struct hbucket *n; n 1069 net/netfilter/ipset/ip_set_hash_gen.h n = rcu_dereference_bh(hbucket(t, key)); n 1070 net/netfilter/ipset/ip_set_hash_gen.h if (!n) n 1072 net/netfilter/ipset/ip_set_hash_gen.h for (i = 0, k = 0; i < n->pos; i++) { n 1073 net/netfilter/ipset/ip_set_hash_gen.h if (!test_bit(i, n->used)) { n 1077 net/netfilter/ipset/ip_set_hash_gen.h data = ahash_data(n, i, dsize); n 1084 net/netfilter/ipset/ip_set_hash_gen.h clear_bit(i, n->used); n 1086 net/netfilter/ipset/ip_set_hash_gen.h if (i + 1 == n->pos) n 1087 net/netfilter/ipset/ip_set_hash_gen.h n->pos--; n 1109 net/netfilter/ipset/ip_set_hash_gen.h for (; i < n->pos; i++) { n 1110 
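
The add path quoted above grows a full hash bucket by allocating a copy that is AHASH_INIT_SIZE slots larger, memcpy()ing the old contents in, and publishing the new bucket while the old one is released later via kfree_rcu(). A plain-malloc sketch of that grow-by-copy step, ignoring RCU and the in-use bitmap (struct and constant names are illustrative):

    #include <stdlib.h>
    #include <string.h>

    #define INIT_SLOTS 4               /* stand-in for AHASH_INIT_SIZE */

    struct bucket {
        unsigned int pos;              /* next free slot */
        unsigned int size;             /* allocated slots */
        int value[];                   /* flexible array of elements */
    };

    static struct bucket *bucket_new(unsigned int slots)
    {
        struct bucket *b = calloc(1, sizeof(*b) + slots * sizeof(int));
        if (b)
            b->size = slots;
        return b;
    }

    /* Append one element, growing the bucket by INIT_SLOTS when it is full.
     * Returns the (possibly reallocated) bucket, or NULL on allocation failure. */
    static struct bucket *bucket_add(struct bucket *b, int v)
    {
        if (b->pos >= b->size) {
            struct bucket *n = bucket_new(b->size + INIT_SLOTS);
            if (!n)
                return NULL;
            memcpy(n, b, sizeof(*b) + b->size * sizeof(int));
            n->size = b->size + INIT_SLOTS;   /* memcpy copied the old size */
            free(b);                          /* the kernel defers this via kfree_rcu() */
            b = n;
        }
        b->value[b->pos++] = v;
        return b;
    }

    int main(void)
    {
        struct bucket *b = bucket_new(INIT_SLOTS);
        for (int i = 0; b && i < 10; i++)
            b = bucket_add(b, i);      /* grows from 4 to 8 to 12 slots */
        free(b);
        return 0;
    }
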
net/netfilter/ipset/ip_set_hash_gen.h if (!test_bit(i, n->used)) n 1113 net/netfilter/ipset/ip_set_hash_gen.h if (n->pos == 0 && k == 0) { n 1114 net/netfilter/ipset/ip_set_hash_gen.h t->hregion[r].ext_size -= ext_size(n->size, dsize); n 1116 net/netfilter/ipset/ip_set_hash_gen.h kfree_rcu(n, rcu); n 1119 net/netfilter/ipset/ip_set_hash_gen.h (n->size - AHASH_INIT_SIZE) * dsize, n 1123 net/netfilter/ipset/ip_set_hash_gen.h tmp->size = n->size - AHASH_INIT_SIZE; n 1124 net/netfilter/ipset/ip_set_hash_gen.h for (j = 0, k = 0; j < n->pos; j++) { n 1125 net/netfilter/ipset/ip_set_hash_gen.h if (!test_bit(j, n->used)) n 1127 net/netfilter/ipset/ip_set_hash_gen.h data = ahash_data(n, j, dsize); n 1136 net/netfilter/ipset/ip_set_hash_gen.h kfree_rcu(n, rcu); n 1176 net/netfilter/ipset/ip_set_hash_gen.h struct hbucket *n; n 1199 net/netfilter/ipset/ip_set_hash_gen.h n = rcu_dereference_bh(hbucket(t, key)); n 1200 net/netfilter/ipset/ip_set_hash_gen.h if (!n) n 1202 net/netfilter/ipset/ip_set_hash_gen.h for (i = 0; i < n->pos; i++) { n 1203 net/netfilter/ipset/ip_set_hash_gen.h if (!test_bit(i, n->used)) n 1205 net/netfilter/ipset/ip_set_hash_gen.h data = ahash_data(n, i, set->dsize); n 1232 net/netfilter/ipset/ip_set_hash_gen.h struct hbucket *n; n 1253 net/netfilter/ipset/ip_set_hash_gen.h n = rcu_dereference_bh(hbucket(t, key)); n 1254 net/netfilter/ipset/ip_set_hash_gen.h if (!n) { n 1258 net/netfilter/ipset/ip_set_hash_gen.h for (i = 0; i < n->pos; i++) { n 1259 net/netfilter/ipset/ip_set_hash_gen.h if (!test_bit(i, n->used)) n 1261 net/netfilter/ipset/ip_set_hash_gen.h data = ahash_data(n, i, set->dsize); n 1352 net/netfilter/ipset/ip_set_hash_gen.h const struct hbucket *n; n 1371 net/netfilter/ipset/ip_set_hash_gen.h n = rcu_dereference(hbucket(t, cb->args[IPSET_CB_ARG0])); n 1373 net/netfilter/ipset/ip_set_hash_gen.h cb->args[IPSET_CB_ARG0], t, n); n 1374 net/netfilter/ipset/ip_set_hash_gen.h if (!n) n 1376 net/netfilter/ipset/ip_set_hash_gen.h for (i = 0; i < n->pos; i++) { n 1377 net/netfilter/ipset/ip_set_hash_gen.h if (!test_bit(i, n->used)) n 1379 net/netfilter/ipset/ip_set_hash_gen.h e = ahash_data(n, i, set->dsize); n 1383 net/netfilter/ipset/ip_set_hash_gen.h cb->args[IPSET_CB_ARG0], n, i, e); n 177 net/netfilter/ipset/ip_set_list_set.c struct set_elem *e, *n; n 179 net/netfilter/ipset/ip_set_list_set.c list_for_each_entry_safe(e, n, &map->members, list) n 237 net/netfilter/ipset/ip_set_list_set.c struct set_elem *e, *n, *prev, *next; n 241 net/netfilter/ipset/ip_set_list_set.c n = prev = next = NULL; n 247 net/netfilter/ipset/ip_set_list_set.c n = e; n 262 net/netfilter/ipset/ip_set_list_set.c if (n) { n 266 net/netfilter/ipset/ip_set_list_set.c ip_set_ext_destroy(set, n); n 267 net/netfilter/ipset/ip_set_list_set.c list_set_init_extensions(set, ext, n); n 276 net/netfilter/ipset/ip_set_list_set.c n = list_empty(&map->members) ? 
NULL : n 281 net/netfilter/ipset/ip_set_list_set.c n = list_next_entry(next, list); n 285 net/netfilter/ipset/ip_set_list_set.c n = list_prev_entry(prev, list); n 288 net/netfilter/ipset/ip_set_list_set.c if (n && n 290 net/netfilter/ipset/ip_set_list_set.c ip_set_timeout_expired(ext_timeout(n, set)))) n 291 net/netfilter/ipset/ip_set_list_set.c n = NULL; n 300 net/netfilter/ipset/ip_set_list_set.c if (n) n 301 net/netfilter/ipset/ip_set_list_set.c list_set_replace(set, e, n); n 415 net/netfilter/ipset/ip_set_list_set.c struct set_elem *e, *n; n 417 net/netfilter/ipset/ip_set_list_set.c list_for_each_entry_safe(e, n, &map->members, list) n 427 net/netfilter/ipset/ip_set_list_set.c struct set_elem *e, *n; n 432 net/netfilter/ipset/ip_set_list_set.c list_for_each_entry_safe(e, n, &map->members, list) { n 448 net/netfilter/ipset/ip_set_list_set.c u32 n = 0; n 452 net/netfilter/ipset/ip_set_list_set.c n++; n 455 net/netfilter/ipset/ip_set_list_set.c return (sizeof(*map) + n * dsize); n 1581 net/netfilter/ipvs/ip_vs_ctl.c struct hlist_node *n; n 1587 net/netfilter/ipvs/ip_vs_ctl.c hlist_for_each_entry_safe(svc, n, &ip_vs_svc_table[idx], n 1598 net/netfilter/ipvs/ip_vs_ctl.c hlist_for_each_entry_safe(svc, n, &ip_vs_svc_fwm_table[idx], n 162 net/netfilter/ipvs/ip_vs_mh.c int n, c, dt_count; n 183 net/netfilter/ipvs/ip_vs_mh.c n = 0; n 185 net/netfilter/ipvs/ip_vs_mh.c while (n < IP_VS_MH_TAB_SIZE) { n 218 net/netfilter/ipvs/ip_vs_mh.c if (++n == IP_VS_MH_TAB_SIZE) n 468 net/netfilter/ipvs/ip_vs_sync.c unsigned long n = (now + cp->timeout) & ~3UL; n 507 net/netfilter/ipvs/ip_vs_sync.c long diff = n - orig; n 521 net/netfilter/ipvs/ip_vs_sync.c n |= retries + 1; n 535 net/netfilter/ipvs/ip_vs_sync.c n = cmpxchg(&cp->sync_endtime, orig, n); n 536 net/netfilter/ipvs/ip_vs_sync.c return n == orig || force; n 187 net/netfilter/nf_conntrack_core.c unsigned int n; n 197 net/netfilter/nf_conntrack_core.c n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32); n 198 net/netfilter/nf_conntrack_core.c return jhash2((u32 *)tuple, n, seed ^ n 735 net/netfilter/nf_conntrack_core.c struct hlist_nulls_node *n; n 742 net/netfilter/nf_conntrack_core.c hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) { n 759 net/netfilter/nf_conntrack_core.c if (get_nulls_value(n) != bucket) { n 825 net/netfilter/nf_conntrack_core.c struct hlist_nulls_node *n; n 840 net/netfilter/nf_conntrack_core.c hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode) n 845 net/netfilter/nf_conntrack_core.c hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode) n 936 net/netfilter/nf_conntrack_core.c struct hlist_nulls_node *n; n 998 net/netfilter/nf_conntrack_core.c hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode) n 1003 net/netfilter/nf_conntrack_core.c hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode) n 1059 net/netfilter/nf_conntrack_core.c struct hlist_nulls_node *n; n 1069 net/netfilter/nf_conntrack_core.c hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) { n 1103 net/netfilter/nf_conntrack_core.c if (get_nulls_value(n) != hash) { n 1122 net/netfilter/nf_conntrack_core.c struct hlist_nulls_node *n; n 1126 net/netfilter/nf_conntrack_core.c hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) { n 1240 net/netfilter/nf_conntrack_core.c struct hlist_nulls_node *n; n 1251 net/netfilter/nf_conntrack_core.c hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) { n 2062 net/netfilter/nf_conntrack_core.c struct hlist_nulls_node *n; n 2070 
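
The ip_vs_sync.c entries above pack a deadline and a small retry count into one word (note the & ~3UL mask and the n |= retries + 1), then install the new value with cmpxchg() and treat the update as successful only if the word was unchanged. A C11-atomics sketch of that packed-word compare-and-swap; the retry extraction and saturation here are illustrative, not quoted from the listing:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* One word packing a deadline (upper bits) and a 2-bit retry count. */
    static _Atomic unsigned long sync_endtime;

    /* Try to move the deadline forward and bump the retry count; a concurrent
     * updater makes the compare-exchange fail and the caller can back off. */
    static bool update_deadline(unsigned long now, unsigned long timeout)
    {
        unsigned long orig = atomic_load(&sync_endtime);
        unsigned long retries = orig & 3UL;          /* low bits: retry count */
        unsigned long n = (now + timeout) & ~3UL;    /* high bits: deadline */

        n |= retries < 3 ? retries + 1 : 3;          /* saturate the count */

        /* atomic_compare_exchange_strong() is the C11 analogue of cmpxchg():
         * it installs n only if the word still equals orig. */
        return atomic_compare_exchange_strong(&sync_endtime, &orig, n);
    }

    int main(void)
    {
        atomic_store(&sync_endtime, 0);
        printf("%d %lx\n", update_deadline(1000, 60) ? 1 : 0,
               (unsigned long)atomic_load(&sync_endtime));
        return 0;
    }
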
net/netfilter/nf_conntrack_core.c hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) { n 2139 net/netfilter/nf_conntrack_core.c struct hlist_nulls_node *n; n 2145 net/netfilter/nf_conntrack_core.c hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) { n 44 net/netfilter/nf_conntrack_ecache.c struct hlist_nulls_node *n; n 50 net/netfilter/nf_conntrack_ecache.c hlist_nulls_for_each_entry(h, n, &pcpu->dying, hnnode) { n 79 net/netfilter/nf_conntrack_expect.c static unsigned int nf_ct_expect_dst_hash(const struct net *n, const struct nf_conntrack_tuple *tuple) n 85 net/netfilter/nf_conntrack_expect.c seed = nf_ct_expect_hashrnd ^ net_hash_mix(n); n 553 net/netfilter/nf_conntrack_expect.c struct hlist_node *n; n 556 net/netfilter/nf_conntrack_expect.c n = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket])); n 557 net/netfilter/nf_conntrack_expect.c if (n) n 558 net/netfilter/nf_conntrack_expect.c return n; n 610 net/netfilter/nf_conntrack_expect.c struct hlist_node *n = v; n 613 net/netfilter/nf_conntrack_expect.c expect = hlist_entry(n, struct nf_conntrack_expect, hnode); n 319 net/netfilter/nf_conntrack_helper.c void nf_ct_helper_expectfn_register(struct nf_ct_helper_expectfn *n) n 322 net/netfilter/nf_conntrack_helper.c list_add_rcu(&n->head, &nf_ct_helper_expectfn_list); n 327 net/netfilter/nf_conntrack_helper.c void nf_ct_helper_expectfn_unregister(struct nf_ct_helper_expectfn *n) n 330 net/netfilter/nf_conntrack_helper.c list_del_rcu(&n->head); n 509 net/netfilter/nf_conntrack_helper.c unsigned int n) n 514 net/netfilter/nf_conntrack_helper.c for (i = 0; i < n; i++) { n 529 net/netfilter/nf_conntrack_helper.c unsigned int n) n 531 net/netfilter/nf_conntrack_helper.c while (n-- > 0) n 532 net/netfilter/nf_conntrack_helper.c nf_conntrack_helper_unregister(&helper[n]); n 909 net/netfilter/nf_conntrack_netlink.c struct hlist_nulls_node *n; n 933 net/netfilter/nf_conntrack_netlink.c hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[cb->args[0]], n 1400 net/netfilter/nf_conntrack_netlink.c struct hlist_nulls_node *n; n 1423 net/netfilter/nf_conntrack_netlink.c hlist_nulls_for_each_entry(h, n, list, hnnode) { n 108 net/netfilter/nf_conntrack_standalone.c struct hlist_nulls_node *n; n 113 net/netfilter/nf_conntrack_standalone.c n = rcu_dereference( n 115 net/netfilter/nf_conntrack_standalone.c if (!is_a_nulls(n)) n 116 net/netfilter/nf_conntrack_standalone.c return n; n 190 net/netfilter/nf_nat_core.c hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple) n 198 net/netfilter/nf_nat_core.c tuple->dst.protonum ^ nf_nat_hash_rnd ^ net_hash_mix(n)); n 182 net/netfilter/nf_nat_sip.c unsigned int olen, matchend, poff, plen, buflen, n; n 246 net/netfilter/nf_nat_sip.c &n) > 0 && n 247 net/netfilter/nf_nat_sip.c htons(n) == ct->tuplehash[dir].tuple.dst.u.udp.port && n 248 net/netfilter/nf_nat_sip.c htons(n) != ct->tuplehash[!dir].tuple.src.u.udp.port) { n 2732 net/netfilter/nf_tables_api.c unsigned int size, i, n, ulen = 0, usize = 0; n 2793 net/netfilter/nf_tables_api.c n = 0; n 2806 net/netfilter/nf_tables_api.c if (n == NFT_RULE_MAXEXPRS) n 2808 net/netfilter/nf_tables_api.c err = nf_tables_expr_parse(&ctx, tmp, &info[n]); n 2811 net/netfilter/nf_tables_api.c size += info[n].ops->size; n 2812 net/netfilter/nf_tables_api.c n++; n 2844 net/netfilter/nf_tables_api.c for (i = 0; i < n; i++) { n 2906 net/netfilter/nf_tables_api.c for (i = 0; i < n; i++) { n 3230 net/netfilter/nf_tables_api.c unsigned int n = 0, min = 0; n 3254 
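
In the nf_tables_api.c rule path above, n counts the expressions parsed from a request: the loop refuses more than NFT_RULE_MAXEXPRS, adds each expression's ops->size to a running total, and then walks the same n entries again. A sketch of that bounded count-and-size, then lay-out idiom; the single contiguous allocation in phase 2 is an assumption for illustration, not something shown in the listing:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define MAXEXPRS 8                        /* stand-in for NFT_RULE_MAXEXPRS */

    struct expr_info { size_t size; const char *name; };

    /* Phase 1: validate up to MAXEXPRS descriptors and total their sizes. */
    static int parse_exprs(const struct expr_info *req, unsigned int nreq,
                           struct expr_info *info, size_t *total)
    {
        unsigned int n = 0;

        *total = 0;
        for (unsigned int i = 0; i < nreq; i++) {
            if (n == MAXEXPRS)
                return -1;                    /* too many expressions */
            info[n] = req[i];
            *total += info[n].size;
            n++;
        }
        return (int)n;
    }

    int main(void)
    {
        struct expr_info req[] = { { 16, "payload" }, { 32, "cmp" } };
        struct expr_info info[MAXEXPRS];
        size_t total;
        int n = parse_exprs(req, 2, info, &total);

        if (n < 0)
            return 1;
        /* Phase 2: one buffer sized by the accumulated total, filled by
         * walking the same n descriptors again. */
        char *rule = calloc(1, total), *p = rule;
        for (int i = 0; i < n; i++) {
            memset(p, 0, info[i].size);       /* stand-in for expr construction */
            p += info[i].size;
        }
        printf("%d exprs, %zu bytes\n", n, total);
        free(rule);
        return 0;
    }
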
net/netfilter/nf_tables_api.c n = find_first_zero_bit(inuse, BITS_PER_BYTE * PAGE_SIZE); n 3255 net/netfilter/nf_tables_api.c if (n >= BITS_PER_BYTE * PAGE_SIZE) { n 3263 net/netfilter/nf_tables_api.c set->name = kasprintf(GFP_KERNEL, name, min + n); n 5681 net/netfilter/nf_tables_api.c int rem, n = 0, err; n 5696 net/netfilter/nf_tables_api.c dev_array[n++] = dev; n 5697 net/netfilter/nf_tables_api.c if (n == NFT_FLOWTABLE_DEVICE_MAX) { n 5707 net/netfilter/nf_tables_api.c *len = n; n 5725 net/netfilter/nf_tables_api.c int err, n = 0, i; n 5744 net/netfilter/nf_tables_api.c dev_array, &n); n 5748 net/netfilter/nf_tables_api.c ops = kcalloc(n, sizeof(struct nf_hook_ops), GFP_KERNEL); n 5755 net/netfilter/nf_tables_api.c flowtable->ops_len = n; n 5757 net/netfilter/nf_tables_api.c for (i = 0; i < n; i++) { n 80 net/netfilter/nfnetlink.c int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n) n 85 net/netfilter/nfnetlink.c for (cb_id = 0; cb_id < n->cb_count; cb_id++) n 86 net/netfilter/nfnetlink.c if (WARN_ON(n->cb[cb_id].attr_count > NFNL_MAX_ATTR_COUNT)) n 89 net/netfilter/nfnetlink.c nfnl_lock(n->subsys_id); n 90 net/netfilter/nfnetlink.c if (table[n->subsys_id].subsys) { n 91 net/netfilter/nfnetlink.c nfnl_unlock(n->subsys_id); n 94 net/netfilter/nfnetlink.c rcu_assign_pointer(table[n->subsys_id].subsys, n); n 95 net/netfilter/nfnetlink.c nfnl_unlock(n->subsys_id); n 101 net/netfilter/nfnetlink.c int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n) n 103 net/netfilter/nfnetlink.c nfnl_lock(n->subsys_id); n 104 net/netfilter/nfnetlink.c table[n->subsys_id].subsys = NULL; n 105 net/netfilter/nfnetlink.c nfnl_unlock(n->subsys_id); n 695 net/netfilter/nfnetlink_cthelper.c struct nfnl_cthelper *nlcth, *n; n 713 net/netfilter/nfnetlink_cthelper.c list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) { n 788 net/netfilter/nfnetlink_cthelper.c struct nfnl_cthelper *nlcth, *n; n 792 net/netfilter/nfnetlink_cthelper.c list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) { n 326 net/netfilter/nfnetlink_log.c unsigned int n; n 331 net/netfilter/nfnetlink_log.c n = max(inst_size, pkt_size); n 332 net/netfilter/nfnetlink_log.c skb = alloc_skb(n, GFP_ATOMIC | __GFP_NOWARN); n 334 net/netfilter/nfnetlink_log.c if (n > pkt_size) { n 821 net/netfilter/nfnetlink_log.c struct netlink_notify *n = ptr; n 822 net/netfilter/nfnetlink_log.c struct nfnl_log_net *log = nfnl_log_pernet(n->net); n 824 net/netfilter/nfnetlink_log.c if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) { n 835 net/netfilter/nfnetlink_log.c if (n->portid == inst->peer_portid) n 977 net/netfilter/nfnetlink_queue.c struct netlink_notify *n = ptr; n 978 net/netfilter/nfnetlink_queue.c struct nfnl_queue_net *q = nfnl_queue_pernet(n->net); n 980 net/netfilter/nfnetlink_queue.c if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) { n 991 net/netfilter/nfnetlink_queue.c if (n->portid == inst->peer_portid) n 276 net/netfilter/nft_set_bitmap.c struct nft_bitmap_elem *be, *n; n 278 net/netfilter/nft_set_bitmap.c list_for_each_entry_safe(be, n, &priv->list, head) n 92 net/netfilter/x_tables.c xt_register_targets(struct xt_target *target, unsigned int n) n 97 net/netfilter/x_tables.c for (i = 0; i < n; i++) { n 112 net/netfilter/x_tables.c xt_unregister_targets(struct xt_target *target, unsigned int n) n 114 net/netfilter/x_tables.c while (n-- > 0) n 115 net/netfilter/x_tables.c xt_unregister_target(&target[n]); n 142 net/netfilter/x_tables.c xt_register_matches(struct 
xt_match *match, unsigned int n) n 147 net/netfilter/x_tables.c for (i = 0; i < n; i++) { n 162 net/netfilter/x_tables.c xt_unregister_matches(struct xt_match *match, unsigned int n) n 164 net/netfilter/x_tables.c while (n-- > 0) n 165 net/netfilter/x_tables.c xt_unregister_match(&match[n]); n 36 net/netfilter/xt_TCPOPTSTRIP.c u_int16_t n, o; n 74 net/netfilter/xt_TCPOPTSTRIP.c n = TCPOPT_NOP; n 77 net/netfilter/xt_TCPOPTSTRIP.c n <<= 8; n 80 net/netfilter/xt_TCPOPTSTRIP.c htons(n), false); n 367 net/netfilter/xt_hashlimit.c struct hlist_node *n; n 370 net/netfilter/xt_hashlimit.c hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) { n 38 net/netfilter/xt_set.c #define ADT_OPT(n, f, d, fs, cfs, t, p, b, po, bo) \ n 39 net/netfilter/xt_set.c struct ip_set_adt_opt n = { \ n 25 net/netfilter/xt_u32.c __be32 n; n 42 net/netfilter/xt_u32.c if (skb_copy_bits(skb, pos, &n, sizeof(n)) < 0) n 44 net/netfilter/xt_u32.c val = ntohl(n); n 69 net/netfilter/xt_u32.c if (skb_copy_bits(skb, at + pos, &n, n 70 net/netfilter/xt_u32.c sizeof(n)) < 0) n 72 net/netfilter/xt_u32.c val = ntohl(n); n 62 net/netlabel/netlabel_addrlist.h struct netlbl_af4list *n = __af4list_entry(s); n 63 net/netlabel/netlabel_addrlist.h while (i != h && !n->valid) { n 65 net/netlabel/netlabel_addrlist.h n = __af4list_entry(i); n 67 net/netlabel/netlabel_addrlist.h return n; n 74 net/netlabel/netlabel_addrlist.h struct netlbl_af4list *n = __af4list_entry(s); n 75 net/netlabel/netlabel_addrlist.h while (i != h && !n->valid) { n 77 net/netlabel/netlabel_addrlist.h n = __af4list_entry(i); n 79 net/netlabel/netlabel_addrlist.h return n; n 129 net/netlabel/netlabel_addrlist.h struct netlbl_af6list *n = __af6list_entry(s); n 130 net/netlabel/netlabel_addrlist.h while (i != h && !n->valid) { n 132 net/netlabel/netlabel_addrlist.h n = __af6list_entry(i); n 134 net/netlabel/netlabel_addrlist.h return n; n 141 net/netlabel/netlabel_addrlist.h struct netlbl_af6list *n = __af6list_entry(s); n 142 net/netlabel/netlabel_addrlist.h while (i != h && !n->valid) { n 144 net/netlabel/netlabel_addrlist.h n = __af6list_entry(i); n 146 net/netlabel/netlabel_addrlist.h return n; n 769 net/netlink/af_netlink.c struct netlink_notify n = { n 775 net/netlink/af_netlink.c NETLINK_URELEASE, &n); n 805 net/netlink/genetlink.c int n = 0; n 815 net/netlink/genetlink.c if (n++ < fams_to_skip) n 821 net/netlink/genetlink.c n--; n 826 net/netlink/genetlink.c cb->args[0] = n; n 902 net/nfc/core.c struct nfc_se *se, *n; n 907 net/nfc/core.c list_for_each_entry_safe(se, n, &dev->secure_elements, list) n 960 net/nfc/core.c struct nfc_se *se, *n; n 967 net/nfc/core.c list_for_each_entry_safe(se, n, &dev->secure_elements, list) { n 825 net/nfc/digital_core.c struct digital_cmd *cmd, *n; n 837 net/nfc/digital_core.c list_for_each_entry_safe(cmd, n, &ddev->cmd_queue, queue) { n 40 net/nfc/digital_technology.c #define DIGITAL_SENSB_N(n) ((n) & 0x7) n 52 net/nfc/digital_technology.c #define DIGITAL_ATTRIB_P4_DID(n) ((n) & 0xf) n 1032 net/nfc/hci/core.c struct hci_msg *msg, *n; n 1059 net/nfc/hci/core.c list_for_each_entry_safe(msg, n, &hdev->msg_tx_queue, msg_l) { n 35 net/nfc/hci/llc.c struct nfc_llc_engine *llc_engine, *n; n 37 net/nfc/hci/llc.c list_for_each_entry_safe(llc_engine, n, &llc_engines, entry) { n 183 net/nfc/llcp_commands.c struct hlist_node *n; n 185 net/nfc/llcp_commands.c hlist_for_each_entry_safe(sdp, n, head, node) { n 556 net/nfc/llcp_commands.c struct hlist_node *n; n 563 net/nfc/llcp_commands.c hlist_for_each_entry_safe(sdp, n, tlv_list, node) { n 580 
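
The xt_register_targets()/xt_unregister_targets() and xt_register_matches()/xt_unregister_matches() entries above (and the nf_conntrack_helper pair earlier) use n both as the number of entries to register and, on the error path, as the index for unwinding what was already registered with while (n-- > 0). A generic sketch of that register-all-or-roll-back idiom, with invented widget names:

    #include <stdio.h>

    struct widget { const char *name; };

    static int register_one(const struct widget *w)
    {
        /* pretend registration of the widget named "c" fails */
        return w->name[0] == 'c' ? -1 : 0;
    }

    static void unregister_one(const struct widget *w)
    {
        printf("unregister %s\n", w->name);
    }

    /* Unregister in reverse order, mirroring xt_unregister_targets(). */
    static void unregister_widgets(const struct widget *w, unsigned int n)
    {
        while (n-- > 0)
            unregister_one(&w[n]);
    }

    /* Register n widgets; on failure, undo the ones that already succeeded. */
    static int register_widgets(const struct widget *w, unsigned int n)
    {
        unsigned int i;

        for (i = 0; i < n; i++) {
            if (register_one(&w[i]) < 0) {
                unregister_widgets(w, i);     /* roll back the partial batch */
                return -1;
            }
        }
        return 0;
    }

    int main(void)
    {
        struct widget w[] = { { "a" }, { "b" }, { "c" } };
        return register_widgets(w, 3) ? 1 : 0;
    }
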
net/nfc/llcp_commands.c struct hlist_node *n; n 593 net/nfc/llcp_commands.c hlist_for_each_entry_safe(sdreq, n, tlv_list, node) { n 88 net/nfc/llcp_core.c struct nfc_llcp_sock *lsk, *n; n 91 net/nfc/llcp_core.c list_for_each_entry_safe(lsk, n, n 246 net/nfc/llcp_core.c struct hlist_node *n; n 255 net/nfc/llcp_core.c hlist_for_each_entry_safe(sdp, n, &local->pending_sdreqs, node) { n 1079 net/nfc/llcp_core.c u8 n; n 1085 net/nfc/llcp_core.c n = nfc_llcp_ns(s); n 1090 net/nfc/llcp_core.c if (n == nr) n 400 net/nfc/llcp_sock.c struct nfc_llcp_sock *lsk, *n, *llcp_parent; n 405 net/nfc/llcp_sock.c list_for_each_entry_safe(lsk, n, &llcp_parent->accept_queue, n 608 net/nfc/llcp_sock.c struct nfc_llcp_sock *lsk, *n; n 611 net/nfc/llcp_sock.c list_for_each_entry_safe(lsk, n, &llcp_sock->accept_queue, n 1253 net/nfc/nci/core.c struct nci_conn_info *conn_info, *n; n 1261 net/nfc/nci/core.c list_for_each_entry_safe(conn_info, n, &ndev->conn_info_list, list) { n 371 net/nfc/netlink.c struct hlist_node *n; n 395 net/nfc/netlink.c hlist_for_each_entry_safe(sdres, n, sdres_list, node) { n 1330 net/nfc/netlink.c struct nfc_se *se, *n; n 1332 net/nfc/netlink.c list_for_each_entry_safe(se, n, &dev->secure_elements, list) { n 1819 net/nfc/netlink.c struct netlink_notify *n = ptr; n 1822 net/nfc/netlink.c if (event != NETLINK_URELEASE || n->protocol != NETLINK_GENERIC) n 1825 net/nfc/netlink.c pr_debug("NETLINK_URELEASE event from id %d\n", n->portid); n 1830 net/nfc/netlink.c w->portid = n->portid; n 1116 net/openvswitch/conntrack.c struct hlist_node *n; n 1119 net/openvswitch/conntrack.c hlist_for_each_entry_safe(ct_limit, n, head, hlist_node) { n 1709 net/openvswitch/datapath.c struct hlist_node *n; n 1711 net/openvswitch/datapath.c hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) n 45 net/openvswitch/dp_notify.c struct hlist_node *n; n 47 net/openvswitch/dp_notify.c hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) { n 216 net/openvswitch/flow_table.c struct hlist_node *n; n 220 net/openvswitch/flow_table.c hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) { n 593 net/openvswitch/meter.c struct hlist_node *n; n 595 net/openvswitch/meter.c hlist_for_each_entry_safe(meter, n, head, dp_hash_node) n 468 net/phonet/pep.c static u8 pipe_negotiate_fc(const u8 *fcs, unsigned int n) n 473 net/phonet/pep.c for (i = 0; i < n; i++) { n 97 net/rds/rds.h #define RDS_MPATH_HASH(rs, n) (jhash_1word((rs)->rs_bound_port, \ n 98 net/rds/rds.h (rs)->rs_hash_initval) & ((n) - 1)) n 640 net/rose/af_rose.c int n; n 680 net/rose/af_rose.c for (n = 0 ; n < addr->srose_ndigis ; n++) n 681 net/rose/af_rose.c rose->source_digis[n] = full_addr->srose_digis[n]; n 703 net/rose/af_rose.c int n, err = 0; n 785 net/rose/af_rose.c for (n = 0 ; n < addr->srose_ndigis ; n++) n 786 net/rose/af_rose.c rose->dest_digis[n] = full_addr->srose_digis[n]; n 923 net/rose/af_rose.c int n; n 933 net/rose/af_rose.c for (n = 0; n < rose->dest_ndigis; n++) n 934 net/rose/af_rose.c srose->srose_digis[n] = rose->dest_digis[n]; n 940 net/rose/af_rose.c for (n = 0; n < rose->source_ndigis; n++) n 941 net/rose/af_rose.c srose->srose_digis[n] = rose->source_digis[n]; n 953 net/rose/af_rose.c int n; n 988 net/rose/af_rose.c for (n = 0 ; n < facilities.dest_ndigis ; n++) n 989 net/rose/af_rose.c make_rose->dest_digis[n] = facilities.dest_digis[n]; n 993 net/rose/af_rose.c for (n = 0 ; n < facilities.source_ndigis ; n++) n 994 net/rose/af_rose.c make_rose->source_digis[n] = facilities.source_digis[n]; n 1037 
net/rose/af_rose.c int n, size, qbit = 0; n 1064 net/rose/af_rose.c for (n = 0 ; n < srose.srose_ndigis ; n++) n 1065 net/rose/af_rose.c if (ax25cmp(&rose->dest_digis[n], n 1066 net/rose/af_rose.c &srose.srose_digis[n])) n 1079 net/rose/af_rose.c for (n = 0 ; n < rose->dest_ndigis ; n++) n 1080 net/rose/af_rose.c srose.srose_digis[n] = rose->dest_digis[n]; n 1200 net/rose/af_rose.c int n, er, qbit; n 1243 net/rose/af_rose.c for (n = 0 ; n < rose->dest_ndigis ; n++) n 1244 net/rose/af_rose.c full_srose->srose_digis[n] = rose->dest_digis[n]; n 242 net/rose/rose_subr.c unsigned char l, lg, n = 0; n 251 net/rose/rose_subr.c n += 2; n 261 net/rose/rose_subr.c n += 3; n 269 net/rose/rose_subr.c n += 4; n 324 net/rose/rose_subr.c n += l + 2; n 330 net/rose/rose_subr.c return n; n 335 net/rose/rose_subr.c unsigned char l, n = 0; n 344 net/rose/rose_subr.c n += 2; n 352 net/rose/rose_subr.c n += 3; n 360 net/rose/rose_subr.c n += 4; n 386 net/rose/rose_subr.c n += l + 2; n 392 net/rose/rose_subr.c return n; n 29 net/rxrpc/ar-internal.h __be32 n[2]; n 375 net/rxrpc/call_object.c int n = atomic_fetch_add_unless(&call->usage, 1, 0); n 376 net/rxrpc/call_object.c if (n == 0) n 379 net/rxrpc/call_object.c trace_rxrpc_call(call->debug_id, rxrpc_call_queued, n + 1, n 392 net/rxrpc/call_object.c int n = atomic_read(&call->usage); n 393 net/rxrpc/call_object.c ASSERTCMP(n, >=, 1); n 395 net/rxrpc/call_object.c trace_rxrpc_call(call->debug_id, rxrpc_call_queued_ref, n, n 409 net/rxrpc/call_object.c int n = atomic_read(&call->usage); n 411 net/rxrpc/call_object.c trace_rxrpc_call(call->debug_id, rxrpc_call_seen, n, n 422 net/rxrpc/call_object.c int n = atomic_inc_return(&call->usage); n 424 net/rxrpc/call_object.c trace_rxrpc_call(call->debug_id, op, n, here, NULL); n 542 net/rxrpc/call_object.c int n; n 546 net/rxrpc/call_object.c n = atomic_dec_return(&call->usage); n 547 net/rxrpc/call_object.c trace_rxrpc_call(debug_id, op, n, here, NULL); n 548 net/rxrpc/call_object.c ASSERTCMP(n, >=, 0); n 549 net/rxrpc/call_object.c if (n == 0) { n 999 net/rxrpc/conn_client.c int n; n 1002 net/rxrpc/conn_client.c n = atomic_dec_return(&conn->usage); n 1003 net/rxrpc/conn_client.c trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, n, here); n 1004 net/rxrpc/conn_client.c if (n > 0) n 1006 net/rxrpc/conn_client.c ASSERTCMP(n, >=, 0); n 267 net/rxrpc/conn_object.c int n = atomic_fetch_add_unless(&conn->usage, 1, 0); n 268 net/rxrpc/conn_object.c if (n == 0) n 271 net/rxrpc/conn_object.c trace_rxrpc_conn(conn->debug_id, rxrpc_conn_queued, n + 1, here); n 284 net/rxrpc/conn_object.c int n = atomic_read(&conn->usage); n 286 net/rxrpc/conn_object.c trace_rxrpc_conn(conn->debug_id, rxrpc_conn_seen, n, here); n 296 net/rxrpc/conn_object.c int n = atomic_inc_return(&conn->usage); n 298 net/rxrpc/conn_object.c trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, n, here); n 310 net/rxrpc/conn_object.c int n = atomic_fetch_add_unless(&conn->usage, 1, 0); n 311 net/rxrpc/conn_object.c if (n > 0) n 312 net/rxrpc/conn_object.c trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, n + 1, here); n 336 net/rxrpc/conn_object.c int n; n 338 net/rxrpc/conn_object.c n = atomic_dec_return(&conn->usage); n 339 net/rxrpc/conn_object.c trace_rxrpc_conn(debug_id, rxrpc_conn_put_service, n, here); n 340 net/rxrpc/conn_object.c ASSERTCMP(n, >=, 0); n 341 net/rxrpc/conn_object.c if (n == 1) n 312 net/rxrpc/local_object.c int n; n 314 net/rxrpc/local_object.c n = atomic_inc_return(&local->usage); n 315 net/rxrpc/local_object.c 
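
The rxrpc call, connection and local entries above trace every reference change: atomic_inc_return() for a plain get, atomic_fetch_add_unless(&x->usage, 1, 0) to take a reference only if the object is not already being torn down, and atomic_dec_return() with cleanup when the count reaches zero. A C11-atomics sketch of those three operations, with the tracing left out and invented object names:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        atomic_int usage;
    };

    static struct obj *obj_new(void)
    {
        struct obj *o = malloc(sizeof(*o));
        atomic_init(&o->usage, 1);             /* caller owns the first reference */
        return o;
    }

    static void obj_get(struct obj *o)
    {
        atomic_fetch_add(&o->usage, 1);        /* like atomic_inc_return() */
    }

    /* Take a reference only if the count is still non-zero, like
     * atomic_fetch_add_unless(&usage, 1, 0) in the rxrpc entries above. */
    static bool obj_get_maybe(struct obj *o)
    {
        int n = atomic_load(&o->usage);

        while (n != 0) {
            if (atomic_compare_exchange_weak(&o->usage, &n, n + 1))
                return true;
        }
        return false;
    }

    static void obj_put(struct obj *o)
    {
        if (atomic_fetch_sub(&o->usage, 1) == 1)
            free(o);                           /* last reference dropped */
    }

    int main(void)
    {
        struct obj *o = obj_new();
        obj_get(o);
        printf("%d\n", obj_get_maybe(o) ? 1 : 0);   /* prints 1 */
        obj_put(o); obj_put(o); obj_put(o);
        return 0;
    }
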
trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here); n 327 net/rxrpc/local_object.c int n = atomic_fetch_add_unless(&local->usage, 1, 0); n 328 net/rxrpc/local_object.c if (n > 0) n 330 net/rxrpc/local_object.c n + 1, here); n 344 net/rxrpc/local_object.c int n = atomic_read(&local->usage); n 347 net/rxrpc/local_object.c trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here); n 359 net/rxrpc/local_object.c int n; n 364 net/rxrpc/local_object.c n = atomic_dec_return(&local->usage); n 365 net/rxrpc/local_object.c trace_rxrpc_local(debug_id, rxrpc_local_put, n, here); n 367 net/rxrpc/local_object.c if (n == 0) n 138 net/rxrpc/output.c size_t len, n; n 180 net/rxrpc/output.c n = rxrpc_fill_out_ack(conn, call, pkt, &hard_ack, &top, reason); n 185 net/rxrpc/output.c iov[0].iov_len = sizeof(pkt->whdr) + sizeof(pkt->ack) + n; n 383 net/rxrpc/peer_object.c int n; n 385 net/rxrpc/peer_object.c n = atomic_inc_return(&peer->usage); n 386 net/rxrpc/peer_object.c trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n, here); n 398 net/rxrpc/peer_object.c int n = atomic_fetch_add_unless(&peer->usage, 1, 0); n 399 net/rxrpc/peer_object.c if (n > 0) n 400 net/rxrpc/peer_object.c trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n + 1, here); n 432 net/rxrpc/peer_object.c int n; n 436 net/rxrpc/peer_object.c n = atomic_dec_return(&peer->usage); n 437 net/rxrpc/peer_object.c trace_rxrpc_peer(debug_id, rxrpc_peer_put, n, here); n 438 net/rxrpc/peer_object.c if (n == 0) n 451 net/rxrpc/peer_object.c int n; n 453 net/rxrpc/peer_object.c n = atomic_dec_return(&peer->usage); n 454 net/rxrpc/peer_object.c trace_rxrpc_peer(debug_id, rxrpc_peer_put, n, here); n 455 net/rxrpc/peer_object.c if (n == 0) { n 256 net/rxrpc/proc.c unsigned int bucket, n; n 265 net/rxrpc/proc.c n = *_pos & ((1U << shift) - 1); n 272 net/rxrpc/proc.c if (n == 0) { n 276 net/rxrpc/proc.c n++; n 279 net/rxrpc/proc.c p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1); n 283 net/rxrpc/proc.c n = 1; n 284 net/rxrpc/proc.c *_pos = (bucket << shift) | n; n 291 net/rxrpc/proc.c unsigned int bucket, n; n 306 net/rxrpc/proc.c n = 1; n 307 net/rxrpc/proc.c *_pos = (bucket << shift) | n; n 313 net/rxrpc/proc.c if (n == 0) { n 315 net/rxrpc/proc.c n++; n 318 net/rxrpc/proc.c p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1); n 993 net/rxrpc/rxkad.c _debug("KIV KEY : %08x %08x", ntohl(key.n[0]), ntohl(key.n[1])); n 1061 net/rxrpc/rxkad.c ntohl(session_key->n[0]), ntohl(session_key->n[1])); n 26 net/rxrpc/skbuff.c int n = atomic_inc_return(select_skb_count(skb)); n 27 net/rxrpc/skbuff.c trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, n 38 net/rxrpc/skbuff.c int n = atomic_read(select_skb_count(skb)); n 39 net/rxrpc/skbuff.c trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, n 50 net/rxrpc/skbuff.c int n = atomic_inc_return(select_skb_count(skb)); n 51 net/rxrpc/skbuff.c trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, n 62 net/rxrpc/skbuff.c int n = atomic_inc_return(&rxrpc_n_rx_skbs); n 63 net/rxrpc/skbuff.c trace_rxrpc_skb(skb, op, 0, n, 0, here); n 73 net/rxrpc/skbuff.c int n; n 75 net/rxrpc/skbuff.c n = atomic_dec_return(select_skb_count(skb)); n 76 net/rxrpc/skbuff.c trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, n 90 net/rxrpc/skbuff.c int n = atomic_dec_return(select_skb_count(skb)); n 92 net/rxrpc/skbuff.c refcount_read(&skb->users), n, n 1073 net/sched/act_api.c tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n, n 1082 net/sched/act_api.c if (tca_get_fill(skb, actions, portid, 
n->nlmsg_seq, 0, event, n 1093 net/sched/act_api.c struct nlmsghdr *n, u32 portid, n 1137 net/sched/act_api.c struct nlmsghdr *n, u32 portid, n 1170 net/sched/act_api.c nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION, n 1199 net/sched/act_api.c n->nlmsg_flags & NLM_F_ECHO); n 1244 net/sched/act_api.c tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[], n 1255 net/sched/act_api.c if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION, n 1271 net/sched/act_api.c n->nlmsg_flags & NLM_F_ECHO); n 1278 net/sched/act_api.c tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, n 1292 net/sched/act_api.c if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) { n 1294 net/sched/act_api.c return tca_action_flush(net, tb[1], n, portid, extack); n 1301 net/sched/act_api.c act = tcf_action_get_1(net, tb[i], n, portid, extack); n 1313 net/sched/act_api.c ret = tcf_get_notify(net, portid, n, actions, event, extack); n 1315 net/sched/act_api.c ret = tcf_del_notify(net, n, actions, portid, attr_size, extack); n 1326 net/sched/act_api.c tcf_add_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[], n 1337 net/sched/act_api.c if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags, n 1345 net/sched/act_api.c n->nlmsg_flags & NLM_F_ECHO); n 1352 net/sched/act_api.c struct nlmsghdr *n, u32 portid, int ovr, n 1368 net/sched/act_api.c ret = tcf_add_notify(net, n, actions, portid, attr_size, extack); n 1382 net/sched/act_api.c static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, n 1390 net/sched/act_api.c if ((n->nlmsg_type != RTM_GETACTION) && n 1394 net/sched/act_api.c ret = nlmsg_parse_deprecated(n, sizeof(struct tcamsg), tca, n 1405 net/sched/act_api.c switch (n->nlmsg_type) { n 1413 net/sched/act_api.c if (n->nlmsg_flags & NLM_F_REPLACE) n 1415 net/sched/act_api.c ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr, n 1419 net/sched/act_api.c ret = tca_action_gd(net, tca[TCA_ACT_TAB], n, n 1423 net/sched/act_api.c ret = tca_action_gd(net, tca[TCA_ACT_TAB], n, n 410 net/sched/act_ife.c struct tcf_meta_info *e, *n; n 412 net/sched/act_ife.c list_for_each_entry_safe(e, n, &ife->metalist, metalist) { n 749 net/sched/act_ife.c struct tcf_meta_info *e, *n; n 752 net/sched/act_ife.c list_for_each_entry_safe(e, n, &ife->metalist, metalist) { n 38 net/sched/act_pedit.c u8 n) n 49 net/sched/act_pedit.c keys_ex = kcalloc(n, sizeof(*k), GFP_KERNEL); n 58 net/sched/act_pedit.c if (!n) { n 62 net/sched/act_pedit.c n--; n 93 net/sched/act_pedit.c if (n) { n 106 net/sched/act_pedit.c struct tcf_pedit_key_ex *keys_ex, int n) n 113 net/sched/act_pedit.c for (; n > 0; n--) { n 1827 net/sched/cls_api.c struct nlmsghdr *n, struct tcf_proto *tp, n 1841 net/sched/cls_api.c n->nlmsg_seq, n->nlmsg_flags, event, n 1851 net/sched/cls_api.c n->nlmsg_flags & NLM_F_ECHO); n 1859 net/sched/cls_api.c struct nlmsghdr *n, struct tcf_proto *tp, n 1873 net/sched/cls_api.c n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER, n 1890 net/sched/cls_api.c n->nlmsg_flags & NLM_F_ECHO); n 1901 net/sched/cls_api.c u32 parent, struct nlmsghdr *n, n 1909 net/sched/cls_api.c tfilter_notify(net, oskb, n, tp, block, n 1919 net/sched/cls_api.c static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n, n 1948 net/sched/cls_api.c err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, n 1953 net/sched/cls_api.c t = nlmsg_data(n); n 1966 net/sched/cls_api.c if (n->nlmsg_flags & NLM_F_CREATE) { n 2048 net/sched/cls_api.c if 
(!(n->nlmsg_flags & NLM_F_CREATE)) { n 2086 net/sched/cls_api.c if (!(n->nlmsg_flags & NLM_F_CREATE)) { n 2091 net/sched/cls_api.c } else if (n->nlmsg_flags & NLM_F_EXCL) { n 2105 net/sched/cls_api.c n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE, n 2108 net/sched/cls_api.c tfilter_notify(net, skb, n, tp, block, q, parent, fh, n 2146 net/sched/cls_api.c static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n, n 2170 net/sched/cls_api.c err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, n 2175 net/sched/cls_api.c t = nlmsg_data(n); n 2239 net/sched/cls_api.c tfilter_notify_chain(net, skb, block, q, parent, n, n 2263 net/sched/cls_api.c tfilter_notify(net, skb, n, tp, block, q, parent, fh, n 2278 net/sched/cls_api.c err = tfilter_del_notify(net, skb, n, tp, block, n 2306 net/sched/cls_api.c static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n, n 2327 net/sched/cls_api.c err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, n 2332 net/sched/cls_api.c t = nlmsg_data(n); n 2407 net/sched/cls_api.c err = tfilter_notify(net, skb, n, tp, block, q, parent, n 2437 net/sched/cls_api.c static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg) n 2443 net/sched/cls_api.c n, NETLINK_CB(a->cb->skb).portid, n 2759 net/sched/cls_api.c static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n, n 2773 net/sched/cls_api.c if (n->nlmsg_type != RTM_GETCHAIN && n 2778 net/sched/cls_api.c err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, n 2783 net/sched/cls_api.c t = nlmsg_data(n); n 2801 net/sched/cls_api.c if (n->nlmsg_type == RTM_NEWCHAIN) { n 2814 net/sched/cls_api.c if (!(n->nlmsg_flags & NLM_F_CREATE)) { n 2835 net/sched/cls_api.c if (n->nlmsg_type == RTM_NEWCHAIN) { n 2846 net/sched/cls_api.c switch (n->nlmsg_type) { n 2858 net/sched/cls_api.c tfilter_notify_chain(net, skb, block, q, parent, n, n 2868 net/sched/cls_api.c err = tc_chain_notify(chain, skb, n->nlmsg_seq, n 2869 net/sched/cls_api.c n->nlmsg_seq, n->nlmsg_type, true); n 110 net/sched/cls_basic.c struct basic_filter *f, *n; n 112 net/sched/cls_basic.c list_for_each_entry_safe(f, n, &head->flist, link) { n 302 net/sched/cls_flow.c unsigned int n, key; n 316 net/sched/cls_flow.c for (n = 0; n < f->nkeys; n++) { n 319 net/sched/cls_flow.c keys[n] = flow_key_get(skb, key, &flow_keys); n 220 net/sched/cls_rsvp.h static void rsvp_replace(struct tcf_proto *tp, struct rsvp_filter *n, u32 h) n 234 net/sched/cls_rsvp.h RCU_INIT_POINTER(n->next, pins->next); n 235 net/sched/cls_rsvp.h rcu_assign_pointer(*ins, n); n 410 net/sched/cls_rsvp.h int n = data->tgenerator >> 5; n 413 net/sched/cls_rsvp.h if (data->tmap[n] & b) n 415 net/sched/cls_rsvp.h data->tmap[n] |= b; n 510 net/sched/cls_rsvp.h struct rsvp_filter *n; n 515 net/sched/cls_rsvp.h n = kmemdup(f, sizeof(*f), GFP_KERNEL); n 516 net/sched/cls_rsvp.h if (!n) { n 521 net/sched/cls_rsvp.h err = tcf_exts_init(&n->exts, net, TCA_RSVP_ACT, n 524 net/sched/cls_rsvp.h kfree(n); n 529 net/sched/cls_rsvp.h n->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]); n 530 net/sched/cls_rsvp.h tcf_bind_filter(tp, &n->res, base); n 533 net/sched/cls_rsvp.h tcf_exts_change(&n->exts, &e); n 534 net/sched/cls_rsvp.h rsvp_replace(tp, n, handle); n 113 net/sched/cls_u32.c struct tc_u_knode *n; n 123 net/sched/cls_u32.c n = rcu_dereference_bh(ht->ht[sel]); n 126 net/sched/cls_u32.c if (n) { n 127 net/sched/cls_u32.c struct tc_u32_key *key = n->sel.keys; n 130 net/sched/cls_u32.c __this_cpu_inc(n->pf->rcnt); n 134 net/sched/cls_u32.c if 
(tc_skip_sw(n->flags)) { n 135 net/sched/cls_u32.c n = rcu_dereference_bh(n->next); n 140 net/sched/cls_u32.c if ((skb->mark & n->mask) != n->val) { n 141 net/sched/cls_u32.c n = rcu_dereference_bh(n->next); n 144 net/sched/cls_u32.c __this_cpu_inc(*n->pcpu_success); n 148 net/sched/cls_u32.c for (i = n->sel.nkeys; i > 0; i--, key++) { n 159 net/sched/cls_u32.c n = rcu_dereference_bh(n->next); n 163 net/sched/cls_u32.c __this_cpu_inc(n->pf->kcnts[j]); n 168 net/sched/cls_u32.c ht = rcu_dereference_bh(n->ht_down); n 171 net/sched/cls_u32.c if (n->sel.flags & TC_U32_TERMINAL) { n 173 net/sched/cls_u32.c *res = n->res; n 174 net/sched/cls_u32.c if (!tcf_match_indev(skb, n->ifindex)) { n 175 net/sched/cls_u32.c n = rcu_dereference_bh(n->next); n 179 net/sched/cls_u32.c __this_cpu_inc(n->pf->rhit); n 181 net/sched/cls_u32.c r = tcf_exts_exec(skb, &n->exts, res); n 183 net/sched/cls_u32.c n = rcu_dereference_bh(n->next); n 189 net/sched/cls_u32.c n = rcu_dereference_bh(n->next); n 196 net/sched/cls_u32.c stack[sdepth].knode = n; n 200 net/sched/cls_u32.c ht = rcu_dereference_bh(n->ht_down); n 205 net/sched/cls_u32.c data = skb_header_pointer(skb, off + n->sel.hoff, 4, n 209 net/sched/cls_u32.c sel = ht->divisor & u32_hash_fold(*data, &n->sel, n 210 net/sched/cls_u32.c n->fshift); n 212 net/sched/cls_u32.c if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT))) n 215 net/sched/cls_u32.c if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) { n 216 net/sched/cls_u32.c off2 = n->sel.off + 3; n 217 net/sched/cls_u32.c if (n->sel.flags & TC_U32_VAROFFSET) { n 221 net/sched/cls_u32.c off + n->sel.offoff, n 225 net/sched/cls_u32.c off2 += ntohs(n->sel.offmask & *data) >> n 226 net/sched/cls_u32.c n->sel.offshift; n 230 net/sched/cls_u32.c if (n->sel.flags & TC_U32_EAT) { n 241 net/sched/cls_u32.c n = stack[sdepth].knode; n 242 net/sched/cls_u32.c ht = rcu_dereference_bh(n->ht_up); n 270 net/sched/cls_u32.c struct tc_u_knode *n = NULL; n 276 net/sched/cls_u32.c for (n = rtnl_dereference(ht->ht[sel]); n 277 net/sched/cls_u32.c n; n 278 net/sched/cls_u32.c n = rtnl_dereference(n->next)) n 279 net/sched/cls_u32.c if (n->handle == handle) n 282 net/sched/cls_u32.c return n; n 389 net/sched/cls_u32.c static int u32_destroy_key(struct tc_u_knode *n, bool free_pf) n 391 net/sched/cls_u32.c struct tc_u_hnode *ht = rtnl_dereference(n->ht_down); n 393 net/sched/cls_u32.c tcf_exts_destroy(&n->exts); n 394 net/sched/cls_u32.c tcf_exts_put_net(&n->exts); n 399 net/sched/cls_u32.c free_percpu(n->pf); n 403 net/sched/cls_u32.c free_percpu(n->pcpu_success); n 405 net/sched/cls_u32.c kfree(n); n 515 net/sched/cls_u32.c static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n, n 521 net/sched/cls_u32.c tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack); n 523 net/sched/cls_u32.c cls_u32.knode.handle = n->handle; n 526 net/sched/cls_u32.c &n->flags, &n->in_hw_count, true); n 529 net/sched/cls_u32.c static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n, n 532 net/sched/cls_u32.c struct tc_u_hnode *ht = rtnl_dereference(n->ht_down); n 540 net/sched/cls_u32.c cls_u32.knode.handle = n->handle; n 541 net/sched/cls_u32.c cls_u32.knode.fshift = n->fshift; n 543 net/sched/cls_u32.c cls_u32.knode.val = n->val; n 544 net/sched/cls_u32.c cls_u32.knode.mask = n->mask; n 549 net/sched/cls_u32.c cls_u32.knode.sel = &n->sel; n 550 net/sched/cls_u32.c cls_u32.knode.res = &n->res; n 551 net/sched/cls_u32.c cls_u32.knode.exts = &n->exts; n 552 net/sched/cls_u32.c if 
(n->ht_down) n 556 net/sched/cls_u32.c &n->flags, &n->in_hw_count, true); n 558 net/sched/cls_u32.c u32_remove_hw_knode(tp, n, NULL); n 562 net/sched/cls_u32.c if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW)) n 572 net/sched/cls_u32.c struct tc_u_knode *n; n 576 net/sched/cls_u32.c while ((n = rtnl_dereference(ht->ht[h])) != NULL) { n 578 net/sched/cls_u32.c rtnl_dereference(n->next)); n 580 net/sched/cls_u32.c tcf_unbind_filter(tp, &n->res); n 581 net/sched/cls_u32.c u32_remove_hw_knode(tp, n, extack); n 582 net/sched/cls_u32.c idr_remove(&ht->handle_idr, n->handle); n 583 net/sched/cls_u32.c if (tcf_exts_get_net(&n->exts)) n 584 net/sched/cls_u32.c tcf_queue_work(&n->rwork, u32_delete_key_freepf_work); n 586 net/sched/cls_u32.c u32_destroy_key(n, true); n 711 net/sched/cls_u32.c struct tc_u_knode *n, struct nlattr **tb, n 717 net/sched/cls_u32.c err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr, true, extack); n 744 net/sched/cls_u32.c ht_old = rtnl_dereference(n->ht_down); n 745 net/sched/cls_u32.c rcu_assign_pointer(n->ht_down, ht_down); n 751 net/sched/cls_u32.c n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]); n 752 net/sched/cls_u32.c tcf_bind_filter(tp, &n->res, base); n 760 net/sched/cls_u32.c n->ifindex = ret; n 766 net/sched/cls_u32.c struct tc_u_knode *n) n 772 net/sched/cls_u32.c if (TC_U32_HTID(n->handle) == TC_U32_ROOT) n 775 net/sched/cls_u32.c ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle)); n 777 net/sched/cls_u32.c ins = &ht->ht[TC_U32_HASH(n->handle)]; n 784 net/sched/cls_u32.c if (pins->handle == n->handle) n 787 net/sched/cls_u32.c idr_replace(&ht->handle_idr, n, n->handle); n 788 net/sched/cls_u32.c RCU_INIT_POINTER(n->next, pins->next); n 789 net/sched/cls_u32.c rcu_assign_pointer(*ins, n); n 793 net/sched/cls_u32.c struct tc_u_knode *n) n 795 net/sched/cls_u32.c struct tc_u_hnode *ht = rtnl_dereference(n->ht_down); n 796 net/sched/cls_u32.c struct tc_u32_sel *s = &n->sel; n 799 net/sched/cls_u32.c new = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), n 805 net/sched/cls_u32.c RCU_INIT_POINTER(new->next, n->next); n 806 net/sched/cls_u32.c new->handle = n->handle; n 807 net/sched/cls_u32.c RCU_INIT_POINTER(new->ht_up, n->ht_up); n 809 net/sched/cls_u32.c new->ifindex = n->ifindex; n 810 net/sched/cls_u32.c new->fshift = n->fshift; n 811 net/sched/cls_u32.c new->res = n->res; n 812 net/sched/cls_u32.c new->flags = n->flags; n 824 net/sched/cls_u32.c new->pf = n->pf; n 828 net/sched/cls_u32.c new->val = n->val; n 829 net/sched/cls_u32.c new->mask = n->mask; n 831 net/sched/cls_u32.c new->pcpu_success = n->pcpu_success; n 850 net/sched/cls_u32.c struct tc_u_knode *n; n 883 net/sched/cls_u32.c n = *arg; n 884 net/sched/cls_u32.c if (n) { n 887 net/sched/cls_u32.c if (TC_U32_KEY(n->handle) == 0) { n 892 net/sched/cls_u32.c if ((n->flags ^ flags) & n 898 net/sched/cls_u32.c new = u32_init_knode(net, tp, n); n 920 net/sched/cls_u32.c tcf_unbind_filter(tp, &n->res); n 921 net/sched/cls_u32.c tcf_exts_get_net(&n->exts); n 922 net/sched/cls_u32.c tcf_queue_work(&n->rwork, u32_delete_key_work); n 1027 net/sched/cls_u32.c n = kzalloc(offsetof(typeof(*n), sel) + sel_size, GFP_KERNEL); n 1028 net/sched/cls_u32.c if (n == NULL) { n 1035 net/sched/cls_u32.c n->pf = __alloc_percpu(size, __alignof__(struct tc_u32_pcnt)); n 1036 net/sched/cls_u32.c if (!n->pf) { n 1042 net/sched/cls_u32.c memcpy(&n->sel, s, sel_size); n 1043 net/sched/cls_u32.c RCU_INIT_POINTER(n->ht_up, ht); n 1044 net/sched/cls_u32.c n->handle = handle; n 1045 net/sched/cls_u32.c n->fshift = 
s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0; n 1046 net/sched/cls_u32.c n->flags = flags; n 1048 net/sched/cls_u32.c err = tcf_exts_init(&n->exts, net, TCA_U32_ACT, TCA_U32_POLICE); n 1053 net/sched/cls_u32.c n->pcpu_success = alloc_percpu(u32); n 1054 net/sched/cls_u32.c if (!n->pcpu_success) { n 1063 net/sched/cls_u32.c n->val = mark->val; n 1064 net/sched/cls_u32.c n->mask = mark->mask; n 1068 net/sched/cls_u32.c err = u32_set_parms(net, tp, base, n, tb, tca[TCA_RATE], ovr, n 1074 net/sched/cls_u32.c err = u32_replace_hw_knode(tp, n, flags, extack); n 1078 net/sched/cls_u32.c if (!tc_in_hw(n->flags)) n 1079 net/sched/cls_u32.c n->flags |= TCA_CLS_FLAGS_NOT_IN_HW; n 1087 net/sched/cls_u32.c RCU_INIT_POINTER(n->next, pins); n 1088 net/sched/cls_u32.c rcu_assign_pointer(*ins, n); n 1090 net/sched/cls_u32.c *arg = n; n 1096 net/sched/cls_u32.c free_percpu(n->pcpu_success); n 1100 net/sched/cls_u32.c tcf_exts_destroy(&n->exts); n 1103 net/sched/cls_u32.c free_percpu(n->pf); n 1105 net/sched/cls_u32.c kfree(n); n 1116 net/sched/cls_u32.c struct tc_u_knode *n; n 1135 net/sched/cls_u32.c for (n = rtnl_dereference(ht->ht[h]); n 1136 net/sched/cls_u32.c n; n 1137 net/sched/cls_u32.c n = rtnl_dereference(n->next)) { n 1142 net/sched/cls_u32.c if (arg->fn(tp, n, arg) < 0) { n 1172 net/sched/cls_u32.c static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n, n 1176 net/sched/cls_u32.c struct tc_u_hnode *ht = rtnl_dereference(n->ht_down); n 1181 net/sched/cls_u32.c tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack); n 1184 net/sched/cls_u32.c cls_u32.knode.handle = n->handle; n 1187 net/sched/cls_u32.c cls_u32.knode.fshift = n->fshift; n 1189 net/sched/cls_u32.c cls_u32.knode.val = n->val; n 1190 net/sched/cls_u32.c cls_u32.knode.mask = n->mask; n 1195 net/sched/cls_u32.c cls_u32.knode.sel = &n->sel; n 1196 net/sched/cls_u32.c cls_u32.knode.res = &n->res; n 1197 net/sched/cls_u32.c cls_u32.knode.exts = &n->exts; n 1198 net/sched/cls_u32.c if (n->ht_down) n 1203 net/sched/cls_u32.c &cls_u32, cb_priv, &n->flags, n 1204 net/sched/cls_u32.c &n->in_hw_count); n 1216 net/sched/cls_u32.c struct tc_u_knode *n; n 1238 net/sched/cls_u32.c for (n = rtnl_dereference(ht->ht[h]); n 1239 net/sched/cls_u32.c n; n 1240 net/sched/cls_u32.c n = rtnl_dereference(n->next)) { n 1241 net/sched/cls_u32.c if (tc_skip_hw(n->flags)) n 1244 net/sched/cls_u32.c err = u32_reoffload_knode(tp, n, add, cb, n 1261 net/sched/cls_u32.c struct tc_u_knode *n = fh; n 1263 net/sched/cls_u32.c if (n && n->res.classid == classid) { n 1265 net/sched/cls_u32.c __tcf_bind_filter(q, &n->res, base); n 1267 net/sched/cls_u32.c __tcf_unbind_filter(q, &n->res); n 1274 net/sched/cls_u32.c struct tc_u_knode *n = fh; n 1278 net/sched/cls_u32.c if (n == NULL) n 1281 net/sched/cls_u32.c t->tcm_handle = n->handle; n 1287 net/sched/cls_u32.c if (TC_U32_KEY(n->handle) == 0) { n 1300 net/sched/cls_u32.c sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key), n 1301 net/sched/cls_u32.c &n->sel)) n 1304 net/sched/cls_u32.c ht_up = rtnl_dereference(n->ht_up); n 1306 net/sched/cls_u32.c u32 htid = n->handle & 0xFFFFF000; n 1310 net/sched/cls_u32.c if (n->res.classid && n 1311 net/sched/cls_u32.c nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid)) n 1314 net/sched/cls_u32.c ht_down = rtnl_dereference(n->ht_down); n 1319 net/sched/cls_u32.c if (n->flags && nla_put_u32(skb, TCA_U32_FLAGS, n->flags)) n 1323 net/sched/cls_u32.c if ((n->val || n->mask)) { n 1324 net/sched/cls_u32.c struct tc_u32_mark mark = {.val = n->val, n 1325 
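
In the cls_u32 entries above, n->fshift is derived from the hash mask (ffs(ntohl(hmask)) - 1) and the classifier folds a 32-bit word with that mask and shift to pick a hash bucket; the per-key compare itself is not quoted in the listing, so the masked XOR test below is stated as the usual u32-style check rather than a quotation. A host-order sketch of both computations, byte-order handling omitted and using the GCC/Clang __builtin_ffs builtin:

    #include <stdio.h>
    #include <stdint.h>

    struct key { uint32_t mask, val; };

    /* Shift that right-aligns the mask's lowest set bit, like
     * n->fshift = hmask ? ffs(ntohl(hmask)) - 1 : 0 in cls_u32. */
    static unsigned int mask_shift(uint32_t mask)
    {
        return mask ? (unsigned int)__builtin_ffs((int)mask) - 1 : 0;
    }

    /* Fold the selected bits of a word into a small bucket index. */
    static unsigned int hash_fold(uint32_t word, uint32_t mask,
                                  unsigned int fshift, unsigned int divisor)
    {
        return ((word & mask) >> fshift) & divisor;
    }

    /* A key matches when the masked bits of the word equal the masked value. */
    static int key_match(uint32_t word, const struct key *k)
    {
        return ((word ^ k->val) & k->mask) == 0;
    }

    int main(void)
    {
        struct key k = { .mask = 0x0000ff00, .val = 0x00001100 };
        uint32_t word = 0xdead1142;
        unsigned int fshift = mask_shift(k.mask);           /* 8 */

        printf("bucket=%u match=%d\n",
               hash_fold(word, k.mask, fshift, 0xf), key_match(word, &k));
        return 0;
    }
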
net/sched/cls_u32.c .mask = n->mask, n 1330 net/sched/cls_u32.c __u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum); n 1340 net/sched/cls_u32.c if (tcf_exts_dump(skb, &n->exts) < 0) n 1343 net/sched/cls_u32.c if (n->ifindex) { n 1345 net/sched/cls_u32.c dev = __dev_get_by_index(net, n->ifindex); n 1351 net/sched/cls_u32.c n->sel.nkeys * sizeof(u64), n 1358 net/sched/cls_u32.c struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu); n 1362 net/sched/cls_u32.c for (i = 0; i < n->sel.nkeys; i++) n 1368 net/sched/cls_u32.c n->sel.nkeys * sizeof(u64), n 1379 net/sched/cls_u32.c if (TC_U32_KEY(n->handle)) n 1380 net/sched/cls_u32.c if (tcf_exts_dump_stats(skb, &n->exts) < 0) n 643 net/sched/sch_api.c static struct hlist_head *qdisc_class_hash_alloc(unsigned int n) n 648 net/sched/sch_api.c h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL); n 651 net/sched/sch_api.c for (i = 0; i < n; i++) n 753 net/sched/sch_api.c void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len) n 762 net/sched/sch_api.c if (n == 0 && len == 0) n 764 net/sched/sch_api.c drops = max_t(int, n, 0); n 781 net/sched/sch_api.c notify = !sch->q.qlen && !WARN_ON_ONCE(!n && n 794 net/sched/sch_api.c sch->q.qlen -= n; n 957 net/sched/sch_api.c struct nlmsghdr *n, u32 clid, n 968 net/sched/sch_api.c if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq, n 973 net/sched/sch_api.c if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq, n 980 net/sched/sch_api.c n->nlmsg_flags & NLM_F_ECHO); n 988 net/sched/sch_api.c struct nlmsghdr *n, u32 clid, n 992 net/sched/sch_api.c qdisc_notify(net, skb, n, clid, old, new); n 1021 net/sched/sch_api.c struct sk_buff *skb, struct nlmsghdr *n, u32 classid, n 1067 net/sched/sch_api.c notify_and_destroy(net, skb, n, classid, n 1076 net/sched/sch_api.c notify_and_destroy(net, skb, n, classid, old, new); n 1103 net/sched/sch_api.c notify_and_destroy(net, skb, n, classid, old, new); n 1407 net/sched/sch_api.c static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, n 1411 net/sched/sch_api.c struct tcmsg *tcm = nlmsg_data(n); n 1419 net/sched/sch_api.c if ((n->nlmsg_type != RTM_GETQDISC) && n 1423 net/sched/sch_api.c err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX, n 1470 net/sched/sch_api.c if (n->nlmsg_type == RTM_DELQDISC) { n 1479 net/sched/sch_api.c err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack); n 1483 net/sched/sch_api.c qdisc_notify(net, skb, n, clid, NULL, q); n 1492 net/sched/sch_api.c static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, n 1508 net/sched/sch_api.c err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX, n 1513 net/sched/sch_api.c tcm = nlmsg_data(n); n 1544 net/sched/sch_api.c if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) { n 1555 net/sched/sch_api.c if (n->nlmsg_flags & NLM_F_EXCL) { n 1594 net/sched/sch_api.c if ((n->nlmsg_flags & NLM_F_CREATE) && n 1595 net/sched/sch_api.c (n->nlmsg_flags & NLM_F_REPLACE) && n 1596 net/sched/sch_api.c ((n->nlmsg_flags & NLM_F_EXCL) || n 1615 net/sched/sch_api.c if (n->nlmsg_flags & NLM_F_EXCL) { n 1625 net/sched/sch_api.c qdisc_notify(net, skb, n, clid, NULL, q); n 1629 net/sched/sch_api.c if (!(n->nlmsg_flags & NLM_F_CREATE)) { n 1663 net/sched/sch_api.c err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack); n 1832 net/sched/sch_api.c struct nlmsghdr *n, struct Qdisc *q, n 1843 net/sched/sch_api.c if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) { n 1849 net/sched/sch_api.c n->nlmsg_flags & NLM_F_ECHO); n 1857 net/sched/sch_api.c struct sk_buff *oskb, struct 
nlmsghdr *n, n 1871 net/sched/sch_api.c if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, n 1884 net/sched/sch_api.c n->nlmsg_flags & NLM_F_ECHO); n 1899 net/sched/sch_api.c static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg) n 1907 net/sched/sch_api.c tp->ops->bind_class(n, a->classid, a->cl, q, a->base); n 1975 net/sched/sch_api.c static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, n 1979 net/sched/sch_api.c struct tcmsg *tcm = nlmsg_data(n); n 1991 net/sched/sch_api.c if ((n->nlmsg_type != RTM_GETTCLASS) && n 1995 net/sched/sch_api.c err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX, n 2069 net/sched/sch_api.c if (n->nlmsg_type != RTM_NEWTCLASS || n 2070 net/sched/sch_api.c !(n->nlmsg_flags & NLM_F_CREATE)) n 2073 net/sched/sch_api.c switch (n->nlmsg_type) { n 2076 net/sched/sch_api.c if (n->nlmsg_flags & NLM_F_EXCL) n 2080 net/sched/sch_api.c err = tclass_del_notify(net, cops, skb, n, q, cl); n 2085 net/sched/sch_api.c err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS); n 2103 net/sched/sch_api.c tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS); n 99 net/sched/sch_gred.c int n; n 104 net/sched/sch_gred.c for (n = i + 1; n < table->DPs; n++) n 105 net/sched/sch_gred.c if (table->tab[n] && table->tab[n]->prio == q->prio) n 222 net/sched/sch_hfsc.c struct rb_node *n; n 224 net/sched/sch_hfsc.c for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) { n 225 net/sched/sch_hfsc.c p = rb_entry(n, struct hfsc_class, el_node); n 238 net/sched/sch_hfsc.c struct rb_node *n; n 240 net/sched/sch_hfsc.c n = rb_first(&q->eligible); n 241 net/sched/sch_hfsc.c if (n == NULL) n 243 net/sched/sch_hfsc.c return rb_entry(n, struct hfsc_class, el_node); n 286 net/sched/sch_hfsc.c struct rb_node *n; n 288 net/sched/sch_hfsc.c for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) { n 289 net/sched/sch_hfsc.c p = rb_entry(n, struct hfsc_class, vt_node); n 653 net/sched/sch_hfsc.c struct rb_node *n = rb_first(&cl->cf_tree); n 656 net/sched/sch_hfsc.c if (n == NULL) { n 660 net/sched/sch_hfsc.c p = rb_entry(n, struct hfsc_class, cf_node); n 668 net/sched/sch_hfsc.c struct rb_node *n; n 681 net/sched/sch_hfsc.c n = rb_last(&cl->cl_parent->vt_tree); n 682 net/sched/sch_hfsc.c if (n != NULL) { n 683 net/sched/sch_hfsc.c max_cl = rb_entry(n, struct hfsc_class, vt_node); n 332 net/sched/sch_htb.c static inline void htb_next_rb_node(struct rb_node **n) n 334 net/sched/sch_htb.c *n = rb_next(*n); n 741 net/sched/sch_htb.c static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n, n 745 net/sched/sch_htb.c while (n) { n 747 net/sched/sch_htb.c rb_entry(n, struct htb_class, node[prio]); n 750 net/sched/sch_htb.c n = n->rb_right; n 752 net/sched/sch_htb.c r = n; n 753 net/sched/sch_htb.c n = n->rb_left; n 755 net/sched/sch_htb.c return n; n 779 net/sched/sch_netem.c size_t n = nla_len(attr)/sizeof(__s16); n 785 net/sched/sch_netem.c if (!n || n > NETEM_DIST_MAX) n 788 net/sched/sch_netem.c d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL); n 792 net/sched/sch_netem.c d->size = n; n 793 net/sched/sch_netem.c for (i = 0; i < n; i++) n 205 net/sched/sch_sfq.c sfq_index p, n; n 210 net/sched/sch_sfq.c n = q->dep[qlen].next; n 212 net/sched/sch_sfq.c slot->dep.next = n; n 216 net/sched/sch_sfq.c sfq_dep_head(q, n)->prev = x; n 219 net/sched/sch_sfq.c #define sfq_unlink(q, x, n, p) \ n 221 net/sched/sch_sfq.c n = q->slots[x].dep.next; \ n 223 net/sched/sch_sfq.c sfq_dep_head(q, p)->next = n; \ n 224 net/sched/sch_sfq.c 
sfq_dep_head(q, n)->prev = p; \ n 230 net/sched/sch_sfq.c sfq_index p, n; n 233 net/sched/sch_sfq.c sfq_unlink(q, x, n, p); n 236 net/sched/sch_sfq.c if (n == p && q->cur_depth == d) n 243 net/sched/sch_sfq.c sfq_index p, n; n 246 net/sched/sch_sfq.c sfq_unlink(q, x, n, p); n 114 net/sched/sch_taprio.c struct sched_entry *entry, *n; n 119 net/sched/sch_taprio.c list_for_each_entry_safe(entry, n, &sched->entries, list) { n 200 net/sched/sch_taprio.c int tc, n; n 252 net/sched/sch_taprio.c n = div_s64(ktime_sub(txtime, curr_intv_start), cycle); n 253 net/sched/sch_taprio.c *interval_start = ktime_add(curr_intv_start, n * cycle); n 254 net/sched/sch_taprio.c *interval_end = ktime_add(curr_intv_end, n * cycle); n 807 net/sched/sch_taprio.c static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry, n 813 net/sched/sch_taprio.c err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n, n 829 net/sched/sch_taprio.c struct nlattr *n; n 836 net/sched/sch_taprio.c nla_for_each_nested(n, list, rem) { n 839 net/sched/sch_taprio.c if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) { n 850 net/sched/sch_taprio.c err = parse_sched_entry(n, entry, i, extack); n 975 net/sched/sch_taprio.c s64 n; n 998 net/sched/sch_taprio.c n = div64_s64(ktime_sub_ns(now, base), cycle); n 999 net/sched/sch_taprio.c *start = ktime_add_ns(base, (n + 1) * cycle); n 220 net/sched/sch_teql.c struct neighbour *n; n 223 net/sched/sch_teql.c n = dst_neigh_lookup_skb(dst, skb); n 224 net/sched/sch_teql.c if (!n) n 230 net/sched/sch_teql.c mn = __neigh_lookup_errno(n->tbl, n->primary_key, dev); n 231 net/sched/sch_teql.c neigh_release(n); n 234 net/sched/sch_teql.c n = mn; n 237 net/sched/sch_teql.c if (neigh_event_send(n, skb_res) == 0) { n 241 net/sched/sch_teql.c neigh_ha_snapshot(haddr, n, dev); n 250 net/sched/sch_teql.c neigh_release(n); n 1800 net/sctp/sm_make_chunk.c __be32 n = htonl(usecs); n 1811 net/sctp/sm_make_chunk.c SCTP_ERROR_STALE_COOKIE, &n, n 1812 net/sctp/sm_make_chunk.c sizeof(n), 0); n 132 net/sctp/stream_sched.c struct sctp_sched_ops *n = sctp_sched_ops[sched]; n 138 net/sctp/stream_sched.c if (old == n) n 160 net/sctp/stream_sched.c asoc->outqueue.sched = n; n 161 net/sctp/stream_sched.c n->init(&asoc->stream); n 166 net/sctp/stream_sched.c ret = n->init_sid(&asoc->stream, i, GFP_KERNEL); n 176 net/sctp/stream_sched.c n->enqueue(&asoc->outqueue, msg); n 182 net/sctp/stream_sched.c n->free(&asoc->stream); n 209 net/sctp/stream_sched_prio.c struct sctp_stream_priorities *prio, *n; n 227 net/sctp/stream_sched_prio.c list_for_each_entry_safe(prio, n, &list, prio_sched) { n 952 net/smc/af_smc.c struct smc_sock *isk, *n; n 955 net/smc/af_smc.c list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) { n 147 net/sunrpc/auth_gss/gss_krb5_keys.c size_t blocksize, keybytes, keylength, n; n 198 net/sunrpc/auth_gss/gss_krb5_keys.c n = 0; n 199 net/sunrpc/auth_gss/gss_krb5_keys.c while (n < keybytes) { n 203 net/sunrpc/auth_gss/gss_krb5_keys.c if ((keybytes - n) <= outblock.len) { n 204 net/sunrpc/auth_gss/gss_krb5_keys.c memcpy(rawkey + n, outblock.data, (keybytes - n)); n 208 net/sunrpc/auth_gss/gss_krb5_keys.c memcpy(rawkey + n, outblock.data, outblock.len); n 210 net/sunrpc/auth_gss/gss_krb5_keys.c n += outblock.len; n 1319 net/sunrpc/cache.c loff_t n = *pos; n 1324 net/sunrpc/cache.c if (!n--) n 1326 net/sunrpc/cache.c hash = n >> 32; n 1327 net/sunrpc/cache.c entry = n & ((1LL<<32) - 1); n 1332 net/sunrpc/cache.c n &= ~((1LL<<32) - 1); n 1335 net/sunrpc/cache.c n += 1LL<<32; n 1340 
net/sunrpc/cache.c *pos = n+1; n 758 net/sunrpc/sched.c struct rpc_task *task, *n; n 763 net/sunrpc/sched.c list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) { n 145 net/sunrpc/xdr.c size_t i, n = xdr_buf_pagecount(buf); n 147 net/sunrpc/xdr.c if (n != 0 && buf->bvec == NULL) { n 148 net/sunrpc/xdr.c buf->bvec = kmalloc_array(n, sizeof(buf->bvec[0]), gfp); n 151 net/sunrpc/xdr.c for (i = 0; i < n; i++) { n 906 net/sunrpc/xprt.c struct rb_node *n = xprt->recv_queue.rb_node; n 909 net/sunrpc/xprt.c while (n != NULL) { n 910 net/sunrpc/xprt.c req = rb_entry(n, struct rpc_rqst, rq_recv); n 913 net/sunrpc/xprt.c n = n->rb_left; n 916 net/sunrpc/xprt.c n = n->rb_right; n 929 net/sunrpc/xprt.c struct rb_node *n = NULL; n 933 net/sunrpc/xprt.c n = *p; n 934 net/sunrpc/xprt.c req = rb_entry(n, struct rpc_rqst, rq_recv); n 937 net/sunrpc/xprt.c p = &n->rb_left; n 940 net/sunrpc/xprt.c p = &n->rb_right; n 947 net/sunrpc/xprt.c rb_link_node(&new->rq_recv, n, p); n 329 net/sunrpc/xprtrdma/frwr_ops.c int i, n, dma_nents; n 362 net/sunrpc/xprtrdma/frwr_ops.c n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE); n 363 net/sunrpc/xprtrdma/frwr_ops.c if (n != dma_nents) n 391 net/sunrpc/xprtrdma/frwr_ops.c trace_xprtrdma_frwr_maperr(mr, n); n 195 net/sunrpc/xprtrdma/rpc_rdma.c unsigned int *n) n 210 net/sunrpc/xprtrdma/rpc_rdma.c ++(*n); n 229 net/sunrpc/xprtrdma/rpc_rdma.c unsigned int len, n; n 232 net/sunrpc/xprtrdma/rpc_rdma.c n = 0; n 234 net/sunrpc/xprtrdma/rpc_rdma.c seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n); n 255 net/sunrpc/xprtrdma/rpc_rdma.c ++n; n 274 net/sunrpc/xprtrdma/rpc_rdma.c seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n); n 277 net/sunrpc/xprtrdma/rpc_rdma.c if (unlikely(n > RPCRDMA_MAX_SEGS)) n 279 net/sunrpc/xprtrdma/rpc_rdma.c return n; n 329 net/sunrpc/xprtsock.c size_t i,n; n 333 net/sunrpc/xprtsock.c n = (buf->page_base + want + PAGE_SIZE - 1) >> PAGE_SHIFT; n 334 net/sunrpc/xprtsock.c for (i = 0; i < n; i++) { n 234 net/tipc/group.c struct rb_node *n = grp->members.rb_node; n 238 net/tipc/group.c while (n) { n 239 net/tipc/group.c m = container_of(n, struct tipc_member, tree_node); n 242 net/tipc/group.c n = n->rb_left; n 244 net/tipc/group.c n = n->rb_right; n 266 net/tipc/group.c struct rb_node *n; n 268 net/tipc/group.c for (n = rb_first(&grp->members); n; n = rb_next(n)) { n 269 net/tipc/group.c m = container_of(n, struct tipc_member, tree_node); n 280 net/tipc/group.c struct rb_node **n, *parent = NULL; n 283 net/tipc/group.c n = &grp->members.rb_node; n 284 net/tipc/group.c while (*n) { n 285 net/tipc/group.c tmp = container_of(*n, struct tipc_member, tree_node); n 286 net/tipc/group.c parent = *n; n 290 net/tipc/group.c n = &(*n)->rb_left; n 292 net/tipc/group.c n = &(*n)->rb_right; n 296 net/tipc/group.c rb_link_node(&m->tree_node, parent, n); n 392 net/tipc/group.c struct rb_node *n; n 395 net/tipc/group.c for (n = rb_first(&grp->members); n; n = rb_next(n)) { n 396 net/tipc/group.c m = container_of(n, struct tipc_member, tree_node); n 1354 net/tipc/link.c u8 n = 0; n 1363 net/tipc/link.c ga->gacks[n].ack = htons(expect - 1); n 1364 net/tipc/link.c ga->gacks[n].gap = htons(seqno - expect); n 1365 net/tipc/link.c if (++n >= MAX_GAP_ACK_BLKS) { n 1377 net/tipc/link.c ga->gacks[n].ack = htons(seqno); n 1378 net/tipc/link.c ga->gacks[n].gap = 0; n 1379 net/tipc/link.c n++; n 1382 net/tipc/link.c len = tipc_gap_ack_blks_sz(n); n 1384 net/tipc/link.c ga->gack_cnt = n; n 1409 net/tipc/link.c u16 seqno, n = 0; n 1446 
net/tipc/link.c if (!ga || n >= ga->gack_cnt) n 1448 net/tipc/link.c acked = ntohs(ga->gacks[n].ack); n 1449 net/tipc/link.c gap = ntohs(ga->gacks[n].gap); n 1450 net/tipc/link.c n++; n 147 net/tipc/msg.h #define tipc_gap_ack_blks_sz(n) (sizeof(struct tipc_gap_ack_blks) + \ n 148 net/tipc/msg.h sizeof(struct tipc_gap_ack) * (n)) n 213 net/tipc/msg.h static inline void msg_set_user(struct tipc_msg *m, u32 n) n 215 net/tipc/msg.h msg_set_bits(m, 0, 25, 0xf, n); n 223 net/tipc/msg.h static inline void msg_set_hdr_sz(struct tipc_msg *m, u32 n) n 225 net/tipc/msg.h msg_set_bits(m, 0, 21, 0xf, n>>2); n 248 net/tipc/msg.h static inline void msg_set_non_seq(struct tipc_msg *m, u32 n) n 250 net/tipc/msg.h msg_set_bits(m, 0, 20, 1, n); n 326 net/tipc/msg.h static inline void msg_set_type(struct tipc_msg *m, u32 n) n 328 net/tipc/msg.h msg_set_bits(m, 1, 29, 0x7, n); n 391 net/tipc/msg.h static inline void msg_set_lookup_scope(struct tipc_msg *m, u32 n) n 393 net/tipc/msg.h msg_set_bits(m, 1, 19, 0x3, n); n 401 net/tipc/msg.h static inline void msg_set_bcast_ack(struct tipc_msg *m, u16 n) n 403 net/tipc/msg.h msg_set_bits(m, 1, 0, 0xffff, n); n 424 net/tipc/msg.h static inline void msg_set_dest_session(struct tipc_msg *m, u16 n) n 426 net/tipc/msg.h msg_set_bits(m, 1, 0, 0xffff, n); n 437 net/tipc/msg.h static inline void msg_set_ack(struct tipc_msg *m, u16 n) n 439 net/tipc/msg.h msg_set_bits(m, 2, 16, 0xffff, n); n 447 net/tipc/msg.h static inline void msg_set_seqno(struct tipc_msg *m, u16 n) n 449 net/tipc/msg.h msg_set_bits(m, 2, 0, 0xffff, n); n 552 net/tipc/msg.h static inline void msg_set_nametype(struct tipc_msg *m, u32 n) n 554 net/tipc/msg.h msg_set_word(m, 8, n); n 567 net/tipc/msg.h static inline void msg_set_namelower(struct tipc_msg *m, u32 n) n 569 net/tipc/msg.h msg_set_word(m, 9, n); n 572 net/tipc/msg.h static inline void msg_set_nameinst(struct tipc_msg *m, u32 n) n 574 net/tipc/msg.h msg_set_namelower(m, n); n 582 net/tipc/msg.h static inline void msg_set_nameupper(struct tipc_msg *m, u32 n) n 584 net/tipc/msg.h msg_set_word(m, 10, n); n 650 net/tipc/msg.h static inline void msg_set_seq_gap(struct tipc_msg *m, u32 n) n 652 net/tipc/msg.h msg_set_bits(m, 1, 16, 0x1fff, n); n 660 net/tipc/msg.h static inline void msg_set_node_sig(struct tipc_msg *m, u32 n) n 662 net/tipc/msg.h msg_set_bits(m, 1, 0, 0xffff, n); n 670 net/tipc/msg.h static inline void msg_set_node_capabilities(struct tipc_msg *m, u32 n) n 672 net/tipc/msg.h msg_set_bits(m, 1, 15, 0x1fff, n); n 683 net/tipc/msg.h static inline void msg_set_dest_domain(struct tipc_msg *m, u32 n) n 685 net/tipc/msg.h msg_set_word(m, 2, n); n 693 net/tipc/msg.h static inline void msg_set_bcgap_after(struct tipc_msg *m, u32 n) n 695 net/tipc/msg.h msg_set_bits(m, 2, 16, 0xffff, n); n 703 net/tipc/msg.h static inline void msg_set_bcgap_to(struct tipc_msg *m, u32 n) n 705 net/tipc/msg.h msg_set_bits(m, 2, 0, 0xffff, n); n 721 net/tipc/msg.h static inline void msg_set_last_bcast(struct tipc_msg *m, u32 n) n 723 net/tipc/msg.h msg_set_bits(m, 4, 16, 0xffff, n); n 731 net/tipc/msg.h static inline void msg_set_nof_fragms(struct tipc_msg *m, u32 n) n 733 net/tipc/msg.h msg_set_bits(m, 4, 0, 0xffff, n); n 741 net/tipc/msg.h static inline void msg_set_fragm_no(struct tipc_msg *m, u32 n) n 743 net/tipc/msg.h msg_set_bits(m, 4, 16, 0xffff, n); n 751 net/tipc/msg.h static inline void msg_set_next_sent(struct tipc_msg *m, u16 n) n 753 net/tipc/msg.h msg_set_bits(m, 4, 0, 0xffff, n); n 756 net/tipc/msg.h static inline void msg_set_long_msgno(struct 
tipc_msg *m, u32 n) n 758 net/tipc/msg.h msg_set_bits(m, 4, 0, 0xffff, n); n 786 net/tipc/msg.h static inline void msg_set_session(struct tipc_msg *m, u16 n) n 788 net/tipc/msg.h msg_set_bits(m, 5, 16, 0xffff, n); n 806 net/tipc/msg.h static inline void msg_set_net_plane(struct tipc_msg *m, char n) n 808 net/tipc/msg.h msg_set_bits(m, 5, 1, 7, (n - 'A')); n 816 net/tipc/msg.h static inline void msg_set_linkprio(struct tipc_msg *m, u32 n) n 818 net/tipc/msg.h msg_set_bits(m, 5, 4, 0x1f, n); n 826 net/tipc/msg.h static inline void msg_set_bearer_id(struct tipc_msg *m, u32 n) n 828 net/tipc/msg.h msg_set_bits(m, 5, 9, 0x7, n); n 878 net/tipc/msg.h static inline void msg_set_bc_gap(struct tipc_msg *m, u32 n) n 880 net/tipc/msg.h msg_set_bits(m, 8, 0, 0x3ff, n); n 891 net/tipc/msg.h static inline void msg_set_msgcnt(struct tipc_msg *m, u16 n) n 893 net/tipc/msg.h msg_set_bits(m, 9, 16, 0xffff, n); n 901 net/tipc/msg.h static inline void msg_set_syncpt(struct tipc_msg *m, u16 n) n 903 net/tipc/msg.h msg_set_bits(m, 9, 16, 0xffff, n); n 911 net/tipc/msg.h static inline void msg_set_conn_ack(struct tipc_msg *m, u32 n) n 913 net/tipc/msg.h msg_set_bits(m, 9, 16, 0xffff, n); n 921 net/tipc/msg.h static inline void msg_set_adv_win(struct tipc_msg *m, u16 n) n 923 net/tipc/msg.h msg_set_bits(m, 9, 0, 0xffff, n); n 931 net/tipc/msg.h static inline void msg_set_max_pkt(struct tipc_msg *m, u32 n) n 933 net/tipc/msg.h msg_set_bits(m, 9, 16, 0xffff, (n / 4)); n 941 net/tipc/msg.h static inline void msg_set_link_tolerance(struct tipc_msg *m, u32 n) n 943 net/tipc/msg.h msg_set_bits(m, 9, 0, 0xffff, n); n 951 net/tipc/msg.h static inline void msg_set_grp_bc_syncpt(struct tipc_msg *m, u16 n) n 953 net/tipc/msg.h msg_set_bits(m, 9, 16, 0xffff, n); n 961 net/tipc/msg.h static inline void msg_set_grp_bc_acked(struct tipc_msg *m, u16 n) n 963 net/tipc/msg.h msg_set_bits(m, 9, 16, 0xffff, n); n 971 net/tipc/msg.h static inline void msg_set_grp_remitted(struct tipc_msg *m, u16 n) n 973 net/tipc/msg.h msg_set_bits(m, 9, 16, 0xffff, n); n 983 net/tipc/msg.h static inline void msg_set_grp_evt(struct tipc_msg *m, int n) n 985 net/tipc/msg.h msg_set_bits(m, 10, 0, 0x3, n); n 993 net/tipc/msg.h static inline void msg_set_grp_bc_ack_req(struct tipc_msg *m, bool n) n 995 net/tipc/msg.h msg_set_bits(m, 10, 0, 0x1, n); n 1003 net/tipc/msg.h static inline void msg_set_grp_bc_seqno(struct tipc_msg *m, u32 n) n 1005 net/tipc/msg.h msg_set_bits(m, 10, 16, 0xffff, n); n 1034 net/tipc/msg.h static inline void msg_set_sugg_node_addr(struct tipc_msg *m, u32 n) n 1036 net/tipc/msg.h msg_set_word(m, 14, n); n 146 net/tipc/name_table.c struct rb_node *n = sc->ranges.rb_node; n 149 net/tipc/name_table.c while (n) { n 150 net/tipc/name_table.c sr = container_of(n, struct service_range, tree_node); n 152 net/tipc/name_table.c n = n->rb_left; n 154 net/tipc/name_table.c n = n->rb_right; n 166 net/tipc/name_table.c struct rb_node *n = sc->ranges.rb_node; n 174 net/tipc/name_table.c for (n = &sr->tree_node; n; n = rb_next(n)) { n 175 net/tipc/name_table.c sr = container_of(n, struct service_range, tree_node); n 179 net/tipc/name_table.c if (!n || sr->lower != lower || sr->upper != upper) n 188 net/tipc/name_table.c struct rb_node **n, *parent = NULL; n 191 net/tipc/name_table.c n = &sc->ranges.rb_node; n 192 net/tipc/name_table.c while (*n) { n 193 net/tipc/name_table.c tmp = container_of(*n, struct service_range, tree_node); n 194 net/tipc/name_table.c parent = *n; n 197 net/tipc/name_table.c n = &(*n)->rb_left; n 199 net/tipc/name_table.c 
n = &(*n)->rb_right; n 201 net/tipc/name_table.c n = &(*n)->rb_left; n 203 net/tipc/name_table.c n = &(*n)->rb_right; n 214 net/tipc/name_table.c rb_link_node(&sr->tree_node, parent, n); n 292 net/tipc/name_table.c struct rb_node *n; n 305 net/tipc/name_table.c for (n = rb_first(&service->ranges); n; n = rb_next(n)) { n 306 net/tipc/name_table.c sr = container_of(n, struct service_range, tree_node); n 520 net/tipc/name_table.c struct rb_node *n; n 529 net/tipc/name_table.c for (n = rb_first(&sc->ranges); n; n = rb_next(n)) { n 530 net/tipc/name_table.c sr = container_of(n, struct service_range, tree_node); n 555 net/tipc/name_table.c struct rb_node *n; n 564 net/tipc/name_table.c for (n = rb_first(&sc->ranges); n; n = rb_next(n)) { n 565 net/tipc/name_table.c sr = container_of(n, struct service_range, tree_node); n 587 net/tipc/name_table.c struct rb_node *n; n 595 net/tipc/name_table.c for (n = rb_first(&sc->ranges); n; n = rb_next(n)) { n 596 net/tipc/name_table.c sr = container_of(n, struct service_range, tree_node); n 878 net/tipc/name_table.c struct rb_node *n; n 881 net/tipc/name_table.c for (n = rb_first(&sc->ranges); n; n = rb_next(n)) { n 882 net/tipc/name_table.c sr = container_of(n, struct service_range, tree_node); n 123 net/tipc/netlink_compat.c int n; n 137 net/tipc/netlink_compat.c n = vscnprintf(buf, rem, fmt, args); n 140 net/tipc/netlink_compat.c TLV_SET_LEN(tlv, n + len); n 141 net/tipc/netlink_compat.c skb_put(skb, n); n 143 net/tipc/netlink_compat.c return n; n 155 net/tipc/node.c static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id, n 158 net/tipc/node.c static void tipc_node_link_down(struct tipc_node *n, int bearer_id, n 160 net/tipc/node.c static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq); n 163 net/tipc/node.c static void tipc_node_fsm_evt(struct tipc_node *n, int evt); n 167 net/tipc/node.c static bool node_is_up(struct tipc_node *n); n 177 net/tipc/node.c static struct tipc_link *node_active_link(struct tipc_node *n, int sel) n 179 net/tipc/node.c int bearer_id = n->active_links[sel & 1]; n 184 net/tipc/node.c return n->links[bearer_id].link; n 189 net/tipc/node.c struct tipc_node *n; n 193 net/tipc/node.c n = tipc_node_find(net, addr); n 194 net/tipc/node.c if (unlikely(!n)) n 197 net/tipc/node.c bearer_id = n->active_links[sel & 1]; n 199 net/tipc/node.c mtu = n->links[bearer_id].mtu; n 200 net/tipc/node.c tipc_node_put(n); n 207 net/tipc/node.c struct tipc_node *n; n 216 net/tipc/node.c n = tipc_node_find(net, addr); n 217 net/tipc/node.c if (!n) n 220 net/tipc/node.c memcpy(id, &n->peer_id, TIPC_NODEID_LEN); n 221 net/tipc/node.c tipc_node_put(n); n 227 net/tipc/node.c struct tipc_node *n; n 230 net/tipc/node.c n = tipc_node_find(net, addr); n 231 net/tipc/node.c if (unlikely(!n)) n 233 net/tipc/node.c caps = n->capabilities; n 234 net/tipc/node.c tipc_node_put(n); n 240 net/tipc/node.c struct tipc_node *n = container_of(kref, struct tipc_node, kref); n 242 net/tipc/node.c kfree(n->bc_entry.link); n 243 net/tipc/node.c kfree_rcu(n, rcu); n 284 net/tipc/node.c struct tipc_node *n; n 288 net/tipc/node.c list_for_each_entry_rcu(n, &tn->node_list, list) { n 289 net/tipc/node.c read_lock_bh(&n->lock); n 290 net/tipc/node.c if (!memcmp(id, n->peer_id, 16) && n 291 net/tipc/node.c kref_get_unless_zero(&n->kref)) n 293 net/tipc/node.c read_unlock_bh(&n->lock); n 298 net/tipc/node.c return found ? 
n : NULL; n 301 net/tipc/node.c static void tipc_node_read_lock(struct tipc_node *n) n 303 net/tipc/node.c read_lock_bh(&n->lock); n 306 net/tipc/node.c static void tipc_node_read_unlock(struct tipc_node *n) n 308 net/tipc/node.c read_unlock_bh(&n->lock); n 311 net/tipc/node.c static void tipc_node_write_lock(struct tipc_node *n) n 313 net/tipc/node.c write_lock_bh(&n->lock); n 316 net/tipc/node.c static void tipc_node_write_unlock_fast(struct tipc_node *n) n 318 net/tipc/node.c write_unlock_bh(&n->lock); n 321 net/tipc/node.c static void tipc_node_write_unlock(struct tipc_node *n) n 323 net/tipc/node.c struct net *net = n->net; n 325 net/tipc/node.c u32 flags = n->action_flags; n 331 net/tipc/node.c write_unlock_bh(&n->lock); n 335 net/tipc/node.c addr = n->addr; n 336 net/tipc/node.c link_id = n->link_id; n 338 net/tipc/node.c publ_list = &n->publ_list; n 340 net/tipc/node.c n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP | n 343 net/tipc/node.c write_unlock_bh(&n->lock); n 367 net/tipc/node.c struct tipc_node *n, *temp_node; n 373 net/tipc/node.c n = tipc_node_find(net, addr); n 374 net/tipc/node.c if (n) { n 375 net/tipc/node.c if (n->capabilities == capabilities) n 378 net/tipc/node.c tipc_node_write_lock(n); n 379 net/tipc/node.c n->capabilities = capabilities; n 381 net/tipc/node.c l = n->links[bearer_id].link; n 385 net/tipc/node.c tipc_node_write_unlock_fast(n); n 394 net/tipc/node.c n = kzalloc(sizeof(*n), GFP_ATOMIC); n 395 net/tipc/node.c if (!n) { n 399 net/tipc/node.c n->addr = addr; n 400 net/tipc/node.c memcpy(&n->peer_id, peer_id, 16); n 401 net/tipc/node.c n->net = net; n 402 net/tipc/node.c n->capabilities = capabilities; n 403 net/tipc/node.c kref_init(&n->kref); n 404 net/tipc/node.c rwlock_init(&n->lock); n 405 net/tipc/node.c INIT_HLIST_NODE(&n->hash); n 406 net/tipc/node.c INIT_LIST_HEAD(&n->list); n 407 net/tipc/node.c INIT_LIST_HEAD(&n->publ_list); n 408 net/tipc/node.c INIT_LIST_HEAD(&n->conn_sks); n 409 net/tipc/node.c skb_queue_head_init(&n->bc_entry.namedq); n 410 net/tipc/node.c skb_queue_head_init(&n->bc_entry.inputq1); n 411 net/tipc/node.c __skb_queue_head_init(&n->bc_entry.arrvq); n 412 net/tipc/node.c skb_queue_head_init(&n->bc_entry.inputq2); n 414 net/tipc/node.c spin_lock_init(&n->links[i].lock); n 415 net/tipc/node.c n->state = SELF_DOWN_PEER_LEAVING; n 416 net/tipc/node.c n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER); n 417 net/tipc/node.c n->signature = INVALID_NODE_SIG; n 418 net/tipc/node.c n->active_links[0] = INVALID_BEARER_ID; n 419 net/tipc/node.c n->active_links[1] = INVALID_BEARER_ID; n 423 net/tipc/node.c n->capabilities, n 424 net/tipc/node.c &n->bc_entry.inputq1, n 425 net/tipc/node.c &n->bc_entry.namedq, n 427 net/tipc/node.c &n->bc_entry.link)) { n 429 net/tipc/node.c kfree(n); n 430 net/tipc/node.c n = NULL; n 433 net/tipc/node.c tipc_node_get(n); n 434 net/tipc/node.c timer_setup(&n->timer, tipc_node_timeout, 0); n 435 net/tipc/node.c n->keepalive_intv = U32_MAX; n 436 net/tipc/node.c hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]); n 438 net/tipc/node.c if (n->addr < temp_node->addr) n 441 net/tipc/node.c list_add_tail_rcu(&n->list, &temp_node->list); n 447 net/tipc/node.c trace_tipc_node_create(n, true, " "); n 450 net/tipc/node.c return n; n 453 net/tipc/node.c static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l) n 459 net/tipc/node.c if (intv < n->keepalive_intv) n 460 net/tipc/node.c n->keepalive_intv = intv; n 463 net/tipc/node.c 
tipc_link_set_abort_limit(l, tol / n->keepalive_intv); n 495 net/tipc/node.c struct tipc_node *n; n 500 net/tipc/node.c n = tipc_node_find(net, addr); n 501 net/tipc/node.c if (!n) { n 505 net/tipc/node.c tipc_node_write_lock(n); n 506 net/tipc/node.c list_add_tail(subscr, &n->publ_list); n 507 net/tipc/node.c tipc_node_write_unlock_fast(n); n 508 net/tipc/node.c tipc_node_put(n); n 513 net/tipc/node.c struct tipc_node *n; n 518 net/tipc/node.c n = tipc_node_find(net, addr); n 519 net/tipc/node.c if (!n) { n 523 net/tipc/node.c tipc_node_write_lock(n); n 525 net/tipc/node.c tipc_node_write_unlock_fast(n); n 526 net/tipc/node.c tipc_node_put(n); n 634 net/tipc/node.c struct tipc_node *n = from_timer(n, t, timer); n 637 net/tipc/node.c int remains = n->link_cnt; n 641 net/tipc/node.c trace_tipc_node_timeout(n, false, " "); n 642 net/tipc/node.c if (!node_is_up(n) && tipc_node_cleanup(n)) { n 644 net/tipc/node.c tipc_node_put(n); n 653 net/tipc/node.c tipc_node_read_lock(n); n 654 net/tipc/node.c n->keepalive_intv = 10000; n 655 net/tipc/node.c tipc_node_read_unlock(n); n 657 net/tipc/node.c tipc_node_read_lock(n); n 658 net/tipc/node.c le = &n->links[bearer_id]; n 662 net/tipc/node.c tipc_node_calculate_timer(n, le->link); n 667 net/tipc/node.c tipc_node_read_unlock(n); n 668 net/tipc/node.c tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr); n 670 net/tipc/node.c tipc_node_link_down(n, bearer_id, false); n 672 net/tipc/node.c mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv)); n 680 net/tipc/node.c static void __tipc_node_link_up(struct tipc_node *n, int bearer_id, n 683 net/tipc/node.c int *slot0 = &n->active_links[0]; n 684 net/tipc/node.c int *slot1 = &n->active_links[1]; n 685 net/tipc/node.c struct tipc_link *ol = node_active_link(n, 0); n 686 net/tipc/node.c struct tipc_link *nl = n->links[bearer_id].link; n 695 net/tipc/node.c n->working_links++; n 696 net/tipc/node.c n->action_flags |= TIPC_NOTIFY_LINK_UP; n 697 net/tipc/node.c n->link_id = tipc_link_id(nl); n 700 net/tipc/node.c n->links[bearer_id].mtu = tipc_link_mtu(nl) - INT_H_SIZE; n 702 net/tipc/node.c tipc_bearer_add_dest(n->net, bearer_id, n->addr); n 703 net/tipc/node.c tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id); n 707 net/tipc/node.c trace_tipc_node_link_up(n, true, " "); n 716 net/tipc/node.c tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT); n 717 net/tipc/node.c n->action_flags |= TIPC_NOTIFY_NODE_UP; n 719 net/tipc/node.c tipc_bcast_add_peer(n->net, nl, xmitq); n 746 net/tipc/node.c static void tipc_node_link_up(struct tipc_node *n, int bearer_id, n 751 net/tipc/node.c tipc_node_write_lock(n); n 752 net/tipc/node.c __tipc_node_link_up(n, bearer_id, xmitq); n 753 net/tipc/node.c maddr = &n->links[bearer_id].maddr; n 754 net/tipc/node.c tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr); n 755 net/tipc/node.c tipc_node_write_unlock(n); n 778 net/tipc/node.c static void tipc_node_link_failover(struct tipc_node *n, struct tipc_link *l, n 791 net/tipc/node.c tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT); n 793 net/tipc/node.c n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1); n 798 net/tipc/node.c tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT); n 804 net/tipc/node.c static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id, n 808 net/tipc/node.c struct tipc_link_entry *le = &n->links[*bearer_id]; n 809 net/tipc/node.c int *slot0 = &n->active_links[0]; n 810 net/tipc/node.c int *slot1 = &n->active_links[1]; n 814 net/tipc/node.c l = n->links[*bearer_id].link; n 818 net/tipc/node.c 
n->working_links--; n 819 net/tipc/node.c n->action_flags |= TIPC_NOTIFY_LINK_DOWN; n 820 net/tipc/node.c n->link_id = tipc_link_id(l); n 822 net/tipc/node.c tipc_bearer_remove_dest(n->net, *bearer_id, n->addr); n 831 net/tipc/node.c _l = n->links[i].link; n 848 net/tipc/node.c if (!node_is_up(n)) { n 850 net/tipc/node.c tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT); n 851 net/tipc/node.c tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT); n 856 net/tipc/node.c *maddr = &n->links[*bearer_id].maddr; n 857 net/tipc/node.c node_lost_contact(n, &le->inputq); n 858 net/tipc/node.c tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id); n 861 net/tipc/node.c tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id); n 864 net/tipc/node.c *bearer_id = n->active_links[0]; n 865 net/tipc/node.c tnl = n->links[*bearer_id].link; n 867 net/tipc/node.c tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT); n 868 net/tipc/node.c n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1); n 874 net/tipc/node.c tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT); n 875 net/tipc/node.c *maddr = &n->links[*bearer_id].maddr; n 878 net/tipc/node.c static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete) n 880 net/tipc/node.c struct tipc_link_entry *le = &n->links[bearer_id]; n 891 net/tipc/node.c tipc_node_write_lock(n); n 893 net/tipc/node.c __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr); n 902 net/tipc/node.c n->link_cnt--; n 904 net/tipc/node.c trace_tipc_node_link_down(n, true, "node link down or deleted!"); n 905 net/tipc/node.c tipc_node_write_unlock(n); n 907 net/tipc/node.c tipc_mon_remove_peer(n->net, n->addr, old_bearer_id); n 909 net/tipc/node.c tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr); n 910 net/tipc/node.c tipc_sk_rcv(n->net, &le->inputq); n 913 net/tipc/node.c static bool node_is_up(struct tipc_node *n) n 915 net/tipc/node.c return n->active_links[0] != INVALID_BEARER_ID; n 920 net/tipc/node.c struct tipc_node *n; n 926 net/tipc/node.c n = tipc_node_find(net, addr); n 927 net/tipc/node.c if (!n) n 929 net/tipc/node.c retval = node_is_up(n); n 930 net/tipc/node.c tipc_node_put(n); n 936 net/tipc/node.c struct tipc_node *n; n 939 net/tipc/node.c while ((n = tipc_node_find(net, addr))) { n 940 net/tipc/node.c tipc_node_put(n); n 952 net/tipc/node.c struct tipc_node *n; n 955 net/tipc/node.c n = tipc_node_find(net, addr); n 956 net/tipc/node.c if (n) { n 957 net/tipc/node.c if (!memcmp(n->peer_id, id, NODE_ID_LEN)) n 959 net/tipc/node.c tipc_node_put(n); n 966 net/tipc/node.c n = tipc_node_find_by_id(net, id); n 967 net/tipc/node.c if (n) { n 968 net/tipc/node.c addr = n->addr; n 969 net/tipc/node.c tipc_node_put(n); n 986 net/tipc/node.c struct tipc_node *n; n 1001 net/tipc/node.c n = tipc_node_create(net, addr, peer_id, capabilities); n 1002 net/tipc/node.c if (!n) n 1005 net/tipc/node.c tipc_node_write_lock(n); n 1007 net/tipc/node.c le = &n->links[b->identity]; n 1013 net/tipc/node.c sign_match = (signature == n->signature); n 1053 net/tipc/node.c n->signature = signature; n 1058 net/tipc/node.c n->signature = signature; n 1069 net/tipc/node.c n->signature = signature; n 1079 net/tipc/node.c if (n->link_cnt == 2) n 1088 net/tipc/node.c n->capabilities, n 1089 net/tipc/node.c tipc_bc_sndlink(n->net), n->bc_entry.link, n 1091 net/tipc/node.c &n->bc_entry.namedq, &l)) { n 1098 net/tipc/node.c if (n->state == NODE_FAILINGOVER) n 1101 net/tipc/node.c n->link_cnt++; n 1102 net/tipc/node.c tipc_node_calculate_timer(n, l); n 1103 net/tipc/node.c if (n->link_cnt == 1) { n 1104 net/tipc/node.c intv = 
jiffies + msecs_to_jiffies(n->keepalive_intv); n 1105 net/tipc/node.c if (!mod_timer(&n->timer, intv)) n 1106 net/tipc/node.c tipc_node_get(n); n 1111 net/tipc/node.c tipc_node_write_unlock(n); n 1113 net/tipc/node.c tipc_node_link_down(n, b->identity, false); n 1114 net/tipc/node.c tipc_node_put(n); n 1120 net/tipc/node.c struct tipc_node *n; n 1123 net/tipc/node.c list_for_each_entry_rcu(n, &tn->node_list, list) { n 1124 net/tipc/node.c tipc_node_link_down(n, bearer_id, true); n 1129 net/tipc/node.c static void tipc_node_reset_links(struct tipc_node *n) n 1133 net/tipc/node.c pr_warn("Resetting all links to %x\n", n->addr); n 1135 net/tipc/node.c trace_tipc_node_reset_links(n, true, " "); n 1137 net/tipc/node.c tipc_node_link_down(n, i, false); n 1144 net/tipc/node.c static void tipc_node_fsm_evt(struct tipc_node *n, int evt) n 1146 net/tipc/node.c int state = n->state; n 1311 net/tipc/node.c trace_tipc_node_fsm(n->peer_id, n->state, state, evt); n 1312 net/tipc/node.c n->state = state; n 1317 net/tipc/node.c trace_tipc_node_fsm(n->peer_id, n->state, state, evt); n 1320 net/tipc/node.c static void node_lost_contact(struct tipc_node *n, n 1325 net/tipc/node.c struct list_head *conns = &n->conn_sks; n 1329 net/tipc/node.c pr_debug("Lost contact with %x\n", n->addr); n 1330 net/tipc/node.c n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER); n 1331 net/tipc/node.c trace_tipc_node_lost_contact(n, true, " "); n 1334 net/tipc/node.c tipc_bcast_remove_peer(n->net, n->bc_entry.link); n 1338 net/tipc/node.c l = n->links[i].link; n 1344 net/tipc/node.c n->action_flags |= TIPC_NOTIFY_NODE_DOWN; n 1349 net/tipc/node.c SHORT_H_SIZE, 0, tipc_own_addr(n->net), n 1440 net/tipc/node.c struct tipc_node *n; n 1452 net/tipc/node.c n = tipc_node_find(net, dnode); n 1453 net/tipc/node.c if (unlikely(!n)) { n 1458 net/tipc/node.c tipc_node_read_lock(n); n 1459 net/tipc/node.c bearer_id = n->active_links[selector & 1]; n 1461 net/tipc/node.c tipc_node_read_unlock(n); n 1462 net/tipc/node.c tipc_node_put(n); n 1468 net/tipc/node.c le = &n->links[bearer_id]; n 1472 net/tipc/node.c tipc_node_read_unlock(n); n 1475 net/tipc/node.c tipc_node_link_down(n, bearer_id, false); n 1479 net/tipc/node.c tipc_node_put(n); n 1520 net/tipc/node.c struct tipc_node *n; n 1524 net/tipc/node.c list_for_each_entry_rcu(n, tipc_nodes(net), list) { n 1525 net/tipc/node.c dst = n->addr; n 1528 net/tipc/node.c if (!node_is_up(n)) n 1541 net/tipc/node.c static void tipc_node_mcast_rcv(struct tipc_node *n) n 1543 net/tipc/node.c struct tipc_bclink_entry *be = &n->bc_entry; n 1551 net/tipc/node.c tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2); n 1554 net/tipc/node.c static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr, n 1560 net/tipc/node.c rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr); n 1563 net/tipc/node.c tipc_node_reset_links(n); n 1575 net/tipc/node.c tipc_node_read_lock(n); n 1576 net/tipc/node.c ucl = n->links[bearer_id].link; n 1579 net/tipc/node.c tipc_node_read_unlock(n); n 1599 net/tipc/node.c struct tipc_node *n; n 1605 net/tipc/node.c n = tipc_node_find(net, dnode); n 1607 net/tipc/node.c n = tipc_node_find(net, msg_prevnode(hdr)); n 1608 net/tipc/node.c if (!n) { n 1612 net/tipc/node.c be = &n->bc_entry; n 1613 net/tipc/node.c le = &n->links[bearer_id]; n 1619 net/tipc/node.c tipc_node_read_lock(n); n 1621 net/tipc/node.c tipc_node_read_unlock(n); n 1628 net/tipc/node.c tipc_node_mcast_rcv(n); n 1631 net/tipc/node.c if (!skb_queue_empty(&n->bc_entry.namedq)) n 1632 
net/tipc/node.c tipc_named_rcv(net, &n->bc_entry.namedq); n 1636 net/tipc/node.c tipc_node_reset_links(n); n 1638 net/tipc/node.c tipc_node_put(n); n 1647 net/tipc/node.c static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb, n 1656 net/tipc/node.c int state = n->state; n 1663 net/tipc/node.c trace_tipc_node_check_state(n, true, " "); n 1665 net/tipc/node.c l = n->links[bearer_id].link; n 1676 net/tipc/node.c if ((pb_id != bearer_id) && n->links[pb_id].link) { n 1677 net/tipc/node.c pl = n->links[pb_id].link; n 1694 net/tipc/node.c tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT); n 1700 net/tipc/node.c tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT); n 1715 net/tipc/node.c __tipc_node_link_down(n, &pb_id, xmitq, &maddr); n 1716 net/tipc/node.c trace_tipc_node_link_down(n, true, n 1728 net/tipc/node.c if (n->state != NODE_FAILINGOVER) n 1729 net/tipc/node.c tipc_node_link_failover(n, pl, l, xmitq); n 1732 net/tipc/node.c if (less(syncpt, n->sync_point)) n 1733 net/tipc/node.c n->sync_point = syncpt; n 1737 net/tipc/node.c if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) { n 1738 net/tipc/node.c if (!more(rcv_nxt, n->sync_point)) n 1740 net/tipc/node.c tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT); n 1752 net/tipc/node.c if (n->capabilities & TIPC_TUNNEL_ENHANCED) n 1757 net/tipc/node.c __tipc_node_link_up(n, bearer_id, xmitq); n 1758 net/tipc/node.c if (n->state == SELF_UP_PEER_UP) { n 1759 net/tipc/node.c n->sync_point = syncpt; n 1761 net/tipc/node.c tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT); n 1766 net/tipc/node.c if (n->state == NODE_SYNCHING) { n 1775 net/tipc/node.c if (more(dlv_nxt, n->sync_point)) { n 1777 net/tipc/node.c tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT); n 1803 net/tipc/node.c struct tipc_node *n; n 1834 net/tipc/node.c n = tipc_node_find(net, msg_prevnode(hdr)); n 1835 net/tipc/node.c if (unlikely(!n)) n 1837 net/tipc/node.c le = &n->links[bearer_id]; n 1841 net/tipc/node.c tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq); n 1842 net/tipc/node.c else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack)) n 1843 net/tipc/node.c tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr); n 1846 net/tipc/node.c tipc_node_read_lock(n); n 1847 net/tipc/node.c if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) { n 1855 net/tipc/node.c tipc_node_read_unlock(n); n 1861 net/tipc/node.c tipc_node_write_lock(n); n 1862 net/tipc/node.c if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) { n 1868 net/tipc/node.c tipc_node_write_unlock(n); n 1872 net/tipc/node.c tipc_node_link_up(n, bearer_id, &xmitq); n 1875 net/tipc/node.c tipc_node_link_down(n, bearer_id, false); n 1877 net/tipc/node.c if (unlikely(!skb_queue_empty(&n->bc_entry.namedq))) n 1878 net/tipc/node.c tipc_named_rcv(net, &n->bc_entry.namedq); n 1880 net/tipc/node.c if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1))) n 1881 net/tipc/node.c tipc_node_mcast_rcv(n); n 1889 net/tipc/node.c tipc_node_put(n); n 1901 net/tipc/node.c struct tipc_node *n; n 1907 net/tipc/node.c list_for_each_entry_rcu(n, &tn->node_list, list) { n 1908 net/tipc/node.c tipc_node_write_lock(n); n 1909 net/tipc/node.c e = &n->links[bearer_id]; n 1917 net/tipc/node.c tipc_node_write_unlock(n); n 2053 net/tipc/node.c struct tipc_node *n; n 2059 net/tipc/node.c list_for_each_entry_rcu(n, &tn->node_list, list) { n 2060 net/tipc/node.c tipc_node_read_lock(n); n 2062 net/tipc/node.c l = n->links[i].link; n 2065 net/tipc/node.c found_node = n; n 2069 net/tipc/node.c tipc_node_read_unlock(n); n 2546 net/tipc/node.c int 
tipc_node_dump(struct tipc_node *n, bool more, char *buf) n 2551 net/tipc/node.c if (!n) { n 2556 net/tipc/node.c i += scnprintf(buf, sz, "node data: %x", n->addr); n 2557 net/tipc/node.c i += scnprintf(buf + i, sz - i, " %x", n->state); n 2558 net/tipc/node.c i += scnprintf(buf + i, sz - i, " %d", n->active_links[0]); n 2559 net/tipc/node.c i += scnprintf(buf + i, sz - i, " %d", n->active_links[1]); n 2560 net/tipc/node.c i += scnprintf(buf + i, sz - i, " %x", n->action_flags); n 2561 net/tipc/node.c i += scnprintf(buf + i, sz - i, " %u", n->failover_sent); n 2562 net/tipc/node.c i += scnprintf(buf + i, sz - i, " %u", n->sync_point); n 2563 net/tipc/node.c i += scnprintf(buf + i, sz - i, " %d", n->link_cnt); n 2564 net/tipc/node.c i += scnprintf(buf + i, sz - i, " %u", n->working_links); n 2565 net/tipc/node.c i += scnprintf(buf + i, sz - i, " %x", n->capabilities); n 2566 net/tipc/node.c i += scnprintf(buf + i, sz - i, " %lu\n", n->keepalive_intv); n 2572 net/tipc/node.c i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[0].mtu); n 2574 net/tipc/node.c i += tipc_media_addr_printf(buf + i, sz - i, &n->links[0].maddr); n 2576 net/tipc/node.c i += tipc_link_dump(n->links[0].link, TIPC_DUMP_NONE, buf + i); n 2578 net/tipc/node.c i += tipc_list_dump(&n->links[0].inputq, false, buf + i); n 2581 net/tipc/node.c i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[1].mtu); n 2583 net/tipc/node.c i += tipc_media_addr_printf(buf + i, sz - i, &n->links[1].maddr); n 2585 net/tipc/node.c i += tipc_link_dump(n->links[1].link, TIPC_DUMP_NONE, buf + i); n 2587 net/tipc/node.c i += tipc_list_dump(&n->links[1].inputq, false, buf + i); n 2590 net/tipc/node.c i += tipc_link_dump(n->bc_entry.link, TIPC_DUMP_NONE, buf + i); n 133 net/tipc/trace.h int tipc_node_dump(struct tipc_node *n, bool more, char *buf); n 326 net/tipc/trace.h TP_PROTO(struct tipc_node *n, bool more, const char *header), n 328 net/tipc/trace.h TP_ARGS(n, more, header), n 338 net/tipc/trace.h __entry->addr = tipc_node_get_addr(n); n 339 net/tipc/trace.h tipc_node_dump(n, more, __get_str(buf)); n 348 net/tipc/trace.h TP_PROTO(struct tipc_node *n, bool more, const char *header), \ n 349 net/tipc/trace.h TP_ARGS(n, more, header)) n 172 net/unix/af_unix.c static inline unsigned int unix_hash_fold(__wsum n) n 174 net/unix/af_unix.c unsigned int hash = (__force unsigned int)csum_fold(n); n 246 net/vmw_vsock/virtio_transport.c struct virtio_vsock_pkt *pkt, *n; n 258 net/vmw_vsock/virtio_transport.c list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) { n 265 net/vmw_vsock/virtio_transport.c list_for_each_entry_safe(pkt, n, &freeme, list) { n 1717 net/wireless/nl80211.c #define CMD(op, n) \ n 1721 net/wireless/nl80211.c if (nla_put_u32(msg, i, NL80211_CMD_ ## n)) \ n 10817 net/wireless/nl80211.c int i, n, low_index; n 10849 net/wireless/nl80211.c n = wdev->cqm_config->n_rssi_thresholds; n 10851 net/wireless/nl80211.c for (i = 0; i < n; i++) { n 10852 net/wireless/nl80211.c i = array_index_nospec(i, n); n 10859 net/wireless/nl80211.c low_index = array_index_nospec(low_index, n); n 10864 net/wireless/nl80211.c if (i < n) { n 10865 net/wireless/nl80211.c i = array_index_nospec(i, n); n 996 net/wireless/scan.c struct rb_node *n = rdev->bss_tree.rb_node; n 1000 net/wireless/scan.c while (n) { n 1001 net/wireless/scan.c bss = rb_entry(n, struct cfg80211_internal_bss, rbn); n 1007 net/wireless/scan.c n = n->rb_left; n 1009 net/wireless/scan.c n = n->rb_right; n 805 net/xfrm/xfrm_algo.c int i, n; n 807 net/xfrm/xfrm_algo.c for (i = 0, n = 
0; i < aalg_entries(); i++) n 809 net/xfrm/xfrm_algo.c n++; n 810 net/xfrm/xfrm_algo.c return n; n 816 net/xfrm/xfrm_algo.c int i, n; n 818 net/xfrm/xfrm_algo.c for (i = 0, n = 0; i < ealg_entries(); i++) n 820 net/xfrm/xfrm_algo.c n++; n 821 net/xfrm/xfrm_algo.c return n; n 18 net/xfrm/xfrm_hash.c struct hlist_head *n; n 21 net/xfrm/xfrm_hash.c n = kzalloc(sz, GFP_KERNEL); n 23 net/xfrm/xfrm_hash.c n = vzalloc(sz); n 25 net/xfrm/xfrm_hash.c n = (struct hlist_head *) n 29 net/xfrm/xfrm_hash.c return n; n 32 net/xfrm/xfrm_hash.c void xfrm_hash_free(struct hlist_head *n, unsigned int sz) n 35 net/xfrm/xfrm_hash.c kfree(n); n 37 net/xfrm/xfrm_hash.c vfree(n); n 39 net/xfrm/xfrm_hash.c free_pages((unsigned long)n, get_order(sz)); n 190 net/xfrm/xfrm_hash.h void xfrm_hash_free(struct hlist_head *n, unsigned int sz); n 826 net/xfrm/xfrm_policy.c struct xfrm_pol_inexact_node *n, n 845 net/xfrm/xfrm_policy.c hlist_for_each_entry(p, &n->hhead, bydst) { n 858 net/xfrm/xfrm_policy.c hlist_add_head_rcu(&policy->bydst, &n->hhead); n 869 net/xfrm/xfrm_policy.c &n->addr, n 870 net/xfrm/xfrm_policy.c n->prefixlen, n 873 net/xfrm/xfrm_policy.c &n->addr, n 874 net/xfrm/xfrm_policy.c n->prefixlen, n 889 net/xfrm/xfrm_policy.c struct xfrm_pol_inexact_node *n, n 897 net/xfrm/xfrm_policy.c WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root)); n 908 net/xfrm/xfrm_policy.c prefixlen = min(node->prefixlen, n->prefixlen); n 910 net/xfrm/xfrm_policy.c delta = xfrm_policy_addr_delta(&n->addr, &node->addr, n 917 net/xfrm/xfrm_policy.c bool same_prefixlen = node->prefixlen == n->prefixlen; n 920 net/xfrm/xfrm_policy.c hlist_for_each_entry(tmp, &n->hhead, bydst) { n 930 net/xfrm/xfrm_policy.c kfree_rcu(n, rcu); n 935 net/xfrm/xfrm_policy.c kfree_rcu(n, rcu); n 936 net/xfrm/xfrm_policy.c n = node; n 941 net/xfrm/xfrm_policy.c rb_link_node_rcu(&n->node, parent, p); n 942 net/xfrm/xfrm_policy.c rb_insert_color(&n->node, new); n 948 net/xfrm/xfrm_policy.c struct xfrm_pol_inexact_node *n, n 962 net/xfrm/xfrm_policy.c xfrm_policy_inexact_node_reinsert(net, node, &n->root, n 971 net/xfrm/xfrm_policy.c xfrm_policy_inexact_list_reinsert(net, n, family); n 1115 net/xfrm/xfrm_policy.c struct xfrm_pol_inexact_node *n; n 1128 net/xfrm/xfrm_policy.c n = xfrm_policy_inexact_insert_node(net, n 1135 net/xfrm/xfrm_policy.c if (!n) n 1138 net/xfrm/xfrm_policy.c return &n->hhead; n 1143 net/xfrm/xfrm_policy.c n = xfrm_policy_inexact_insert_node(net, n 1149 net/xfrm/xfrm_policy.c if (!n) n 1156 net/xfrm/xfrm_policy.c return &n->hhead; n 1159 net/xfrm/xfrm_policy.c n = xfrm_policy_inexact_insert_node(net, n 1160 net/xfrm/xfrm_policy.c &n->root, n 1165 net/xfrm/xfrm_policy.c if (!n) n 1168 net/xfrm/xfrm_policy.c return &n->hhead; n 1280 net/xfrm/xfrm_policy.c struct hlist_node *n; n 1282 net/xfrm/xfrm_policy.c hlist_for_each_entry_safe(policy, n, n 1292 net/xfrm/xfrm_policy.c hlist_for_each_entry_safe(policy, n, odst + i, bydst) n 1947 net/xfrm/xfrm_policy.c struct xfrm_pol_inexact_node *n; n 1957 net/xfrm/xfrm_policy.c n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr, n 1959 net/xfrm/xfrm_policy.c if (n) { n 1960 net/xfrm/xfrm_policy.c cand->res[XFRM_POL_CAND_DADDR] = &n->hhead; n 1961 net/xfrm/xfrm_policy.c n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr, n 1963 net/xfrm/xfrm_policy.c if (n) n 1964 net/xfrm/xfrm_policy.c cand->res[XFRM_POL_CAND_BOTH] = &n->hhead; n 1967 net/xfrm/xfrm_policy.c n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr, n 1969 net/xfrm/xfrm_policy.c if (n) n 1970 net/xfrm/xfrm_policy.c 
cand->res[XFRM_POL_CAND_SADDR] = &n->hhead; n 4318 net/xfrm/xfrm_policy.c int i, j, n = 0; n 4331 net/xfrm/xfrm_policy.c n++; n 4348 net/xfrm/xfrm_policy.c if (!n) n 1773 net/xfrm/xfrm_state.c __xfrm6_sort(void **dst, void **src, int n, n 1780 net/xfrm/xfrm_state.c for (i = 0; i < n; i++) { n 1790 net/xfrm/xfrm_state.c for (i = 0; i < n; i++) { n 1856 net/xfrm/xfrm_state.c __xfrm6_sort(void **dst, void **src, int n, n 1861 net/xfrm/xfrm_state.c for (i = 0; i < n; i++) n 1867 net/xfrm/xfrm_state.c xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n, n 1873 net/xfrm/xfrm_state.c __xfrm6_sort((void **)dst, (void **)src, n, n 1876 net/xfrm/xfrm_state.c for (i = 0; i < n; i++) n 1881 net/xfrm/xfrm_state.c xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n, n 1887 net/xfrm/xfrm_state.c __xfrm6_sort((void **)dst, (void **)src, n, n 1890 net/xfrm/xfrm_state.c for (i = 0; i < n; i++) n 2370 net/xfrm/xfrm_user.c int n = 0; n 2383 net/xfrm/xfrm_user.c err = copy_from_user_migrate((struct xfrm_migrate *)m, kmp, attrs, &n); n 2387 net/xfrm/xfrm_user.c if (!n) n 2397 net/xfrm/xfrm_user.c err = xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net, encap); n 30 samples/auxdisplay/cfag12864b-example.c #define CFAG12864B_BIT(n) (((unsigned char) 1) << (n)) n 153 samples/auxdisplay/cfag12864b-example.c unsigned char i, j, n; n 159 samples/auxdisplay/cfag12864b-example.c for (n = 0; n < CFAG12864B_BPB; n++) n 161 samples/auxdisplay/cfag12864b-example.c j * CFAG12864B_BPB + n]) n 164 samples/auxdisplay/cfag12864b-example.c CFAG12864B_BIT(n); n 186 samples/auxdisplay/cfag12864b-example.c static void example(unsigned char n) n 191 samples/auxdisplay/cfag12864b-example.c if (n > EXAMPLES) n 194 samples/auxdisplay/cfag12864b-example.c printf("Example %i/%i - ", n, EXAMPLES); n 196 samples/auxdisplay/cfag12864b-example.c switch (n) { n 244 samples/auxdisplay/cfag12864b-example.c unsigned char n; n 258 samples/auxdisplay/cfag12864b-example.c for (n = 1; n <= EXAMPLES; n++) { n 259 samples/auxdisplay/cfag12864b-example.c example(n); n 29 samples/bpf/tcp_basertt_kern.c int rv = 0, n; n 48 samples/bpf/tcp_basertt_kern.c n = bpf_getsockopt(skops, SOL_TCP, TCP_CONGESTION, n 50 samples/bpf/tcp_basertt_kern.c if (!n && !__builtin_memcmp(cong, nv, sizeof(nv)+1)) { n 53 samples/bpf/tcp_basertt_kern.c } else if (n) { n 54 samples/bpf/tcp_basertt_kern.c rv = n; n 33 samples/bpf/tracex3_kern.c static unsigned int log2l(unsigned long long n) n 35 samples/bpf/tracex3_kern.c #define S(k) if (n >= (1ull << k)) { i += k; n >>= k; } n 36 samples/bpf/tracex3_kern.c int i = -(n == 0); n 23 samples/vfs/test-fsmount.c int err, n; n 28 samples/vfs/test-fsmount.c n = read(fd, buf, sizeof(buf)); n 29 samples/vfs/test-fsmount.c if (n < 0) n 31 samples/vfs/test-fsmount.c n -= 2; n 35 samples/vfs/test-fsmount.c fprintf(stderr, "Error: %*.*s\n", n, n, buf + 2); n 38 samples/vfs/test-fsmount.c fprintf(stderr, "Warning: %*.*s\n", n, n, buf + 2); n 41 samples/vfs/test-fsmount.c fprintf(stderr, "Info: %*.*s\n", n, n, buf + 2); n 545 scripts/asn1_compiler.c int n; n 546 scripts/asn1_compiler.c for (n = 0; n < nr_tokens; n++) n 547 scripts/asn1_compiler.c debug("Token %3u: '%s'\n", n, token_list[n].content); n 749 scripts/asn1_compiler.c unsigned nr, t, n; n 752 scripts/asn1_compiler.c for (n = 0; n < nr_tokens - 1; n++) n 753 scripts/asn1_compiler.c if (token_list[n + 0].token_type == TOKEN_TYPE_NAME && n 754 scripts/asn1_compiler.c token_list[n + 1].token_type == TOKEN_ASSIGNMENT) n 776 scripts/asn1_compiler.c 
for (n = 0; n < nr_tokens - 1; n++) { n 777 scripts/asn1_compiler.c if (token_list[n + 0].token_type == TOKEN_TYPE_NAME && n 778 scripts/asn1_compiler.c token_list[n + 1].token_type == TOKEN_ASSIGNMENT) { n 779 scripts/asn1_compiler.c types[t].name = &token_list[n]; n 784 scripts/asn1_compiler.c types[t].name = &token_list[n + 1]; n 791 scripts/asn1_compiler.c for (n = 0; n < nr_types; n++) { n 792 scripts/asn1_compiler.c struct type *type = type_index[n]; n 306 scripts/dtc/checks.c int n = strspn(node->name, c->data); n 308 scripts/dtc/checks.c if (n < strlen(node->name)) n 310 scripts/dtc/checks.c node->name[n]); n 317 scripts/dtc/checks.c int n = strspn(node->name, c->data); n 319 scripts/dtc/checks.c if (n < node->basenamelen) n 321 scripts/dtc/checks.c node->name[n]); n 366 scripts/dtc/checks.c int n = strspn(prop->name, c->data); n 368 scripts/dtc/checks.c if (n < strlen(prop->name)) n 370 scripts/dtc/checks.c prop->name[n]); n 383 scripts/dtc/checks.c int n = strspn(name, c->data); n 385 scripts/dtc/checks.c if (n == strlen(prop->name)) n 396 scripts/dtc/checks.c if (name[n] == '#' && ((n == 0) || (name[n-1] == ','))) { n 397 scripts/dtc/checks.c name += n + 1; n 398 scripts/dtc/checks.c n = strspn(name, c->data); n 400 scripts/dtc/checks.c if (n < strlen(name)) n 402 scripts/dtc/checks.c name[n]); n 724 scripts/dtc/checks.c #define node_addr_cells(n) \ n 725 scripts/dtc/checks.c (((n)->addr_cells == -1) ? 2 : (n)->addr_cells) n 726 scripts/dtc/checks.c #define node_size_cells(n) \ n 727 scripts/dtc/checks.c (((n)->size_cells == -1) ? 1 : (n)->size_cells) n 57 scripts/dtc/dtc.h #define strprefixeq(a, n, b) (strlen(b) == (n) && (memcmp(a, b, n) == 0)) n 179 scripts/dtc/dtc.h #define for_each_property_withdel(n, p) \ n 180 scripts/dtc/dtc.h for ((p) = (n)->proplist; (p); (p) = (p)->next) n 182 scripts/dtc/dtc.h #define for_each_property(n, p) \ n 183 scripts/dtc/dtc.h for_each_property_withdel(n, p) \ n 186 scripts/dtc/dtc.h #define for_each_child_withdel(n, c) \ n 187 scripts/dtc/dtc.h for ((c) = (n)->children; (c); (c) = (c)->next_sibling) n 189 scripts/dtc/dtc.h #define for_each_child(n, c) \ n 190 scripts/dtc/dtc.h for_each_child_withdel(n, c) \ n 225 scripts/dtc/dtc.h cell_t propval_cell_n(struct property *prop, int n); n 13 scripts/dtc/include-prefixes/dt-bindings/bus/moxtet.h #define MOXTET_IRQ_PERIDOT(n) (8 + (n)) n 16 scripts/dtc/include-prefixes/dt-bindings/gpio/uniphier-gpio.h #define UNIPHIER_GPIO_IRQ(n) ((UNIPHIER_GPIO_IRQ_OFFSET) + (n)) n 39 scripts/dtc/libfdt/fdt_ro.c const char *s, *n; n 71 scripts/dtc/libfdt/fdt_ro.c n = memchr(s, '\0', len); n 72 scripts/dtc/libfdt/fdt_ro.c if (!n) { n 79 scripts/dtc/libfdt/fdt_ro.c *lenp = n - s; n 148 scripts/dtc/libfdt/fdt_ro.c static const struct fdt_reserve_entry *fdt_mem_rsv(const void *fdt, int n) n 150 scripts/dtc/libfdt/fdt_ro.c int offset = n * sizeof(struct fdt_reserve_entry); n 157 scripts/dtc/libfdt/fdt_ro.c return fdt_mem_rsv_(fdt, n); n 160 scripts/dtc/libfdt/fdt_ro.c int fdt_get_mem_rsv(const void *fdt, int n, uint64_t *address, uint64_t *size) n 165 scripts/dtc/libfdt/fdt_ro.c re = fdt_mem_rsv(fdt, n); n 158 scripts/dtc/libfdt/fdt_rw.c int fdt_del_mem_rsv(void *fdt, int n) n 160 scripts/dtc/libfdt/fdt_rw.c struct fdt_reserve_entry *re = fdt_mem_rsv_w_(fdt, n); n 164 scripts/dtc/libfdt/fdt_rw.c if (n >= fdt_num_mem_rsv(fdt)) n 435 scripts/dtc/libfdt/libfdt.h int fdt_get_mem_rsv(const void *fdt, int n, uint64_t *address, uint64_t *size); n 1527 scripts/dtc/libfdt/libfdt.h int fdt_del_mem_rsv(void *fdt, int n); n 29 
scripts/dtc/libfdt/libfdt_env.h #define EXTRACT_BYTE(x, n) ((unsigned long long)((uint8_t *)&x)[n]) n 36 scripts/dtc/libfdt/libfdt_internal.h static inline const struct fdt_reserve_entry *fdt_mem_rsv_(const void *fdt, int n) n 42 scripts/dtc/libfdt/libfdt_internal.h return rsv_table + n; n 44 scripts/dtc/libfdt/libfdt_internal.h static inline struct fdt_reserve_entry *fdt_mem_rsv_w_(void *fdt, int n) n 46 scripts/dtc/libfdt/libfdt_internal.h return (void *)(uintptr_t)fdt_mem_rsv_(fdt, n); n 441 scripts/dtc/livetree.c cell_t propval_cell_n(struct property *prop, int n) n 443 scripts/dtc/livetree.c assert(prop->val.len / sizeof(cell_t) >= n); n 444 scripts/dtc/livetree.c return fdt32_to_cpu(*((fdt32_t *)prop->val.val + n)); n 669 scripts/dtc/livetree.c int n = 0, i = 0; n 674 scripts/dtc/livetree.c n++; n 676 scripts/dtc/livetree.c if (n == 0) n 679 scripts/dtc/livetree.c tbl = xmalloc(n * sizeof(*tbl)); n 686 scripts/dtc/livetree.c qsort(tbl, n, sizeof(*tbl), cmp_reserve_info); n 689 scripts/dtc/livetree.c for (i = 0; i < (n-1); i++) n 691 scripts/dtc/livetree.c tbl[n-1]->next = NULL; n 708 scripts/dtc/livetree.c int n = 0, i = 0; n 712 scripts/dtc/livetree.c n++; n 714 scripts/dtc/livetree.c if (n == 0) n 717 scripts/dtc/livetree.c tbl = xmalloc(n * sizeof(*tbl)); n 722 scripts/dtc/livetree.c qsort(tbl, n, sizeof(*tbl), cmp_prop); n 725 scripts/dtc/livetree.c for (i = 0; i < (n-1); i++) n 727 scripts/dtc/livetree.c tbl[n-1]->next = NULL; n 744 scripts/dtc/livetree.c int n = 0, i = 0; n 748 scripts/dtc/livetree.c n++; n 750 scripts/dtc/livetree.c if (n == 0) n 753 scripts/dtc/livetree.c tbl = xmalloc(n * sizeof(*tbl)); n 758 scripts/dtc/livetree.c qsort(tbl, n, sizeof(*tbl), cmp_subnode); n 761 scripts/dtc/livetree.c for (i = 0; i < (n-1); i++) n 763 scripts/dtc/livetree.c tbl[n-1]->next_sibling = NULL; n 37 scripts/dtc/util.c int n, size = 0; /* start with 128 bytes */ n 46 scripts/dtc/util.c n = vsnprintf(NULL, 0, fmt, ap_copy) + 1; n 49 scripts/dtc/util.c p = xrealloc(p, size + n); n 51 scripts/dtc/util.c n = vsnprintf(p + size, n, fmt, ap); n 59 scripts/dtc/util.c int n; n 63 scripts/dtc/util.c n = xavsprintf_append(strp, fmt, ap); n 66 scripts/dtc/util.c return n; n 71 scripts/dtc/util.c int n; n 77 scripts/dtc/util.c n = xavsprintf_append(strp, fmt, ap); n 80 scripts/dtc/util.c return n; n 395 scripts/gcc-plugins/gcc-common.h static inline struct cgraph_node *cgraph_alias_target(struct cgraph_node *n) n 397 scripts/gcc-plugins/gcc-common.h return cgraph_alias_aliased_node(n); n 22 scripts/gcc-plugins/gcc-generate-gimple-pass.h #define __GCC_PLUGIN_STRINGIFY(n) #n n 23 scripts/gcc-plugins/gcc-generate-gimple-pass.h #define _GCC_PLUGIN_STRINGIFY(n) __GCC_PLUGIN_STRINGIFY(n) n 27 scripts/gcc-plugins/gcc-generate-gimple-pass.h #define __PASS_NAME_PASS_DATA(n) _GCC_PLUGIN_CONCAT2(n, _pass_data) n 30 scripts/gcc-plugins/gcc-generate-gimple-pass.h #define __PASS_NAME_PASS(n) _GCC_PLUGIN_CONCAT2(n, _pass) n 35 scripts/gcc-plugins/gcc-generate-gimple-pass.h #define __MAKE_PASS_NAME_PASS(n) _GCC_PLUGIN_CONCAT3(make_, n, _pass) n 42 scripts/gcc-plugins/gcc-generate-gimple-pass.h #define __GATE(n) _GCC_PLUGIN_CONCAT2(n, _gate) n 51 scripts/gcc-plugins/gcc-generate-gimple-pass.h #define __EXECUTE(n) _GCC_PLUGIN_CONCAT2(n, _execute) n 30 scripts/gcc-plugins/gcc-generate-ipa-pass.h #define __GCC_PLUGIN_STRINGIFY(n) #n n 31 scripts/gcc-plugins/gcc-generate-ipa-pass.h #define _GCC_PLUGIN_STRINGIFY(n) __GCC_PLUGIN_STRINGIFY(n) n 35 scripts/gcc-plugins/gcc-generate-ipa-pass.h #define 
__PASS_NAME_PASS_DATA(n) _GCC_PLUGIN_CONCAT2(n, _pass_data) n 38 scripts/gcc-plugins/gcc-generate-ipa-pass.h #define __PASS_NAME_PASS(n) _GCC_PLUGIN_CONCAT2(n, _pass) n 43 scripts/gcc-plugins/gcc-generate-ipa-pass.h #define __MAKE_PASS_NAME_PASS(n) _GCC_PLUGIN_CONCAT3(make_, n, _pass) n 49 scripts/gcc-plugins/gcc-generate-ipa-pass.h #define __GENERATE_SUMMARY(n) _GCC_PLUGIN_CONCAT2(n, _generate_summary) n 56 scripts/gcc-plugins/gcc-generate-ipa-pass.h #define __READ_SUMMARY(n) _GCC_PLUGIN_CONCAT2(n, _read_summary) n 63 scripts/gcc-plugins/gcc-generate-ipa-pass.h #define __WRITE_SUMMARY(n) _GCC_PLUGIN_CONCAT2(n, _write_summary) n 70 scripts/gcc-plugins/gcc-generate-ipa-pass.h #define __READ_OPTIMIZATION_SUMMARY(n) _GCC_PLUGIN_CONCAT2(n, _read_optimization_summary) n 77 scripts/gcc-plugins/gcc-generate-ipa-pass.h #define __WRITE_OPTIMIZATION_SUMMARY(n) _GCC_PLUGIN_CONCAT2(n, _write_optimization_summary) n 84 scripts/gcc-plugins/gcc-generate-ipa-pass.h #define __STMT_FIXUP(n) _GCC_PLUGIN_CONCAT2(n, _stmt_fixup) n 91 scripts/gcc-plugins/gcc-generate-ipa-pass.h #define __FUNCTION_TRANSFORM(n) _GCC_PLUGIN_CONCAT2(n, _function_transform) n 98 scripts/gcc-plugins/gcc-generate-ipa-pass.h #define __VARIABLE_TRANSFORM(n) _GCC_PLUGIN_CONCAT2(n, _variable_transform) n 106 scripts/gcc-plugins/gcc-generate-ipa-pass.h #define __GATE(n) _GCC_PLUGIN_CONCAT2(n, _gate) n 115 scripts/gcc-plugins/gcc-generate-ipa-pass.h #define __EXECUTE(n) _GCC_PLUGIN_CONCAT2(n, _execute) n 22 scripts/gcc-plugins/gcc-generate-rtl-pass.h #define __GCC_PLUGIN_STRINGIFY(n) #n n 23 scripts/gcc-plugins/gcc-generate-rtl-pass.h #define _GCC_PLUGIN_STRINGIFY(n) __GCC_PLUGIN_STRINGIFY(n) n 27 scripts/gcc-plugins/gcc-generate-rtl-pass.h #define __PASS_NAME_PASS_DATA(n) _GCC_PLUGIN_CONCAT2(n, _pass_data) n 30 scripts/gcc-plugins/gcc-generate-rtl-pass.h #define __PASS_NAME_PASS(n) _GCC_PLUGIN_CONCAT2(n, _pass) n 35 scripts/gcc-plugins/gcc-generate-rtl-pass.h #define __MAKE_PASS_NAME_PASS(n) _GCC_PLUGIN_CONCAT3(make_, n, _pass) n 42 scripts/gcc-plugins/gcc-generate-rtl-pass.h #define __GATE(n) _GCC_PLUGIN_CONCAT2(n, _gate) n 51 scripts/gcc-plugins/gcc-generate-rtl-pass.h #define __EXECUTE(n) _GCC_PLUGIN_CONCAT2(n, _execute) n 22 scripts/gcc-plugins/gcc-generate-simple_ipa-pass.h #define __GCC_PLUGIN_STRINGIFY(n) #n n 23 scripts/gcc-plugins/gcc-generate-simple_ipa-pass.h #define _GCC_PLUGIN_STRINGIFY(n) __GCC_PLUGIN_STRINGIFY(n) n 27 scripts/gcc-plugins/gcc-generate-simple_ipa-pass.h #define __PASS_NAME_PASS_DATA(n) _GCC_PLUGIN_CONCAT2(n, _pass_data) n 30 scripts/gcc-plugins/gcc-generate-simple_ipa-pass.h #define __PASS_NAME_PASS(n) _GCC_PLUGIN_CONCAT2(n, _pass) n 35 scripts/gcc-plugins/gcc-generate-simple_ipa-pass.h #define __MAKE_PASS_NAME_PASS(n) _GCC_PLUGIN_CONCAT3(make_, n, _pass) n 42 scripts/gcc-plugins/gcc-generate-simple_ipa-pass.h #define __GATE(n) _GCC_PLUGIN_CONCAT2(n, _gate) n 51 scripts/gcc-plugins/gcc-generate-simple_ipa-pass.h #define __EXECUTE(n) _GCC_PLUGIN_CONCAT2(n, _execute) n 45 scripts/genksyms/genksyms.c int n; n 353 scripts/genksyms/genksyms.c struct string_list *n, *n2; n 357 scripts/genksyms/genksyms.c for (va_start(ap, start); (n = va_arg(ap, struct string_list *));) { n 358 scripts/genksyms/genksyms.c for (n2 = n; n2->next; n2 = n2->next) n 361 scripts/genksyms/genksyms.c start = n; n 381 scripts/genksyms/genksyms.c struct string_list *res, *n; n 385 scripts/genksyms/genksyms.c n = res = copy_node(start); n 387 scripts/genksyms/genksyms.c n->next = copy_node(start); n 388 scripts/genksyms/genksyms.c n = n->next; 
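The scripts/genksyms/genksyms.c hits just above (lines 381-388, with line 390 following below) show a classic C idiom for duplicating a singly linked list: n acts as a trailing tail pointer, advanced after each copied node and NULL-terminated at the end. The sketch below illustrates that idiom in isolation; it is not the kernel code itself, and struct node, copy_node() and copy_list_range() are simplified stand-ins for genksyms' string_list handling, shown only under that assumption.

    /* Illustrative sketch of the trailing-tail-pointer copy idiom. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct node {
            struct node *next;
            char string[32];
    };

    static struct node *copy_node(const struct node *src)
    {
            struct node *p = malloc(sizeof(*p));

            if (!p)
                    abort();
            memcpy(p, src, sizeof(*p));
            p->next = NULL;                 /* copy is detached from the source list */
            return p;
    }

    /* Duplicate the nodes in [start, end); returns the head of the copy. */
    static struct node *copy_list_range(const struct node *start,
                                        const struct node *end)
    {
            struct node *res, *n;

            if (start == end)
                    return NULL;
            n = res = copy_node(start);     /* n starts out as the head of the copy */
            for (start = start->next; start != end; start = start->next) {
                    n->next = copy_node(start);     /* append a copy of each node */
                    n = n->next;                    /* advance the tail pointer */
            }
            n->next = NULL;                 /* terminate the copied list */
            return res;
    }

    int main(void)
    {
            struct node c = { NULL, "c" }, b = { &c, "b" }, a = { &b, "a" };
            struct node *copy = copy_list_range(&a, NULL), *p, *next;

            for (p = copy; p; p = p->next)
                    printf("%s\n", p->string);      /* prints a, b, c */
            for (p = copy; p; p = next) {           /* release the copy */
                    next = p->next;
                    free(p);
            }
            return 0;
    }

Keeping a dedicated tail pointer means each append is O(1) rather than re-walking the copy, so duplicating the range stays linear in its length.
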
n 390 scripts/genksyms/genksyms.c n->next = NULL; n 441 scripts/genksyms/genksyms.c size_t n; n 443 scripts/genksyms/genksyms.c for (n = 0; n < ARRAY_SIZE(symbol_types); n++) { n 444 scripts/genksyms/genksyms.c if (node.string[0] == symbol_types[n].n) { n 445 scripts/genksyms/genksyms.c node.tag = n; n 494 scripts/genksyms/genksyms.c if (symbol_types[list->tag].n) { n 495 scripts/genksyms/genksyms.c putc(symbol_types[list->tag].n, f); n 586 scripts/genksyms/genksyms.c struct string_list *n; n 591 scripts/genksyms/genksyms.c n = concat_list(mk_node n 598 scripts/genksyms/genksyms.c add_symbol(cur->string, cur->tag, n, 0); n 656 scripts/genksyms/genksyms.c struct symbol *n = sym->expansion_trail; n 675 scripts/genksyms/genksyms.c sym = n; n 847 scripts/genksyms/genksyms.c if (symbol_types[sym->type].n) { n 848 scripts/genksyms/genksyms.c putc(symbol_types[sym->type].n, dumpfile); n 97 scripts/insert-sys-cert.c char *w, *p, *n; n 112 scripts/insert-sys-cert.c n = strstr(l, name); n 113 scripts/insert-sys-cert.c if (n) n 116 scripts/insert-sys-cert.c if (!n) { n 139 scripts/insert-sys-cert.c int i, n; n 144 scripts/insert-sys-cert.c n = symtab->sh_size / symtab->sh_entsize; n 147 scripts/insert-sys-cert.c for (i = 0; i < n; i++) { n 296 scripts/kconfig/confdata.c static int add_byte(int c, char **lineptr, size_t slen, size_t *n) n 300 scripts/kconfig/confdata.c if (new_size > *n) { n 308 scripts/kconfig/confdata.c *n = new_size; n 316 scripts/kconfig/confdata.c static ssize_t compat_getline(char **lineptr, size_t *n, FILE *stream) n 326 scripts/kconfig/confdata.c if (add_byte(c, &line, slen, n) < 0) n 331 scripts/kconfig/confdata.c if (add_byte('\0', &line, slen, n) < 0) n 338 scripts/kconfig/confdata.c if (add_byte(c, &line, slen, n) < 0) n 1250 scripts/kconfig/confdata.c int n, p[3]; n 1252 scripts/kconfig/confdata.c n = 0; n 1257 scripts/kconfig/confdata.c p[n++] = tmp; n 1264 scripts/kconfig/confdata.c if( n >=3 ) { n 1268 scripts/kconfig/confdata.c switch( n ) { n 61 scripts/kconfig/list.h #define list_for_each_entry_safe(pos, n, head, member) \ n 63 scripts/kconfig/list.h n = list_entry(pos->member.next, typeof(*pos), member); \ n 65 scripts/kconfig/list.h pos = n, n = list_entry(n->member.next, typeof(*n), member)) n 89 scripts/kconfig/lkc.h char *xstrndup(const char *s, size_t n); n 171 scripts/kconfig/lxdialog/dialog.h void item_set(int n); n 158 scripts/kconfig/lxdialog/menubox.c static void do_scroll(WINDOW *win, int *scroll, int n) n 162 scripts/kconfig/lxdialog/menubox.c wscrl(win, n); n 164 scripts/kconfig/lxdialog/menubox.c *scroll = *scroll + n; n 11 scripts/kconfig/lxdialog/textbox.c static void back_lines(int n); n 267 scripts/kconfig/lxdialog/textbox.c static void back_lines(int n) n 273 scripts/kconfig/lxdialog/textbox.c for (i = 0; i < n; i++) { n 658 scripts/kconfig/lxdialog/util.c int n = 0; n 662 scripts/kconfig/lxdialog/util.c n++; n 663 scripts/kconfig/lxdialog/util.c return n; n 666 scripts/kconfig/lxdialog/util.c void item_set(int n) n 670 scripts/kconfig/lxdialog/util.c if (i++ == n) n 676 scripts/kconfig/lxdialog/util.c int n = 0; n 681 scripts/kconfig/lxdialog/util.c return n; n 682 scripts/kconfig/lxdialog/util.c n++; n 353 scripts/kconfig/preprocess.c unsigned long n; n 362 scripts/kconfig/preprocess.c n = strtoul(tmp, &endptr, 10); n 363 scripts/kconfig/preprocess.c if (!*endptr && n > 0 && n <= argc) { n 364 scripts/kconfig/preprocess.c res = xstrdup(argv[n - 1]); n 120 scripts/kconfig/util.c char *xstrndup(const char *s, size_t n) n 124 
scripts/kconfig/util.c p = strndup(s, n); n 1997 scripts/mod/modpost.c size_t n = strcspn(s, "."); n 1999 scripts/mod/modpost.c if (n && s[n]) { n 2000 scripts/mod/modpost.c size_t m = strspn(s + n + 1, "0123456789"); n 2001 scripts/mod/modpost.c if (m && (s[n + m] == '.' || s[n + m] == 0)) n 2002 scripts/mod/modpost.c s[n] = 0; n 2510 scripts/mod/modpost.c int n; n 2512 scripts/mod/modpost.c for (n = 0; n < SYMBOL_HASH_SIZE ; n++) { n 2513 scripts/mod/modpost.c symbol = symbolhash[n]; n 2568 scripts/mod/modpost.c int n; n 2676 scripts/mod/modpost.c for (n = 0; n < SYMBOL_HASH_SIZE; n++) { n 2679 scripts/mod/modpost.c for (s = symbolhash[n]; s; s = s->next) { n 318 scripts/recordmcount.c size_t n; n 335 scripts/recordmcount.c n = write(fd_map, file_map, sb.st_size); n 336 scripts/recordmcount.c if (n != sb.st_size) { n 342 scripts/recordmcount.c n = write(fd_map, file_append, file_append_size); n 343 scripts/recordmcount.c if (n != file_append_size) { n 175 scripts/sign-file.c int n; n 181 scripts/sign-file.c n = BIO_read(b, buf, 2); n 182 scripts/sign-file.c if (n != 2) { n 187 scripts/sign-file.c if (n >= 0) { n 231 scripts/sign-file.c int opt, n; n 360 scripts/sign-file.c while ((n = BIO_read(bm, buf, sizeof(buf))), n 361 scripts/sign-file.c n > 0) { n 362 scripts/sign-file.c ERR(BIO_write(bd, buf, n) < 0, "%s", dest_name); n 365 scripts/sign-file.c ERR(n < 0, "%s", module_name); n 382 scripts/sign-file.c while ((n = BIO_read(b, buf, sizeof(buf))), n > 0) n 383 scripts/sign-file.c ERR(BIO_write(bd, buf, n) < 0, "%s", dest_name); n 1190 scripts/unifdef.c strlcmp(const char *s, const char *t, size_t n) n 1192 scripts/unifdef.c while (n-- && *t != '\0') n 845 security/apparmor/apparmorfs.c struct multi_transaction *new, size_t n) n 849 security/apparmor/apparmorfs.c AA_BUG(n > MULTI_TRANSACTION_LIMIT); n 851 security/apparmor/apparmorfs.c new->size = n; n 692 security/apparmor/domain.c char *n = kstrdup(name, GFP_ATOMIC); n 694 security/apparmor/domain.c if (n) { n 699 security/apparmor/domain.c new_profile = aa_new_null_profile(profile, false, n, n 703 security/apparmor/domain.c strcpy((char *)name, n); n 704 security/apparmor/domain.c kfree(n); n 683 security/apparmor/file.c unsigned int n; n 688 security/apparmor/file.c n = iterate_fd(files, 0, match_file, label); n 689 security/apparmor/file.c if (!n) /* none found? 
*/ n 697 security/apparmor/file.c replace_fd(n - 1, devnull, 0); n 698 security/apparmor/file.c } while ((n = iterate_fd(files, n, match_file, label)) != 0); n 58 security/apparmor/include/label.h int aa_vec_unique(struct aa_profile **vec, int n, int flags); n 327 security/apparmor/include/label.h size_t n, gfp_t gfp, bool create, n 332 security/apparmor/include/label.h static inline const char *aa_label_strn_split(const char *str, int n) n 337 security/apparmor/include/label.h state = aa_dfa_matchn_until(stacksplitdfa, DFA_START, str, n, &pos); n 49 security/apparmor/include/lib.h const char *skipn_spaces(const char *str, size_t n); n 51 security/apparmor/include/lib.h const char *aa_splitn_fqname(const char *fqname, size_t n, const char **ns_name, n 133 security/apparmor/include/match.h const char *str, int n, const char **retpos); n 186 security/apparmor/include/policy.h size_t n); n 189 security/apparmor/include/policy.h const char *fqname, size_t n); n 91 security/apparmor/include/policy_ns.h struct aa_ns *aa_findn_ns(struct aa_ns *root, const char *name, size_t n); n 92 security/apparmor/include/policy_ns.h struct aa_ns *__aa_lookupn_ns(struct aa_ns *view, const char *hname, size_t n); n 93 security/apparmor/include/policy_ns.h struct aa_ns *aa_lookupn_ns(struct aa_ns *view, const char *name, size_t n); n 142 security/apparmor/include/policy_ns.h const char *name, size_t n) n 144 security/apparmor/include/policy_ns.h return (struct aa_ns *)__policy_strn_find(head, name, n); n 186 security/apparmor/label.c static bool vec_is_stale(struct aa_profile **vec, int n) n 192 security/apparmor/label.c for (i = 0; i < n; i++) { n 200 security/apparmor/label.c static bool vec_unconfined(struct aa_profile **vec, int n) n 206 security/apparmor/label.c for (i = 0; i < n; i++) { n 224 security/apparmor/label.c static inline int unique(struct aa_profile **vec, int n) n 228 security/apparmor/label.c AA_BUG(n < 1); n 232 security/apparmor/label.c for (i = 1; i < n; i++) { n 262 security/apparmor/label.c int aa_vec_unique(struct aa_profile **vec, int n, int flags) n 266 security/apparmor/label.c AA_BUG(n < 1); n 270 security/apparmor/label.c if (n > 8) { n 271 security/apparmor/label.c sort(vec, n, sizeof(struct aa_profile *), sort_cmp, NULL); n 272 security/apparmor/label.c dups = unique(vec, n); n 277 security/apparmor/label.c for (i = 1; i < n; i++) { n 306 security/apparmor/label.c vec[n - dups] = NULL; n 692 security/apparmor/label.c static struct aa_label *__vec_find(struct aa_profile **vec, int n) n 698 security/apparmor/label.c AA_BUG(n <= 0); n 700 security/apparmor/label.c node = vec_labelset(vec, n)->root.rb_node; n 703 security/apparmor/label.c int result = vec_cmp(this->vec, this->size, vec, n); n 806 security/apparmor/label.c static struct aa_label *vec_find(struct aa_profile **vec, int n) n 814 security/apparmor/label.c AA_BUG(n <= 0); n 816 security/apparmor/label.c ls = vec_labelset(vec, n); n 818 security/apparmor/label.c label = __vec_find(vec, n); n 1808 security/apparmor/label.c static int label_count_strn_entries(const char *str, size_t n) n 1810 security/apparmor/label.c const char *end = str + n; n 1835 security/apparmor/label.c const char *str, size_t n) n 1837 security/apparmor/label.c const char *first = skipn_spaces(str, n); n 1840 security/apparmor/label.c return aa_fqlookupn_profile(base, str, n); n 1842 security/apparmor/label.c return aa_fqlookupn_profile(currentbase, str, n); n 1858 security/apparmor/label.c size_t n, gfp_t gfp, bool create, n 1864 
security/apparmor/label.c const char *end = str + n; n 1870 security/apparmor/label.c str = skipn_spaces(str, n); n 74 security/apparmor/lib.c const char *skipn_spaces(const char *str, size_t n) n 76 security/apparmor/lib.c for (; n && isspace(*str); --n) n 78 security/apparmor/lib.c if (n) n 83 security/apparmor/lib.c const char *aa_splitn_fqname(const char *fqname, size_t n, const char **ns_name, n 86 security/apparmor/lib.c const char *end = fqname + n; n 87 security/apparmor/lib.c const char *name = skipn_spaces(fqname, n); n 573 security/apparmor/match.c const char *str, int n, const char **retpos) n 591 security/apparmor/match.c for (; n; n--) { n 602 security/apparmor/match.c for (; n; n--) { n 396 security/apparmor/policy.c const char *hname, size_t n) n 401 security/apparmor/policy.c for (split = strnstr(hname, "//", n); split; n 402 security/apparmor/policy.c split = strnstr(hname, "//", n)) { n 409 security/apparmor/policy.c n -= split + 2 - hname; n 413 security/apparmor/policy.c if (n) n 414 security/apparmor/policy.c return __strn_find_child(&base->profiles, hname, n); n 433 security/apparmor/policy.c size_t n) n 439 security/apparmor/policy.c profile = __lookupn_profile(&ns->base, hname, n); n 444 security/apparmor/policy.c if (!profile && strncmp(hname, "unconfined", n) == 0) n 457 security/apparmor/policy.c const char *fqname, size_t n) n 464 security/apparmor/policy.c name = aa_splitn_fqname(fqname, n, &ns_name, &ns_len); n 473 security/apparmor/policy.c profile = aa_lookupn_profile(ns, name, n - (name - fqname)); n 162 security/apparmor/policy_ns.c struct aa_ns *aa_findn_ns(struct aa_ns *root, const char *name, size_t n) n 167 security/apparmor/policy_ns.c ns = aa_get_ns(__aa_findn_ns(&root->sub_ns, name, n)); n 200 security/apparmor/policy_ns.c struct aa_ns *__aa_lookupn_ns(struct aa_ns *view, const char *hname, size_t n) n 205 security/apparmor/policy_ns.c for (split = strnstr(hname, "//", n); split; n 206 security/apparmor/policy_ns.c split = strnstr(hname, "//", n)) { n 211 security/apparmor/policy_ns.c n -= split + 2 - hname; n 215 security/apparmor/policy_ns.c if (n) n 216 security/apparmor/policy_ns.c return __aa_findn_ns(&ns->sub_ns, hname, n); n 231 security/apparmor/policy_ns.c struct aa_ns *aa_lookupn_ns(struct aa_ns *view, const char *name, size_t n) n 236 security/apparmor/policy_ns.c ns = aa_get_ns(__aa_lookupn_ns(view, name, n)); n 36 security/integrity/iint.c struct rb_node *n = integrity_iint_tree.rb_node; n 38 security/integrity/iint.c while (n) { n 39 security/integrity/iint.c iint = rb_entry(n, struct integrity_iint_cache, rb_node); n 42 security/integrity/iint.c n = n->rb_left; n 44 security/integrity/iint.c n = n->rb_right; n 48 security/integrity/iint.c if (!n) n 657 security/keys/key.c struct rb_node *n; n 663 security/keys/key.c n = key_serial_tree.rb_node; n 664 security/keys/key.c while (n) { n 665 security/keys/key.c key = rb_entry(n, struct key, serial_node); n 668 security/keys/key.c n = n->rb_left; n 670 security/keys/key.c n = n->rb_right; n 171 security/keys/keyring.c int n, desc_len = index_key->desc_len; n 181 security/keys/keyring.c n = desc_len; n 182 security/keys/keyring.c if (n <= 0) n 184 security/keys/keyring.c if (n > 4) n 185 security/keys/keyring.c n = 4; n 187 security/keys/keyring.c memcpy(&piece, description, n); n 188 security/keys/keyring.c description += n; n 189 security/keys/keyring.c desc_len -= n; n 217 security/keys/keyring.c size_t n = min_t(size_t, index_key->desc_len, sizeof(index_key->desc)); n 219 
security/keys/keyring.c memcpy(index_key->desc, index_key->description, n); n 273 security/keys/keyring.c int desc_len = index_key->desc_len, n = sizeof(chunk); n 293 security/keys/keyring.c if (desc_len > n) n 294 security/keys/keyring.c desc_len = n; n 64 security/keys/proc.c static struct rb_node *key_serial_next(struct seq_file *p, struct rb_node *n) n 68 security/keys/proc.c n = rb_next(n); n 69 security/keys/proc.c while (n) { n 70 security/keys/proc.c struct key *key = rb_entry(n, struct key, serial_node); n 73 security/keys/proc.c n = rb_next(n); n 75 security/keys/proc.c return n; n 81 security/keys/proc.c struct rb_node *n = key_serial_tree.rb_node; n 84 security/keys/proc.c while (n) { n 85 security/keys/proc.c struct key *key = rb_entry(n, struct key, serial_node); n 89 security/keys/proc.c n = n->rb_left; n 91 security/keys/proc.c n = n->rb_right; n 105 security/keys/proc.c n = rb_next(&minkey->serial_node); n 106 security/keys/proc.c if (!n) n 108 security/keys/proc.c minkey = rb_entry(n, struct key, serial_node); n 129 security/keys/proc.c static inline key_serial_t key_node_serial(struct rb_node *n) n 131 security/keys/proc.c struct key *key = rb_entry(n, struct key, serial_node); n 137 security/keys/proc.c struct rb_node *n; n 139 security/keys/proc.c n = key_serial_next(p, v); n 140 security/keys/proc.c if (n) n 141 security/keys/proc.c *_pos = key_node_serial(n); n 144 security/keys/proc.c return n; n 252 security/keys/proc.c static struct rb_node *__key_user_next(struct user_namespace *user_ns, struct rb_node *n) n 254 security/keys/proc.c while (n) { n 255 security/keys/proc.c struct key_user *user = rb_entry(n, struct key_user, node); n 258 security/keys/proc.c n = rb_next(n); n 260 security/keys/proc.c return n; n 263 security/keys/proc.c static struct rb_node *key_user_next(struct user_namespace *user_ns, struct rb_node *n) n 265 security/keys/proc.c return __key_user_next(user_ns, rb_next(n)); n 270 security/keys/proc.c struct rb_node *n = rb_first(r); n 271 security/keys/proc.c return __key_user_next(user_ns, n); n 144 security/lockdown/lockdown.c size_t n, loff_t *ppos) n 149 security/lockdown/lockdown.c state = memdup_user_nul(buf, n); n 168 security/lockdown/lockdown.c return err ? err : n; n 2457 security/selinux/hooks.c unsigned n; n 2484 security/selinux/hooks.c n = iterate_fd(files, 0, match_file, cred); n 2485 security/selinux/hooks.c if (!n) /* none found? 
*/ n 2493 security/selinux/hooks.c replace_fd(n - 1, devnull, 0); n 2494 security/selinux/hooks.c } while ((n = iterate_fd(files, n, match_file, cred)) != 0); n 1516 security/selinux/selinuxfs.c loff_t n = *pos - 1; n 1521 security/selinux/selinuxfs.c return sel_avc_get_stat_idx(&n); n 39 security/selinux/ss/avtab.c static const u32 n = 0xe6546b64; n 50 security/selinux/ss/avtab.c hash = hash * m + n; \ n 96 security/selinux/ss/avtab.c struct avtab_node **n = &h->htable[hvalue]; n 98 security/selinux/ss/avtab.c newnode->next = *n; n 99 security/selinux/ss/avtab.c *n = newnode; n 54 security/selinux/ss/ebitmap.c struct ebitmap_node *n, *new, *prev; n 57 security/selinux/ss/ebitmap.c n = src->node; n 59 security/selinux/ss/ebitmap.c while (n) { n 65 security/selinux/ss/ebitmap.c new->startbit = n->startbit; n 66 security/selinux/ss/ebitmap.c memcpy(new->maps, n->maps, EBITMAP_SIZE / 8); n 73 security/selinux/ss/ebitmap.c n = n->next; n 244 security/selinux/ss/ebitmap.c struct ebitmap_node *n; n 249 security/selinux/ss/ebitmap.c n = e->node; n 250 security/selinux/ss/ebitmap.c while (n && (n->startbit <= bit)) { n 251 security/selinux/ss/ebitmap.c if ((n->startbit + EBITMAP_SIZE) > bit) n 252 security/selinux/ss/ebitmap.c return ebitmap_node_get_bit(n, bit); n 253 security/selinux/ss/ebitmap.c n = n->next; n 261 security/selinux/ss/ebitmap.c struct ebitmap_node *n, *prev, *new; n 264 security/selinux/ss/ebitmap.c n = e->node; n 265 security/selinux/ss/ebitmap.c while (n && n->startbit <= bit) { n 266 security/selinux/ss/ebitmap.c if ((n->startbit + EBITMAP_SIZE) > bit) { n 268 security/selinux/ss/ebitmap.c ebitmap_node_set_bit(n, bit); n 272 security/selinux/ss/ebitmap.c ebitmap_node_clr_bit(n, bit); n 274 security/selinux/ss/ebitmap.c s = find_first_bit(n->maps, EBITMAP_SIZE); n 279 security/selinux/ss/ebitmap.c if (!n->next) { n 291 security/selinux/ss/ebitmap.c prev->next = n->next; n 293 security/selinux/ss/ebitmap.c e->node = n->next; n 294 security/selinux/ss/ebitmap.c kmem_cache_free(ebitmap_node_cachep, n); n 298 security/selinux/ss/ebitmap.c prev = n; n 299 security/selinux/ss/ebitmap.c n = n->next; n 312 security/selinux/ss/ebitmap.c if (!n) n 329 security/selinux/ss/ebitmap.c struct ebitmap_node *n, *temp; n 334 security/selinux/ss/ebitmap.c n = e->node; n 335 security/selinux/ss/ebitmap.c while (n) { n 336 security/selinux/ss/ebitmap.c temp = n; n 337 security/selinux/ss/ebitmap.c n = n->next; n 348 security/selinux/ss/ebitmap.c struct ebitmap_node *n = NULL; n 406 security/selinux/ss/ebitmap.c if (!n || startbit >= n->startbit + EBITMAP_SIZE) { n 416 security/selinux/ss/ebitmap.c if (n) n 417 security/selinux/ss/ebitmap.c n->next = tmp; n 420 security/selinux/ss/ebitmap.c n = tmp; n 421 security/selinux/ss/ebitmap.c } else if (startbit <= n->startbit) { n 424 security/selinux/ss/ebitmap.c startbit, n->startbit); n 435 security/selinux/ss/ebitmap.c index = (startbit - n->startbit) / EBITMAP_UNIT_SIZE; n 437 security/selinux/ss/ebitmap.c n->maps[index++] = map & (-1UL); n 454 security/selinux/ss/ebitmap.c struct ebitmap_node *n; n 465 security/selinux/ss/ebitmap.c ebitmap_for_each_positive_bit(e, n, bit) { n 481 security/selinux/ss/ebitmap.c ebitmap_for_each_positive_bit(e, n, bit) { n 48 security/selinux/ss/ebitmap.h struct ebitmap_node **n) n 52 security/selinux/ss/ebitmap.h for (*n = e->node; *n; *n = (*n)->next) { n 53 security/selinux/ss/ebitmap.h ofs = find_first_bit((*n)->maps, EBITMAP_SIZE); n 55 security/selinux/ss/ebitmap.h return (*n)->startbit + ofs; n 66 
security/selinux/ss/ebitmap.h struct ebitmap_node **n, n 71 security/selinux/ss/ebitmap.h ofs = find_next_bit((*n)->maps, EBITMAP_SIZE, bit - (*n)->startbit + 1); n 73 security/selinux/ss/ebitmap.h return ofs + (*n)->startbit; n 75 security/selinux/ss/ebitmap.h for (*n = (*n)->next; *n; *n = (*n)->next) { n 76 security/selinux/ss/ebitmap.h ofs = find_first_bit((*n)->maps, EBITMAP_SIZE); n 78 security/selinux/ss/ebitmap.h return ofs + (*n)->startbit; n 88 security/selinux/ss/ebitmap.h static inline int ebitmap_node_get_bit(struct ebitmap_node *n, n 91 security/selinux/ss/ebitmap.h unsigned int index = EBITMAP_NODE_INDEX(n, bit); n 92 security/selinux/ss/ebitmap.h unsigned int ofs = EBITMAP_NODE_OFFSET(n, bit); n 95 security/selinux/ss/ebitmap.h if ((n->maps[index] & (EBITMAP_BIT << ofs))) n 100 security/selinux/ss/ebitmap.h static inline void ebitmap_node_set_bit(struct ebitmap_node *n, n 103 security/selinux/ss/ebitmap.h unsigned int index = EBITMAP_NODE_INDEX(n, bit); n 104 security/selinux/ss/ebitmap.h unsigned int ofs = EBITMAP_NODE_OFFSET(n, bit); n 107 security/selinux/ss/ebitmap.h n->maps[index] |= (EBITMAP_BIT << ofs); n 110 security/selinux/ss/ebitmap.h static inline void ebitmap_node_clr_bit(struct ebitmap_node *n, n 113 security/selinux/ss/ebitmap.h unsigned int index = EBITMAP_NODE_INDEX(n, bit); n 114 security/selinux/ss/ebitmap.h unsigned int ofs = EBITMAP_NODE_OFFSET(n, bit); n 117 security/selinux/ss/ebitmap.h n->maps[index] &= ~(EBITMAP_BIT << ofs); n 120 security/selinux/ss/ebitmap.h #define ebitmap_for_each_positive_bit(e, n, bit) \ n 121 security/selinux/ss/ebitmap.h for (bit = ebitmap_start_positive(e, &n); \ n 123 security/selinux/ss/ebitmap.h bit = ebitmap_next_positive(e, &n, bit)) \ n 211 security/selinux/ss/services.c unsigned int i, n = mapping->num_perms; n 214 security/selinux/ss/services.c for (i = 0, result = 0; i < n; i++) { n 222 security/selinux/ss/services.c for (i = 0, result = 0; i < n; i++) n 227 security/selinux/ss/services.c for (i = 0, result = 0; i < n; i++) { n 725 security/selinux/ss/services.c char *o = NULL, *n = NULL, *t = NULL; n 730 security/selinux/ss/services.c if (context_struct_to_string(p, ncontext, &n, &nlen)) n 737 security/selinux/ss/services.c o, n, t, sym_name(p, SYM_CLASSES, tclass-1)); n 740 security/selinux/ss/services.c kfree(n); n 1583 security/selinux/ss/services.c char *s = NULL, *t = NULL, *n = NULL; n 1591 security/selinux/ss/services.c if (context_struct_to_string(policydb, newcontext, &n, &nlen)) n 1597 security/selinux/ss/services.c audit_log_n_untrustedstring(ab, n, nlen - 1); n 1604 security/selinux/ss/services.c kfree(n); n 1924 security/smack/smack_lsm.c struct list_head *n; n 1928 security/smack/smack_lsm.c list_for_each_safe(l, n, &tsp->smk_rules) { n 214 sound/aoa/codecs/onyx.c u8 v, n; n 221 sound/aoa/codecs/onyx.c n = v; n 222 sound/aoa/codecs/onyx.c n &= ~ONYX_ADC_PGA_GAIN_MASK; n 223 sound/aoa/codecs/onyx.c n |= (ucontrol->value.integer.value[0] - INPUTGAIN_RANGE_SHIFT) n 225 sound/aoa/codecs/onyx.c onyx_write_register(onyx, ONYX_REG_ADC_CONTROL, n); n 228 sound/aoa/codecs/onyx.c return n != v; n 415 sound/aoa/codecs/onyx.c #define SINGLE_BIT(n, type, description, address, mask, flags) \ n 416 sound/aoa/codecs/onyx.c static struct snd_kcontrol_new n##_control = { \ n 924 sound/aoa/codecs/onyx.c #define ADDCTL(n) \ n 926 sound/aoa/codecs/onyx.c ctl = snd_ctl_new1(&n, onyx); \ n 371 sound/aoa/codecs/tas.c #define MIXER_CONTROL(n,descr,idx) \ n 372 sound/aoa/codecs/tas.c static struct snd_kcontrol_new n##_control 
= { \ n 639 sound/aoa/fabrics/layout.c #define AMP_CONTROL(n, description) \ n 640 sound/aoa/fabrics/layout.c static int n##_control_get(struct snd_kcontrol *kcontrol, \ n 644 sound/aoa/fabrics/layout.c if (gpio->methods && gpio->methods->get_##n) \ n 646 sound/aoa/fabrics/layout.c gpio->methods->get_##n(gpio); \ n 649 sound/aoa/fabrics/layout.c static int n##_control_put(struct snd_kcontrol *kcontrol, \ n 653 sound/aoa/fabrics/layout.c if (gpio->methods && gpio->methods->set_##n) \ n 654 sound/aoa/fabrics/layout.c gpio->methods->set_##n(gpio, \ n 658 sound/aoa/fabrics/layout.c static struct snd_kcontrol_new n##_ctl = { \ n 663 sound/aoa/fabrics/layout.c .get = n##_control_get, \ n 664 sound/aoa/fabrics/layout.c .put = n##_control_put, \ n 12 sound/aoa/soundbus/i2sbus/interface.h #define __PAD(m,n) u8 __pad##m[n] n 13 sound/aoa/soundbus/i2sbus/interface.h #define _PAD(line, n) __PAD(line, n) n 14 sound/aoa/soundbus/i2sbus/interface.h #define PAD(n) _PAD(__LINE__, (n)) n 769 sound/core/info.c struct snd_info_entry *p, *n; n 780 sound/core/info.c list_for_each_entry_safe(p, n, &entry->children, list) n 839 sound/core/oss/pcm_oss.c int n; n 932 sound/core/oss/pcm_oss.c n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size); n 933 sound/core/oss/pcm_oss.c err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL); n 2186 sound/core/oss/pcm_oss.c snd_pcm_sframes_t n; n 2188 sound/core/oss/pcm_oss.c n = delay - runtime->oss.prev_hw_ptr_period; n 2189 sound/core/oss/pcm_oss.c if (n < 0) n 2190 sound/core/oss/pcm_oss.c n += runtime->boundary; n 2191 sound/core/oss/pcm_oss.c info.blocks = n / runtime->period_size; n 52 sound/core/pcm_lib.c snd_pcm_sframes_t noise_dist, n; n 55 sound/core/pcm_lib.c n = appl_ptr - runtime->silence_start; n 56 sound/core/pcm_lib.c if (n < 0) n 57 sound/core/pcm_lib.c n += runtime->boundary; n 58 sound/core/pcm_lib.c if ((snd_pcm_uframes_t)n < runtime->silence_filled) n 59 sound/core/pcm_lib.c runtime->silence_filled -= n; n 545 sound/core/pcm_lib.c u_int64_t n = (u_int64_t) a * b; n 550 sound/core/pcm_lib.c n = div_u64_rem(n, c, r); n 551 sound/core/pcm_lib.c if (n >= UINT_MAX) { n 555 sound/core/pcm_lib.c return n; n 1075 sound/core/pcm_lib.c unsigned int n; n 1077 sound/core/pcm_lib.c n = i->min % step; n 1078 sound/core/pcm_lib.c if (n != 0 || i->openmin) { n 1079 sound/core/pcm_lib.c i->min += step - n; n 1083 sound/core/pcm_lib.c n = i->max % step; n 1084 sound/core/pcm_lib.c if (n != 0 || i->openmax) { n 1085 sound/core/pcm_lib.c i->max -= n; n 2749 sound/core/pcm_native.c snd_pcm_sframes_t n = 0; n 2754 sound/core/pcm_native.c n = snd_pcm_calc_delay(substream); n 2757 sound/core/pcm_native.c *delay = n; n 99 sound/core/seq/oss/seq_oss_event.c return note_off_event(dp, 0, q->n.chn, q->n.note, q->n.vel, ev); n 102 sound/core/seq/oss/seq_oss_event.c return note_on_event(dp, 0, q->n.chn, q->n.note, q->n.vel, ev); n 110 sound/core/seq/oss/seq_oss_event.c q->n.chn, 0, q->n.note, ev); n 81 sound/core/seq/oss/seq_oss_event.h struct evrec_note n; n 589 sound/core/seq/seq_midi_emul.c int n; n 593 sound/core/seq/seq_midi_emul.c for (n = 0; n < 128; n++) { n 594 sound/core/seq/seq_midi_emul.c if (chan->note[n]) { n 595 sound/core/seq/seq_midi_emul.c ops->note_terminate(drv, n, chan); n 596 sound/core/seq/seq_midi_emul.c chan->note[n] = 0; n 607 sound/core/seq/seq_midi_emul.c int n; n 611 sound/core/seq/seq_midi_emul.c for (n = 0; n < 128; n++) { n 612 sound/core/seq/seq_midi_emul.c if (chan->note[n] == 
SNDRV_MIDI_NOTE_ON) n 613 sound/core/seq/seq_midi_emul.c note_off(ops, drv, chan, n, 0); n 620 sound/core/seq/seq_midi_emul.c static void snd_midi_channel_init(struct snd_midi_channel *p, int n) n 627 sound/core/seq/seq_midi_emul.c p->number = n; n 634 sound/core/seq/seq_midi_emul.c if (n == 9) n 641 sound/core/seq/seq_midi_emul.c static struct snd_midi_channel *snd_midi_channel_init_set(int n) n 646 sound/core/seq/seq_midi_emul.c chan = kmalloc_array(n, sizeof(struct snd_midi_channel), GFP_KERNEL); n 648 sound/core/seq/seq_midi_emul.c for (i = 0; i < n; i++) n 680 sound/core/seq/seq_midi_emul.c struct snd_midi_channel_set *snd_midi_channel_alloc_set(int n) n 686 sound/core/seq/seq_midi_emul.c chset->channels = snd_midi_channel_init_set(n); n 688 sound/core/seq/seq_midi_emul.c chset->max_channels = n; n 212 sound/core/seq/seq_ports.c struct list_head *p, *n; n 214 sound/core/seq/seq_ports.c list_for_each_safe(p, n, &grp->list_head) { n 965 sound/core/timer.c struct list_head *p, *n; n 968 sound/core/timer.c list_for_each_safe(p, n, &timer->open_list_head) { n 2168 sound/core/timer.c struct snd_timer *timer, *n; n 2170 sound/core/timer.c list_for_each_entry_safe(timer, n, &snd_timer_list, device_list) n 358 sound/core/vmaster.c struct link_slave *slave, *n; n 361 sound/core/vmaster.c list_for_each_entry_safe(slave, n, &master->slaves, list) { n 49 sound/drivers/opl3/opl3_midi.c int oldvol, newvol, n; n 64 sound/drivers/opl3/opl3_midi.c n = OPL3_TOTAL_LEVEL_MASK - (newvol & OPL3_TOTAL_LEVEL_MASK); n 66 sound/drivers/opl3/opl3_midi.c *volbyte = (*volbyte & OPL3_KSL_MASK) | (n & OPL3_TOTAL_LEVEL_MASK); n 502 sound/hda/hdac_device.c hda_nid_t val, n; n 528 sound/hda/hdac_device.c for (n = prev_nid + 1; n <= val; n++) { n 532 sound/hda/hdac_device.c conn_list[conns] = n; n 52 sound/isa/ad1848/ad1848.c static int snd_ad1848_match(struct device *dev, unsigned int n) n 54 sound/isa/ad1848/ad1848.c if (!enable[n]) n 57 sound/isa/ad1848/ad1848.c if (port[n] == SNDRV_AUTO_PORT) { n 61 sound/isa/ad1848/ad1848.c if (irq[n] == SNDRV_AUTO_IRQ) { n 65 sound/isa/ad1848/ad1848.c if (dma1[n] == SNDRV_AUTO_DMA) { n 72 sound/isa/ad1848/ad1848.c static int snd_ad1848_probe(struct device *dev, unsigned int n) n 78 sound/isa/ad1848/ad1848.c error = snd_card_new(dev, index[n], id[n], THIS_MODULE, 0, &card); n 82 sound/isa/ad1848/ad1848.c error = snd_wss_create(card, port[n], -1, irq[n], dma1[n], -1, n 83 sound/isa/ad1848/ad1848.c thinkpad[n] ? 
WSS_HW_THINKPAD : WSS_HW_DETECT, n 101 sound/isa/ad1848/ad1848.c if (!thinkpad[n]) n 104 sound/isa/ad1848/ad1848.c chip->pcm->name, chip->port, irq[n], dma1[n]); n 108 sound/isa/ad1848/ad1848.c chip->pcm->name, chip->port, irq[n], dma1[n]); n 121 sound/isa/ad1848/ad1848.c static int snd_ad1848_remove(struct device *dev, unsigned int n) n 128 sound/isa/ad1848/ad1848.c static int snd_ad1848_suspend(struct device *dev, unsigned int n, pm_message_t state) n 138 sound/isa/ad1848/ad1848.c static int snd_ad1848_resume(struct device *dev, unsigned int n) n 34 sound/isa/adlib.c static int snd_adlib_match(struct device *dev, unsigned int n) n 36 sound/isa/adlib.c if (!enable[n]) n 39 sound/isa/adlib.c if (port[n] == SNDRV_AUTO_PORT) { n 51 sound/isa/adlib.c static int snd_adlib_probe(struct device *dev, unsigned int n) n 57 sound/isa/adlib.c error = snd_card_new(dev, index[n], id[n], THIS_MODULE, 0, &card); n 63 sound/isa/adlib.c card->private_data = request_region(port[n], 4, CRD_NAME); n 73 sound/isa/adlib.c sprintf(card->longname, CRD_NAME " at %#lx", port[n]); n 75 sound/isa/adlib.c error = snd_opl3_create(card, port[n], port[n] + 2, OPL3_HW_AUTO, 1, &opl3); n 100 sound/isa/adlib.c static int snd_adlib_remove(struct device *dev, unsigned int n) n 427 sound/isa/cmi8328.c static int snd_cmi8328_suspend(struct device *pdev, unsigned int n, n 443 sound/isa/cmi8328.c static int snd_cmi8328_resume(struct device *pdev, unsigned int n) n 643 sound/isa/cmi8330.c static int snd_cmi8330_isa_suspend(struct device *dev, unsigned int n, n 649 sound/isa/cmi8330.c static int snd_cmi8330_isa_resume(struct device *dev, unsigned int n) n 57 sound/isa/cs423x/cs4231.c static int snd_cs4231_match(struct device *dev, unsigned int n) n 59 sound/isa/cs423x/cs4231.c if (!enable[n]) n 62 sound/isa/cs423x/cs4231.c if (port[n] == SNDRV_AUTO_PORT) { n 66 sound/isa/cs423x/cs4231.c if (irq[n] == SNDRV_AUTO_IRQ) { n 70 sound/isa/cs423x/cs4231.c if (dma1[n] == SNDRV_AUTO_DMA) { n 77 sound/isa/cs423x/cs4231.c static int snd_cs4231_probe(struct device *dev, unsigned int n) n 83 sound/isa/cs423x/cs4231.c error = snd_card_new(dev, index[n], id[n], THIS_MODULE, 0, &card); n 87 sound/isa/cs423x/cs4231.c error = snd_wss_create(card, port[n], -1, irq[n], dma1[n], dma2[n], n 101 sound/isa/cs423x/cs4231.c if (dma2[n] < 0) n 104 sound/isa/cs423x/cs4231.c chip->pcm->name, chip->port, irq[n], dma1[n]); n 108 sound/isa/cs423x/cs4231.c chip->pcm->name, chip->port, irq[n], dma1[n], dma2[n]); n 118 sound/isa/cs423x/cs4231.c if (mpu_port[n] > 0 && mpu_port[n] != SNDRV_AUTO_PORT) { n 119 sound/isa/cs423x/cs4231.c if (mpu_irq[n] == SNDRV_AUTO_IRQ) n 120 sound/isa/cs423x/cs4231.c mpu_irq[n] = -1; n 122 sound/isa/cs423x/cs4231.c mpu_port[n], 0, mpu_irq[n], n 138 sound/isa/cs423x/cs4231.c static int snd_cs4231_remove(struct device *dev, unsigned int n) n 145 sound/isa/cs423x/cs4231.c static int snd_cs4231_suspend(struct device *dev, unsigned int n, pm_message_t state) n 155 sound/isa/cs423x/cs4231.c static int snd_cs4231_resume(struct device *dev, unsigned int n) n 514 sound/isa/cs423x/cs4236.c static int snd_cs423x_isa_suspend(struct device *dev, unsigned int n, n 520 sound/isa/cs423x/cs4236.c static int snd_cs423x_isa_resume(struct device *dev, unsigned int n) n 78 sound/isa/es1688/es1688.c static int snd_es1688_match(struct device *dev, unsigned int n) n 80 sound/isa/es1688/es1688.c return enable[n] && !is_isapnp_selected(n); n 84 sound/isa/es1688/es1688.c struct device *dev, unsigned int n) n 93 sound/isa/es1688/es1688.c if (irq[n] == 
SNDRV_AUTO_IRQ) { n 94 sound/isa/es1688/es1688.c irq[n] = snd_legacy_find_free_irq(possible_irqs); n 95 sound/isa/es1688/es1688.c if (irq[n] < 0) { n 100 sound/isa/es1688/es1688.c if (dma8[n] == SNDRV_AUTO_DMA) { n 101 sound/isa/es1688/es1688.c dma8[n] = snd_legacy_find_free_dma(possible_dmas); n 102 sound/isa/es1688/es1688.c if (dma8[n] < 0) { n 108 sound/isa/es1688/es1688.c if (port[n] != SNDRV_AUTO_PORT) n 109 sound/isa/es1688/es1688.c return snd_es1688_create(card, chip, port[n], mpu_port[n], n 110 sound/isa/es1688/es1688.c irq[n], mpu_irq[n], dma8[n], ES1688_HW_AUTO); n 114 sound/isa/es1688/es1688.c port[n] = possible_ports[i]; n 115 sound/isa/es1688/es1688.c error = snd_es1688_create(card, chip, port[n], mpu_port[n], n 116 sound/isa/es1688/es1688.c irq[n], mpu_irq[n], dma8[n], ES1688_HW_AUTO); n 122 sound/isa/es1688/es1688.c static int snd_es1688_probe(struct snd_card *card, unsigned int n) n 142 sound/isa/es1688/es1688.c if (fm_port[n] == SNDRV_AUTO_PORT) n 143 sound/isa/es1688/es1688.c fm_port[n] = port[n]; /* share the same port */ n 145 sound/isa/es1688/es1688.c if (fm_port[n] > 0) { n 146 sound/isa/es1688/es1688.c if (snd_opl3_create(card, fm_port[n], fm_port[n] + 2, n 149 sound/isa/es1688/es1688.c "opl3 not detected at 0x%lx\n", fm_port[n]); n 157 sound/isa/es1688/es1688.c if (mpu_irq[n] >= 0 && mpu_irq[n] != SNDRV_AUTO_IRQ && n 161 sound/isa/es1688/es1688.c mpu_irq[n], NULL); n 169 sound/isa/es1688/es1688.c static int snd_es1688_isa_probe(struct device *dev, unsigned int n) n 174 sound/isa/es1688/es1688.c error = snd_card_new(dev, index[n], id[n], THIS_MODULE, n 179 sound/isa/es1688/es1688.c error = snd_es1688_legacy_create(card, dev, n); n 183 sound/isa/es1688/es1688.c error = snd_es1688_probe(card, n); n 195 sound/isa/es1688/es1688.c static int snd_es1688_isa_remove(struct device *dev, unsigned int n) n 217 sound/isa/es1688/es1688.c static int snd_card_es968_pnp(struct snd_card *card, unsigned int n, n 234 sound/isa/es1688/es1688.c port[n] = pnp_port_start(pdev, 0); n 235 sound/isa/es1688/es1688.c dma8[n] = pnp_dma(pdev, 0); n 236 sound/isa/es1688/es1688.c irq[n] = pnp_irq(pdev, 0); n 238 sound/isa/es1688/es1688.c return snd_es1688_create(card, chip, port[n], mpu_port[n], irq[n], n 239 sound/isa/es1688/es1688.c mpu_irq[n], dma8[n], ES1688_HW_AUTO); n 2236 sound/isa/es18xx.c static int snd_es18xx_isa_suspend(struct device *dev, unsigned int n, n 2242 sound/isa/es18xx.c static int snd_es18xx_isa_resume(struct device *dev, unsigned int n) n 194 sound/isa/galaxy/galaxy.c static int snd_galaxy_match(struct device *dev, unsigned int n) n 196 sound/isa/galaxy/galaxy.c if (!enable[n]) n 199 sound/isa/galaxy/galaxy.c switch (port[n]) { n 204 sound/isa/galaxy/galaxy.c config[n] |= GALAXY_CONFIG_SBA_220; n 207 sound/isa/galaxy/galaxy.c config[n] |= GALAXY_CONFIG_SBA_240; n 210 sound/isa/galaxy/galaxy.c config[n] |= GALAXY_CONFIG_SBA_260; n 213 sound/isa/galaxy/galaxy.c config[n] |= GALAXY_CONFIG_SBA_280; n 216 sound/isa/galaxy/galaxy.c dev_err(dev, "invalid port %#lx\n", port[n]); n 220 sound/isa/galaxy/galaxy.c switch (wss_port[n]) { n 225 sound/isa/galaxy/galaxy.c config[n] |= GALAXY_CONFIG_WSS_ENABLE | GALAXY_CONFIG_WSSA_530; n 228 sound/isa/galaxy/galaxy.c config[n] |= GALAXY_CONFIG_WSS_ENABLE | GALAXY_CONFIG_WSSA_604; n 231 sound/isa/galaxy/galaxy.c config[n] |= GALAXY_CONFIG_WSS_ENABLE | GALAXY_CONFIG_WSSA_E80; n 234 sound/isa/galaxy/galaxy.c config[n] |= GALAXY_CONFIG_WSS_ENABLE | GALAXY_CONFIG_WSSA_F40; n 237 sound/isa/galaxy/galaxy.c dev_err(dev, "invalid WSS port %#lx\n", 
wss_port[n]); n 241 sound/isa/galaxy/galaxy.c switch (irq[n]) { n 246 sound/isa/galaxy/galaxy.c wss_config[n] |= WSS_CONFIG_IRQ_7; n 249 sound/isa/galaxy/galaxy.c irq[n] = 9; n 252 sound/isa/galaxy/galaxy.c wss_config[n] |= WSS_CONFIG_IRQ_9; n 255 sound/isa/galaxy/galaxy.c wss_config[n] |= WSS_CONFIG_IRQ_10; n 258 sound/isa/galaxy/galaxy.c wss_config[n] |= WSS_CONFIG_IRQ_11; n 261 sound/isa/galaxy/galaxy.c dev_err(dev, "invalid IRQ %d\n", irq[n]); n 265 sound/isa/galaxy/galaxy.c switch (dma1[n]) { n 270 sound/isa/galaxy/galaxy.c wss_config[n] |= WSS_CONFIG_DMA_0; n 273 sound/isa/galaxy/galaxy.c wss_config[n] |= WSS_CONFIG_DMA_1; n 276 sound/isa/galaxy/galaxy.c wss_config[n] |= WSS_CONFIG_DMA_3; n 279 sound/isa/galaxy/galaxy.c dev_err(dev, "invalid playback DMA %d\n", dma1[n]); n 283 sound/isa/galaxy/galaxy.c if (dma2[n] == SNDRV_AUTO_DMA || dma2[n] == dma1[n]) { n 284 sound/isa/galaxy/galaxy.c dma2[n] = -1; n 288 sound/isa/galaxy/galaxy.c wss_config[n] |= WSS_CONFIG_DUPLEX; n 289 sound/isa/galaxy/galaxy.c switch (dma2[n]) { n 293 sound/isa/galaxy/galaxy.c if (dma1[n] == 0) n 297 sound/isa/galaxy/galaxy.c dev_err(dev, "invalid capture DMA %d\n", dma2[n]); n 302 sound/isa/galaxy/galaxy.c switch (mpu_port[n]) { n 305 sound/isa/galaxy/galaxy.c mpu_port[n] = -1; n 308 sound/isa/galaxy/galaxy.c config[n] |= GALAXY_CONFIG_MPU_ENABLE | GALAXY_CONFIG_MPUA_300; n 311 sound/isa/galaxy/galaxy.c config[n] |= GALAXY_CONFIG_MPU_ENABLE | GALAXY_CONFIG_MPUA_330; n 314 sound/isa/galaxy/galaxy.c dev_err(dev, "invalid MPU port %#lx\n", mpu_port[n]); n 318 sound/isa/galaxy/galaxy.c switch (mpu_irq[n]) { n 321 sound/isa/galaxy/galaxy.c mpu_irq[n] = -1; n 324 sound/isa/galaxy/galaxy.c mpu_irq[n] = 9; n 327 sound/isa/galaxy/galaxy.c config[n] |= GALAXY_CONFIG_MPUIRQ_2; n 331 sound/isa/galaxy/galaxy.c config[n] |= GALAXY_CONFIG_MPUIRQ_3; n 335 sound/isa/galaxy/galaxy.c config[n] |= GALAXY_CONFIG_MPUIRQ_5; n 338 sound/isa/galaxy/galaxy.c config[n] |= GALAXY_CONFIG_MPUIRQ_7; n 342 sound/isa/galaxy/galaxy.c config[n] |= GALAXY_CONFIG_MPUIRQ_10; n 346 sound/isa/galaxy/galaxy.c dev_err(dev, "invalid MPU IRQ %d\n", mpu_irq[n]); n 350 sound/isa/galaxy/galaxy.c if (mpu_irq[n] == irq[n]) { n 356 sound/isa/galaxy/galaxy.c switch (fm_port[n]) { n 359 sound/isa/galaxy/galaxy.c fm_port[n] = -1; n 364 sound/isa/galaxy/galaxy.c dev_err(dev, "illegal FM port %#lx\n", fm_port[n]); n 368 sound/isa/galaxy/galaxy.c config[n] |= GALAXY_CONFIG_GAME_ENABLE; n 491 sound/isa/galaxy/galaxy.c static int snd_galaxy_probe(struct device *dev, unsigned int n) n 499 sound/isa/galaxy/galaxy.c err = snd_card_new(dev, index[n], id[n], THIS_MODULE, n 507 sound/isa/galaxy/galaxy.c galaxy->res_port = request_region(port[n], 16, DRV_NAME); n 509 sound/isa/galaxy/galaxy.c dev_err(dev, "could not grab ports %#lx-%#lx\n", port[n], n 510 sound/isa/galaxy/galaxy.c port[n] + 15); n 514 sound/isa/galaxy/galaxy.c galaxy->port = ioport_map(port[n], 16); n 518 sound/isa/galaxy/galaxy.c dev_err(dev, "did not find a Sound Galaxy at %#lx\n", port[n]); n 521 sound/isa/galaxy/galaxy.c dev_info(dev, "Sound Galaxy (type %d) found at %#lx\n", type, port[n]); n 523 sound/isa/galaxy/galaxy.c galaxy->res_config_port = request_region(port[n] + GALAXY_PORT_CONFIG, n 527 sound/isa/galaxy/galaxy.c port[n] + GALAXY_PORT_CONFIG, n 528 sound/isa/galaxy/galaxy.c port[n] + GALAXY_PORT_CONFIG + 15); n 532 sound/isa/galaxy/galaxy.c galaxy->config_port = ioport_map(port[n] + GALAXY_PORT_CONFIG, 16); n 534 sound/isa/galaxy/galaxy.c galaxy_config(galaxy, config[n]); n 536 
sound/isa/galaxy/galaxy.c galaxy->res_wss_port = request_region(wss_port[n], 4, DRV_NAME); n 538 sound/isa/galaxy/galaxy.c dev_err(dev, "could not grab ports %#lx-%#lx\n", wss_port[n], n 539 sound/isa/galaxy/galaxy.c wss_port[n] + 3); n 543 sound/isa/galaxy/galaxy.c galaxy->wss_port = ioport_map(wss_port[n], 4); n 545 sound/isa/galaxy/galaxy.c err = galaxy_wss_config(galaxy, wss_config[n]); n 554 sound/isa/galaxy/galaxy.c card->shortname, port[n], wss_port[n], irq[n], dma1[n], n 555 sound/isa/galaxy/galaxy.c dma2[n]); n 557 sound/isa/galaxy/galaxy.c err = snd_wss_create(card, wss_port[n] + 4, -1, irq[n], dma1[n], n 558 sound/isa/galaxy/galaxy.c dma2[n], WSS_HW_DETECT, 0, &chip); n 574 sound/isa/galaxy/galaxy.c if (mpu_port[n] >= 0) { n 576 sound/isa/galaxy/galaxy.c mpu_port[n], 0, mpu_irq[n], NULL); n 581 sound/isa/galaxy/galaxy.c if (fm_port[n] >= 0) { n 584 sound/isa/galaxy/galaxy.c err = snd_opl3_create(card, fm_port[n], fm_port[n] + 2, n 587 sound/isa/galaxy/galaxy.c dev_err(dev, "no OPL device at %#lx\n", fm_port[n]); n 611 sound/isa/galaxy/galaxy.c static int snd_galaxy_remove(struct device *dev, unsigned int n) n 61 sound/isa/gus/gusclassic.c static int snd_gusclassic_match(struct device *dev, unsigned int n) n 63 sound/isa/gus/gusclassic.c return enable[n]; n 67 sound/isa/gus/gusclassic.c struct device *dev, unsigned int n, n 76 sound/isa/gus/gusclassic.c if (irq[n] == SNDRV_AUTO_IRQ) { n 77 sound/isa/gus/gusclassic.c irq[n] = snd_legacy_find_free_irq(possible_irqs); n 78 sound/isa/gus/gusclassic.c if (irq[n] < 0) { n 83 sound/isa/gus/gusclassic.c if (dma1[n] == SNDRV_AUTO_DMA) { n 84 sound/isa/gus/gusclassic.c dma1[n] = snd_legacy_find_free_dma(possible_dmas); n 85 sound/isa/gus/gusclassic.c if (dma1[n] < 0) { n 90 sound/isa/gus/gusclassic.c if (dma2[n] == SNDRV_AUTO_DMA) { n 91 sound/isa/gus/gusclassic.c dma2[n] = snd_legacy_find_free_dma(possible_dmas); n 92 sound/isa/gus/gusclassic.c if (dma2[n] < 0) { n 98 sound/isa/gus/gusclassic.c if (port[n] != SNDRV_AUTO_PORT) n 99 sound/isa/gus/gusclassic.c return snd_gus_create(card, port[n], irq[n], dma1[n], dma2[n], n 100 sound/isa/gus/gusclassic.c 0, channels[n], pcm_channels[n], 0, rgus); n 104 sound/isa/gus/gusclassic.c port[n] = possible_ports[i]; n 105 sound/isa/gus/gusclassic.c error = snd_gus_create(card, port[n], irq[n], dma1[n], dma2[n], n 106 sound/isa/gus/gusclassic.c 0, channels[n], pcm_channels[n], 0, rgus); n 131 sound/isa/gus/gusclassic.c static int snd_gusclassic_probe(struct device *dev, unsigned int n) n 137 sound/isa/gus/gusclassic.c error = snd_card_new(dev, index[n], id[n], THIS_MODULE, 0, &card); n 141 sound/isa/gus/gusclassic.c if (pcm_channels[n] < 2) n 142 sound/isa/gus/gusclassic.c pcm_channels[n] = 2; n 144 sound/isa/gus/gusclassic.c error = snd_gusclassic_create(card, dev, n, &gus); n 152 sound/isa/gus/gusclassic.c gus->joystick_dac = joystick_dac[n]; n 198 sound/isa/gus/gusclassic.c static int snd_gusclassic_remove(struct device *dev, unsigned int n) n 77 sound/isa/gus/gusextreme.c static int snd_gusextreme_match(struct device *dev, unsigned int n) n 79 sound/isa/gus/gusextreme.c return enable[n]; n 84 sound/isa/gus/gusextreme.c struct device *dev, unsigned int n) n 92 sound/isa/gus/gusextreme.c if (irq[n] == SNDRV_AUTO_IRQ) { n 93 sound/isa/gus/gusextreme.c irq[n] = snd_legacy_find_free_irq(possible_irqs); n 94 sound/isa/gus/gusextreme.c if (irq[n] < 0) { n 99 sound/isa/gus/gusextreme.c if (dma8[n] == SNDRV_AUTO_DMA) { n 100 sound/isa/gus/gusextreme.c dma8[n] = snd_legacy_find_free_dma(possible_dmas); n 
101 sound/isa/gus/gusextreme.c if (dma8[n] < 0) { n 107 sound/isa/gus/gusextreme.c if (port[n] != SNDRV_AUTO_PORT) n 108 sound/isa/gus/gusextreme.c return snd_es1688_create(card, chip, port[n], mpu_port[n], n 109 sound/isa/gus/gusextreme.c irq[n], mpu_irq[n], dma8[n], ES1688_HW_1688); n 113 sound/isa/gus/gusextreme.c port[n] = possible_ports[i]; n 114 sound/isa/gus/gusextreme.c error = snd_es1688_create(card, chip, port[n], mpu_port[n], n 115 sound/isa/gus/gusextreme.c irq[n], mpu_irq[n], dma8[n], ES1688_HW_1688); n 122 sound/isa/gus/gusextreme.c struct device *dev, unsigned int n, n 128 sound/isa/gus/gusextreme.c if (gf1_irq[n] == SNDRV_AUTO_IRQ) { n 129 sound/isa/gus/gusextreme.c gf1_irq[n] = snd_legacy_find_free_irq(possible_irqs); n 130 sound/isa/gus/gusextreme.c if (gf1_irq[n] < 0) { n 135 sound/isa/gus/gusextreme.c if (dma1[n] == SNDRV_AUTO_DMA) { n 136 sound/isa/gus/gusextreme.c dma1[n] = snd_legacy_find_free_dma(possible_dmas); n 137 sound/isa/gus/gusextreme.c if (dma1[n] < 0) { n 142 sound/isa/gus/gusextreme.c return snd_gus_create(card, gf1_port[n], gf1_irq[n], dma1[n], -1, n 143 sound/isa/gus/gusextreme.c 0, channels[n], pcm_channels[n], 0, rgus); n 222 sound/isa/gus/gusextreme.c static int snd_gusextreme_probe(struct device *dev, unsigned int n) n 230 sound/isa/gus/gusextreme.c error = snd_card_new(dev, index[n], id[n], THIS_MODULE, n 237 sound/isa/gus/gusextreme.c if (mpu_port[n] == SNDRV_AUTO_PORT) n 238 sound/isa/gus/gusextreme.c mpu_port[n] = 0; n 240 sound/isa/gus/gusextreme.c if (mpu_irq[n] > 15) n 241 sound/isa/gus/gusextreme.c mpu_irq[n] = -1; n 243 sound/isa/gus/gusextreme.c error = snd_gusextreme_es1688_create(card, es1688, dev, n); n 247 sound/isa/gus/gusextreme.c if (gf1_port[n] < 0) n 248 sound/isa/gus/gusextreme.c gf1_port[n] = es1688->port + 0x20; n 250 sound/isa/gus/gusextreme.c error = snd_gusextreme_gus_card_create(card, dev, n, &gus); n 258 sound/isa/gus/gusextreme.c gus->joystick_dac = joystick_dac[n]; n 282 sound/isa/gus/gusextreme.c if (pcm_channels[n] > 0) { n 307 sound/isa/gus/gusextreme.c es1688->mpu_port, 0, mpu_irq[n], NULL); n 327 sound/isa/gus/gusextreme.c static int snd_gusextreme_remove(struct device *dev, unsigned int n) n 281 sound/isa/msnd/msnd.c n = msnd_fifo_write(&chip->DARF, n 284 sound/isa/msnd/msnd.c if (n <= 0) { n 286 sound/isa/msnd/msnd.c return n; n 359 sound/isa/msnd/msnd.c int n; n 370 sound/isa/msnd/msnd.c for (n = 0; n < pcm_periods; ++n, pDAQ += DAQDS__size) { n 371 sound/isa/msnd/msnd.c writew(PCTODSP_BASED((u32)(pcm_count * n)), n 378 sound/isa/msnd/msnd.c writew(HIMT_PLAY_DONE * 0x100 + n, pDAQ + DAQDS_wIntMsg); n 379 sound/isa/msnd/msnd.c writew(n, pDAQ + DAQDS_wFlags); n 387 sound/isa/msnd/msnd.c int n; n 413 sound/isa/msnd/msnd.c for (n = 0; n < pcm_periods; ++n, pDAQ += DAQDS__size) { n 414 sound/isa/msnd/msnd.c u32 tmp = pcm_count * n; n 422 sound/isa/msnd/msnd.c writew(HIMT_RECORD_DONE * 0x100 + n, pDAQ + DAQDS_wIntMsg); n 423 sound/isa/msnd/msnd.c writew(n, pDAQ + DAQDS_wFlags); n 888 sound/isa/opl3sa2.c static int snd_opl3sa2_isa_suspend(struct device *dev, unsigned int n, n 894 sound/isa/opl3sa2.c static int snd_opl3sa2_isa_resume(struct device *dev, unsigned int n) n 92 sound/isa/opti9xx/miro.c #define OPTi9XX_MC_REG(n) n n 1380 sound/isa/opti9xx/miro.c static int snd_miro_isa_match(struct device *devptr, unsigned int n) n 1391 sound/isa/opti9xx/miro.c static int snd_miro_isa_probe(struct device *devptr, unsigned int n) n 105 sound/isa/opti9xx/opti92x-ad1848.c #define OPTi9XX_MC_REG(n) n n 1060 
sound/isa/opti9xx/opti92x-ad1848.c static int snd_opti9xx_isa_suspend(struct device *dev, unsigned int n, n 1066 sound/isa/opti9xx/opti92x-ad1848.c static int snd_opti9xx_isa_resume(struct device *dev, unsigned int n) n 351 sound/isa/sb/jazz16.c static int snd_jazz16_suspend(struct device *pdev, unsigned int n, n 363 sound/isa/sb/jazz16.c static int snd_jazz16_resume(struct device *pdev, unsigned int n) n 557 sound/isa/sb/sb16.c static int snd_sb16_isa_suspend(struct device *dev, unsigned int n, n 563 sound/isa/sb/sb16.c static int snd_sb16_isa_resume(struct device *dev, unsigned int n) n 202 sound/isa/sb/sb8.c static int snd_sb8_suspend(struct device *dev, unsigned int n, n 214 sound/isa/sb/sb8.c static int snd_sb8_resume(struct device *dev, unsigned int n) n 1360 sound/oss/dmasound/dmasound_core.c int n = state.len - state.ptr; n 1361 sound/oss/dmasound/dmasound_core.c if (n > count) n 1362 sound/oss/dmasound/dmasound_core.c n = count; n 1363 sound/oss/dmasound/dmasound_core.c if (n <= 0) n 1365 sound/oss/dmasound/dmasound_core.c if (copy_to_user(buf, &state.buf[state.ptr], n)) n 1367 sound/oss/dmasound/dmasound_core.c state.ptr += n; n 1368 sound/oss/dmasound/dmasound_core.c return n; n 368 sound/parisc/harmony.c int n; n 372 sound/parisc/harmony.c n = HARMONY_DF_16BIT_LINEAR; n 375 sound/parisc/harmony.c n = HARMONY_DF_8BIT_ALAW; n 378 sound/parisc/harmony.c n = HARMONY_DF_8BIT_ULAW; n 381 sound/parisc/harmony.c n = HARMONY_DF_16BIT_LINEAR; n 385 sound/parisc/harmony.c if (force || o != n) { n 391 sound/parisc/harmony.c return n; n 983 sound/pci/asihpi/hpi_internal.h struct hpi_profile_res_name n; n 1016 sound/pci/asihpi/hpi_internal.h struct hpi_nvmemory_msg n; n 1077 sound/pci/asihpi/hpi_internal.h struct hpi_nvmemory_res n; n 1185 sound/pci/asihpi/hpi_internal.h struct hpi_nvmemory_msg n; n 1204 sound/pci/asihpi/hpi_internal.h struct hpi_nvmemory_res n; n 1415 sound/pci/asihpi/hpifunc.c unsigned int n = 0; n 1447 sound/pci/asihpi/hpifunc.c n++; n 1448 sound/pci/asihpi/hpifunc.c if (n >= string_length) { n 57 sound/pci/au88x0/au88x0_eq.c int i = 0, n /*esp2c */; n 59 sound/pci/au88x0/au88x0_eq.c for (n = 0; n < eqhw->this04; n++) { n 60 sound/pci/au88x0/au88x0_eq.c hwwrite(vortex->mmio, 0x2b000 + n * 0x30, coefs[i + 0]); n 61 sound/pci/au88x0/au88x0_eq.c hwwrite(vortex->mmio, 0x2b004 + n * 0x30, coefs[i + 1]); n 64 sound/pci/au88x0/au88x0_eq.c hwwrite(vortex->mmio, 0x2b008 + n * 0x30, coefs[i + 2]); n 65 sound/pci/au88x0/au88x0_eq.c hwwrite(vortex->mmio, 0x2b00c + n * 0x30, coefs[i + 3]); n 66 sound/pci/au88x0/au88x0_eq.c hwwrite(vortex->mmio, 0x2b010 + n * 0x30, coefs[i + 4]); n 68 sound/pci/au88x0/au88x0_eq.c hwwrite(vortex->mmio, 0x2b008 + n * 0x30, sign_invert(coefs[2 + i])); n 69 sound/pci/au88x0/au88x0_eq.c hwwrite(vortex->mmio, 0x2b00c + n * 0x30, sign_invert(coefs[3 + i])); n 70 sound/pci/au88x0/au88x0_eq.c hwwrite(vortex->mmio, 0x2b010 + n * 0x30, sign_invert(coefs[4 + i])); n 79 sound/pci/au88x0/au88x0_eq.c int i = 0, n /*esp2c */; n 81 sound/pci/au88x0/au88x0_eq.c for (n = 0; n < eqhw->this04; n++) { n 82 sound/pci/au88x0/au88x0_eq.c hwwrite(vortex->mmio, 0x2b1e0 + n * 0x30, coefs[0 + i]); n 83 sound/pci/au88x0/au88x0_eq.c hwwrite(vortex->mmio, 0x2b1e4 + n * 0x30, coefs[1 + i]); n 86 sound/pci/au88x0/au88x0_eq.c hwwrite(vortex->mmio, 0x2b1e8 + n * 0x30, coefs[2 + i]); n 87 sound/pci/au88x0/au88x0_eq.c hwwrite(vortex->mmio, 0x2b1ec + n * 0x30, coefs[3 + i]); n 88 sound/pci/au88x0/au88x0_eq.c hwwrite(vortex->mmio, 0x2b1f0 + n * 0x30, coefs[4 + i]); n 90 
sound/pci/au88x0/au88x0_eq.c hwwrite(vortex->mmio, 0x2b1e8 + n * 0x30, sign_invert(coefs[2 + i])); n 91 sound/pci/au88x0/au88x0_eq.c hwwrite(vortex->mmio, 0x2b1ec + n * 0x30, sign_invert(coefs[3 + i])); n 92 sound/pci/au88x0/au88x0_eq.c hwwrite(vortex->mmio, 0x2b1f0 + n * 0x30, sign_invert(coefs[4 + i])); n 369 sound/pci/ca0106/ca0106_main.c int n, result; n 381 sound/pci/ca0106/ca0106_main.c for (n = 0; n < 100; n++) { n 1593 sound/pci/ca0106/ca0106_main.c int size, n; n 1597 sound/pci/ca0106/ca0106_main.c for (n = 0; n < size; n++) n 1598 sound/pci/ca0106/ca0106_main.c snd_ca0106_i2c_write(chip, i2c_adc_init[n][0], n 1599 sound/pci/ca0106/ca0106_main.c i2c_adc_init[n][1]); n 1600 sound/pci/ca0106/ca0106_main.c for (n = 0; n < 4; n++) { n 1601 sound/pci/ca0106/ca0106_main.c chip->i2c_capture_volume[n][0] = 0xcf; n 1602 sound/pci/ca0106/ca0106_main.c chip->i2c_capture_volume[n][1] = 0xcf; n 1611 sound/pci/ca0106/ca0106_main.c int size, n; n 1614 sound/pci/ca0106/ca0106_main.c for (n = 0; n < size; n++) { n 1615 sound/pci/ca0106/ca0106_main.c int reg = spi_dac_init[n] >> SPI_REG_SHIFT; n 1617 sound/pci/ca0106/ca0106_main.c snd_ca0106_spi_write(chip, spi_dac_init[n]); n 1619 sound/pci/ca0106/ca0106_main.c chip->spi_dac_reg[reg] = spi_dac_init[n]; n 609 sound/pci/cmipci.c static int snd_cmipci_pll_rmn(unsigned int rate, unsigned int adcmult, int *r, int *m, int *n) n 616 sound/pci/cmipci.c *n = -1; n 637 sound/pci/cmipci.c *n = xn - 2; n 642 sound/pci/cmipci.c return (*n > -1); n 26 sound/pci/ctxfi/ctresource.c int i, j, k, n; n 29 sound/pci/ctxfi/ctresource.c for (i = 0, n = multi; i < amount; i++) { n 33 sound/pci/ctxfi/ctresource.c n = multi; n 36 sound/pci/ctxfi/ctresource.c if (!(--n)) n 46 sound/pci/ctxfi/ctresource.c for (n = multi; n > 0; n--) { n 60 sound/pci/ctxfi/ctresource.c unsigned int i, j, k, n; n 63 sound/pci/ctxfi/ctresource.c for (n = multi, i = idx; n > 0; n--) { n 73 sound/pci/ctxfi/ctresource.c int mgr_get_resource(struct rsc_mgr *mgr, unsigned int n, unsigned int *ridx) n 77 sound/pci/ctxfi/ctresource.c if (n > mgr->avail) n 80 sound/pci/ctxfi/ctresource.c err = get_resource(mgr->rscs, mgr->amount, n, ridx); n 82 sound/pci/ctxfi/ctresource.c mgr->avail -= n; n 87 sound/pci/ctxfi/ctresource.c int mgr_put_resource(struct rsc_mgr *mgr, unsigned int n, unsigned int idx) n 89 sound/pci/ctxfi/ctresource.c put_resource(mgr->rscs, n, idx); n 90 sound/pci/ctxfi/ctresource.c mgr->avail += n; n 66 sound/pci/ctxfi/ctresource.h int mgr_get_resource(struct rsc_mgr *mgr, unsigned int n, unsigned int *ridx); n 67 sound/pci/ctxfi/ctresource.h int mgr_put_resource(struct rsc_mgr *mgr, unsigned int n, unsigned int idx); n 360 sound/pci/ctxfi/ctsrc.c int i, n; n 363 sound/pci/ctxfi/ctsrc.c n = (MEMRD == desc->mode) ? desc->multi : 1; n 364 sound/pci/ctxfi/ctsrc.c for (i = 0, p = src; i < n; i++, p++) { n 394 sound/pci/ctxfi/ctsrc.c int i, n; n 397 sound/pci/ctxfi/ctsrc.c n = (MEMRD == src->mode) ? 
src->multi : 1; n 398 sound/pci/ctxfi/ctsrc.c for (i = 0, p = src; i < n; i++, p++) { n 638 sound/pci/echoaudio/echoaudio_dsp.c int i, m, n; n 641 sound/pci/echoaudio/echoaudio_dsp.c n = 0; n 643 sound/pci/echoaudio/echoaudio_dsp.c meters[n++] = chip->comm_page->vu_meter[m]; n 644 sound/pci/echoaudio/echoaudio_dsp.c meters[n++] = chip->comm_page->peak_meter[m]; n 646 sound/pci/echoaudio/echoaudio_dsp.c for (; n < 32; n++) n 647 sound/pci/echoaudio/echoaudio_dsp.c meters[n] = 0; n 654 sound/pci/echoaudio/echoaudio_dsp.c meters[n++] = chip->comm_page->vu_meter[m]; n 655 sound/pci/echoaudio/echoaudio_dsp.c meters[n++] = chip->comm_page->peak_meter[m]; n 657 sound/pci/echoaudio/echoaudio_dsp.c for (; n < 64; n++) n 658 sound/pci/echoaudio/echoaudio_dsp.c meters[n] = 0; n 662 sound/pci/echoaudio/echoaudio_dsp.c meters[n++] = chip->comm_page->vu_meter[m]; n 663 sound/pci/echoaudio/echoaudio_dsp.c meters[n++] = chip->comm_page->peak_meter[m]; n 666 sound/pci/echoaudio/echoaudio_dsp.c for (; n < 96; n++) n 667 sound/pci/echoaudio/echoaudio_dsp.c meters[n] = 0; n 230 sound/pci/emu10k1/emu10k1_main.c int size, n; n 233 sound/pci/emu10k1/emu10k1_main.c for (n = 0; n < size; n++) n 234 sound/pci/emu10k1/emu10k1_main.c snd_emu10k1_spi_write(emu, spi_dac_init[n]); n 250 sound/pci/emu10k1/emu10k1_main.c int size, n; n 257 sound/pci/emu10k1/emu10k1_main.c for (n = 0; n < size; n++) n 258 sound/pci/emu10k1/emu10k1_main.c snd_emu10k1_i2c_write(emu, i2c_adc_init[n][0], i2c_adc_init[n][1]); n 259 sound/pci/emu10k1/emu10k1_main.c for (n = 0; n < 4; n++) { n 260 sound/pci/emu10k1/emu10k1_main.c emu->i2c_capture_volume[n][0] = 0xcf; n 261 sound/pci/emu10k1/emu10k1_main.c emu->i2c_capture_volume[n][1] = 0xcf; n 653 sound/pci/emu10k1/emu10k1_main.c int n, i; n 675 sound/pci/emu10k1/emu10k1_main.c for (n = 0; n < fw_entry->size; n++) { n 676 sound/pci/emu10k1/emu10k1_main.c value = fw_entry->data[n]; n 267 sound/pci/emu10k1/emuproc.c unsigned int val, tmp, n; n 269 sound/pci/emu10k1/emuproc.c for (n = 0; n < 4; n++) { n 270 sound/pci/emu10k1/emuproc.c tmp = val >> (16 + (n*4)); n 271 sound/pci/emu10k1/emuproc.c if (tmp & 0x8) snd_iprintf(buffer, "Channel %d: Rate=%d\n", n, samplerate[tmp & 0x7]); n 272 sound/pci/emu10k1/emuproc.c else snd_iprintf(buffer, "Channel %d: No input\n", n); n 125 sound/pci/emu10k1/io.c int n, result; n 152 sound/pci/emu10k1/io.c for (n = 0; n < 100; n++) { n 721 sound/pci/ens1370.c unsigned int n, truncm, freq; n 724 sound/pci/ens1370.c n = rate / 3000; n 725 sound/pci/ens1370.c if ((1 << n) & ((1 << 15) | (1 << 13) | (1 << 11) | (1 << 9))) n 726 sound/pci/ens1370.c n--; n 727 sound/pci/ens1370.c truncm = (21 * n - 1) | 1; n 728 sound/pci/ens1370.c freq = ((48000UL << 15) / rate) * n; n 733 sound/pci/ens1370.c (((239 - truncm) >> 1) << 9) | (n << 4)); n 738 sound/pci/ens1370.c 0x8000 | (((119 - truncm) >> 1) << 9) | (n << 4)); n 745 sound/pci/ens1370.c snd_es1371_src_write(ensoniq, ES_SMPREG_VOL_ADC, n << 8); n 746 sound/pci/ens1370.c snd_es1371_src_write(ensoniq, ES_SMPREG_VOL_ADC + 1, n << 8); n 2171 sound/pci/es1968.c u32 n; n 2267 sound/pci/es1968.c n = inl(iobase + ESM_RING_BUS_CONTR_B); n 2268 sound/pci/es1968.c n &= ~RINGB_EN_SPDIF; /* SPDIF off */ n 2270 sound/pci/es1968.c outl(n, iobase + ESM_RING_BUS_CONTR_B); n 743 sound/pci/hda/hda_codec.c struct hda_pcm *pcm, *n; n 745 sound/pci/hda/hda_codec.c list_for_each_entry_safe(pcm, n, &codec->pcm_list_head, list) { n 498 sound/pci/hda/hda_eld.c unsigned int n; n 528 sound/pci/hda/hda_eld.c n = name[3] - '0'; n 531 
sound/pci/hda/hda_eld.c n = 10 * n + name[4] - '0'; n 533 sound/pci/hda/hda_eld.c if (n >= ELD_MAX_SAD) n 536 sound/pci/hda/hda_eld.c e->sad[n].format = val; n 538 sound/pci/hda/hda_eld.c e->sad[n].channels = val; n 540 sound/pci/hda/hda_eld.c e->sad[n].rates = val; n 542 sound/pci/hda/hda_eld.c e->sad[n].sample_bits = val; n 544 sound/pci/hda/hda_eld.c e->sad[n].max_bitrate = val; n 546 sound/pci/hda/hda_eld.c e->sad[n].profile = val; n 547 sound/pci/hda/hda_eld.c if (n >= e->sad_count) n 548 sound/pci/hda/hda_eld.c e->sad_count = n + 1; n 630 sound/pci/hda/hda_generic.c int i, n; n 635 sound/pci/hda/hda_generic.c snd_array_for_each(&spec->paths, n, path) { n 781 sound/pci/hda/hda_generic.c int n, nums, idx; n 796 sound/pci/hda/hda_generic.c for (n = 0; n < nums; n++) n 797 sound/pci/hda/hda_generic.c init_amp(codec, nid, HDA_INPUT, n); n 802 sound/pci/hda/hda_generic.c for (n = 0; n < nums; n++) { n 803 sound/pci/hda/hda_generic.c if (n != idx) { n 804 sound/pci/hda/hda_generic.c if (conn[n] != spec->mixer_merge_nid) n 808 sound/pci/hda/hda_generic.c activate_amp(codec, nid, HDA_INPUT, n, n, false); n 812 sound/pci/hda/hda_generic.c check_and_activate_amp(codec, nid, HDA_INPUT, n, idx, enable); n 2772 sound/pci/hda/hda_generic.c unsigned int i, n = 0; n 2776 sound/pci/hda/hda_generic.c if (n == item_idx) n 2778 sound/pci/hda/hda_generic.c n++; n 2787 sound/pci/hda/hda_generic.c unsigned int i, n = 0; n 2791 sound/pci/hda/hda_generic.c return n; n 2793 sound/pci/hda/hda_generic.c n++; n 3168 sound/pci/hda/hda_generic.c int i, n, nums; n 3172 sound/pci/hda/hda_generic.c for (n = 0; n < spec->num_adc_nids; n++) { n 3174 sound/pci/hda/hda_generic.c if (!spec->input_paths[i][n]) n 3178 sound/pci/hda/hda_generic.c ok_bits |= (1 << n); n 3186 sound/pci/hda/hda_generic.c for (n = 0; n < spec->num_adc_nids; n++) { n 3187 sound/pci/hda/hda_generic.c if (spec->input_paths[i][n]) { n 3188 sound/pci/hda/hda_generic.c spec->dyn_adc_idx[i] = n; n 3199 sound/pci/hda/hda_generic.c for (n = 0; n < spec->num_adc_nids; n++) { n 3200 sound/pci/hda/hda_generic.c if (!(ok_bits & (1 << n))) n 3202 sound/pci/hda/hda_generic.c if (n != nums) { n 3203 sound/pci/hda/hda_generic.c spec->adc_nids[nums] = spec->adc_nids[n]; n 3208 sound/pci/hda/hda_generic.c spec->input_paths[i][n]; n 3209 sound/pci/hda/hda_generic.c spec->input_paths[i][n] = 0; n 3734 sound/pci/hda/hda_generic.c int i, n, nums, err; n 3751 sound/pci/hda/hda_generic.c for (n = 0; n < nums; n++) { n 3760 sound/pci/hda/hda_generic.c path = get_input_path(codec, n, i); n 3785 sound/pci/hda/hda_generic.c err = create_single_cap_vol_ctl(codec, n, vol, sw, n 3788 sound/pci/hda/hda_generic.c err = create_bind_cap_vol_ctl(codec, n, vol, sw); n 4176 sound/pci/hda/hda_generic.c int n; n 4178 sound/pci/hda/hda_generic.c snd_array_for_each(&spec->paths, n, path) { n 265 sound/pci/ice1712/psc724.c int n = kcontrol->private_value; n 267 sound/pci/ice1712/psc724.c ucontrol->value.integer.value[0] = psc724_cont[n].get(ice); n 276 sound/pci/ice1712/psc724.c int n = kcontrol->private_value; n 278 sound/pci/ice1712/psc724.c psc724_cont[n].set(ice, ucontrol->value.integer.value[0]); n 407 sound/pci/ice1712/se.c static int se200pci_get_enum_count(int n) n 412 sound/pci/ice1712/se.c member = se200pci_cont[n].member; n 435 sound/pci/ice1712/se.c int n, c; n 437 sound/pci/ice1712/se.c n = kc->private_value; n 438 sound/pci/ice1712/se.c c = se200pci_get_enum_count(n); n 441 sound/pci/ice1712/se.c return snd_ctl_enum_info(uinfo, 1, c, se200pci_cont[n].member); n 449 
sound/pci/ice1712/se.c int n = kc->private_value; n 450 sound/pci/ice1712/se.c uc->value.integer.value[0] = spec->vol[n].ch1; n 451 sound/pci/ice1712/se.c uc->value.integer.value[1] = spec->vol[n].ch2; n 460 sound/pci/ice1712/se.c int n = kc->private_value; n 461 sound/pci/ice1712/se.c uc->value.integer.value[0] = spec->vol[n].ch1; n 470 sound/pci/ice1712/se.c int n = kc->private_value; n 471 sound/pci/ice1712/se.c uc->value.enumerated.item[0] = spec->vol[n].ch1; n 475 sound/pci/ice1712/se.c static void se200pci_cont_update(struct snd_ice1712 *ice, int n) n 478 sound/pci/ice1712/se.c switch (se200pci_cont[n].target) { n 481 sound/pci/ice1712/se.c se200pci_cont[n].ch, n 482 sound/pci/ice1712/se.c spec->vol[n].ch1, n 483 sound/pci/ice1712/se.c spec->vol[n].ch2); n 488 sound/pci/ice1712/se.c spec->vol[n].ch1, n 489 sound/pci/ice1712/se.c spec->vol[n].ch2); n 494 sound/pci/ice1712/se.c spec->vol[n].ch1, n 495 sound/pci/ice1712/se.c spec->vol[n].ch2); n 500 sound/pci/ice1712/se.c spec->vol[n].ch1); n 504 sound/pci/ice1712/se.c se200pci_WM8776_set_agc(ice, spec->vol[n].ch1); n 508 sound/pci/ice1712/se.c se200pci_WM8776_set_afl(ice, spec->vol[n].ch1); n 521 sound/pci/ice1712/se.c int n = kc->private_value; n 528 sound/pci/ice1712/se.c if (spec->vol[n].ch1 != vol1) { n 529 sound/pci/ice1712/se.c spec->vol[n].ch1 = vol1; n 532 sound/pci/ice1712/se.c if (spec->vol[n].ch2 != vol2) { n 533 sound/pci/ice1712/se.c spec->vol[n].ch2 = vol2; n 537 sound/pci/ice1712/se.c se200pci_cont_update(ice, n); n 547 sound/pci/ice1712/se.c int n = kc->private_value; n 551 sound/pci/ice1712/se.c if (spec->vol[n].ch1 != vol1) { n 552 sound/pci/ice1712/se.c spec->vol[n].ch1 = vol1; n 553 sound/pci/ice1712/se.c se200pci_cont_update(ice, n); n 564 sound/pci/ice1712/se.c int n = kc->private_value; n 568 sound/pci/ice1712/se.c if (vol1 >= se200pci_get_enum_count(n)) n 570 sound/pci/ice1712/se.c if (spec->vol[n].ch1 != vol1) { n 571 sound/pci/ice1712/se.c spec->vol[n].ch1 = vol1; n 572 sound/pci/ice1712/se.c se200pci_cont_update(ice, n); n 185 sound/pci/ice1712/wm8766.c int n = kcontrol->private_value; n 188 sound/pci/ice1712/wm8766.c uinfo->count = (wm->ctl[n].flags & WM8766_FLAG_STEREO) ? 
2 : 1; n 189 sound/pci/ice1712/wm8766.c uinfo->value.integer.min = wm->ctl[n].min; n 190 sound/pci/ice1712/wm8766.c uinfo->value.integer.max = wm->ctl[n].max; n 199 sound/pci/ice1712/wm8766.c int n = kcontrol->private_value; n 201 sound/pci/ice1712/wm8766.c return snd_ctl_enum_info(uinfo, 1, wm->ctl[n].max, n 202 sound/pci/ice1712/wm8766.c wm->ctl[n].enum_names); n 209 sound/pci/ice1712/wm8766.c int n = kcontrol->private_value; n 212 sound/pci/ice1712/wm8766.c if (wm->ctl[n].get) n 213 sound/pci/ice1712/wm8766.c wm->ctl[n].get(wm, &val1, &val2); n 215 sound/pci/ice1712/wm8766.c val1 = wm->regs[wm->ctl[n].reg1] & wm->ctl[n].mask1; n 216 sound/pci/ice1712/wm8766.c val1 >>= __ffs(wm->ctl[n].mask1); n 217 sound/pci/ice1712/wm8766.c if (wm->ctl[n].flags & WM8766_FLAG_STEREO) { n 218 sound/pci/ice1712/wm8766.c val2 = wm->regs[wm->ctl[n].reg2] & wm->ctl[n].mask2; n 219 sound/pci/ice1712/wm8766.c val2 >>= __ffs(wm->ctl[n].mask2); n 220 sound/pci/ice1712/wm8766.c if (wm->ctl[n].flags & WM8766_FLAG_VOL_UPDATE) n 224 sound/pci/ice1712/wm8766.c if (wm->ctl[n].flags & WM8766_FLAG_INVERT) { n 225 sound/pci/ice1712/wm8766.c val1 = wm->ctl[n].max - (val1 - wm->ctl[n].min); n 226 sound/pci/ice1712/wm8766.c if (wm->ctl[n].flags & WM8766_FLAG_STEREO) n 227 sound/pci/ice1712/wm8766.c val2 = wm->ctl[n].max - (val2 - wm->ctl[n].min); n 230 sound/pci/ice1712/wm8766.c if (wm->ctl[n].flags & WM8766_FLAG_STEREO) n 240 sound/pci/ice1712/wm8766.c int n = kcontrol->private_value; n 246 sound/pci/ice1712/wm8766.c if (wm->ctl[n].flags & WM8766_FLAG_INVERT) { n 247 sound/pci/ice1712/wm8766.c regval1 = wm->ctl[n].max - (regval1 - wm->ctl[n].min); n 248 sound/pci/ice1712/wm8766.c regval2 = wm->ctl[n].max - (regval2 - wm->ctl[n].min); n 250 sound/pci/ice1712/wm8766.c if (wm->ctl[n].set) n 251 sound/pci/ice1712/wm8766.c wm->ctl[n].set(wm, regval1, regval2); n 253 sound/pci/ice1712/wm8766.c val = wm->regs[wm->ctl[n].reg1] & ~wm->ctl[n].mask1; n 254 sound/pci/ice1712/wm8766.c val |= regval1 << __ffs(wm->ctl[n].mask1); n 256 sound/pci/ice1712/wm8766.c if (wm->ctl[n].flags & WM8766_FLAG_STEREO && n 257 sound/pci/ice1712/wm8766.c wm->ctl[n].reg1 == wm->ctl[n].reg2) { n 258 sound/pci/ice1712/wm8766.c val &= ~wm->ctl[n].mask2; n 259 sound/pci/ice1712/wm8766.c val |= regval2 << __ffs(wm->ctl[n].mask2); n 261 sound/pci/ice1712/wm8766.c snd_wm8766_write(wm, wm->ctl[n].reg1, val); n 263 sound/pci/ice1712/wm8766.c if (wm->ctl[n].flags & WM8766_FLAG_STEREO && n 264 sound/pci/ice1712/wm8766.c wm->ctl[n].reg1 != wm->ctl[n].reg2) { n 265 sound/pci/ice1712/wm8766.c val = wm->regs[wm->ctl[n].reg2] & ~wm->ctl[n].mask2; n 266 sound/pci/ice1712/wm8766.c val |= regval2 << __ffs(wm->ctl[n].mask2); n 267 sound/pci/ice1712/wm8766.c if (wm->ctl[n].flags & WM8766_FLAG_VOL_UPDATE) n 269 sound/pci/ice1712/wm8766.c snd_wm8766_write(wm, wm->ctl[n].reg2, val); n 459 sound/pci/ice1712/wm8776.c int n = kcontrol->private_value; n 462 sound/pci/ice1712/wm8776.c uinfo->count = (wm->ctl[n].flags & WM8776_FLAG_STEREO) ? 
2 : 1; n 463 sound/pci/ice1712/wm8776.c uinfo->value.integer.min = wm->ctl[n].min; n 464 sound/pci/ice1712/wm8776.c uinfo->value.integer.max = wm->ctl[n].max; n 473 sound/pci/ice1712/wm8776.c int n = kcontrol->private_value; n 475 sound/pci/ice1712/wm8776.c return snd_ctl_enum_info(uinfo, 1, wm->ctl[n].max, n 476 sound/pci/ice1712/wm8776.c wm->ctl[n].enum_names); n 483 sound/pci/ice1712/wm8776.c int n = kcontrol->private_value; n 486 sound/pci/ice1712/wm8776.c if (wm->ctl[n].get) n 487 sound/pci/ice1712/wm8776.c wm->ctl[n].get(wm, &val1, &val2); n 489 sound/pci/ice1712/wm8776.c val1 = wm->regs[wm->ctl[n].reg1] & wm->ctl[n].mask1; n 490 sound/pci/ice1712/wm8776.c val1 >>= __ffs(wm->ctl[n].mask1); n 491 sound/pci/ice1712/wm8776.c if (wm->ctl[n].flags & WM8776_FLAG_STEREO) { n 492 sound/pci/ice1712/wm8776.c val2 = wm->regs[wm->ctl[n].reg2] & wm->ctl[n].mask2; n 493 sound/pci/ice1712/wm8776.c val2 >>= __ffs(wm->ctl[n].mask2); n 494 sound/pci/ice1712/wm8776.c if (wm->ctl[n].flags & WM8776_FLAG_VOL_UPDATE) n 498 sound/pci/ice1712/wm8776.c if (wm->ctl[n].flags & WM8776_FLAG_INVERT) { n 499 sound/pci/ice1712/wm8776.c val1 = wm->ctl[n].max - (val1 - wm->ctl[n].min); n 500 sound/pci/ice1712/wm8776.c if (wm->ctl[n].flags & WM8776_FLAG_STEREO) n 501 sound/pci/ice1712/wm8776.c val2 = wm->ctl[n].max - (val2 - wm->ctl[n].min); n 504 sound/pci/ice1712/wm8776.c if (wm->ctl[n].flags & WM8776_FLAG_STEREO) n 514 sound/pci/ice1712/wm8776.c int n = kcontrol->private_value; n 520 sound/pci/ice1712/wm8776.c if (wm->ctl[n].flags & WM8776_FLAG_INVERT) { n 521 sound/pci/ice1712/wm8776.c regval1 = wm->ctl[n].max - (regval1 - wm->ctl[n].min); n 522 sound/pci/ice1712/wm8776.c regval2 = wm->ctl[n].max - (regval2 - wm->ctl[n].min); n 524 sound/pci/ice1712/wm8776.c if (wm->ctl[n].set) n 525 sound/pci/ice1712/wm8776.c wm->ctl[n].set(wm, regval1, regval2); n 527 sound/pci/ice1712/wm8776.c val = wm->regs[wm->ctl[n].reg1] & ~wm->ctl[n].mask1; n 528 sound/pci/ice1712/wm8776.c val |= regval1 << __ffs(wm->ctl[n].mask1); n 530 sound/pci/ice1712/wm8776.c if (wm->ctl[n].flags & WM8776_FLAG_STEREO && n 531 sound/pci/ice1712/wm8776.c wm->ctl[n].reg1 == wm->ctl[n].reg2) { n 532 sound/pci/ice1712/wm8776.c val &= ~wm->ctl[n].mask2; n 533 sound/pci/ice1712/wm8776.c val |= regval2 << __ffs(wm->ctl[n].mask2); n 535 sound/pci/ice1712/wm8776.c snd_wm8776_write(wm, wm->ctl[n].reg1, val); n 537 sound/pci/ice1712/wm8776.c if (wm->ctl[n].flags & WM8776_FLAG_STEREO && n 538 sound/pci/ice1712/wm8776.c wm->ctl[n].reg1 != wm->ctl[n].reg2) { n 539 sound/pci/ice1712/wm8776.c val = wm->regs[wm->ctl[n].reg2] & ~wm->ctl[n].mask2; n 540 sound/pci/ice1712/wm8776.c val |= regval2 << __ffs(wm->ctl[n].mask2); n 541 sound/pci/ice1712/wm8776.c if (wm->ctl[n].flags & WM8776_FLAG_VOL_UPDATE) n 543 sound/pci/ice1712/wm8776.c snd_wm8776_write(wm, wm->ctl[n].reg2, val); n 2052 sound/pci/korg1212/korg1212.c int n; n 2063 sound/pci/korg1212/korg1212.c for (n=0; n<kAudioChannels; n++) n 2064 sound/pci/korg1212/korg1212.c snd_iprintf(buffer, " Channel %d: %s -> %s [%d]\n", n, n 2065 sound/pci/korg1212/korg1212.c channelName[n], n 2066 sound/pci/korg1212/korg1212.c channelName[korg1212->sharedBufferPtr->routeData[n]], n 2067 sound/pci/korg1212/korg1212.c korg1212->sharedBufferPtr->volumeData[n]); n 415 sound/pci/lola/lola_mixer.c int n, err; n 421 sound/pci/lola/lola_mixer.c for (n = 0; n < chip->pin[CAPT].num_pins; n += 2) { n 422 sound/pci/lola/lola_mixer.c unsigned int mask = 3U << n; /* handle the stereo case */ n 433 sound/pci/lola/lola_mixer.c err = 
lola_codec_write(chip, chip->pcm[CAPT].streams[n].nid, n 2277 sound/pci/maestro3.c u32 n; n 2287 sound/pci/maestro3.c pci_read_config_dword(pcidev, PCI_ALLEGRO_CONFIG, &n); n 2288 sound/pci/maestro3.c n &= ~(HV_CTRL_ENABLE | REDUCED_DEBOUNCE | HV_BUTTON_FROM_GD); n 2289 sound/pci/maestro3.c n |= chip->hv_config; n 2291 sound/pci/maestro3.c n |= REDUCED_DEBOUNCE; n 2292 sound/pci/maestro3.c n |= PM_CTRL_ENABLE | CLK_DIV_BY_49 | USE_PCI_TIMING; n 2293 sound/pci/maestro3.c pci_write_config_dword(pcidev, PCI_ALLEGRO_CONFIG, n); n 2296 sound/pci/maestro3.c pci_read_config_dword(pcidev, PCI_ALLEGRO_CONFIG, &n); n 2297 sound/pci/maestro3.c n &= ~INT_CLK_SELECT; n 2299 sound/pci/maestro3.c n &= ~INT_CLK_MULT_ENABLE; n 2300 sound/pci/maestro3.c n |= INT_CLK_SRC_NOT_PCI; n 2302 sound/pci/maestro3.c n &= ~( CLK_MULT_MODE_SELECT | CLK_MULT_MODE_SELECT_2 ); n 2303 sound/pci/maestro3.c pci_write_config_dword(pcidev, PCI_ALLEGRO_CONFIG, n); n 2306 sound/pci/maestro3.c pci_read_config_dword(pcidev, PCI_USER_CONFIG, &n); n 2307 sound/pci/maestro3.c n |= IN_CLK_12MHZ_SELECT; n 2308 sound/pci/maestro3.c pci_write_config_dword(pcidev, PCI_USER_CONFIG, n); n 437 sound/pci/rme32.c int n; n 449 sound/pci/rme32.c n = ((rme32->rcreg >> RME32_RCR_BITPOS_F0) & 1) + n 454 sound/pci/rme32.c switch (n) { /* supporting the CS8414 */ n 474 sound/pci/rme32.c switch (n) { /* supporting the CS8412 */ n 1463 sound/pci/rme32.c int n; n 1507 sound/pci/rme32.c if (snd_rme32_capture_getrate(rme32, &n) < 0) { n 1510 sound/pci/rme32.c if (n) { n 1516 sound/pci/rme32.c snd_rme32_capture_getrate(rme32, &n)); n 609 sound/pci/rme96.c int n, rate; n 614 sound/pci/rme96.c n = ((rme96->areg >> RME96_AR_BITPOS_F0) & 1) + n 616 sound/pci/rme96.c switch (n) { n 647 sound/pci/rme96.c n = ((rme96->rcreg >> RME96_RCR_BITPOS_F0) & 1) + n 651 sound/pci/rme96.c switch (n) { n 835 sound/pci/rme96.c int n; n 873 sound/pci/rme96.c if (snd_rme96_capture_getrate(rme96, &n) == 88200) { n 876 sound/pci/rme96.c if (snd_rme96_capture_getrate(rme96, &n) == 64000) { n 1729 sound/pci/rme96.c int n; n 1764 sound/pci/rme96.c if (snd_rme96_capture_getrate(rme96, &n) < 0) { n 1767 sound/pci/rme96.c if (n) { n 1773 sound/pci/rme96.c snd_rme96_capture_getrate(rme96, &n)); n 1800 sound/pci/rme96.c } else if (snd_rme96_capture_getrate(rme96, &n) < 0) { n 1074 sound/pci/rme9652/hdsp.c int n; n 1079 sound/pci/rme9652/hdsp.c n = 0; n 1081 sound/pci/rme9652/hdsp.c n++; n 1086 sound/pci/rme9652/hdsp.c s->control_register |= hdsp_encode_latency(n); n 1099 sound/pci/rme9652/hdsp.c u64 n; n 1106 sound/pci/rme9652/hdsp.c n = DDS_NUMERATOR; n 1107 sound/pci/rme9652/hdsp.c n = div_u64(n, rate); n 1109 sound/pci/rme9652/hdsp.c snd_BUG_ON(n >> 32); n 1112 sound/pci/rme9652/hdsp.c hdsp->dds_value = n; n 2825 sound/pci/rme9652/hdsp.c u64 n; n 2832 sound/pci/rme9652/hdsp.c n = DDS_NUMERATOR; n 2837 sound/pci/rme9652/hdsp.c n = div_u64(n, dds_value); n 2839 sound/pci/rme9652/hdsp.c n *= 4; n 2841 sound/pci/rme9652/hdsp.c n *= 2; n 2842 sound/pci/rme9652/hdsp.c return ((int)n) - system_sample_rate; n 1112 sound/pci/rme9652/hdspm.c static inline int HDSPM_bit2freq(int n) n 1117 sound/pci/rme9652/hdspm.c if (n < 1 || n > 9) n 1119 sound/pci/rme9652/hdspm.c return bit2freq_tab[n]; n 1450 sound/pci/rme9652/hdspm.c int n; n 1452 sound/pci/rme9652/hdspm.c n = hdspm_decode_latency(hdspm->control_register); n 1461 sound/pci/rme9652/hdspm.c if ((7 == n) && (RayDAT == hdspm->io_type || AIO == hdspm->io_type)) n 1462 sound/pci/rme9652/hdspm.c n = -1; n 1464 sound/pci/rme9652/hdspm.c return 1 << 
(n + 6); n 1511 sound/pci/rme9652/hdspm.c int n = hdspm->period_bytes; n 1518 sound/pci/rme9652/hdspm.c memset(buf, 0, n); n 1525 sound/pci/rme9652/hdspm.c int n; n 1540 sound/pci/rme9652/hdspm.c n = 7; n 1543 sound/pci/rme9652/hdspm.c n = 0; n 1545 sound/pci/rme9652/hdspm.c n++; n 1551 sound/pci/rme9652/hdspm.c s->control_register |= hdspm_encode_latency(n); n 1592 sound/pci/rme9652/hdspm.c u64 n; n 1604 sound/pci/rme9652/hdspm.c n = 131072000000000ULL; /* 125 MHz */ n 1608 sound/pci/rme9652/hdspm.c n = 110069313433624ULL; /* 105 MHz */ n 1612 sound/pci/rme9652/hdspm.c n = 104857600000000ULL; /* 100 MHz */ n 1619 sound/pci/rme9652/hdspm.c n = div_u64(n, rate); n 1621 sound/pci/rme9652/hdspm.c snd_BUG_ON(n >> 32); n 1622 sound/pci/rme9652/hdspm.c hdspm_write(hdspm, HDSPM_freqReg, (u32)n); n 439 sound/pci/rme9652/rme9652.c int n; n 448 sound/pci/rme9652/rme9652.c n = 0; n 450 sound/pci/rme9652/rme9652.c n++; n 455 sound/pci/rme9652/rme9652.c s->control_register |= rme9652_encode_latency(n); n 473 sound/pci/sonicvibes.c unsigned int r, m = 0, n = 0; n 492 sound/pci/sonicvibes.c n = xn - 2; n 497 sound/pci/sonicvibes.c *res_n = n; n 502 sound/pci/sonicvibes.c "pll: m = 0x%x, r = 0x%x, n = 0x%x\n", reg, m, r, n); n 511 sound/pci/sonicvibes.c unsigned int r, m, n; n 513 sound/pci/sonicvibes.c snd_sonicvibes_pll(rate, &r, &m, &n); n 517 sound/pci/sonicvibes.c snd_sonicvibes_out1(sonic, reg + 1, r | n); n 546 sound/pci/sonicvibes.c unsigned int rate, div, r, m, n; n 558 sound/pci/sonicvibes.c snd_sonicvibes_pll(rate, &r, &m, &n); n 561 sound/pci/sonicvibes.c params->rate_num = (SV_REFFREQUENCY/16) * (n+2) * r; n 23 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_KICK(n) (PS3_AUDIO_DMAC_REGBASE(n) + 0x00) n 24 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_SOURCE(n) (PS3_AUDIO_DMAC_REGBASE(n) + 0x04) n 25 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_DEST(n) (PS3_AUDIO_DMAC_REGBASE(n) + 0x08) n 26 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_DMASIZE(n) (PS3_AUDIO_DMAC_REGBASE(n) + 0x0C) n 45 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_AO_3WCTRL(n) (0x00006200 + 0x200 * (n)) n 53 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_AO_SPD_REGBASE(n) (0x00007200 + 0x200 * (n)) n 55 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_AO_SPDCTRL(n) \ n 56 sound/ppc/snd_ps3_reg.h (PS3_AUDIO_AO_SPD_REGBASE(n) + 0x00) n 57 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_AO_SPDUB(n, x) \ n 58 sound/ppc/snd_ps3_reg.h (PS3_AUDIO_AO_SPD_REGBASE(n) + 0x04 + 0x04 * (x)) n 59 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_AO_SPDCS(n, y) \ n 60 sound/ppc/snd_ps3_reg.h (PS3_AUDIO_AO_SPD_REGBASE(n) + 0x34 + 0x04 * (y)) n 77 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_INTR_0_CHAN(n) (1 << ((n) * 2)) n 131 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_AX_MCTRL_ASOMT(n) (1 << (3 - (n))) /* RWIVF */ n 138 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_AX_MCTRL_SPOMT(n) (1 << (5 - (n))) /* RWIVF */ n 167 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_AX_ISBP_SPOBRN_MASK(n) (0x7 << 4 * (1 - (n))) /* R-IUF */ n 175 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_AX_ISBP_SPOBWN_MASK(n) (0x7 << 4 * (5 - (n))) /* R-IUF */ n 195 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_AX_AOBP_ASOBRN_MASK(n) (0x7 << 4 * (3 - (n))) /* R-IUF */ n 206 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_AX_AOBP_ASOBWN_MASK(n) (0x7 << 4 * (7 - (n))) /* R-IUF */ n 273 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_AX_IE_ASOBUIE(n) (1 << (3 - (n))) /* RWIVF */ n 281 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_AX_IE_SPOBUIE(n) (1 << (7 - (n))) /* RWIVF */ n 287 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_AX_IE_SPOBTCIE(n) (1 << (11 - (n))) 
/* RWIVF */ n 293 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_AX_IE_ASOBEIE(n) (1 << (19 - (n))) /* RWIVF */ n 301 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_AX_IE_SPOBEIE(n) (1 << (23 - (n))) /* RWIVF */ n 433 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_AO_3WMCTRL_ASORUN(n) (1 << (15 - (n))) /* R-IVF */ n 434 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_AO_3WMCTRL_ASORUN_STOPPED(n) (0 << (15 - (n))) /* R-I-V */ n 435 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_AO_3WMCTRL_ASORUN_RUNNING(n) (1 << (15 - (n))) /* R---V */ n 495 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_AO_3WMCTRL_ASOEN(n) (1 << (31 - (n))) /* RWIVF */ n 496 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_AO_3WMCTRL_ASOEN_DISABLED(n) (0 << (31 - (n))) /* RWI-V */ n 497 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_AO_3WMCTRL_ASOEN_ENABLED(n) (1 << (31 - (n))) /* RW--V */ n 729 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_KICK_EVENT_AUDIO_DMA(n) \ n 730 sound/ppc/snd_ps3_reg.h ((0x13 + (n)) << 16) /* RW--V */ n 830 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_AO_3W_LDATA(n) (0x1000 + (0x100 * (n))) n 831 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_AO_3W_RDATA(n) (0x1080 + (0x100 * (n))) n 833 sound/ppc/snd_ps3_reg.h #define PS3_AUDIO_AO_SPD_DATA(n) (0x2000 + (0x400 * (n))) n 31 sound/soc/cirrus/ep93xx-ac97.c #define AC97CH(n) (((n) - 1) * 0x20) n 33 sound/soc/cirrus/ep93xx-ac97.c #define AC97DR(n) (AC97CH(n) + 0x0000) n 35 sound/soc/cirrus/ep93xx-ac97.c #define AC97RXCR(n) (AC97CH(n) + 0x0004) n 41 sound/soc/cirrus/ep93xx-ac97.c #define AC97TXCR(n) (AC97CH(n) + 0x0008) n 47 sound/soc/cirrus/ep93xx-ac97.c #define AC97SR(n) (AC97CH(n) + 0x000c) n 51 sound/soc/cirrus/ep93xx-ac97.c #define AC97RISR(n) (AC97CH(n) + 0x0010) n 52 sound/soc/cirrus/ep93xx-ac97.c #define AC97ISR(n) (AC97CH(n) + 0x0014) n 53 sound/soc/cirrus/ep93xx-ac97.c #define AC97IE(n) (AC97CH(n) + 0x0018) n 193 sound/soc/codecs/88pm860x-codec.c unsigned int n; n 283 sound/soc/codecs/88pm860x-codec.c if ((st_table[i].m == val[0]) && (st_table[i].n == val[1])) n 285 sound/soc/codecs/88pm860x-codec.c if ((st_table[i].m == val2[0]) && (st_table[i].n == val2[1])) n 312 sound/soc/codecs/88pm860x-codec.c st_table[val].n << 4); n 320 sound/soc/codecs/88pm860x-codec.c st_table[val2].n); n 18 sound/soc/codecs/adau-utils.c unsigned int r, n, m, i, j; n 23 sound/soc/codecs/adau-utils.c n = 0; n 33 sound/soc/codecs/adau-utils.c n = i / j; n 38 sound/soc/codecs/adau-utils.c n = 0; n 42 sound/soc/codecs/adau-utils.c if (n > 0xffff || m > 0xffff || div > 3 || r > 8 || r < 2) n 48 sound/soc/codecs/adau-utils.c regs[2] = n >> 8; n 49 sound/soc/codecs/adau-utils.c regs[3] = n & 0xff; n 118 sound/soc/codecs/alc5623.h #define ALC5623_PLL_CTRL_N_VAL(n) (((n)&0xff) << 8) n 201 sound/soc/codecs/alc5632.h #define ALC5632_PLL1_CTRL_N_VAL(n) (((n) & 0x0f) << 8) n 2095 sound/soc/codecs/arizona.c int n; n 2283 sound/soc/codecs/arizona.c cfg->n = target / (ratio * Fref); n 2289 sound/soc/codecs/arizona.c cfg->theta = (target - (cfg->n * ratio * Fref)) n 2319 sound/soc/codecs/arizona.c cfg->n, cfg->theta, cfg->lambda); n 2360 sound/soc/codecs/arizona.c ARIZONA_FLL1_CTRL_UPD | cfg->n); n 196 sound/soc/codecs/cs42xx8.h #define CS42XX8_DACMUTE_AOUT(n) (0x1 << n) n 69 sound/soc/codecs/cs53l30.h #define CS53L30_ASP_TDMTX_ENn(n) CS53L30_ASP_TDMTX_ENx((n) >> 3) n 322 sound/soc/codecs/madera.c int n, ret; n 324 sound/soc/codecs/madera.c n = device_property_count_u32(dev, propname); n 325 sound/soc/codecs/madera.c if (n < 0) { n 326 sound/soc/codecs/madera.c if (n == -EINVAL) n 329 sound/soc/codecs/madera.c dev_warn(dev, "%s 
malformed (%d)\n", propname, n); n 331 sound/soc/codecs/madera.c return n; n 332 sound/soc/codecs/madera.c } else if ((n % multiple) != 0) { n 339 sound/soc/codecs/madera.c if (n > n_max) n 340 sound/soc/codecs/madera.c n = n_max; n 342 sound/soc/codecs/madera.c ret = device_property_read_u32_array(dev, propname, dest, n); n 346 sound/soc/codecs/madera.c return n; n 354 sound/soc/codecs/madera.c int n, i, in_idx, ch_idx; n 359 sound/soc/codecs/madera.c n = madera_get_variable_u32_array(madera->dev, "cirrus,inmode", n 362 sound/soc/codecs/madera.c if (n < 0) n 367 sound/soc/codecs/madera.c for (i = 0; i < n; ++i) { n 382 sound/soc/codecs/madera.c int i, n; n 386 sound/soc/codecs/madera.c n = madera_get_variable_u32_array(madera->dev, "cirrus,out-mono", n 388 sound/soc/codecs/madera.c if (n > 0) n 389 sound/soc/codecs/madera.c for (i = 0; i < n; ++i) n 3611 sound/soc/codecs/madera.c cfg->n = fll->fout / (ratio * fref); n 3617 sound/soc/codecs/madera.c cfg->theta = (fll->fout - (cfg->n * ratio * fref)) n 3676 sound/soc/codecs/madera.c cfg->n, cfg->theta, cfg->lambda); n 3735 sound/soc/codecs/madera.c MADERA_FLL1_CTRL_UPD | cfg->n, &change); n 147 sound/soc/codecs/madera.h int n; n 261 sound/soc/codecs/max9860.c unsigned long n; n 398 sound/soc/codecs/max9860.c n = DIV_ROUND_CLOSEST_ULL(65536ULL * 96 * params_rate(params), n 407 sound/soc/codecs/max9860.c n |= 1; /* trigger rapid pll lock mode */ n 418 sound/soc/codecs/max9860.c dev_dbg(component->dev, "N %lu\n", n); n 420 sound/soc/codecs/max9860.c MAX9860_AUDIOCLKHIGH, n >> 8); n 426 sound/soc/codecs/max9860.c MAX9860_AUDIOCLKLOW, n & 0xff); n 258 sound/soc/codecs/max98925.c int rate, int clock, int *value, int *n, int *m) n 266 sound/soc/codecs/max98925.c *n = rate_table[i].divisors[clock][0]; n 352 sound/soc/codecs/max98925.c unsigned int dai_sr = 0, clock, mdll, n, m; n 401 sound/soc/codecs/max98925.c if (max98925_rate_value(component, rate, clock, &dai_sr, &n, &m)) n 415 sound/soc/codecs/max98925.c MAX98925_DAI_CLK_DIV_N_MSBS, n >> 8); n 417 sound/soc/codecs/max98925.c MAX98925_DAI_CLK_DIV_N_LSBS, n & 0xFF); n 30 sound/soc/codecs/pcm186x.h #define PCM186X_PAGE_BASE(n) (PCM186X_PAGE_LEN * n) n 58 sound/soc/codecs/pcm512x.c #define PCM512x_REGULATOR_EVENT(n) \ n 59 sound/soc/codecs/pcm512x.c static int pcm512x_regulator_event_##n(struct notifier_block *nb, \ n 63 sound/soc/codecs/pcm512x.c supply_nb[n]); \ n 17 sound/soc/codecs/pcm512x.h #define PCM512x_PAGE_BASE(n) (PCM512x_VIRT_BASE + (PCM512x_PAGE_LEN * n)) n 97 sound/soc/codecs/rl6231.c int n; n 141 sound/soc/codecs/rl6231.c int k_t, min_k, max_k, n = 0, m = 0, m_t = 0; n 155 sound/soc/codecs/rl6231.c n = pll_preset_table[i].n; n 180 sound/soc/codecs/rl6231.c n = n_t; n 188 sound/soc/codecs/rl6231.c n = n_t; n 200 sound/soc/codecs/rl6231.c n = n_t; n 216 sound/soc/codecs/rl6231.c pll_code->n_code = n; n 564 sound/soc/codecs/rt5631.h #define RT5631_PLL_CTRL_N_VAL(n) (((n)&0xff) << 8) n 1839 sound/soc/codecs/wm2200.c u16 n; n 1912 sound/soc/codecs/wm2200.c fll_div->n = target / (fratio * Fref); n 1920 sound/soc/codecs/wm2200.c fll_div->theta = (target - (fll_div->n * fratio * Fref)) n 1926 sound/soc/codecs/wm2200.c fll_div->n, fll_div->theta, fll_div->lambda); n 1993 sound/soc/codecs/wm2200.c factors.n); n 1659 sound/soc/codecs/wm5100.c u16 n; n 1732 sound/soc/codecs/wm5100.c fll_div->n = target / (fratio * Fref); n 1740 sound/soc/codecs/wm5100.c fll_div->theta = (target - (fll_div->n * fratio * Fref)) n 1746 sound/soc/codecs/wm5100.c fll_div->n, fll_div->theta, fll_div->lambda); n 1816 
sound/soc/codecs/wm5100.c snd_soc_component_update_bits(component, base + 5, WM5100_FLL1_N_MASK, factors.n); n 963 sound/soc/codecs/wm8350.c int n; n 999 sound/soc/codecs/wm8350.c fll_div->n = t1 / t2; n 1046 sound/soc/codecs/wm8350.c freq_in, freq_out, fll_div.n, fll_div.k, fll_div.div, n 1056 sound/soc/codecs/wm8350.c n & WM8350_FLL_N_MASK)); n 855 sound/soc/codecs/wm8400.c u16 n; n 907 sound/soc/codecs/wm8400.c factors->n = target / (Fref * factors->fratio); n 927 sound/soc/codecs/wm8400.c factors->n, factors->k, factors->fratio, factors->outdiv); n 977 sound/soc/codecs/wm8400.c snd_soc_component_write(component, WM8400_FLL_CONTROL_3, factors.n); n 267 sound/soc/codecs/wm8510.c unsigned int n:4; n 295 sound/soc/codecs/wm8510.c pll_div.n = Ndiv; n 332 sound/soc/codecs/wm8510.c snd_soc_component_write(component, WM8510_PLLN, (pll_div.pre_div << 4) | pll_div.n); n 377 sound/soc/codecs/wm8580.c u32 n:4; n 443 sound/soc/codecs/wm8580.c pll_div->n = Ndiv; n 454 sound/soc/codecs/wm8580.c pll_div->n, pll_div->k, pll_div->prescale, pll_div->freqmode, n 512 sound/soc/codecs/wm8580.c (pll_div.k >> 18 & 0xf) | (pll_div.n << 4)); n 694 sound/soc/codecs/wm8753.c u32 n:4; n 720 sound/soc/codecs/wm8753.c pll_div->n = Ndiv; n 771 sound/soc/codecs/wm8753.c value = (pll_div.n << 5) + ((pll_div.k & 0x3c0000) >> 18); n 98 sound/soc/codecs/wm8770.c #define WM8770_REGULATOR_EVENT(n) \ n 99 sound/soc/codecs/wm8770.c static int wm8770_regulator_event_##n(struct notifier_block *nb, \ n 103 sound/soc/codecs/wm8770.c disable_nb[n]); \ n 82 sound/soc/codecs/wm8804.c #define WM8804_REGULATOR_EVENT(n) \ n 83 sound/soc/codecs/wm8804.c static int wm8804_regulator_event_##n(struct notifier_block *nb, \ n 87 sound/soc/codecs/wm8804.c disable_nb[n]); \ n 323 sound/soc/codecs/wm8804.c u32 n:4; n 385 sound/soc/codecs/wm8804.c pll_div->n = Ndiv; n 432 sound/soc/codecs/wm8804.c pll_div.n | (pll_div.prescale << 4)); n 677 sound/soc/codecs/wm8900.c u16 n; n 728 sound/soc/codecs/wm8900.c fll_div->n = Ndiv / fll_div->fll_ratio; n 745 sound/soc/codecs/wm8900.c WARN_ON(!K && target != Fref * fll_div->fll_ratio * fll_div->n)) n 787 sound/soc/codecs/wm8900.c snd_soc_component_write(component, WM8900_REG_FLLCTL4, fll_div.n >> 5); n 789 sound/soc/codecs/wm8900.c (fll_div.fllclk_div << 6) | (fll_div.n & 0x1f)); n 1584 sound/soc/codecs/wm8904.c u16 n; n 1663 sound/soc/codecs/wm8904.c fll_div->n = Ndiv; n 1681 sound/soc/codecs/wm8904.c fll_div->n, fll_div->k, n 1800 sound/soc/codecs/wm8904.c fll_div.n << WM8904_FLL_N_SHIFT); n 515 sound/soc/codecs/wm8940.c unsigned int n:4; n 554 sound/soc/codecs/wm8940.c pll_div.n = Ndiv; n 596 sound/soc/codecs/wm8940.c (pll_div.pre_scale << 4) | pll_div.n | (1 << 6)); n 599 sound/soc/codecs/wm8940.c (pll_div.pre_scale << 4) | pll_div.n); n 134 sound/soc/codecs/wm8955.c int n; n 170 sound/soc/codecs/wm8955.c pll->n = Ndiv; n 187 sound/soc/codecs/wm8955.c dev_dbg(dev, "N=%x K=%x OUTDIV=%x\n", pll->n, pll->k, pll->outdiv); n 293 sound/soc/codecs/wm8955.c (pll.n << WM8955_N_SHIFT) | n 1107 sound/soc/codecs/wm8960.c u32 n:4; n 1161 sound/soc/codecs/wm8960.c pll_div->n = Ndiv; n 1179 sound/soc/codecs/wm8960.c pll_div->n, pll_div->k, pll_div->pre_div); n 1207 sound/soc/codecs/wm8960.c reg |= pll_div.n; n 91 sound/soc/codecs/wm8962.c #define WM8962_REGULATOR_EVENT(n) \ n 92 sound/soc/codecs/wm8962.c static int wm8962_regulator_event_##n(struct notifier_block *nb, \ n 96 sound/soc/codecs/wm8962.c disable_nb[n]); \ n 2710 sound/soc/codecs/wm8962.c u16 n; n 2787 sound/soc/codecs/wm8962.c fll_div->n = target / 
(fratio * Fref); n 2795 sound/soc/codecs/wm8962.c fll_div->theta = (target - (fll_div->n * fratio * Fref)) n 2801 sound/soc/codecs/wm8962.c fll_div->n, fll_div->theta, fll_div->lambda); n 2878 sound/soc/codecs/wm8962.c snd_soc_component_write(component, WM8962_FLL_CONTROL_8, fll_div.n); n 272 sound/soc/codecs/wm8974.c unsigned int n:4; n 302 sound/soc/codecs/wm8974.c pll_div->n = Ndiv; n 340 sound/soc/codecs/wm8974.c snd_soc_component_write(component, WM8974_PLLN, (pll_div.pre_div << 4) | pll_div.n); n 402 sound/soc/codecs/wm8978.c u8 n; n 428 sound/soc/codecs/wm8978.c pll_div->n = n_div; n 539 sound/soc/codecs/wm8978.c __func__, pll_div.n, pll_div.k, pll_div.div2); n 544 sound/soc/codecs/wm8978.c snd_soc_component_write(component, WM8978_PLL_N, (pll_div.div2 << 4) | pll_div.n); n 745 sound/soc/codecs/wm8983.c u32 n:4; n 769 sound/soc/codecs/wm8983.c pll_div->n = Ndiv; n 810 sound/soc/codecs/wm8983.c | pll_div.n); n 842 sound/soc/codecs/wm8985.c u32 n:4; n 866 sound/soc/codecs/wm8985.c pll_div->n = Ndiv; n 903 sound/soc/codecs/wm8985.c | pll_div.n); n 885 sound/soc/codecs/wm8990.c u32 n; n 912 sound/soc/codecs/wm8990.c pll_div->n = Ndiv; n 948 sound/soc/codecs/wm8990.c snd_soc_component_write(component, WM8990_PLL1, pll_div.n | WM8990_SDM | n 877 sound/soc/codecs/wm8991.c u32 n; n 904 sound/soc/codecs/wm8991.c pll_div->n = Ndiv; n 942 sound/soc/codecs/wm8991.c snd_soc_component_write(component, WM8991_PLL1, pll_div.n | WM8991_SDM | n 361 sound/soc/codecs/wm8993.c u16 n; n 441 sound/soc/codecs/wm8993.c fll_div->n = Ndiv; n 459 sound/soc/codecs/wm8993.c fll_div->n, fll_div->k, n 537 sound/soc/codecs/wm8993.c reg4 |= fll_div.n << WM8993_FLL_N_SHIFT; n 2044 sound/soc/codecs/wm8994.c u16 n; n 2100 sound/soc/codecs/wm8994.c fll->n = Ndiv; n 2120 sound/soc/codecs/wm8994.c pr_debug("N=%x K=%x\n", fll->n, fll->k); n 2126 sound/soc/codecs/wm8994.c fll->k = (freq_out - (freq_in * fll->n)) / gcd_fll; n 2238 sound/soc/codecs/wm8994.c fll.n << WM8994_FLL1_N_SHIFT); n 393 sound/soc/codecs/wm8995.c #define WM8995_REGULATOR_EVENT(n) \ n 394 sound/soc/codecs/wm8995.c static int wm8995_regulator_event_##n(struct notifier_block *nb, \ n 398 sound/soc/codecs/wm8995.c disable_nb[n]); \ n 1717 sound/soc/codecs/wm8995.c u16 n; n 1772 sound/soc/codecs/wm8995.c fll->n = Ndiv; n 1789 sound/soc/codecs/wm8995.c pr_debug("N=%x K=%x\n", fll->n, fll->k); n 1878 sound/soc/codecs/wm8995.c fll.n << WM8995_FLL1_N_SHIFT); n 101 sound/soc/codecs/wm8996.c #define WM8996_REGULATOR_EVENT(n) \ n 102 sound/soc/codecs/wm8996.c static int wm8996_regulator_event_##n(struct notifier_block *nb, \ n 106 sound/soc/codecs/wm8996.c disable_nb[n]); \ n 1896 sound/soc/codecs/wm8996.c u16 n; n 1979 sound/soc/codecs/wm8996.c fll_div->n = target / (fratio * Fref); n 1987 sound/soc/codecs/wm8996.c fll_div->theta = (target - (fll_div->n * fratio * Fref)) n 1993 sound/soc/codecs/wm8996.c fll_div->n, fll_div->theta, fll_div->lambda); n 2075 sound/soc/codecs/wm8996.c (fll_div.n << WM8996_FLL_N_SHIFT) | n 441 sound/soc/codecs/wm9081.c u16 n; n 520 sound/soc/codecs/wm9081.c fll_div->n = Ndiv; n 538 sound/soc/codecs/wm9081.c fll_div->n, fll_div->k, n 610 sound/soc/codecs/wm9081.c reg4 |= fll_div.n << WM9081_FLL_N_SHIFT; n 744 sound/soc/codecs/wm9713.c u32 n:4; n 792 sound/soc/codecs/wm9713.c pll_div->n = Ndiv; n 833 sound/soc/codecs/wm9713.c reg = (pll_div.n << 12) | (pll_div.lf << 11) | n 838 sound/soc/codecs/wm9713.c reg2 = (pll_div.n << 12) | (pll_div.lf << 11) | (1 << 10) | n 427 sound/soc/generic/simple-card.c int i, n, len; n 432 
sound/soc/generic/simple-card.c n = len / sizeof(__be32); n 433 sound/soc/generic/simple-card.c if (n <= 0) n 437 sound/soc/generic/simple-card.c n, sizeof(*card->aux_dev), GFP_KERNEL); n 441 sound/soc/generic/simple-card.c for (i = 0; i < n; i++) { n 448 sound/soc/generic/simple-card.c card->num_aux_devs = n; n 95 sound/soc/hisilicon/hi6210-i2s.c int ret, n; n 103 sound/soc/hisilicon/hi6210-i2s.c for (n = 0; n < i2s->clocks; n++) { n 104 sound/soc/hisilicon/hi6210-i2s.c ret = clk_prepare_enable(i2s->clk[n]); n 106 sound/soc/hisilicon/hi6210-i2s.c while (n--) n 107 sound/soc/hisilicon/hi6210-i2s.c clk_disable_unprepare(i2s->clk[n]); n 174 sound/soc/hisilicon/hi6210-i2s.c int n; n 176 sound/soc/hisilicon/hi6210-i2s.c for (n = 0; n < i2s->clocks; n++) n 177 sound/soc/hisilicon/hi6210-i2s.c clk_disable_unprepare(i2s->clk[n]); n 47 sound/soc/intel/common/sst-firmware.c int i, m, n; n 51 sound/soc/intel/common/sst-firmware.c n = bytes % 4; n 56 sound/soc/intel/common/sst-firmware.c if (n) { n 57 sound/soc/intel/common/sst-firmware.c for (i = 0; i < n; i++) n 153 sound/soc/rockchip/rockchip_pdm.c unsigned long m, n; n 173 sound/soc/rockchip/rockchip_pdm.c &m, &n); n 176 sound/soc/rockchip/rockchip_pdm.c (n << PDM_FD_DENOMINATOR_SFT); n 186 sound/soc/rockchip/rockchip_pdm.c clk_div = n / m; n 91 sound/soc/samsung/arndale_rt5631.c int n, ret; n 97 sound/soc/samsung/arndale_rt5631.c for (n = 0; np && n < ARRAY_SIZE(arndale_rt5631_dai); n++) { n 98 sound/soc/samsung/arndale_rt5631.c if (!arndale_rt5631_dai[n].cpus->dai_name) { n 99 sound/soc/samsung/arndale_rt5631.c arndale_rt5631_dai[n].cpus->of_node = of_parse_phandle(np, n 100 sound/soc/samsung/arndale_rt5631.c "samsung,audio-cpu", n); n 102 sound/soc/samsung/arndale_rt5631.c if (!arndale_rt5631_dai[n].cpus->of_node) { n 108 sound/soc/samsung/arndale_rt5631.c if (!arndale_rt5631_dai[n].platforms->name) n 109 sound/soc/samsung/arndale_rt5631.c arndale_rt5631_dai[n].platforms->of_node = n 110 sound/soc/samsung/arndale_rt5631.c arndale_rt5631_dai[n].cpus->of_node; n 112 sound/soc/samsung/arndale_rt5631.c arndale_rt5631_dai[n].codecs->name = NULL; n 113 sound/soc/samsung/arndale_rt5631.c arndale_rt5631_dai[n].codecs->of_node = of_parse_phandle(np, n 114 sound/soc/samsung/arndale_rt5631.c "samsung,audio-codec", n); n 43 sound/soc/sh/rcar/gen.c #define RSND_REG_SET(id, offset, _id_offset, n) \ n 48 sound/soc/sh/rcar/gen.c .reg_name = n, \ n 243 sound/soc/sh/rcar/src.c u64 n; n 246 sound/soc/sh/rcar/src.c n = (u64)0x0400000 * fin; n 247 sound/soc/sh/rcar/src.c do_div(n, fout); n 248 sound/soc/sh/rcar/src.c fsrate = n; n 482 sound/soc/soc-dapm.c unsigned int n; n 485 sound/soc/soc-dapm.c n = data->wlist->num_widgets + 1; n 487 sound/soc/soc-dapm.c n = 1; n 490 sound/soc/soc-dapm.c struct_size(new_wlist, widgets, n), n 495 sound/soc/soc-dapm.c new_wlist->widgets[n - 1] = widget; n 496 sound/soc/soc-dapm.c new_wlist->num_widgets = n; n 1634 sound/soc/soc-dapm.c struct snd_soc_dapm_widget *w, *n; n 1649 sound/soc/soc-dapm.c list_for_each_entry_safe(w, n, list, power_list) { n 1679 sound/soc/soc-dapm.c list_for_each_entry_safe_continue(w, n, list, n 1692 sound/soc/soc-dapm.c list_for_each_entry_safe_continue(w, n, list, n 81 sound/soc/sof/utils.c int m, n; n 84 sound/soc/sof/utils.c n = size % 4; n 89 sound/soc/sof/utils.c if (n) { n 90 sound/soc/sof/utils.c affected_mask = (1 << (8 * n)) - 1; n 64 sound/soc/sti/uniperif.h #define UNIPERIF_CHANNEL_STA_REGN(ip, n) (0x0060 + (4 * n)) n 66 sound/soc/sti/uniperif.h readl_relaxed(ip->base + 
UNIPERIF_CHANNEL_STA_REGN(ip, n)) n 67 sound/soc/sti/uniperif.h #define SET_UNIPERIF_CHANNEL_STA_REGN(ip, n, value) \ n 69 sound/soc/sti/uniperif.h UNIPERIF_CHANNEL_STA_REGN(ip, n)) n 1052 sound/soc/sti/uniperif.h #define UNIPERIF_CHANNEL_STA_REGN(ip, n) (0x0060 + (4 * n)) n 1054 sound/soc/sti/uniperif.h readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REGN(ip, n)) n 1055 sound/soc/sti/uniperif.h #define SET_UNIPERIF_CHANNEL_STA_REGN(ip, n, value) \ n 1057 sound/soc/sti/uniperif.h UNIPERIF_CHANNEL_STA_REGN(ip, n)) n 221 sound/soc/sti/uniperif_player.c int n; n 294 sound/soc/sti/uniperif_player.c for (n = 0; n < 6; ++n) { n 296 sound/soc/sti/uniperif_player.c player->stream_settings.iec958.status[0 + (n * 4)] & 0xf; n 298 sound/soc/sti/uniperif_player.c player->stream_settings.iec958.status[1 + (n * 4)] << 8; n 300 sound/soc/sti/uniperif_player.c player->stream_settings.iec958.status[2 + (n * 4)] << 16; n 302 sound/soc/sti/uniperif_player.c player->stream_settings.iec958.status[3 + (n * 4)] << 24; n 303 sound/soc/sti/uniperif_player.c SET_UNIPERIF_CHANNEL_STA_REGN(player, n, status); n 156 sound/soc/stm/stm32_adfsdm.c static void stm32_memcpy_32to16(void *dest, const void *src, size_t n) n 162 sound/soc/stm/stm32_adfsdm.c for (i = n >> 1; i > 0; i--) { n 81 sound/soc/ti/davinci-mcasp.h #define DAVINCI_MCASP_XRSRCTL_REG(n) (DAVINCI_MCASP_XRSRCTL_BASE_REG + \ n 82 sound/soc/ti/davinci-mcasp.h (n << 2)) n 85 sound/soc/ti/davinci-mcasp.h #define DAVINCI_MCASP_TXBUF_REG(n) (0x200 + (n << 2)) n 87 sound/soc/ti/davinci-mcasp.h #define DAVINCI_MCASP_RXBUF_REG(n) (0x280 + (n << 2)) n 112 sound/soc/ti/davinci-mcasp.h #define PIN_BIT_AXR(n) (n) n 226 sound/soc/ti/davinci-mcasp.h #define TXTDMS(n) (1<<n) n 231 sound/soc/ti/davinci-mcasp.h #define RXTDMS(n) (1<<n) n 31 sound/soc/txx9/txx9aclc.h #define ACINT_CODECRDY(n) (0x00000001 << (n)) /* CODECn ready */ n 17 sound/soc/uniphier/aio-reg.h #define A2CHNMAPCTR0(n) (0x00000 + 0x40 * (n)) n 18 sound/soc/uniphier/aio-reg.h #define A2RBNMAPCTR0(n) (0x01000 + 0x40 * (n)) n 19 sound/soc/uniphier/aio-reg.h #define A2IPORTNMAPCTR0(n) (0x02000 + 0x40 * (n)) n 20 sound/soc/uniphier/aio-reg.h #define A2IPORTNMAPCTR1(n) (0x02004 + 0x40 * (n)) n 21 sound/soc/uniphier/aio-reg.h #define A2IIFNMAPCTR0(n) (0x03000 + 0x40 * (n)) n 22 sound/soc/uniphier/aio-reg.h #define A2OPORTNMAPCTR0(n) (0x04000 + 0x40 * (n)) n 23 sound/soc/uniphier/aio-reg.h #define A2OPORTNMAPCTR1(n) (0x04004 + 0x40 * (n)) n 24 sound/soc/uniphier/aio-reg.h #define A2OPORTNMAPCTR2(n) (0x04008 + 0x40 * (n)) n 25 sound/soc/uniphier/aio-reg.h #define A2OIFNMAPCTR0(n) (0x05000 + 0x40 * (n)) n 26 sound/soc/uniphier/aio-reg.h #define A2ATNMAPCTR0(n) (0x06000 + 0x40 * (n)) n 66 sound/soc/uniphier/aio-reg.h #define IPORTMXCTR1(n) (0x22000 + 0x400 * (n)) n 97 sound/soc/uniphier/aio-reg.h #define IPORTMXCTR2(n) (0x22004 + 0x400 * (n)) n 120 sound/soc/uniphier/aio-reg.h #define IPORTMXCNTCTR(n) (0x22010 + 0x400 * (n)) n 121 sound/soc/uniphier/aio-reg.h #define IPORTMXCOUNTER(n) (0x22014 + 0x400 * (n)) n 122 sound/soc/uniphier/aio-reg.h #define IPORTMXCNTMONI(n) (0x22018 + 0x400 * (n)) n 123 sound/soc/uniphier/aio-reg.h #define IPORTMXACLKSEL0EX(n) (0x22020 + 0x400 * (n)) n 127 sound/soc/uniphier/aio-reg.h #define IPORTMXEXNOE(n) (0x22070 + 0x400 * (n)) n 131 sound/soc/uniphier/aio-reg.h #define IPORTMXMASK(n) (0x22078 + 0x400 * (n)) n 138 sound/soc/uniphier/aio-reg.h #define IPORTMXRSTCTR(n) (0x2207c + 0x400 * (n)) n 144 sound/soc/uniphier/aio-reg.h #define PBINMXCTR(n) (0x20200 + 0x40 * (n)) n 168 
sound/soc/uniphier/aio-reg.h #define PBINMXPAUSECTR0(n) (0x20204 + 0x40 * (n)) n 169 sound/soc/uniphier/aio-reg.h #define PBINMXPAUSECTR1(n) (0x20208 + 0x40 * (n)) n 187 sound/soc/uniphier/aio-reg.h #define OPORTMXCTR1(n) (0x42000 + 0x400 * (n)) n 211 sound/soc/uniphier/aio-reg.h #define OPORTMXCTR2(n) (0x42004 + 0x400 * (n)) n 231 sound/soc/uniphier/aio-reg.h #define OPORTMXCTR3(n) (0x42008 + 0x400 * (n)) n 248 sound/soc/uniphier/aio-reg.h #define OPORTMXSRC1CTR(n) (0x4200c + 0x400 * (n)) n 276 sound/soc/uniphier/aio-reg.h #define OPORTMXDSDMUTEDAT(n) (0x42020 + 0x400 * (n)) n 277 sound/soc/uniphier/aio-reg.h #define OPORTMXDXDFREQMODE(n) (0x42024 + 0x400 * (n)) n 278 sound/soc/uniphier/aio-reg.h #define OPORTMXDSDSEL(n) (0x42028 + 0x400 * (n)) n 279 sound/soc/uniphier/aio-reg.h #define OPORTMXDSDPORT(n) (0x4202c + 0x400 * (n)) n 280 sound/soc/uniphier/aio-reg.h #define OPORTMXACLKSEL0EX(n) (0x42030 + 0x400 * (n)) n 281 sound/soc/uniphier/aio-reg.h #define OPORTMXPATH(n) (0x42040 + 0x400 * (n)) n 282 sound/soc/uniphier/aio-reg.h #define OPORTMXSYNC(n) (0x42044 + 0x400 * (n)) n 283 sound/soc/uniphier/aio-reg.h #define OPORTMXREPET(n) (0x42050 + 0x400 * (n)) n 298 sound/soc/uniphier/aio-reg.h #define OPORTMXPAUDAT(n) (0x42054 + 0x400 * (n)) n 307 sound/soc/uniphier/aio-reg.h #define OPORTMXRATE_I(n) (0x420e4 + 0x400 * (n)) n 351 sound/soc/uniphier/aio-reg.h #define OPORTMXEXNOE(n) (0x420f0 + 0x400 * (n)) n 352 sound/soc/uniphier/aio-reg.h #define OPORTMXMASK(n) (0x420f8 + 0x400 * (n)) n 365 sound/soc/uniphier/aio-reg.h #define OPORTMXDEBUG(n) (0x420fc + 0x400 * (n)) n 366 sound/soc/uniphier/aio-reg.h #define OPORTMXTYVOLPARA1(n, m) (0x42100 + 0x400 * (n) + 0x20 * (m)) n 368 sound/soc/uniphier/aio-reg.h #define OPORTMXTYVOLPARA2(n, m) (0x42104 + 0x400 * (n) + 0x20 * (m)) n 374 sound/soc/uniphier/aio-reg.h #define OPORTMXTYVOLGAINSTATUS(n, m) (0x42108 + 0x400 * (n) + 0x20 * (m)) n 376 sound/soc/uniphier/aio-reg.h #define OPORTMXTYSLOTCTR(n, m) (0x42114 + 0x400 * (n) + 0x20 * (m)) n 387 sound/soc/uniphier/aio-reg.h #define OPORTMXTYRSTCTR(n, m) (0x4211c + 0x400 * (n) + 0x20 * (m)) n 395 sound/soc/uniphier/aio-reg.h #define PBOUTMXCTR0(n) (0x40200 + 0x40 * (n)) n 409 sound/soc/uniphier/aio-reg.h #define PBOUTMXCTR1(n) (0x40204 + 0x40 * (n)) n 410 sound/soc/uniphier/aio-reg.h #define PBOUTMXINTCTR(n) (0x40208 + 0x40 * (n)) n 424 sound/soc/uniphier/aio-reg.h #define CDA2D_CHMXCTRL1(n) (0x12000 + 0x80 * (n)) n 428 sound/soc/uniphier/aio-reg.h #define CDA2D_CHMXCTRL2(n) (0x12004 + 0x80 * (n)) n 429 sound/soc/uniphier/aio-reg.h #define CDA2D_CHMXSRCAMODE(n) (0x12020 + 0x80 * (n)) n 430 sound/soc/uniphier/aio-reg.h #define CDA2D_CHMXDSTAMODE(n) (0x12024 + 0x80 * (n)) n 443 sound/soc/uniphier/aio-reg.h #define CDA2D_CHMXSRCSTRTADRS(n) (0x12030 + 0x80 * (n)) n 444 sound/soc/uniphier/aio-reg.h #define CDA2D_CHMXSRCSTRTADRSU(n) (0x12034 + 0x80 * (n)) n 445 sound/soc/uniphier/aio-reg.h #define CDA2D_CHMXDSTSTRTADRS(n) (0x12038 + 0x80 * (n)) n 446 sound/soc/uniphier/aio-reg.h #define CDA2D_CHMXDSTSTRTADRSU(n) (0x1203c + 0x80 * (n)) n 458 sound/soc/uniphier/aio-reg.h #define CDA2D_RBMXBGNADRS(n) (0x14000 + 0x80 * (n)) n 459 sound/soc/uniphier/aio-reg.h #define CDA2D_RBMXBGNADRSU(n) (0x14004 + 0x80 * (n)) n 460 sound/soc/uniphier/aio-reg.h #define CDA2D_RBMXENDADRS(n) (0x14008 + 0x80 * (n)) n 461 sound/soc/uniphier/aio-reg.h #define CDA2D_RBMXENDADRSU(n) (0x1400c + 0x80 * (n)) n 462 sound/soc/uniphier/aio-reg.h #define CDA2D_RBMXBTH(n) (0x14038 + 0x80 * (n)) n 463 sound/soc/uniphier/aio-reg.h #define 
CDA2D_RBMXRTH(n) (0x1403c + 0x80 * (n)) n 464 sound/soc/uniphier/aio-reg.h #define CDA2D_RBMXRDPTR(n) (0x14020 + 0x80 * (n)) n 465 sound/soc/uniphier/aio-reg.h #define CDA2D_RBMXRDPTRU(n) (0x14024 + 0x80 * (n)) n 466 sound/soc/uniphier/aio-reg.h #define CDA2D_RBMXWRPTR(n) (0x14028 + 0x80 * (n)) n 467 sound/soc/uniphier/aio-reg.h #define CDA2D_RBMXWRPTRU(n) (0x1402c + 0x80 * (n)) n 469 sound/soc/uniphier/aio-reg.h #define CDA2D_RBMXCNFG(n) (0x14030 + 0x80 * (n)) n 470 sound/soc/uniphier/aio-reg.h #define CDA2D_RBMXIR(n) (0x14014 + 0x80 * (n)) n 471 sound/soc/uniphier/aio-reg.h #define CDA2D_RBMXIE(n) (0x14018 + 0x80 * (n)) n 472 sound/soc/uniphier/aio-reg.h #define CDA2D_RBMXID(n) (0x1401c + 0x80 * (n)) n 19 sound/soc/uniphier/evea.c #define AADCPOW(n) (0x0078 + 0x04 * (n)) n 35 sound/soc/uniphier/evea.c #define ADACSEQ1(n) (0x0144 + 0x40 * (n)) n 37 sound/soc/uniphier/evea.c #define ADACSEQ2(n) (0x0160 + 0x40 * (n)) n 154 sound/soc/ux500/ux500_msp_i2s.h #define MSP_RX_CLKPOL_BIT(n) ((n & RCKPOL_MASK) << RCKPOL_SHIFT) n 155 sound/soc/ux500/ux500_msp_i2s.h #define MSP_TX_CLKPOL_BIT(n) ((n & TCKPOL_MASK) << TCKPOL_SHIFT) n 183 sound/soc/ux500/ux500_msp_i2s.h #define MSP_P1_ELEM_LEN_BITS(n) (n & P1ELEN_MASK) n 184 sound/soc/ux500/ux500_msp_i2s.h #define MSP_P2_ELEM_LEN_BITS(n) (((n) << P2ELEN_SHIFT) & P2ELEN_MASK) n 185 sound/soc/ux500/ux500_msp_i2s.h #define MSP_P1_FRAME_LEN_BITS(n) (((n) << P1FLEN_SHIFT) & P1FLEN_MASK) n 186 sound/soc/ux500/ux500_msp_i2s.h #define MSP_P2_FRAME_LEN_BITS(n) (((n) << P2FLEN_SHIFT) & P2FLEN_MASK) n 187 sound/soc/ux500/ux500_msp_i2s.h #define MSP_DATA_DELAY_BITS(n) (((n) << DDLY_SHIFT) & DDLY_MASK) n 188 sound/soc/ux500/ux500_msp_i2s.h #define MSP_DATA_TYPE_BITS(n) (((n) << DTYP_SHIFT) & DTYP_MASK) n 189 sound/soc/ux500/ux500_msp_i2s.h #define MSP_P2_START_MODE_BIT(n) ((n << P2SM_SHIFT) & P2SM_MASK) n 190 sound/soc/ux500/ux500_msp_i2s.h #define MSP_P2_ENABLE_BIT(n) ((n << P2EN_SHIFT) & P2EN_MASK) n 191 sound/soc/ux500/ux500_msp_i2s.h #define MSP_SET_ENDIANNES_BIT(n) ((n << ENDN_SHIFT) & ENDN_MASK) n 192 sound/soc/ux500/ux500_msp_i2s.h #define MSP_FSYNC_POL(n) ((n << TFSPOL_SHIFT) & TFSPOL_MASK) n 193 sound/soc/ux500/ux500_msp_i2s.h #define MSP_DATA_WORD_SWAP(n) ((n << TBSWAP_SHIFT) & TBSWAP_MASK) n 194 sound/soc/ux500/ux500_msp_i2s.h #define MSP_SET_COMPANDING_MODE(n) ((n << DTYP_SHIFT) & \ n 196 sound/soc/ux500/ux500_msp_i2s.h #define MSP_SET_FSYNC_IGNORE(n) ((n << FSYNC_SHIFT) & FSYNC_MASK) n 226 sound/soc/ux500/ux500_msp_i2s.h #define FRAME_WIDTH_BITS(n) (((n) << FRWID_SHIFT) & 0x0000FC00) n 227 sound/soc/ux500/ux500_msp_i2s.h #define FRAME_PERIOD_BITS(n) (((n) << FRPER_SHIFT) & 0x1FFF0000) n 164 sound/sound_core.c int n=low; n 168 sound/sound_core.c while (*list && (*list)->unit_minor<n) n 171 sound/sound_core.c while(n<top) n 174 sound/sound_core.c if(*list==NULL || (*list)->unit_minor>n) n 177 sound/sound_core.c n+=SOUND_STEP; n 180 sound/sound_core.c if(n>=top) n 183 sound/sound_core.c n = low+(index*16); n 185 sound/sound_core.c if ((*list)->unit_minor==n) n 187 sound/sound_core.c if ((*list)->unit_minor>n) n 197 sound/sound_core.c s->unit_minor=n; n 208 sound/sound_core.c return n; n 762 sound/sparc/dbri.c int n; n 769 sound/sparc/dbri.c for (n = 0; n < DBRI_NO_PIPES; n++) n 770 sound/sparc/dbri.c dbri->pipes[n].desc = dbri->pipes[n].first_desc = -1; n 454 sound/usb/caiaq/audio.c int c, n, sz = 0; n 465 sound/usb/caiaq/audio.c for (n = 0; n < BYTES_PER_SAMPLE; n++) { n 466 sound/usb/caiaq/audio.c audio_buf[cdev->audio_in_buf_pos[stream]++] = 
usb_buf[i+n]; n 571 sound/usb/caiaq/audio.c int c, n, sz = 0; n 580 sound/usb/caiaq/audio.c for (n = 0; n < BYTES_PER_SAMPLE; n++) { n 582 sound/usb/caiaq/audio.c usb_buf[i+n] = audio_buf[cdev->audio_out_buf_pos[stream]++]; n 587 sound/usb/caiaq/audio.c usb_buf[i+n] = 0; n 339 sound/usb/card.c struct snd_usb_endpoint *ep, *n; n 341 sound/usb/card.c list_for_each_entry_safe(ep, n, &chip->ep_list, list) n 247 sound/usb/hiface/pcm.c static void memcpy_swahw32(u8 *dest, u8 *src, unsigned int n) n 251 sound/usb/hiface/pcm.c for (i = 0; i < n / 4; i++) n 175 sound/usb/line6/playback.c int n; n 178 sound/usb/line6/playback.c n = line6pcm->out.count / frame_factor; n 179 sound/usb/line6/playback.c line6pcm->out.count -= n * frame_factor; n 180 sound/usb/line6/playback.c fsize = n; n 413 sound/usb/line6/pod.c #define LINE6_IF_NUM(prod, n) USB_DEVICE_INTERFACE_NUMBER(0x0e41, prod, n) n 313 sound/usb/line6/podhd.c #define LINE6_IF_NUM(prod, n) USB_DEVICE_INTERFACE_NUMBER(0x0e41, prod, n) n 473 sound/usb/line6/toneport.c #define LINE6_IF_NUM(prod, n) USB_DEVICE_INTERFACE_NUMBER(0x0e41, prod, n) n 187 sound/usb/line6/variax.c #define LINE6_IF_NUM(prod, n) USB_DEVICE_INTERFACE_NUMBER(0x0e41, prod, n) n 44 sound/usb/stream.c struct audioformat *fp, *n; n 48 sound/usb/stream.c list_for_each_entry_safe(fp, n, &subs->fmt_list, list) n 205 sound/usb/usx2y/usbusx2y.c int n = us428ctls->CtlSnapShotLast + 1; n 206 sound/usb/usx2y/usbusx2y.c if (n >= N_us428_ctl_BUFS || n < 0) n 207 sound/usb/usx2y/usbusx2y.c n = 0; n 208 sound/usb/usx2y/usbusx2y.c memcpy(us428ctls->CtlSnapShot + n, usX2Y->In04Buf, sizeof(us428ctls->CtlSnapShot[0])); n 209 sound/usb/usx2y/usbusx2y.c us428ctls->CtlSnapShotDiffersAt[n] = diff; n 210 sound/usb/usx2y/usbusx2y.c us428ctls->CtlSnapShotLast = n; n 68 tools/accounting/getdelays.c struct nlmsghdr n; n 130 tools/accounting/getdelays.c msg.n.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN); n 131 tools/accounting/getdelays.c msg.n.nlmsg_type = nlmsg_type; n 132 tools/accounting/getdelays.c msg.n.nlmsg_flags = NLM_F_REQUEST; n 133 tools/accounting/getdelays.c msg.n.nlmsg_seq = 0; n 134 tools/accounting/getdelays.c msg.n.nlmsg_pid = nlmsg_pid; n 141 tools/accounting/getdelays.c msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len); n 144 tools/accounting/getdelays.c buflen = msg.n.nlmsg_len ; n 166 tools/accounting/getdelays.c struct nlmsghdr n; n 183 tools/accounting/getdelays.c if (ans.n.nlmsg_type == NLMSG_ERROR || n 184 tools/accounting/getdelays.c (rep_len < 0) || !NLMSG_OK((&ans.n), rep_len)) n 460 tools/accounting/getdelays.c if (msg.n.nlmsg_type == NLMSG_ERROR || n 461 tools/accounting/getdelays.c !NLMSG_OK((&msg.n), rep_len)) { n 469 tools/accounting/getdelays.c sizeof(struct nlmsghdr), msg.n.nlmsg_len, rep_len); n 472 tools/accounting/getdelays.c rep_len = GENLMSG_PAYLOAD(&msg.n); n 162 tools/arch/arm/include/uapi/asm/kvm.h #define ARM_CP15_REG_SHIFT_MASK(x,n) \ n 163 tools/arch/arm/include/uapi/asm/kvm.h (((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK) n 301 tools/arch/arm/include/uapi/asm/kvm.h #define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n)) n 203 tools/arch/arm64/include/uapi/asm/kvm.h #define ARM64_SYS_REG_SHIFT_MASK(x,n) \ n 204 tools/arch/arm64/include/uapi/asm/kvm.h (((x) << KVM_REG_ARM64_SYSREG_ ## n ## _SHIFT) & \ n 205 tools/arch/arm64/include/uapi/asm/kvm.h KVM_REG_ARM64_SYSREG_ ## n ## _MASK) n 256 tools/arch/arm64/include/uapi/asm/kvm.h #define KVM_REG_ARM64_SVE_ZREG(n, i) \ n 259 tools/arch/arm64/include/uapi/asm/kvm.h (((n) & (KVM_ARM64_SVE_NUM_ZREGS - 1)) << 5) | \ n 
262 tools/arch/arm64/include/uapi/asm/kvm.h #define KVM_REG_ARM64_SVE_PREG(n, i) \ n 265 tools/arch/arm64/include/uapi/asm/kvm.h (((n) & (KVM_ARM64_SVE_NUM_PREGS - 1)) << 5) | \ n 360 tools/arch/arm64/include/uapi/asm/kvm.h #define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n)) n 166 tools/arch/mips/include/uapi/asm/kvm.h #define KVM_REG_MIPS_FPR_32(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U32 | (n)) n 167 tools/arch/mips/include/uapi/asm/kvm.h #define KVM_REG_MIPS_FPR_64(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U64 | (n)) n 168 tools/arch/mips/include/uapi/asm/kvm.h #define KVM_REG_MIPS_VEC_128(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U128 | (n)) n 531 tools/arch/powerpc/include/uapi/asm/kvm.h #define KVM_REG_PPC_FPR(n) (KVM_REG_PPC_FPR0 + (n)) n 536 tools/arch/powerpc/include/uapi/asm/kvm.h #define KVM_REG_PPC_VR(n) (KVM_REG_PPC_VR0 + (n)) n 542 tools/arch/powerpc/include/uapi/asm/kvm.h #define KVM_REG_PPC_VSR(n) (KVM_REG_PPC_VSR0 + (n)) n 649 tools/arch/powerpc/include/uapi/asm/kvm.h #define KVM_REG_PPC_TM_GPR(n) (KVM_REG_PPC_TM_GPR0 + (n)) n 653 tools/arch/powerpc/include/uapi/asm/kvm.h #define KVM_REG_PPC_TM_VSR(n) (KVM_REG_PPC_TM_VSR0 + (n)) n 30 tools/arch/x86/lib/inat.c int n; n 32 tools/arch/x86/lib/inat.c n = inat_escape_id(esc_attr); n 34 tools/arch/x86/lib/inat.c table = inat_escape_tables[n][0]; n 38 tools/arch/x86/lib/inat.c table = inat_escape_tables[n][lpfx_id]; n 49 tools/arch/x86/lib/inat.c int n; n 51 tools/arch/x86/lib/inat.c n = inat_group_id(grp_attr); n 53 tools/arch/x86/lib/inat.c table = inat_group_tables[n][0]; n 57 tools/arch/x86/lib/inat.c table = inat_group_tables[n][lpfx_id]; n 17 tools/arch/x86/lib/insn.c #define validate_next(t, insn, n) \ n 18 tools/arch/x86/lib/insn.c ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr) n 23 tools/arch/x86/lib/insn.c #define __peek_nbyte_next(t, insn, n) \ n 24 tools/arch/x86/lib/insn.c ({ t r = *(t*)((insn)->next_byte + n); r; }) n 29 tools/arch/x86/lib/insn.c #define peek_nbyte_next(t, insn, n) \ n 30 tools/arch/x86/lib/insn.c ({ if (unlikely(!validate_next(t, insn, n))) goto err_out; __peek_nbyte_next(t, insn, n); }) n 708 tools/bpf/bpftool/btf.c int n; n 713 tools/bpf/bpftool/btf.c n = 0; n 716 tools/bpf/bpftool/btf.c printf("%s%u", n++ == 0 ? " prog_ids " : ",", n 720 tools/bpf/bpftool/btf.c n = 0; n 723 tools/bpf/bpftool/btf.c printf("%s%u", n++ == 0 ? 
" map_ids " : ",", n 272 tools/bpf/bpftool/common.c ssize_t n; n 276 tools/bpf/bpftool/common.c n = readlink(path, buf, sizeof(buf)); n 277 tools/bpf/bpftool/common.c if (n < 0) { n 281 tools/bpf/bpftool/common.c if (n == sizeof(path)) { n 299 tools/bpf/bpftool/common.c ssize_t n; n 308 tools/bpf/bpftool/common.c while ((n = getline(&line, &line_n, fdi)) > 0) { n 288 tools/bpf/bpftool/feature.c static bool read_next_kernel_config_option(gzFile file, char *buf, size_t n, n 293 tools/bpf/bpftool/feature.c while (gzgets(file, buf, n)) { n 149 tools/bpf/bpftool/main.c void fprint_hex(FILE *f, void *arg, unsigned int n, const char *sep) n 154 tools/bpf/bpftool/main.c for (i = 0; i < n; i++) { n 106 tools/bpf/bpftool/main.h void fprint_hex(FILE *f, void *arg, unsigned int n, const char *sep); n 170 tools/bpf/bpftool/map.c unsigned int i, n, step; n 174 tools/bpf/bpftool/map.c n = get_possible_cpus(); n 176 tools/bpf/bpftool/map.c for (i = 0; i < n; i++) { n 228 tools/bpf/bpftool/map.c unsigned int i, n, step; n 230 tools/bpf/bpftool/map.c n = get_possible_cpus(); n 238 tools/bpf/bpftool/map.c for (i = 0; i < n; i++) { n 308 tools/bpf/bpftool/map.c unsigned int i, n, step; n 310 tools/bpf/bpftool/map.c n = get_possible_cpus(); n 319 tools/bpf/bpftool/map.c for (i = 0; i < n; i++) { n 331 tools/bpf/bpftool/map.c unsigned int n) n 341 tools/bpf/bpftool/map.c while (i < n && argv[i]) { n 350 tools/bpf/bpftool/map.c if (i != n) { n 351 tools/bpf/bpftool/map.c p_err("%s expected %d bytes got %d", name, n, i); n 361 tools/bpf/bpftool/map.c unsigned int i, n, step; n 366 tools/bpf/bpftool/map.c n = get_possible_cpus(); n 368 tools/bpf/bpftool/map.c for (i = 1; i < n; i++) n 429 tools/bpf/bpftool/prog.c ssize_t n; n 532 tools/bpf/bpftool/prog.c n = write(fd, buf, member_len); n 534 tools/bpf/bpftool/prog.c if (n != member_len) { n 536 tools/bpf/bpftool/prog.c n < 0 ? 
strerror(errno) : "short write"); n 38 tools/firmware/ihex2fw.c static uint8_t nybble(const uint8_t n) n 40 tools/firmware/ihex2fw.c if (n >= '0' && n <= '9') return n - '0'; n 41 tools/firmware/ihex2fw.c else if (n >= 'A' && n <= 'F') return n - ('A' - 10); n 42 tools/firmware/ihex2fw.c else if (n >= 'a' && n <= 'f') return n - ('a' - 10); n 30 tools/include/linux/jhash.h #define jhash_size(n) ((u32)1<<(n)) n 32 tools/include/linux/jhash.h #define jhash_mask(n) (jhash_size(n)-1) n 16 tools/include/linux/kernel.h #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) n 425 tools/include/linux/list.h #define list_for_each_safe(pos, n, head) \ n 426 tools/include/linux/list.h for (pos = (head)->next, n = pos->next; pos != (head); \ n 427 tools/include/linux/list.h pos = n, n = pos->next) n 435 tools/include/linux/list.h #define list_for_each_prev_safe(pos, n, head) \ n 436 tools/include/linux/list.h for (pos = (head)->prev, n = pos->prev; \ n 438 tools/include/linux/list.h pos = n, n = pos->prev) n 520 tools/include/linux/list.h #define list_for_each_entry_safe(pos, n, head, member) \ n 522 tools/include/linux/list.h n = list_next_entry(pos, member); \ n 524 tools/include/linux/list.h pos = n, n = list_next_entry(n, member)) n 536 tools/include/linux/list.h #define list_for_each_entry_safe_continue(pos, n, head, member) \ n 538 tools/include/linux/list.h n = list_next_entry(pos, member); \ n 540 tools/include/linux/list.h pos = n, n = list_next_entry(n, member)) n 552 tools/include/linux/list.h #define list_for_each_entry_safe_from(pos, n, head, member) \ n 553 tools/include/linux/list.h for (n = list_next_entry(pos, member); \ n 555 tools/include/linux/list.h pos = n, n = list_next_entry(n, member)) n 567 tools/include/linux/list.h #define list_for_each_entry_safe_reverse(pos, n, head, member) \ n 569 tools/include/linux/list.h n = list_prev_entry(pos, member); \ n 571 tools/include/linux/list.h pos = n, n = list_prev_entry(n, member)) n 585 tools/include/linux/list.h #define list_safe_reset_next(pos, n, member) \ n 586 tools/include/linux/list.h n = list_next_entry(pos, member) n 614 tools/include/linux/list.h static inline void __hlist_del(struct hlist_node *n) n 616 tools/include/linux/list.h struct hlist_node *next = n->next; n 617 tools/include/linux/list.h struct hlist_node **pprev = n->pprev; n 624 tools/include/linux/list.h static inline void hlist_del(struct hlist_node *n) n 626 tools/include/linux/list.h __hlist_del(n); n 627 tools/include/linux/list.h n->next = LIST_POISON1; n 628 tools/include/linux/list.h n->pprev = LIST_POISON2; n 631 tools/include/linux/list.h static inline void hlist_del_init(struct hlist_node *n) n 633 tools/include/linux/list.h if (!hlist_unhashed(n)) { n 634 tools/include/linux/list.h __hlist_del(n); n 635 tools/include/linux/list.h INIT_HLIST_NODE(n); n 639 tools/include/linux/list.h static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) n 642 tools/include/linux/list.h n->next = first; n 644 tools/include/linux/list.h first->pprev = &n->next; n 645 tools/include/linux/list.h h->first = n; n 646 tools/include/linux/list.h n->pprev = &h->first; n 650 tools/include/linux/list.h static inline void hlist_add_before(struct hlist_node *n, n 653 tools/include/linux/list.h n->pprev = next->pprev; n 654 tools/include/linux/list.h n->next = next; n 655 tools/include/linux/list.h next->pprev = &n->next; n 656 tools/include/linux/list.h *(n->pprev) = n; n 659 tools/include/linux/list.h static inline void hlist_add_behind(struct hlist_node *n, n 662 
tools/include/linux/list.h n->next = prev->next; n 663 tools/include/linux/list.h prev->next = n; n 664 tools/include/linux/list.h n->pprev = &prev->next; n 666 tools/include/linux/list.h if (n->next) n 667 tools/include/linux/list.h n->next->pprev = &n->next; n 671 tools/include/linux/list.h static inline void hlist_add_fake(struct hlist_node *n) n 673 tools/include/linux/list.h n->pprev = &n->next; n 699 tools/include/linux/list.h #define hlist_for_each_safe(pos, n, head) \ n 700 tools/include/linux/list.h for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ n 701 tools/include/linux/list.h pos = n) n 745 tools/include/linux/list.h #define hlist_for_each_entry_safe(pos, n, head, member) \ n 747 tools/include/linux/list.h pos && ({ n = pos->member.next; 1; }); \ n 748 tools/include/linux/list.h pos = hlist_entry_safe(n, typeof(*pos), member)) n 21 tools/include/linux/log2.h int __ilog2_u32(u32 n) n 23 tools/include/linux/log2.h return fls(n) - 1; n 27 tools/include/linux/log2.h int __ilog2_u64(u64 n) n 29 tools/include/linux/log2.h return fls64(n) - 1; n 38 tools/include/linux/log2.h bool is_power_of_2(unsigned long n) n 40 tools/include/linux/log2.h return (n != 0 && ((n & (n - 1)) == 0)); n 47 tools/include/linux/log2.h unsigned long __roundup_pow_of_two(unsigned long n) n 49 tools/include/linux/log2.h return 1UL << fls_long(n - 1); n 56 tools/include/linux/log2.h unsigned long __rounddown_pow_of_two(unsigned long n) n 58 tools/include/linux/log2.h return 1UL << (fls_long(n) - 1); n 71 tools/include/linux/log2.h #define ilog2(n) \ n 73 tools/include/linux/log2.h __builtin_constant_p(n) ? ( \ n 74 tools/include/linux/log2.h (n) < 2 ? 0 : \ n 75 tools/include/linux/log2.h (n) & (1ULL << 63) ? 63 : \ n 76 tools/include/linux/log2.h (n) & (1ULL << 62) ? 62 : \ n 77 tools/include/linux/log2.h (n) & (1ULL << 61) ? 61 : \ n 78 tools/include/linux/log2.h (n) & (1ULL << 60) ? 60 : \ n 79 tools/include/linux/log2.h (n) & (1ULL << 59) ? 59 : \ n 80 tools/include/linux/log2.h (n) & (1ULL << 58) ? 58 : \ n 81 tools/include/linux/log2.h (n) & (1ULL << 57) ? 57 : \ n 82 tools/include/linux/log2.h (n) & (1ULL << 56) ? 56 : \ n 83 tools/include/linux/log2.h (n) & (1ULL << 55) ? 55 : \ n 84 tools/include/linux/log2.h (n) & (1ULL << 54) ? 54 : \ n 85 tools/include/linux/log2.h (n) & (1ULL << 53) ? 53 : \ n 86 tools/include/linux/log2.h (n) & (1ULL << 52) ? 52 : \ n 87 tools/include/linux/log2.h (n) & (1ULL << 51) ? 51 : \ n 88 tools/include/linux/log2.h (n) & (1ULL << 50) ? 50 : \ n 89 tools/include/linux/log2.h (n) & (1ULL << 49) ? 49 : \ n 90 tools/include/linux/log2.h (n) & (1ULL << 48) ? 48 : \ n 91 tools/include/linux/log2.h (n) & (1ULL << 47) ? 47 : \ n 92 tools/include/linux/log2.h (n) & (1ULL << 46) ? 46 : \ n 93 tools/include/linux/log2.h (n) & (1ULL << 45) ? 45 : \ n 94 tools/include/linux/log2.h (n) & (1ULL << 44) ? 44 : \ n 95 tools/include/linux/log2.h (n) & (1ULL << 43) ? 43 : \ n 96 tools/include/linux/log2.h (n) & (1ULL << 42) ? 42 : \ n 97 tools/include/linux/log2.h (n) & (1ULL << 41) ? 41 : \ n 98 tools/include/linux/log2.h (n) & (1ULL << 40) ? 40 : \ n 99 tools/include/linux/log2.h (n) & (1ULL << 39) ? 39 : \ n 100 tools/include/linux/log2.h (n) & (1ULL << 38) ? 38 : \ n 101 tools/include/linux/log2.h (n) & (1ULL << 37) ? 37 : \ n 102 tools/include/linux/log2.h (n) & (1ULL << 36) ? 36 : \ n 103 tools/include/linux/log2.h (n) & (1ULL << 35) ? 35 : \ n 104 tools/include/linux/log2.h (n) & (1ULL << 34) ? 34 : \ n 105 tools/include/linux/log2.h (n) & (1ULL << 33) ? 
33 : \ n 106 tools/include/linux/log2.h (n) & (1ULL << 32) ? 32 : \ n 107 tools/include/linux/log2.h (n) & (1ULL << 31) ? 31 : \ n 108 tools/include/linux/log2.h (n) & (1ULL << 30) ? 30 : \ n 109 tools/include/linux/log2.h (n) & (1ULL << 29) ? 29 : \ n 110 tools/include/linux/log2.h (n) & (1ULL << 28) ? 28 : \ n 111 tools/include/linux/log2.h (n) & (1ULL << 27) ? 27 : \ n 112 tools/include/linux/log2.h (n) & (1ULL << 26) ? 26 : \ n 113 tools/include/linux/log2.h (n) & (1ULL << 25) ? 25 : \ n 114 tools/include/linux/log2.h (n) & (1ULL << 24) ? 24 : \ n 115 tools/include/linux/log2.h (n) & (1ULL << 23) ? 23 : \ n 116 tools/include/linux/log2.h (n) & (1ULL << 22) ? 22 : \ n 117 tools/include/linux/log2.h (n) & (1ULL << 21) ? 21 : \ n 118 tools/include/linux/log2.h (n) & (1ULL << 20) ? 20 : \ n 119 tools/include/linux/log2.h (n) & (1ULL << 19) ? 19 : \ n 120 tools/include/linux/log2.h (n) & (1ULL << 18) ? 18 : \ n 121 tools/include/linux/log2.h (n) & (1ULL << 17) ? 17 : \ n 122 tools/include/linux/log2.h (n) & (1ULL << 16) ? 16 : \ n 123 tools/include/linux/log2.h (n) & (1ULL << 15) ? 15 : \ n 124 tools/include/linux/log2.h (n) & (1ULL << 14) ? 14 : \ n 125 tools/include/linux/log2.h (n) & (1ULL << 13) ? 13 : \ n 126 tools/include/linux/log2.h (n) & (1ULL << 12) ? 12 : \ n 127 tools/include/linux/log2.h (n) & (1ULL << 11) ? 11 : \ n 128 tools/include/linux/log2.h (n) & (1ULL << 10) ? 10 : \ n 129 tools/include/linux/log2.h (n) & (1ULL << 9) ? 9 : \ n 130 tools/include/linux/log2.h (n) & (1ULL << 8) ? 8 : \ n 131 tools/include/linux/log2.h (n) & (1ULL << 7) ? 7 : \ n 132 tools/include/linux/log2.h (n) & (1ULL << 6) ? 6 : \ n 133 tools/include/linux/log2.h (n) & (1ULL << 5) ? 5 : \ n 134 tools/include/linux/log2.h (n) & (1ULL << 4) ? 4 : \ n 135 tools/include/linux/log2.h (n) & (1ULL << 3) ? 3 : \ n 136 tools/include/linux/log2.h (n) & (1ULL << 2) ? 2 : \ n 138 tools/include/linux/log2.h (sizeof(n) <= 4) ? \ n 139 tools/include/linux/log2.h __ilog2_u32(n) : \ n 140 tools/include/linux/log2.h __ilog2_u64(n) \ n 151 tools/include/linux/log2.h #define roundup_pow_of_two(n) \ n 153 tools/include/linux/log2.h __builtin_constant_p(n) ? ( \ n 154 tools/include/linux/log2.h (n == 1) ? 1 : \ n 155 tools/include/linux/log2.h (1UL << (ilog2((n) - 1) + 1)) \ n 157 tools/include/linux/log2.h __roundup_pow_of_two(n) \ n 168 tools/include/linux/log2.h #define rounddown_pow_of_two(n) \ n 170 tools/include/linux/log2.h __builtin_constant_p(n) ? 
( \ n 171 tools/include/linux/log2.h (1UL << ilog2(n))) : \ n 172 tools/include/linux/log2.h __rounddown_pow_of_two(n) \ n 250 tools/include/linux/overflow.h static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c) n 254 tools/include/linux/overflow.h if (check_mul_overflow(n, size, &bytes)) n 273 tools/include/linux/overflow.h #define struct_size(p, member, n) \ n 274 tools/include/linux/overflow.h __ab_c_size(n, \ n 97 tools/include/linux/rbtree.h #define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \ n 99 tools/include/linux/rbtree.h pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \ n 101 tools/include/linux/rbtree.h pos = n) n 103 tools/include/linux/rbtree.h static inline void rb_erase_init(struct rb_node *n, struct rb_root *root) n 105 tools/include/linux/rbtree.h rb_erase(n, root); n 106 tools/include/linux/rbtree.h RB_CLEAR_NODE(n); n 56 tools/include/linux/refcount.h #define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), } n 58 tools/include/linux/refcount.h static inline void refcount_set(refcount_t *r, unsigned int n) n 60 tools/include/linux/refcount.h atomic_set(&r->refs, n); n 1653 tools/include/nolibc/nolibc.h unsigned long n; n 1656 tools/include/nolibc/nolibc.h } arg = { .n = nfds, .r = rfds, .w = wfds, .e = efds, .t = timeout }; n 2307 tools/include/nolibc/nolibc.h int memcmp(const void *s1, const void *s2, size_t n) n 2312 tools/include/nolibc/nolibc.h while (ofs < n && !(c1 = ((char *)s1)[ofs] - ((char *)s2)[ofs])) { n 2408 tools/include/nolibc/nolibc.h unsigned long n = neg ? -in : in; n 2412 tools/include/nolibc/nolibc.h *pos-- = '0' + n % 10; n 2413 tools/include/nolibc/nolibc.h n /= 10; n 2416 tools/include/nolibc/nolibc.h } while (n); n 179 tools/include/uapi/linux/if_link.h #define IFLA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifinfomsg)) n 1164 tools/include/uapi/linux/kvm.h __u64 n; /* number of regs */ n 364 tools/lib/api/fs/fs.c int fd, n, err = 0; n 383 tools/lib/api/fs/fs.c n = read(fd, bf + size, alloc_size - size); n 384 tools/lib/api/fs/fs.c if (n < 0) { n 395 tools/lib/api/fs/fs.c size += n; n 396 tools/lib/api/fs/fs.c } while (n > 0); n 251 tools/lib/bpf/btf_dump.c int i, j, n = btf__get_nr_types(d->btf); n 255 tools/lib/bpf/btf_dump.c for (i = 1; i <= n; i++) { n 2484 tools/lib/bpf/libbpf.c size_t n = strlen(name); n 2487 tools/lib/bpf/libbpf.c for (i = n - 5; i >= 0; i--) { n 2491 tools/lib/bpf/libbpf.c return n; n 2515 tools/lib/bpf/libbpf.c int i, err, n; n 2530 tools/lib/bpf/libbpf.c n = btf__get_nr_types(targ_btf); n 2531 tools/lib/bpf/libbpf.c for (i = 1; i <= n; i++) { n 2641 tools/lib/bpf/libbpf.c int i, n, found; n 2654 tools/lib/bpf/libbpf.c n = btf_vlen(targ_type); n 2656 tools/lib/bpf/libbpf.c for (i = 0; i < n; i++, m++) { n 4401 tools/lib/bpf/libbpf.c int bpf_program__nth_fd(const struct bpf_program *prog, int n) n 4408 tools/lib/bpf/libbpf.c if (n >= prog->instances.nr || n < 0) { n 4410 tools/lib/bpf/libbpf.c n, prog->section_name, prog->instances.nr); n 4414 tools/lib/bpf/libbpf.c fd = prog->instances.fds[n]; n 4417 tools/lib/bpf/libbpf.c n, prog->section_name); n 5910 tools/lib/bpf/libbpf.c int err = 0, n, len, start, end = -1; n 5922 tools/lib/bpf/libbpf.c n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len); n 5923 tools/lib/bpf/libbpf.c if (n <= 0 || n > 2) { n 5924 tools/lib/bpf/libbpf.c pr_warning("Failed to get CPU range %s: %d\n", s, n); n 5927 tools/lib/bpf/libbpf.c } else if (n == 1) { n 5989 tools/lib/bpf/libbpf.c int err, n, i, tmp_cpus; n 5996 tools/lib/bpf/libbpf.c err = 
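The long constant ladder in the ilog2() entries above lets the compiler fold ilog2 of a compile-time constant; non-constant values fall through to the fls-based __ilog2_u32/__ilog2_u64 paths, and roundup/rounddown_pow_of_two are built on top of that. A standalone re-implementation of the run-time math using compiler builtins (the names and the 32-bit-only roundup are this sketch's simplifications, not the tools/ API):

#include <assert.h>
#include <stdio.h>

/* Floor log2 for a non-zero 32-bit value, in the spirit of __ilog2_u32(). */
static int ilog2_u32(unsigned int v)
{
        return 31 - __builtin_clz(v);           /* undefined for v == 0 */
}

static int is_pow2(unsigned long v)
{
        return v != 0 && (v & (v - 1)) == 0;
}

/* Round up to a power of two; valid for 1 <= v <= 1U << 31 in this sketch. */
static unsigned int roundup_p2_u32(unsigned int v)
{
        return v <= 1 ? 1 : 1U << (ilog2_u32(v - 1) + 1);
}

int main(void)
{
        assert(ilog2_u32(4096) == 12);
        assert(is_pow2(64) && !is_pow2(66));
        assert(roundup_p2_u32(33) == 64);
        puts("log2 helpers ok");
        return 0;
}

The compile-time ladder in the real header exists so that ilog2() of a constant can be used in constant expressions such as array sizes.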
parse_cpu_mask_file(fcpu, &mask, &n); n 6001 tools/lib/bpf/libbpf.c for (i = 0; i < n; i++) { n 245 tools/lib/bpf/libbpf.h typedef int (*bpf_program_prep_t)(struct bpf_program *prog, int n, n 252 tools/lib/bpf/libbpf.h LIBBPF_API int bpf_program__nth_fd(const struct bpf_program *prog, int n); n 25 tools/lib/lockdep/lockdep.c static struct new_utsname n = (struct new_utsname) { n 30 tools/lib/lockdep/lockdep.c return &n; n 464 tools/lib/rbtree.c struct rb_node *n; n 466 tools/lib/rbtree.c n = root->rb_node; n 467 tools/lib/rbtree.c if (!n) n 469 tools/lib/rbtree.c while (n->rb_left) n 470 tools/lib/rbtree.c n = n->rb_left; n 471 tools/lib/rbtree.c return n; n 476 tools/lib/rbtree.c struct rb_node *n; n 478 tools/lib/rbtree.c n = root->rb_node; n 479 tools/lib/rbtree.c if (!n) n 481 tools/lib/rbtree.c while (n->rb_right) n 482 tools/lib/rbtree.c n = n->rb_right; n 483 tools/lib/rbtree.c return n; n 123 tools/lib/subcmd/help.c unsigned int n = j * rows + i; n 126 tools/lib/subcmd/help.c if (n >= cmds->cnt) n 128 tools/lib/subcmd/help.c if (j == cols-1 || n + rows >= cmds->cnt) n 130 tools/lib/subcmd/help.c printf("%-*s", size, cmds->names[n]->name); n 10 tools/lib/subcmd/sigchain.c int n; n 26 tools/lib/subcmd/sigchain.c ALLOC_GROW(s->old, s->n + 1, s->alloc); n 27 tools/lib/subcmd/sigchain.c s->old[s->n] = signal(sig, f); n 28 tools/lib/subcmd/sigchain.c if (s->old[s->n] == SIG_ERR) n 30 tools/lib/subcmd/sigchain.c s->n++; n 38 tools/lib/subcmd/sigchain.c if (s->n < 1) n 41 tools/lib/subcmd/sigchain.c if (signal(sig, s->old[s->n - 1]) == SIG_ERR) n 43 tools/lib/subcmd/sigchain.c s->n--; n 36 tools/lib/symbol/kallsyms.c size_t n; n 51 tools/lib/symbol/kallsyms.c line_len = getline(&line, &n, file); n 5069 tools/lib/traceevent/event-parse.c int n; n 5071 tools/lib/traceevent/event-parse.c n = print_ip_arg(s, ptr, data, size, event, arg); n 5072 tools/lib/traceevent/event-parse.c if (n > 0) { n 5073 tools/lib/traceevent/event-parse.c ptr += n - 1; n 29 tools/lib/traceevent/trace-seq.c #define TRACE_SEQ_CHECK_RET_N(s, n) \ n 33 tools/lib/traceevent/trace-seq.c return n; \ n 54 tools/perf/arch/arm/util/dwarf-regs.c const char *get_arch_regstr(unsigned int n) n 58 tools/perf/arch/arm/util/dwarf-regs.c if (roff->dwarfnum == n) n 75 tools/perf/arch/arm64/util/dwarf-regs.c const char *get_arch_regstr(unsigned int n) n 79 tools/perf/arch/arm64/util/dwarf-regs.c if (roff->dwarfnum == n) n 46 tools/perf/arch/csky/util/dwarf-regs.c const char *get_arch_regstr(unsigned int n) n 48 tools/perf/arch/csky/util/dwarf-regs.c return (n < CSKY_MAX_REGS) ? csky_dwarf_regs_table[n] : NULL; n 84 tools/perf/arch/powerpc/util/dwarf-regs.c const char *get_arch_regstr(unsigned int n) n 88 tools/perf/arch/powerpc/util/dwarf-regs.c if (roff->dwarfnum == n) n 57 tools/perf/arch/powerpc/util/sym-handling.c unsigned int n) n 65 tools/perf/arch/powerpc/util/sym-handling.c return strncmp(namea, nameb, n); n 59 tools/perf/arch/riscv/util/dwarf-regs.c const char *get_arch_regstr(unsigned int n) n 61 tools/perf/arch/riscv/util/dwarf-regs.c return (n < RISCV_MAX_REGS) ? riscv_dwarf_regs_table[n].name : NULL; n 20 tools/perf/arch/s390/util/dwarf-regs.c const char *get_arch_regstr(unsigned int n) n 22 tools/perf/arch/s390/util/dwarf-regs.c return (n >= ARRAY_SIZE(s390_dwarf_regs)) ? NULL : s390_dwarf_regs[n]; n 38 tools/perf/arch/sh/util/dwarf-regs.c const char *get_arch_regstr(unsigned int n) n 40 tools/perf/arch/sh/util/dwarf-regs.c return (n < SH_MAX_REGS) ? 
sh_regs_table[n] : NULL; n 36 tools/perf/arch/sparc/util/dwarf-regs.c const char *get_arch_regstr(unsigned int n) n 38 tools/perf/arch/sparc/util/dwarf-regs.c return (n < SPARC_MAX_REGS) ? sparc_regs_table[n] : NULL; n 99 tools/perf/arch/x86/tests/rdpmc.c int n; n 132 tools/perf/arch/x86/tests/rdpmc.c for (n = 0; n < 6; n++) { n 144 tools/perf/arch/x86/tests/rdpmc.c pr_debug("%14d: %14Lu\n", n, (long long)delta); n 45 tools/perf/arch/x86/util/dwarf-regs.c # define REG_OFFSET_NAME_64(n, r) {.name = n, .offset = offsetof(struct pt_regs, r)} n 46 tools/perf/arch/x86/util/dwarf-regs.c # define REG_OFFSET_NAME_32(n, r) {.name = n, .offset = -1} n 48 tools/perf/arch/x86/util/dwarf-regs.c # define REG_OFFSET_NAME_64(n, r) {.name = n, .offset = -1} n 49 tools/perf/arch/x86/util/dwarf-regs.c # define REG_OFFSET_NAME_32(n, r) {.name = n, .offset = offsetof(struct pt_regs, r)} n 95 tools/perf/arch/x86/util/dwarf-regs.c const char *get_arch_regstr(unsigned int n) n 97 tools/perf/arch/x86/util/dwarf-regs.c return (n < ARCH_MAX_REGS) ? regoffset_table[n].name : NULL; n 12 tools/perf/arch/x86/util/group.c int n; n 14 tools/perf/arch/x86/util/group.c if (sysctl__read_int("kernel/nmi_watchdog", &n) < 0) n 16 tools/perf/arch/x86/util/group.c if (n > 0) { n 307 tools/perf/arch/x86/util/intel-pt.c static void intel_pt_tsc_ctc_ratio(u32 *n, u32 *d) n 312 tools/perf/arch/x86/util/intel-pt.c *n = ebx; n 59 tools/perf/arch/x86/util/perf_regs.c #define SDT_NAME_REG(n, m) {.sdt_name = "%" #n, .uprobe_name = "%" #m} n 18 tools/perf/arch/xtensa/util/dwarf-regs.c const char *get_arch_regstr(unsigned int n) n 20 tools/perf/arch/xtensa/util/dwarf-regs.c return n < XTENSA_MAX_REGS ? xtensa_regs_table[n] : NULL; n 158 tools/perf/bench/epoll-wait.c static void shuffle(void *array, size_t n, size_t size) n 164 tools/perf/bench/epoll-wait.c if (n <= 1) n 171 tools/perf/bench/epoll-wait.c for (i = 1; i < n; ++i) { n 172 tools/perf/bench/epoll-wait.c size_t j = i + rand() / (RAND_MAX / (n - i) + 1); n 879 tools/perf/bench/numa.c int n, t; n 898 tools/perf/bench/numa.c for (n = 0; n < MAX_NR_NODES; n++) n 899 tools/perf/bench/numa.c nodes += node_present[n]; n 920 tools/perf/bench/numa.c int n; n 925 tools/perf/bench/numa.c n = numa_node_of_cpu(td->curr_cpu); n 926 tools/perf/bench/numa.c if (n == node) { n 2031 tools/perf/builtin-c2c.c struct numa_node *n; n 2042 tools/perf/builtin-c2c.c n = session->header.env.numa_nodes; n 2043 tools/perf/builtin-c2c.c if (!n) n 2062 tools/perf/builtin-c2c.c struct perf_cpu_map *map = n[node].map; n 375 tools/perf/builtin-ftrace.c int n = read(trace_fd, buf, sizeof(buf)); n 376 tools/perf/builtin-ftrace.c if (n < 0) n 378 tools/perf/builtin-ftrace.c if (fwrite(buf, n, 1, stdout) != 1) n 387 tools/perf/builtin-ftrace.c int n = read(trace_fd, buf, sizeof(buf)); n 388 tools/perf/builtin-ftrace.c if (n <= 0) n 390 tools/perf/builtin-ftrace.c if (fwrite(buf, n, 1, stdout) != 1) n 259 tools/perf/builtin-inject.c u64 n = 0; n 266 tools/perf/builtin-inject.c event->mmap.filename, sample->pid, &n); n 270 tools/perf/builtin-inject.c inject->bytes_written += n; n 297 tools/perf/builtin-inject.c u64 n = 0; n 304 tools/perf/builtin-inject.c event->mmap2.filename, sample->pid, &n); n 308 tools/perf/builtin-inject.c inject->bytes_written += n; n 478 tools/perf/builtin-kvm.c COMPARE_EVENT_KEY(count, stats.n); n 757 tools/perf/builtin-kvm.c s64 n = 0; n 787 tools/perf/builtin-kvm.c if (n == 0) n 791 tools/perf/builtin-kvm.c n++; n 792 tools/perf/builtin-kvm.c if (n == PERF_KVM__MAX_EVENTS_PER_MMAP) n 797 
tools/perf/builtin-kvm.c return n; n 803 tools/perf/builtin-kvm.c s64 n, ntotal = 0; n 807 tools/perf/builtin-kvm.c n = perf_kvm__mmap_read_idx(kvm, i, &mmap_time); n 808 tools/perf/builtin-kvm.c if (n < 0) n 821 tools/perf/builtin-kvm.c ntotal += n; n 822 tools/perf/builtin-kvm.c if (n == PERF_KVM__MAX_EVENTS_PER_MMAP) n 324 tools/perf/builtin-mem.c #define MEM_OPT(n, m) \ n 325 tools/perf/builtin-mem.c { .name = n, .mode = (m) } n 1145 tools/perf/builtin-record.c int n = rec->switch_output.cur_file + 1; n 1147 tools/perf/builtin-record.c if (n >= rec->switch_output.num_files) n 1148 tools/perf/builtin-record.c n = 0; n 1149 tools/perf/builtin-record.c rec->switch_output.cur_file = n; n 1150 tools/perf/builtin-record.c if (rec->switch_output.filenames[n]) { n 1151 tools/perf/builtin-record.c remove(rec->switch_output.filenames[n]); n 1152 tools/perf/builtin-record.c zfree(&rec->switch_output.filenames[n]); n 1154 tools/perf/builtin-record.c rec->switch_output.filenames[n] = new_filename; n 1831 tools/perf/builtin-record.c #define CLOCKID_MAP(n, c) \ n 1832 tools/perf/builtin-record.c { .name = n, .clockid = (c), } n 1872 tools/perf/builtin-sched.c int i, n = __roundup_pow_of_two(cpu+1); n 1875 tools/perf/builtin-sched.c p = realloc(r->last_time, n * sizeof(u64)); n 1880 tools/perf/builtin-sched.c for (i = r->ncpu; i < n; ++i) n 1883 tools/perf/builtin-sched.c r->ncpu = n; n 1908 tools/perf/builtin-sched.c int n; n 1911 tools/perf/builtin-sched.c n = scnprintf(str, sizeof(str), "%s", comm); n 1914 tools/perf/builtin-sched.c n = scnprintf(str, sizeof(str), "%s[%d/%d]", comm, tid, pid); n 1917 tools/perf/builtin-sched.c n = scnprintf(str, sizeof(str), "%s[%d]", comm, tid); n 1919 tools/perf/builtin-sched.c if (n > comm_width) n 1920 tools/perf/builtin-sched.c comm_width = n; n 2666 tools/perf/builtin-sched.c (u64) r->run_stats.n); n 2686 tools/perf/builtin-sched.c (u64) r->run_stats.n); n 2715 tools/perf/builtin-sched.c if (r && r->run_stats.n) { n 2717 tools/perf/builtin-sched.c stats->sched_count += r->run_stats.n; n 2852 tools/perf/builtin-sched.c if (r && r->run_stats.n) { n 2853 tools/perf/builtin-sched.c totals.sched_count += r->run_stats.n; n 1398 tools/perf/builtin-script.c const int n = strlen(PERF_IP_FLAG_CHARS); n 1420 tools/perf/builtin-script.c for (i = 0; i < n; i++, flags >>= 1) { n 390 tools/perf/builtin-top.c struct hist_entry *syme = top->sym_filter_entry, *n, *found = NULL; n 411 tools/perf/builtin-top.c n = rb_entry(next, struct hist_entry, rb_node); n 412 tools/perf/builtin-top.c if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) { n 413 tools/perf/builtin-top.c found = n; n 416 tools/perf/builtin-top.c next = rb_next(&n->rb_node); n 592 tools/perf/builtin-trace.c #define P_MODE(n) \ n 593 tools/perf/builtin-trace.c if (mode & n##_OK) { \ n 594 tools/perf/builtin-trace.c printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \ n 595 tools/perf/builtin-trace.c mode &= ~n##_OK; \ n 623 tools/perf/builtin-trace.c #define P_FLAG(n) \ n 624 tools/perf/builtin-trace.c if (flags & O_##n) { \ n 625 tools/perf/builtin-trace.c printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \ n 626 tools/perf/builtin-trace.c flags &= ~O_##n; \ n 655 tools/perf/builtin-trace.c #define P_FLAG(n) \ n 656 tools/perf/builtin-trace.c if (flags & GRND_##n) { \ n 657 tools/perf/builtin-trace.c printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? 
prefix : "", #n); \ n 658 tools/perf/builtin-trace.c flags &= ~GRND_##n; \ n 1812 tools/perf/builtin-trace.c static u64 n; n 1814 tools/perf/builtin-trace.c id, perf_evsel__name(evsel), ++n); n 3678 tools/perf/builtin-trace.c entry->msecs = stats ? (u64)stats->n * (avg_stats(stats) / NSEC_PER_MSEC) : 0; n 3705 tools/perf/builtin-trace.c u64 n = (u64) stats->n; n 3713 tools/perf/builtin-trace.c n, syscall_stats_entry->msecs, min, avg); n 91 tools/perf/lib/cpumap.c int n, cpu, prev; n 97 tools/perf/lib/cpumap.c n = fscanf(file, "%u%c", &cpu, &sep); n 98 tools/perf/lib/cpumap.c if (n <= 0) n 126 tools/perf/lib/cpumap.c if (n == 2 && sep == '-') n 130 tools/perf/lib/cpumap.c if (n == 1 || sep == '\n') n 9 tools/perf/lib/include/internal/lib.h ssize_t readn(int fd, void *buf, size_t n); n 10 tools/perf/lib/include/internal/lib.h ssize_t writen(int fd, const void *buf, size_t n); n 10 tools/perf/lib/lib.c static ssize_t ion(bool is_read, int fd, void *buf, size_t n) n 13 tools/perf/lib/lib.c size_t left = n; n 29 tools/perf/lib/lib.c BUG_ON((size_t)(buf - buf_start) != n); n 30 tools/perf/lib/lib.c return n; n 36 tools/perf/lib/lib.c ssize_t readn(int fd, void *buf, size_t n) n 38 tools/perf/lib/lib.c return ion(true, fd, buf, n); n 44 tools/perf/lib/lib.c ssize_t writen(int fd, const void *buf, size_t n) n 47 tools/perf/lib/lib.c return ion(false, fd, (void *)buf, n); n 25 tools/perf/lib/xyarray.c size_t n = xy->entries * xy->entry_size; n 27 tools/perf/lib/xyarray.c memset(xy->contents, 0, n); n 696 tools/perf/pmu-events/jevents.c int n; n 705 tools/perf/pmu-events/jevents.c n = asprintf(&tblname, "pme_%s", fname); n 706 tools/perf/pmu-events/jevents.c if (n < 0) { n 755 tools/perf/pmu-events/jevents.c int n = 16384; n 765 tools/perf/pmu-events/jevents.c line = malloc(n); n 780 tools/perf/pmu-events/jevents.c p = fgets(line, n, mapfp); n 789 tools/perf/pmu-events/jevents.c p = fgets(line, n, mapfp); n 238 tools/perf/tests/dso-data.c static int set_fd_limit(int n) n 245 tools/perf/tests/dso-data.c pr_debug("file limit %ld, new %d\n", (long) rlim.rlim_cur, n); n 247 tools/perf/tests/dso-data.c rlim.rlim_cur = n; n 284 tools/perf/tests/dso-data.c ssize_t n; n 286 tools/perf/tests/dso-data.c n = dso__data_read_offset(dso, &machine, 0, buf, BUFSIZE); n 287 tools/perf/tests/dso-data.c TEST_ASSERT_VAL("failed to read dso", n == BUFSIZE); n 44 tools/perf/tests/kmod-path.c #define T(path, an, k, c, n) \ n 45 tools/perf/tests/kmod-path.c TEST_ASSERT_VAL("failed", !test(path, an, k, c, n)) n 17 tools/perf/tests/mem.c int n; n 19 tools/perf/tests/mem.c n = perf_mem__snp_scnprintf(out, sizeof out, &mi); n 20 tools/perf/tests/mem.c n += perf_mem__lvl_scnprintf(out + n, sizeof out - n, &mi); n 13 tools/perf/tests/unit_number__scnprintf.c u64 n; n 28 tools/perf/tests/unit_number__scnprintf.c unit_number__scnprintf(buf, sizeof(buf), test[i].n); n 31 tools/perf/tests/unit_number__scnprintf.c test[i].n, test[i].str, buf); n 18 tools/perf/trace/beauty/clone.c #define P_FLAG(n) \ n 19 tools/perf/trace/beauty/clone.c if (flags & CLONE_##n) { \ n 20 tools/perf/trace/beauty/clone.c printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \ n 21 tools/perf/trace/beauty/clone.c flags &= ~CLONE_##n; \ n 22 tools/perf/trace/beauty/eventfd.c #define P_FLAG(n) \ n 23 tools/perf/trace/beauty/eventfd.c if (flags & EFD_##n) { \ n 24 tools/perf/trace/beauty/eventfd.c printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? 
prefix : "", #n); \ n 25 tools/perf/trace/beauty/eventfd.c flags &= ~EFD_##n; \ n 37 tools/perf/trace/beauty/futex_op.c #define P_FUTEX_OP(n) case FUTEX_##n: printed = scnprintf(bf, size, "%s%s", show_prefix ? prefix : "", #n); n 14 tools/perf/trace/beauty/mmap.c #define P_MMAP_PROT(n) \ n 15 tools/perf/trace/beauty/mmap.c if (prot & PROT_##n) { \ n 16 tools/perf/trace/beauty/mmap.c printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prot_prefix :"", #n); \ n 17 tools/perf/trace/beauty/mmap.c prot &= ~PROT_##n; \ n 64 tools/perf/trace/beauty/mmap.c #define P_MREMAP_FLAG(n) \ n 65 tools/perf/trace/beauty/mmap.c if (flags & MREMAP_##n) { \ n 66 tools/perf/trace/beauty/mmap.c printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? flags_prefix : "", #n); \ n 67 tools/perf/trace/beauty/mmap.c flags &= ~MREMAP_##n; \ n 29 tools/perf/trace/beauty/mode_t.c #define P_MODE(n) \ n 30 tools/perf/trace/beauty/mode_t.c if ((mode & S_##n) == S_##n) { \ n 31 tools/perf/trace/beauty/mode_t.c printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \ n 32 tools/perf/trace/beauty/mode_t.c mode &= ~S_##n; \ n 30 tools/perf/trace/beauty/msg_flags.c #define P_MSG_FLAG(n) \ n 31 tools/perf/trace/beauty/msg_flags.c if (flags & MSG_##n) { \ n 32 tools/perf/trace/beauty/msg_flags.c printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \ n 33 tools/perf/trace/beauty/msg_flags.c flags &= ~MSG_##n; \ n 34 tools/perf/trace/beauty/open_flags.c #define P_FLAG(n) \ n 35 tools/perf/trace/beauty/open_flags.c if (flags & O_##n) { \ n 36 tools/perf/trace/beauty/open_flags.c printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \ n 37 tools/perf/trace/beauty/open_flags.c flags &= ~O_##n; \ n 28 tools/perf/trace/beauty/perf_event_open.c #define P_FLAG(n) \ n 29 tools/perf/trace/beauty/perf_event_open.c if (flags & PERF_FLAG_##n) { \ n 30 tools/perf/trace/beauty/perf_event_open.c printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \ n 31 tools/perf/trace/beauty/perf_event_open.c flags &= ~PERF_FLAG_##n; \ n 35 tools/perf/trace/beauty/sched_policy.c #define P_POLICY_FLAG(n) \ n 36 tools/perf/trace/beauty/sched_policy.c if (flags & SCHED_##n) { \ n 37 tools/perf/trace/beauty/sched_policy.c printed += scnprintf(bf + printed, size - printed, "|%s%s", show_prefix ? prefix : "", #n); \ n 38 tools/perf/trace/beauty/sched_policy.c flags &= ~SCHED_##n; \ n 17 tools/perf/trace/beauty/seccomp.c #define P_SECCOMP_SET_MODE_OP(n) case SECCOMP_SET_MODE_##n: printed = scnprintf(bf, size, "%s%s", show_prefix ? prefix : "", #n); break n 40 tools/perf/trace/beauty/seccomp.c #define P_FLAG(n) \ n 41 tools/perf/trace/beauty/seccomp.c if (flags & SECCOMP_FILTER_FLAG_##n) { \ n 42 tools/perf/trace/beauty/seccomp.c printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \ n 43 tools/perf/trace/beauty/seccomp.c flags &= ~SECCOMP_FILTER_FLAG_##n; \ n 11 tools/perf/trace/beauty/signum.c #define P_SIGNUM(n) case SIG##n: return scnprintf(bf, size, "%s%s", show_prefix ? prefix : "", #n) n 34 tools/perf/trace/beauty/socket_type.c #define P_SK_TYPE(n) case SOCK_##n: printed = scnprintf(bf, size, "%s%s", show_prefix ? 
prefix : "", #n); break; n 47 tools/perf/trace/beauty/socket_type.c #define P_SK_FLAG(n) \ n 48 tools/perf/trace/beauty/socket_type.c if (flags & SOCK_##n) { \ n 49 tools/perf/trace/beauty/socket_type.c printed += scnprintf(bf + printed, size - printed, "|%s", #n); \ n 50 tools/perf/trace/beauty/socket_type.c flags &= ~SOCK_##n; \ n 22 tools/perf/trace/beauty/statx.c #define P_FLAG(n) \ n 23 tools/perf/trace/beauty/statx.c if (flags & AT_##n) { \ n 24 tools/perf/trace/beauty/statx.c printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \ n 25 tools/perf/trace/beauty/statx.c flags &= ~AT_##n; \ n 50 tools/perf/trace/beauty/statx.c #define P_FLAG(n) \ n 51 tools/perf/trace/beauty/statx.c if (flags & STATX_##n) { \ n 52 tools/perf/trace/beauty/statx.c printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \ n 53 tools/perf/trace/beauty/statx.c flags &= ~STATX_##n; \ n 12 tools/perf/trace/beauty/waitid_options.c #define P_OPTION(n) \ n 13 tools/perf/trace/beauty/waitid_options.c if (options & W##n) { \ n 14 tools/perf/trace/beauty/waitid_options.c printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \ n 15 tools/perf/trace/beauty/waitid_options.c options &= ~W##n; \ n 175 tools/perf/ui/browsers/hists.c int n = 0; n 184 tools/perf/ui/browsers/hists.c ++n; n 193 tools/perf/ui/browsers/hists.c n += callchain_node__count_rows_rb_tree(child); n 196 tools/perf/ui/browsers/hists.c return n; n 203 tools/perf/ui/browsers/hists.c int n = 0; n 212 tools/perf/ui/browsers/hists.c n++; n 222 tools/perf/ui/browsers/hists.c n++; n 225 tools/perf/ui/browsers/hists.c return n; n 237 tools/perf/ui/browsers/hists.c int n = 0; n 245 tools/perf/ui/browsers/hists.c ++n; n 251 tools/perf/ui/browsers/hists.c n += callchain_node__count_rows_rb_tree(node); n 253 tools/perf/ui/browsers/hists.c return n; n 259 tools/perf/ui/browsers/hists.c int n = 0; n 263 tools/perf/ui/browsers/hists.c n += callchain_node__count_rows(node); n 266 tools/perf/ui/browsers/hists.c return n; n 463 tools/perf/ui/browsers/hists.c int n = 0; n 472 tools/perf/ui/browsers/hists.c ++n; n 478 tools/perf/ui/browsers/hists.c n += callchain_node__set_folding_rb_tree(child, unfold); n 481 tools/perf/ui/browsers/hists.c return n; n 488 tools/perf/ui/browsers/hists.c int n = 0; n 491 tools/perf/ui/browsers/hists.c ++n; n 497 tools/perf/ui/browsers/hists.c n += callchain_node__set_folding_rb_tree(node, unfold); n 499 tools/perf/ui/browsers/hists.c return n; n 505 tools/perf/ui/browsers/hists.c int n = 0; n 509 tools/perf/ui/browsers/hists.c n += callchain_node__set_folding(node, unfold); n 512 tools/perf/ui/browsers/hists.c return n; n 521 tools/perf/ui/browsers/hists.c int n = 0; n 527 tools/perf/ui/browsers/hists.c n++; n 530 tools/perf/ui/browsers/hists.c return n; n 540 tools/perf/ui/browsers/hists.c int n; n 543 tools/perf/ui/browsers/hists.c n = callchain__set_folding(&he->sorted_chain, unfold); n 545 tools/perf/ui/browsers/hists.c n = hierarchy_set_folding(hb, he, unfold); n 547 tools/perf/ui/browsers/hists.c he->nr_rows = unfold ? 
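The P_FLAG/P_MODE/P_OPTION entries above (builtin-trace.c and the trace/beauty printers) all expand to the same pattern: test one bit, append its name with a "|" separator, clear it, and finally print any undecoded remainder as hex. A self-contained sketch of that pattern for a few open(2) flags; it is simplified (no buffer-overflow handling) and is not perf's actual helper:

#include <fcntl.h>
#include <stdio.h>

#define P_FLAG(n)                                                        \
        do {                                                             \
                if (flags & O_##n) {                                     \
                        printed += snprintf(bf + printed, size - printed, \
                                            "%s%s", printed ? "|" : "", #n); \
                        flags &= ~O_##n;                                 \
                }                                                        \
        } while (0)

static int open_flags__str(char *bf, size_t size, int flags)
{
        int printed = 0;

        P_FLAG(APPEND);
        P_FLAG(CREAT);
        P_FLAG(NONBLOCK);
        P_FLAG(TRUNC);
        if (flags)      /* whatever was not decoded, dump as hex */
                printed += snprintf(bf + printed, size - printed,
                                    "%s%#x", printed ? "|" : "", flags);
        return printed;
}

int main(void)
{
        char buf[128];

        open_flags__str(buf, sizeof(buf), O_CREAT | O_APPEND);
        puts(buf);              /* prints: APPEND|CREAT */
        return 0;
}

perf's real printers also take a show_prefix flag and per-flag prefixes, which the sketch drops.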
n : 0; n 2259 tools/perf/ui/browsers/hists.c static inline void free_popup_options(char **options, int n) n 2263 tools/perf/ui/browsers/hists.c for (i = 0; i < n; ++i) n 2551 tools/perf/ui/browsers/hists.c int n = 0; n 2564 tools/perf/ui/browsers/hists.c n = scnprintf(script_opt, len, " -c %s ", n 2567 tools/perf/ui/browsers/hists.c n = scnprintf(script_opt, len, " -S %s ", n 2582 tools/perf/ui/browsers/hists.c n += snprintf(script_opt + n, len - n, " --time %s,%s", start, end); n 2634 tools/perf/ui/browsers/hists.c int n, j; n 2637 tools/perf/ui/browsers/hists.c n = add_script_opt_2(browser, act, optstr, thread, sym, evsel, ""); n 2651 tools/perf/ui/browsers/hists.c n += add_script_opt_2(browser, act, optstr, thread, sym, n 2655 tools/perf/ui/browsers/hists.c return n; n 35 tools/perf/ui/browsers/res_sample.c int i, n; n 68 tools/perf/ui/browsers/res_sample.c n = timestamp__scnprintf_nsec(r->time - context_len, trange, sizeof trange); n 69 tools/perf/ui/browsers/res_sample.c trange[n++] = ','; n 70 tools/perf/ui/browsers/res_sample.c timestamp__scnprintf_nsec(r->time + context_len, trange + n, sizeof trange - n); n 98 tools/perf/ui/gtk/annotate.c struct disasm_line *pos, *n; n 155 tools/perf/ui/gtk/annotate.c list_for_each_entry_safe(pos, n, &notes->src->source, al.node) { n 2486 tools/perf/util/annotate.c struct annotation_line *al, *n; n 2488 tools/perf/util/annotate.c list_for_each_entry_safe(al, n, &as->source, node) { n 2593 tools/perf/util/annotate.c static inline int width_jumps(int n) n 2595 tools/perf/util/annotate.c if (n >= 100) n 2597 tools/perf/util/annotate.c if (n / 10) n 3020 tools/perf/util/annotate.c #define ANNOTATION__CFG(n) \ n 3021 tools/perf/util/annotate.c { .name = #n, .value = &annotation__default_options.n, } n 14 tools/perf/util/arm-spe-pkt-decoder.c #define BIT(n) (1ULL << (n)) n 45 tools/perf/util/arm-spe-pkt-decoder.c #define memcpy_le64(d, s, n) do { \ n 46 tools/perf/util/arm-spe-pkt-decoder.c memcpy((d), (s), (n)); \ n 625 tools/perf/util/auxtrace.c struct auxtrace_index *auxtrace_index, *n; n 627 tools/perf/util/auxtrace.c list_for_each_entry_safe(auxtrace_index, n, head, list) { n 1525 tools/perf/util/auxtrace.c struct addr_filter *filt, *n; n 1527 tools/perf/util/auxtrace.c list_for_each_entry_safe(filt, n, &filts->head, list) { n 1551 tools/perf/util/auxtrace.c size_t n; n 1557 tools/perf/util/auxtrace.c n = strcspn(*inp, str_delim); n 1558 tools/perf/util/auxtrace.c if (!n) n 1560 tools/perf/util/auxtrace.c *inp += n; n 84 tools/perf/util/block-range.c struct rb_node *n, *parent = NULL; n 111 tools/perf/util/block-range.c n = parent; n 113 tools/perf/util/block-range.c n = rb_next(n); n 114 tools/perf/util/block-range.c if (!n) n 117 tools/perf/util/block-range.c next = rb_entry(n, struct block_range, node); n 40 tools/perf/util/block-range.h struct rb_node *n = rb_next(&br->node); n 41 tools/perf/util/block-range.h if (!n) n 43 tools/perf/util/block-range.h return rb_entry(n, struct block_range, node); n 418 tools/perf/util/bpf-loader.c preproc_gen_prologue(struct bpf_program *prog, int n, n 434 tools/perf/util/bpf-loader.c if (n < 0 || n >= priv->nr_types) n 439 tools/perf/util/bpf-loader.c if (priv->type_mapping[i] == n) n 444 tools/perf/util/bpf-loader.c pr_debug("Internal error: prologue type %d not found\n", n); n 552 tools/perf/util/bpf-loader.c int n; n 554 tools/perf/util/bpf-loader.c n = ptevs[i] - pev->tevs; n 556 tools/perf/util/bpf-loader.c mapping[n] = type; n 557 tools/perf/util/bpf-loader.c pr_debug("mapping[%d]=%d\n", n, type); n 562
tools/perf/util/bpf-loader.c mapping[n] = type; n 564 tools/perf/util/bpf-loader.c mapping[n] = ++type; n 566 tools/perf/util/bpf-loader.c pr_debug("mapping[%d]=%d\n", n, mapping[n]); n 844 tools/perf/util/bpf-loader.c struct bpf_map_op *pos, *n; n 846 tools/perf/util/bpf-loader.c list_for_each_entry_safe(pos, n, &priv->ops_list, list) { n 1699 tools/perf/util/bpf-loader.c size_t n; n 1702 tools/perf/util/bpf-loader.c n = snprintf(buf, size, "Failed to load %s%s: ", n 1704 tools/perf/util/bpf-loader.c if (n >= size) { n 1708 tools/perf/util/bpf-loader.c buf += n; n 1709 tools/perf/util/bpf-loader.c size -= n; n 40 tools/perf/util/call-path.c struct call_path_block *pos, *n; n 42 tools/perf/util/call-path.c list_for_each_entry_safe(pos, n, &cpr->blocks, node) { n 56 tools/perf/util/call-path.c size_t n; n 69 tools/perf/util/call-path.c n = cpr->next++ & CALL_PATH_BLOCK_MASK; n 70 tools/perf/util/call-path.c cp = &cpb->cp[n]; n 416 tools/perf/util/callchain.c struct rb_node *n; n 419 tools/perf/util/callchain.c n = rb_first(&node->rb_root_in); n 420 tools/perf/util/callchain.c while (n) { n 421 tools/perf/util/callchain.c child = rb_entry(n, struct callchain_node, rb_node_in); n 422 tools/perf/util/callchain.c n = rb_next(n); n 446 tools/perf/util/callchain.c struct rb_node *n; n 450 tools/perf/util/callchain.c n = rb_first(&node->rb_root_in); n 452 tools/perf/util/callchain.c while (n) { n 453 tools/perf/util/callchain.c child = rb_entry(n, struct callchain_node, rb_node_in); n 454 tools/perf/util/callchain.c n = rb_next(n); n 474 tools/perf/util/callchain.c struct rb_node *n; n 481 tools/perf/util/callchain.c n = rb_first(&node->rb_root_in); n 482 tools/perf/util/callchain.c while (n) { n 483 tools/perf/util/callchain.c child = rb_entry(n, struct callchain_node, rb_node_in); n 484 tools/perf/util/callchain.c n = rb_next(n); n 540 tools/perf/util/callchain.c struct rb_node *n; n 546 tools/perf/util/callchain.c n = rb_first(&new->rb_root_in); n 547 tools/perf/util/callchain.c while (n) { n 548 tools/perf/util/callchain.c child = rb_entry(n, struct callchain_node, rb_node_in); n 550 tools/perf/util/callchain.c n = rb_next(n); n 1002 tools/perf/util/callchain.c struct rb_node *n; n 1021 tools/perf/util/callchain.c n = rb_first(&src->rb_root_in); n 1022 tools/perf/util/callchain.c while (n) { n 1023 tools/perf/util/callchain.c child = container_of(n, struct callchain_node, rb_node_in); n 1024 tools/perf/util/callchain.c n = rb_next(n); n 1259 tools/perf/util/callchain.c struct rb_node *n; n 1261 tools/perf/util/callchain.c n = rb_first(&node->rb_root_in); n 1262 tools/perf/util/callchain.c while (n) { n 1263 tools/perf/util/callchain.c child = rb_entry(n, struct callchain_node, rb_node_in); n 1264 tools/perf/util/callchain.c n = rb_next(n); n 1458 tools/perf/util/callchain.c struct rb_node *n; n 1472 tools/perf/util/callchain.c n = rb_first(&node->rb_root_in); n 1473 tools/perf/util/callchain.c while (n) { n 1474 tools/perf/util/callchain.c child = container_of(n, struct callchain_node, rb_node_in); n 1475 tools/perf/util/callchain.c n = rb_next(n); n 1494 tools/perf/util/callchain.c struct rb_node *n; n 1497 tools/perf/util/callchain.c n = rb_first(&node->rb_root_in); n 1498 tools/perf/util/callchain.c while (n) { n 1499 tools/perf/util/callchain.c child = container_of(n, struct callchain_node, rb_node_in); n 1502 tools/perf/util/callchain.c n = rb_next(n); n 145 tools/perf/util/cgroup.c int n; n 153 tools/perf/util/cgroup.c n = 0; n 155 tools/perf/util/cgroup.c if (n == nr_cgroups) n 157 
tools/perf/util/cgroup.c n++; n 18 tools/perf/util/copyfile.c size_t n; n 32 tools/perf/util/copyfile.c while (getline(&line, &n, from_fp) > 0) n 415 tools/perf/util/cpumap.c int n; n 425 tools/perf/util/cpumap.c n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt); n 426 tools/perf/util/cpumap.c if (n == PATH_MAX) { n 440 tools/perf/util/cpumap.c n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name); n 441 tools/perf/util/cpumap.c if (n == PATH_MAX) { n 35 tools/perf/util/data-convert-bt.c #define pr_N(n, fmt, ...) \ n 36 tools/perf/util/data-convert-bt.c eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__) n 1106 tools/perf/util/data-convert-bt.c #define ADD_FIELD(cl, t, n) \ n 1108 tools/perf/util/data-convert-bt.c pr2(" field '%s'\n", n); \ n 1109 tools/perf/util/data-convert-bt.c if (bt_ctf_event_class_add_field(cl, t, n)) { \ n 1110 tools/perf/util/data-convert-bt.c pr_err("Failed to add field '%s';\n", n); \ n 1217 tools/perf/util/data-convert-bt.c #define __NON_SAMPLE_ADD_FIELD(t, n) \ n 1219 tools/perf/util/data-convert-bt.c pr2(" field '%s'\n", #n); \ n 1220 tools/perf/util/data-convert-bt.c if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\ n 1221 tools/perf/util/data-convert-bt.c pr_err("Failed to add field '%s';\n", #n);\ n 27 tools/perf/util/debug.h #define pr_debugN(n, fmt, ...) \ n 28 tools/perf/util/debug.h eprintf(n, verbose, pr_fmt(fmt), ##__VA_ARGS__) n 33 tools/perf/util/debug.h #define pr_time_N(n, var, t, fmt, ...) \ n 34 tools/perf/util/debug.h eprintf_time(n, var, t, fmt, ##__VA_ARGS__) n 21 tools/perf/util/demangle-java.c #define BASE_ENT(c, n) [c - 'A']=n n 209 tools/perf/util/dso.h #define dso__for_each_symbol(dso, pos, n) \ n 210 tools/perf/util/dso.h symbols__for_each_entry(&(dso)->symbols, pos, n) n 28 tools/perf/util/dwarf-regs.c #define __get_dwarf_regstr(tbl, n) (((n) < ARRAY_SIZE(tbl)) ? 
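The env.c lookups above descend an rbtree by hand: compare the key, go rb_left when it is smaller, rb_right when larger, stop on a match. The same descent is shown here on a plain binary search tree so the sketch needs no rbtree library; struct node and bst_find are made-up names:

#include <stddef.h>

struct node {
        int key;
        struct node *left, *right;
};

/* Iterative lookup, mirroring the env.c while (n) { ... rb_left/rb_right } walk. */
static struct node *bst_find(struct node *n, int key)
{
        while (n) {
                if (key < n->key)
                        n = n->left;
                else if (key > n->key)
                        n = n->right;
                else
                        return n;
        }
        return NULL;
}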
(tbl)[(n)] : NULL) n 31 tools/perf/util/dwarf-regs.c const char *get_dwarf_regstr(unsigned int n, unsigned int machine) n 35 tools/perf/util/dwarf-regs.c return get_arch_regstr(n); n 37 tools/perf/util/dwarf-regs.c return __get_dwarf_regstr(x86_32_regstr_tbl, n); n 39 tools/perf/util/dwarf-regs.c return __get_dwarf_regstr(x86_64_regstr_tbl, n); n 41 tools/perf/util/dwarf-regs.c return __get_dwarf_regstr(arm_regstr_tbl, n); n 43 tools/perf/util/dwarf-regs.c return __get_dwarf_regstr(aarch64_regstr_tbl, n); n 45 tools/perf/util/dwarf-regs.c return __get_dwarf_regstr(sh_regstr_tbl, n); n 47 tools/perf/util/dwarf-regs.c return __get_dwarf_regstr(s390_regstr_tbl, n); n 50 tools/perf/util/dwarf-regs.c return __get_dwarf_regstr(powerpc_regstr_tbl, n); n 53 tools/perf/util/dwarf-regs.c return __get_dwarf_regstr(sparc_regstr_tbl, n); n 55 tools/perf/util/dwarf-regs.c return __get_dwarf_regstr(xtensa_regstr_tbl, n); n 51 tools/perf/util/env.c struct rb_node *n; n 54 tools/perf/util/env.c n = env->bpf_progs.infos.rb_node; n 56 tools/perf/util/env.c while (n) { n 57 tools/perf/util/env.c node = rb_entry(n, struct bpf_prog_info_node, rb_node); n 59 tools/perf/util/env.c n = n->rb_left; n 61 tools/perf/util/env.c n = n->rb_right; n 105 tools/perf/util/env.c struct rb_node *n; n 108 tools/perf/util/env.c n = env->bpf_progs.btfs.rb_node; n 110 tools/perf/util/env.c while (n) { n 111 tools/perf/util/env.c node = rb_entry(n, struct btf_node, rb_node); n 113 tools/perf/util/env.c n = n->rb_left; n 115 tools/perf/util/env.c n = n->rb_right; n 126 tools/perf/util/evlist.c struct evsel *pos, *n; n 128 tools/perf/util/evlist.c evlist__for_each_entry_safe(evlist, n, pos) { n 241 tools/perf/util/evlist.c struct evsel *evsel, *n; n 257 tools/perf/util/evlist.c __evlist__for_each_entry_safe(&head, n, evsel) n 494 tools/perf/util/evlist.c ssize_t n; n 496 tools/perf/util/evlist.c n = (event->header.size - sizeof(event->header)) >> 3; n 499 tools/perf/util/evlist.c if (evlist->id_pos >= n) n 503 tools/perf/util/evlist.c if (evlist->is_pos > n) n 505 tools/perf/util/evlist.c n -= evlist->is_pos; n 506 tools/perf/util/evlist.c *id = array[n]; n 1512 tools/perf/util/evlist.c struct evsel *evsel, *n; n 1518 tools/perf/util/evlist.c evlist__for_each_entry_safe(evlist, n, evsel) { n 545 tools/perf/util/header.c int i, ret, n; n 551 tools/perf/util/header.c n = perf_env.nr_cmdline + 1; n 553 tools/perf/util/header.c ret = do_write(ff, &n, sizeof(n)); n 648 tools/perf/util/header.c int ret = -1, n; n 661 tools/perf/util/header.c n = sscanf(buf, "%*s %"PRIu64, &mem); n 662 tools/perf/util/header.c if (n == 1) n 687 tools/perf/util/header.c struct numa_topology_node *n = &tp->nodes[i]; n 689 tools/perf/util/header.c ret = do_write(ff, &n->node, sizeof(u32)); n 693 tools/perf/util/header.c ret = do_write(ff, &n->mem_total, sizeof(u64)); n 697 tools/perf/util/header.c ret = do_write(ff, &n->mem_free, sizeof(u64)); n 701 tools/perf/util/header.c ret = do_write_string(ff, n->cpus); n 1203 tools/perf/util/header.c static int memory_node__read(struct memory_node *n, unsigned long idx) n 1232 tools/perf/util/header.c n->set = bitmap_alloc(size); n 1233 tools/perf/util/header.c if (!n->set) { n 1238 tools/perf/util/header.c n->node = idx; n 1239 tools/perf/util/header.c n->size = size; n 1244 tools/perf/util/header.c set_bit(phys, n->set); n 1353 tools/perf/util/header.c struct memory_node *n = &nodes[i]; n 1356 tools/perf/util/header.c ret = do_write(ff, &n->v, sizeof(n->v)); \ n 1365 tools/perf/util/header.c ret = do_write_bitmap(ff, 
n->set, n->size); n 1723 tools/perf/util/header.c struct numa_node *n; n 1726 tools/perf/util/header.c n = &ff->ph->env.numa_nodes[i]; n 1730 tools/perf/util/header.c n->node, n->mem_total, n->mem_free); n 1732 tools/perf/util/header.c fprintf(fp, "# node%u cpu list : ", n->node); n 1733 tools/perf/util/header.c cpu_map__fprintf(n->map, fp); n 1857 tools/perf/util/header.c static void memory_node__fprintf(struct memory_node *n, n 1863 tools/perf/util/header.c size = bsize * bitmap_weight(n->set, n->size); n 1866 tools/perf/util/header.c bitmap_scnprintf(n->set, n->size, buf_map, 100); n 1867 tools/perf/util/header.c fprintf(fp, "# %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map); n 2331 tools/perf/util/header.c struct numa_node *nodes, *n; n 2344 tools/perf/util/header.c n = &nodes[i]; n 2347 tools/perf/util/header.c if (do_read_u32(ff, &n->node)) n 2350 tools/perf/util/header.c if (do_read_u64(ff, &n->mem_total)) n 2353 tools/perf/util/header.c if (do_read_u64(ff, &n->mem_free)) n 2360 tools/perf/util/header.c n->map = perf_cpu_map__new(str); n 2361 tools/perf/util/header.c if (!n->map) n 2617 tools/perf/util/header.c struct memory_node n; n 2620 tools/perf/util/header.c if (do_read_u64(ff, &n.v)) \ n 2628 tools/perf/util/header.c if (do_read_bitmap(ff, &n.set, &n.size)) n 2631 tools/perf/util/header.c nodes[i] = n; n 2812 tools/perf/util/header.c #define FEAT_OPR(n, func, __full_only) \ n 2813 tools/perf/util/header.c [HEADER_##n] = { \ n 2814 tools/perf/util/header.c .name = __stringify(n), \ n 2822 tools/perf/util/header.c #define FEAT_OPN(n, func, __full_only) \ n 2823 tools/perf/util/header.c [HEADER_##n] = { \ n 2824 tools/perf/util/header.c .name = __stringify(n), \ n 57 tools/perf/util/help-unknown-cmd.c unsigned int i, n = 0, best_similarity = 0; n 85 tools/perf/util/help-unknown-cmd.c n = 1; n 86 tools/perf/util/help-unknown-cmd.c while (n < main_cmds.cnt && best_similarity == main_cmds.names[n]->len) n 87 tools/perf/util/help-unknown-cmd.c ++n; n 90 tools/perf/util/help-unknown-cmd.c if (autocorrect && n == 1) { n 111 tools/perf/util/help-unknown-cmd.c n < 2 ? 
"this": "one of these"); n 113 tools/perf/util/help-unknown-cmd.c for (i = 0; i < n; i++) n 227 tools/perf/util/hist.c struct hist_entry *n; n 233 tools/perf/util/hist.c n = rb_entry(next, struct hist_entry, rb_node); n 234 tools/perf/util/hist.c if (!n->filtered) n 235 tools/perf/util/hist.c hists__calc_col_len(hists, n); n 236 tools/perf/util/hist.c next = rb_next(&n->rb_node); n 363 tools/perf/util/hist.c struct hist_entry *n; n 366 tools/perf/util/hist.c n = rb_entry(next, struct hist_entry, rb_node); n 367 tools/perf/util/hist.c next = rb_next(&n->rb_node); n 368 tools/perf/util/hist.c if (((zap_user && n->level == '.') || n 369 tools/perf/util/hist.c (zap_kernel && n->level != '.') || n 370 tools/perf/util/hist.c hists__decay_entry(hists, n))) { n 371 tools/perf/util/hist.c hists__delete_entry(hists, n); n 379 tools/perf/util/hist.c struct hist_entry *n; n 382 tools/perf/util/hist.c n = rb_entry(next, struct hist_entry, rb_node); n 383 tools/perf/util/hist.c next = rb_next(&n->rb_node); n 385 tools/perf/util/hist.c hists__delete_entry(hists, n); n 392 tools/perf/util/hist.c struct hist_entry *n; n 396 tools/perf/util/hist.c n = rb_entry(next, struct hist_entry, rb_node); n 398 tools/perf/util/hist.c return n; n 400 tools/perf/util/hist.c next = rb_next(&n->rb_node); n 1591 tools/perf/util/hist.c struct hist_entry *n; n 1606 tools/perf/util/hist.c n = rb_entry(next, struct hist_entry, rb_node_in); n 1607 tools/perf/util/hist.c next = rb_next(&n->rb_node_in); n 1609 tools/perf/util/hist.c rb_erase_cached(&n->rb_node_in, root); n 1610 tools/perf/util/hist.c ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n); n 1620 tools/perf/util/hist.c hists__apply_filters(hists, n); n 1837 tools/perf/util/hist.c struct hist_entry *n; n 1869 tools/perf/util/hist.c n = rb_entry(next, struct hist_entry, rb_node_in); n 1870 tools/perf/util/hist.c next = rb_next(&n->rb_node_in); n 1872 tools/perf/util/hist.c if (cb && cb(n, cb_arg)) n 1875 tools/perf/util/hist.c __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain); n 1876 tools/perf/util/hist.c hists__inc_stats(hists, n); n 1878 tools/perf/util/hist.c if (!n->filtered) n 1879 tools/perf/util/hist.c hists__calc_col_len(hists, n); n 2375 tools/perf/util/hist.c struct rb_node *n; n 2378 tools/perf/util/hist.c n = hists->entries_collapsed.rb_root.rb_node; n 2380 tools/perf/util/hist.c n = hists->entries_in->rb_root.rb_node; n 2382 tools/perf/util/hist.c while (n) { n 2383 tools/perf/util/hist.c struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in); n 2387 tools/perf/util/hist.c n = n->rb_left; n 2389 tools/perf/util/hist.c n = n->rb_right; n 2400 tools/perf/util/hist.c struct rb_node *n = root->rb_root.rb_node; n 2402 tools/perf/util/hist.c while (n) { n 2407 tools/perf/util/hist.c iter = rb_entry(n, struct hist_entry, rb_node_in); n 2415 tools/perf/util/hist.c n = n->rb_left; n 2417 tools/perf/util/hist.c n = n->rb_right; n 6 tools/perf/util/include/dwarf-regs.h const char *get_arch_regstr(unsigned int n); n 12 tools/perf/util/include/dwarf-regs.h const char *get_dwarf_regstr(unsigned int n, unsigned int machine); n 213 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c static uint64_t multdiv(uint64_t t, uint32_t n, uint32_t d) n 217 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c return (t / d) * n + ((t % d) * n) / d; n 548 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c size_t old_len, len, n; n 561 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c n = INTEL_PT_PKT_MAX_SZ - 
len; n 562 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c if (n > decoder->len) n 563 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c n = decoder->len; n 564 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c memcpy(buf + len, decoder->buf, n); n 565 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c len += n; n 2717 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c const char *n = INTEL_PT_PSB_STR; n 2726 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c p = memrchr(buf, n[0], k); n 2729 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c if (!memcmp(p + 1, n + 1, INTEL_PT_PSB_LEN - 1)) n 186 tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c int n, i; n 196 tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c n = snprintf(x->out, left, "insn: "); n 197 tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c left -= n; n 199 tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c n += snprintf(x->out + n, left, "%02x ", inbuf[i]); n 200 tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c left -= n; n 15 tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c #define BIT(n) (1 << (n)) n 25 tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c #define memcpy_le64(d, s, n) do { \ n 26 tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c memcpy((d), (s), (n)); \ n 1605 tools/perf/util/intel-pt.c int n = pebs_gp_regs[i] - 1; n 1607 tools/perf/util/intel-pt.c if (n < 0) n 1614 tools/perf/util/intel-pt.c if (mask & 1 << n && regs_mask & bit) { n 1616 tools/perf/util/intel-pt.c *pos++ = gp_regs[n]; n 2994 tools/perf/util/intel-pt.c int n = opts->range_num; n 2997 tools/perf/util/intel-pt.c if (!n || !p || pt->timeless_decoding) n 3000 tools/perf/util/intel-pt.c pt->time_ranges = calloc(n, sizeof(struct range)); n 3004 tools/perf/util/intel-pt.c pt->range_cnt = n; n 3006 tools/perf/util/intel-pt.c intel_pt_log("%s: %u range(s)\n", __func__, n); n 3008 tools/perf/util/intel-pt.c for (i = 0; i < n; i++) { n 75 tools/perf/util/intlist.h #define intlist__for_each_entry_safe(pos, n, ilist) \ n 76 tools/perf/util/intlist.h for (pos = intlist__first(ilist), n = intlist__next(pos); pos;\ n 77 tools/perf/util/intlist.h pos = n, n = intlist__next(n)) n 138 tools/perf/util/jitdump.c void *n, *buf = NULL; n 216 tools/perf/util/jitdump.c n = realloc(buf, bs); n 217 tools/perf/util/jitdump.c if (!n) n 220 tools/perf/util/jitdump.c buf = n; n 288 tools/perf/util/jitdump.c void *n; n 289 tools/perf/util/jitdump.c n = realloc(jd->buf, bs); n 290 tools/perf/util/jitdump.c if (!n) n 292 tools/perf/util/jitdump.c jd->buf = n; n 307 tools/perf/util/jitdump.c uint64_t n; n 310 tools/perf/util/jitdump.c for (n = 0 ; n < jr->info.nr_entry; n++) { n 311 tools/perf/util/jitdump.c jr->info.entries[n].addr = bswap_64(jr->info.entries[n].addr); n 312 tools/perf/util/jitdump.c jr->info.entries[n].lineno = bswap_32(jr->info.entries[n].lineno); n 313 tools/perf/util/jitdump.c jr->info.entries[n].discrim = bswap_32(jr->info.entries[n].discrim); n 168 tools/perf/util/machine.c struct dso *pos, *n; n 172 tools/perf/util/machine.c list_for_each_entry_safe(pos, n, &dsos->head, node) { n 224 tools/perf/util/machine.c struct thread *thread, *n; n 233 tools/perf/util/machine.c list_for_each_entry_safe(thread, n, &threads->dead, node) n 91 tools/perf/util/map.h #define map__for_each_symbol(map, pos, n) \ n 92 tools/perf/util/map.h dso__for_each_symbol(map->dso, pos, n) n 18 tools/perf/util/mem-events.c #define E(t, n, s) { .tag = t, .name = n, .sysfs_name = s } n 48 tools/perf/util/mem2node.c struct 
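readn() and writen() above (tools/perf/lib/lib.c) wrap ion(), a loop that keeps calling read(2)/write(2) until exactly n bytes have been transferred, because either call may legally return short. A minimal standalone version of the read side; read_full is a made-up name, and EOF is reported by returning fewer bytes than requested:

#include <errno.h>
#include <unistd.h>

static ssize_t read_full(int fd, void *buf, size_t n)
{
        char *p = buf;
        size_t left = n;

        while (left) {
                ssize_t r = read(fd, p, left);

                if (r < 0) {
                        if (errno == EINTR)
                                continue;       /* retry interrupted reads */
                        return -1;
                }
                if (r == 0)                     /* EOF before n bytes */
                        break;
                p += r;
                left -= r;
        }
        return (ssize_t)(n - left);
}

int main(void)
{
        char buf[16];
        ssize_t got = read_full(0, buf, sizeof(buf));

        return got < 0 ? 1 : 0;
}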
memory_node *n, *nodes = &env->memory_nodes[0]; n 57 tools/perf/util/mem2node.c n = &nodes[i]; n 58 tools/perf/util/mem2node.c max += bitmap_weight(n->set, n->size); n 68 tools/perf/util/mem2node.c n = &nodes[i]; n 70 tools/perf/util/mem2node.c for (bit = 0; bit < n->size; bit++) { n 73 tools/perf/util/mem2node.c if (!test_bit(bit, n->set)) n 86 tools/perf/util/mem2node.c (prev->node == n->node)) { n 92 tools/perf/util/mem2node.c phys_entry__init(&entries[j++], start, bsize, n->node); n 200 tools/perf/util/metricgroup.c static bool match_metric(const char *n, const char *list) n 209 tools/perf/util/metricgroup.c if (!n) n 212 tools/perf/util/metricgroup.c m = strcasestr(n, list); n 215 tools/perf/util/metricgroup.c if ((m == n || m[-1] == ';' || m[-1] == ' ') && n 286 tools/perf/util/metricgroup.c int n = 0; n 290 tools/perf/util/metricgroup.c printf("%s%s", n > 0 ? " " : "", sn->s); n 293 tools/perf/util/metricgroup.c n++; n 13 tools/perf/util/ordered-events.c #define pr_N(n, fmt, ...) \ n 14 tools/perf/util/ordered-events.c eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__) n 9 tools/perf/util/parse-branch-options.c #define BRANCH_OPT(n, m) \ n 10 tools/perf/util/parse-branch-options.c { .name = n, .mode = (m) } n 380 tools/perf/util/parse-events.c int n, longest = -1; n 384 tools/perf/util/parse-events.c n = strlen(names[i][j]); n 385 tools/perf/util/parse-events.c if (n > longest && !strncasecmp(str, names[i][j], n)) n 386 tools/perf/util/parse-events.c longest = n; n 416 tools/perf/util/parse-events.c int i, n; n 428 tools/perf/util/parse-events.c n = snprintf(name, MAX_NAME_LEN, "%s", type); n 433 tools/perf/util/parse-events.c n += snprintf(name + n, MAX_NAME_LEN - n, "-%s", str); n 2762 tools/perf/util/parse-events.c struct parse_events_term *term, *n; n 2776 tools/perf/util/parse-events.c ret = parse_events_term__clone(&n, term); n 2779 tools/perf/util/parse-events.c list_add_tail(&n->list, *new); n 30 tools/perf/util/perf_event_attr_fprintf.c #define bit_name(n) { PERF_SAMPLE_##n, #n } n 46 tools/perf/util/perf_event_attr_fprintf.c #define bit_name(n) { PERF_SAMPLE_BRANCH_##n, #n } n 61 tools/perf/util/perf_event_attr_fprintf.c #define bit_name(n) { PERF_FORMAT_##n, #n } n 14 tools/perf/util/perf_regs.h #define SMPL_REG(n, b) { .name = #n, .mask = 1ULL << (b) } n 15 tools/perf/util/perf_regs.h #define SMPL_REG2(n, b) { .name = #n, .mask = 3ULL << (b) } n 1356 tools/perf/util/pmu.c int n = strcmp(as->topic, bs->topic); n 1358 tools/perf/util/pmu.c if (n) n 1359 tools/perf/util/pmu.c return n; n 1367 tools/perf/util/pmu.c int n; n 1376 tools/perf/util/pmu.c n = printf("%s%.*s", column > start ? 
" " : "", wlen, s); n 1377 tools/perf/util/pmu.c if (n <= 0) n 1380 tools/perf/util/pmu.c column += n; n 538 tools/perf/util/probe-file.c struct probe_cache_entry *entry, *n; n 540 tools/perf/util/probe-file.c list_for_each_entry_safe(entry, n, &pcache->entries, node) { n 1229 tools/perf/util/probe-finder.c int n = 0; n 1239 tools/perf/util/probe-finder.c args[n] = pf->pev->args[i]; n 1240 tools/perf/util/probe-finder.c n++; n 1244 tools/perf/util/probe-finder.c vf.nargs = n; n 1248 tools/perf/util/probe-finder.c pr_debug(" (%d)\n", vf.nargs - n); n 1251 tools/perf/util/probe-finder.c n = vf.nargs; n 1253 tools/perf/util/probe-finder.c return n; n 1546 tools/perf/util/probe-finder.c int n, i; n 1558 tools/perf/util/probe-finder.c n = dwfl_module_relocations(dbg->mod); n 1559 tools/perf/util/probe-finder.c if (n < 0) n 1562 tools/perf/util/probe-finder.c for (i = 0; i < n; i++) { n 922 tools/perf/util/python.c int timeout = -1, n; n 927 tools/perf/util/python.c n = evlist__poll(evlist, timeout); n 928 tools/perf/util/python.c if (n < 0) { n 933 tools/perf/util/python.c return Py_BuildValue("i", n); n 229 tools/perf/util/s390-cpumsf.c size_t n = fwrite(sample->raw_data, sample->raw_size - 4, 1, n 231 tools/perf/util/s390-cpumsf.c if (n != 1) { n 209 tools/perf/util/scripting-engines/trace-event-python.c unsigned n = 0; n 220 tools/perf/util/scripting-engines/trace-event-python.c PyTuple_SetItem(t, n++, _PyUnicode_FromString(ev_name)); n 221 tools/perf/util/scripting-engines/trace-event-python.c PyTuple_SetItem(t, n++, _PyUnicode_FromString(field_name)); n 222 tools/perf/util/scripting-engines/trace-event-python.c PyTuple_SetItem(t, n++, _PyLong_FromLong(value)); n 223 tools/perf/util/scripting-engines/trace-event-python.c PyTuple_SetItem(t, n++, _PyUnicode_FromString(field_str)); n 249 tools/perf/util/scripting-engines/trace-event-python.c unsigned n = 0; n 261 tools/perf/util/scripting-engines/trace-event-python.c PyTuple_SetItem(t, n++, _PyUnicode_FromString(ev_name)); n 262 tools/perf/util/scripting-engines/trace-event-python.c PyTuple_SetItem(t, n++, _PyUnicode_FromString(field_name)); n 264 tools/perf/util/scripting-engines/trace-event-python.c PyTuple_SetItem(t, n++, _PyUnicode_FromString(delim)); n 802 tools/perf/util/scripting-engines/trace-event-python.c unsigned n = 0; n 846 tools/perf/util/scripting-engines/trace-event-python.c PyTuple_SetItem(t, n++, _PyUnicode_FromString(handler_name)); n 847 tools/perf/util/scripting-engines/trace-event-python.c PyTuple_SetItem(t, n++, context); n 855 tools/perf/util/scripting-engines/trace-event-python.c PyTuple_SetItem(t, n++, _PyLong_FromLong(cpu)); n 856 tools/perf/util/scripting-engines/trace-event-python.c PyTuple_SetItem(t, n++, _PyLong_FromLong(s)); n 857 tools/perf/util/scripting-engines/trace-event-python.c PyTuple_SetItem(t, n++, _PyLong_FromLong(ns)); n 858 tools/perf/util/scripting-engines/trace-event-python.c PyTuple_SetItem(t, n++, _PyLong_FromLong(pid)); n 859 tools/perf/util/scripting-engines/trace-event-python.c PyTuple_SetItem(t, n++, _PyUnicode_FromString(comm)); n 860 tools/perf/util/scripting-engines/trace-event-python.c PyTuple_SetItem(t, n++, callchain); n 894 tools/perf/util/scripting-engines/trace-event-python.c PyTuple_SetItem(t, n++, obj); n 901 tools/perf/util/scripting-engines/trace-event-python.c PyTuple_SetItem(t, n++, dict); n 903 tools/perf/util/scripting-engines/trace-event-python.c if (get_argument_count(handler) == (int) n + 1) { n 906 tools/perf/util/scripting-engines/trace-event-python.c PyTuple_SetItem(t, 
n++, all_entries_dict); n 911 tools/perf/util/scripting-engines/trace-event-python.c if (_PyTuple_Resize(&t, n) == -1) n 1283 tools/perf/util/scripting-engines/trace-event-python.c unsigned n = 0; n 1303 tools/perf/util/scripting-engines/trace-event-python.c PyTuple_SetItem(t, n++, dict); n 1304 tools/perf/util/scripting-engines/trace-event-python.c if (_PyTuple_Resize(&t, n) == -1) n 1361 tools/perf/util/scripting-engines/trace-event-python.c int n = 0; n 1376 tools/perf/util/scripting-engines/trace-event-python.c PyTuple_SetItem(t, n++, _PyLong_FromLong(cpu)); n 1377 tools/perf/util/scripting-engines/trace-event-python.c PyTuple_SetItem(t, n++, _PyLong_FromLong(thread)); n 1379 tools/perf/util/scripting-engines/trace-event-python.c tuple_set_u64(t, n++, tstamp); n 1380 tools/perf/util/scripting-engines/trace-event-python.c tuple_set_u64(t, n++, count->val); n 1381 tools/perf/util/scripting-engines/trace-event-python.c tuple_set_u64(t, n++, count->ena); n 1382 tools/perf/util/scripting-engines/trace-event-python.c tuple_set_u64(t, n++, count->run); n 1384 tools/perf/util/scripting-engines/trace-event-python.c if (_PyTuple_Resize(&t, n) == -1) n 1418 tools/perf/util/scripting-engines/trace-event-python.c int n = 0; n 1430 tools/perf/util/scripting-engines/trace-event-python.c tuple_set_u64(t, n++, tstamp); n 1432 tools/perf/util/scripting-engines/trace-event-python.c if (_PyTuple_Resize(&t, n) == -1) n 365 tools/perf/util/session.c static int skipn(int fd, off_t n) n 370 tools/perf/util/session.c while (n > 0) { n 371 tools/perf/util/session.c ret = read(fd, buf, min(n, (off_t)sizeof(buf))); n 374 tools/perf/util/session.c n -= ret; n 726 tools/perf/util/session.c #define bswap_safe(f, n) \ n 728 tools/perf/util/session.c sizeof(attr->f) * (n))) n 57 tools/perf/util/sort.c int n; n 61 tools/perf/util/sort.c n = vsnprintf(bf, size, fmt, ap); n 62 tools/perf/util/sort.c if (symbol_conf.field_sep && n > 0) { n 74 tools/perf/util/sort.c if (n >= (int)size) n 76 tools/perf/util/sort.c return n; n 1659 tools/perf/util/sort.c #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) } n 1684 tools/perf/util/sort.c #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) } n 1702 tools/perf/util/sort.c #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) } n 1724 tools/perf/util/sort.c #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], } n 2785 tools/perf/util/sort.c char *n; n 2790 tools/perf/util/sort.c if (asprintf(&n, "%s,%s", pre, str) < 0) n 2794 tools/perf/util/sort.c return n; n 3127 tools/perf/util/sort.c static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n, n 3132 tools/perf/util/sort.c for (i = 0; i < n; i++) n 3136 tools/perf/util/sort.c static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n, n 3141 tools/perf/util/sort.c for (i = 0; i < n; i++) n 646 tools/perf/util/srcline.c struct rb_node *n = tree->rb_root.rb_node; n 648 tools/perf/util/srcline.c while (n) { n 649 tools/perf/util/srcline.c struct srcline_node *i = rb_entry(n, struct srcline_node, n 653 tools/perf/util/srcline.c n = n->rb_left; n 655 tools/perf/util/srcline.c n = n->rb_right; n 730 tools/perf/util/srcline.c struct rb_node *n = tree->rb_root.rb_node; n 732 tools/perf/util/srcline.c while (n) { n 733 tools/perf/util/srcline.c struct inline_node *i = rb_entry(n, struct inline_node, n 737 tools/perf/util/srcline.c n = n->rb_left; n 739 tools/perf/util/srcline.c n = n->rb_right; n 168 
tools/perf/util/stat-display.c int n; n 181 tools/perf/util/stat-display.c n = fprintf(out, " # "); n 183 tools/perf/util/stat-display.c n += color_fprintf(out, color, fmt, val); n 185 tools/perf/util/stat-display.c n += fprintf(out, fmt, val); n 186 tools/perf/util/stat-display.c fprintf(out, " %-*s", METRIC_LEN - n - 1, unit); n 1063 tools/perf/util/stat-display.c int h, n = 1 + abs((int) (100.0 * (run - avg)/run) / 5); n 1068 tools/perf/util/stat-display.c for (h = 0; h < n; h++) n 1086 tools/perf/util/stat-display.c int n; n 1121 tools/perf/util/stat-display.c sysctl__read_int("kernel/nmi_watchdog", &n) >= 0 && n 1122 tools/perf/util/stat-display.c n > 0) n 420 tools/perf/util/stat-shadow.c return v->stats.n; n 736 tools/perf/util/stat-shadow.c char *n, *pn; n 761 tools/perf/util/stat-shadow.c n = strdup(metric_events[i]->name); n 762 tools/perf/util/stat-shadow.c if (!n) n 769 tools/perf/util/stat-shadow.c pn = strchr(n, ' '); n 774 tools/perf/util/stat-shadow.c expr__add_id(&pctx, n, metric_total); n 776 tools/perf/util/stat-shadow.c expr__add_id(&pctx, n, avg_stats(stats)*scale); n 22 tools/perf/util/stat.c stats->n++; n 24 tools/perf/util/stat.c stats->mean += delta / stats->n; n 59 tools/perf/util/stat.c if (stats->n < 2) n 62 tools/perf/util/stat.c variance = stats->M2 / (stats->n - 1); n 63 tools/perf/util/stat.c variance_mean = variance / stats->n; n 16 tools/perf/util/stat.h double n, mean, M2; n 138 tools/perf/util/stat.h stats->n = 0.0; n 87 tools/perf/util/strlist.h #define strlist__for_each_entry_safe(pos, n, slist) \ n 88 tools/perf/util/strlist.h for (pos = strlist__first(slist), n = strlist__next(pos); pos;\ n 89 tools/perf/util/strlist.h pos = n, n = strlist__next(n)) n 452 tools/perf/util/symbol-elf.c #define NOTE_ALIGN(n) (((n) + 3) & -4U) n 595 tools/perf/util/symbol-elf.c int n = namesz + descsz; n 597 tools/perf/util/symbol-elf.c if (n > (int)sizeof(bf)) { n 598 tools/perf/util/symbol-elf.c n = sizeof(bf); n 602 tools/perf/util/symbol-elf.c if (read(fd, bf, n) != n) n 1260 tools/perf/util/symbol-elf.c size_t n; n 1274 tools/perf/util/symbol-elf.c n = page_size; n 1275 tools/perf/util/symbol-elf.c if (len < n) n 1276 tools/perf/util/symbol-elf.c n = len; n 1278 tools/perf/util/symbol-elf.c r = read(from, buf, n); n 1283 tools/perf/util/symbol-elf.c n = r; n 1284 tools/perf/util/symbol-elf.c r = write(to, buf, n); n 1287 tools/perf/util/symbol-elf.c if ((size_t)r != n) n 1289 tools/perf/util/symbol-elf.c len -= n; n 119 tools/perf/util/symbol.c unsigned int n) n 121 tools/perf/util/symbol.c return strncmp(namea, nameb, n); n 358 tools/perf/util/symbol.c struct rb_node *n; n 363 tools/perf/util/symbol.c n = symbols->rb_root.rb_node; n 365 tools/perf/util/symbol.c while (n) { n 366 tools/perf/util/symbol.c struct symbol *s = rb_entry(n, struct symbol, rb_node); n 369 tools/perf/util/symbol.c n = n->rb_left; n 371 tools/perf/util/symbol.c n = n->rb_right; n 381 tools/perf/util/symbol.c struct rb_node *n = rb_first_cached(symbols); n 383 tools/perf/util/symbol.c if (n) n 384 tools/perf/util/symbol.c return rb_entry(n, struct symbol, rb_node); n 391 tools/perf/util/symbol.c struct rb_node *n = rb_last(&symbols->rb_root); n 393 tools/perf/util/symbol.c if (n) n 394 tools/perf/util/symbol.c return rb_entry(n, struct symbol, rb_node); n 401 tools/perf/util/symbol.c struct rb_node *n = rb_next(&sym->rb_node); n 403 tools/perf/util/symbol.c if (n) n 404 tools/perf/util/symbol.c return rb_entry(n, struct symbol, rb_node); n 464 tools/perf/util/symbol.c struct rb_node *n; n 470 
tools/perf/util/symbol.c n = symbols->rb_root.rb_node; n 472 tools/perf/util/symbol.c while (n) { n 475 tools/perf/util/symbol.c s = rb_entry(n, struct symbol_name_rb_node, rb_node); n 479 tools/perf/util/symbol.c n = n->rb_left; n 481 tools/perf/util/symbol.c n = n->rb_right; n 486 tools/perf/util/symbol.c if (n == NULL) n 491 tools/perf/util/symbol.c for (n = rb_prev(n); n; n = rb_prev(n)) { n 494 tools/perf/util/symbol.c tmp = rb_entry(n, struct symbol_name_rb_node, rb_node); n 550 tools/perf/util/symbol.c struct rb_node *n = rb_next(&s->rb_node); n 552 tools/perf/util/symbol.c return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL; n 579 tools/perf/util/symbol.c size_t n; n 593 tools/perf/util/symbol.c line_len = getline(&line, &n, file); n 960 tools/perf/util/symbol.c struct rb_node *n = modules->rb_node; n 962 tools/perf/util/symbol.c while (n) { n 966 tools/perf/util/symbol.c m = rb_entry(n, struct module_info, rb_node); n 969 tools/perf/util/symbol.c n = n->rb_left; n 971 tools/perf/util/symbol.c n = n->rb_right; n 1470 tools/perf/util/symbol.c size_t n; n 1483 tools/perf/util/symbol.c line_len = getline(&line, &n, file); n 239 tools/perf/util/symbol.h unsigned int n); n 77 tools/perf/util/synthetic-events.c ssize_t n; n 91 tools/perf/util/synthetic-events.c n = read(fd, bf, sizeof(bf) - 1); n 93 tools/perf/util/synthetic-events.c if (n <= 0) { n 98 tools/perf/util/synthetic-events.c bf[n] = '\0'; n 317 tools/perf/util/synthetic-events.c ssize_t n; n 335 tools/perf/util/synthetic-events.c n = sscanf(bf, "%"PRI_lx64"-%"PRI_lx64" %s %"PRI_lx64" %x:%x %u %[^\n]\n", n 344 tools/perf/util/synthetic-events.c if (n < 7) n 730 tools/perf/util/synthetic-events.c int m, n, i, j; n 740 tools/perf/util/synthetic-events.c n = scandir(proc_path, &dirent, 0, alphasort); n 741 tools/perf/util/synthetic-events.c if (n < 0) n 752 tools/perf/util/synthetic-events.c dirent, base, n); n 755 tools/perf/util/synthetic-events.c if (thread_nr > n) n 756 tools/perf/util/synthetic-events.c thread_nr = n; n 766 tools/perf/util/synthetic-events.c num_per_thread = n / thread_nr; n 767 tools/perf/util/synthetic-events.c m = n % thread_nr; n 799 tools/perf/util/synthetic-events.c for (i = 0; i < n; i++) n 1407 tools/perf/util/synthetic-events.c size_t nr = 0, i = 0, sz, max_nr, n; n 1418 tools/perf/util/synthetic-events.c n = nr > max_nr ? 
max_nr : nr; n 1419 tools/perf/util/synthetic-events.c sz = sizeof(struct perf_record_id_index) + n * sizeof(struct id_index_entry); n 1426 tools/perf/util/synthetic-events.c ev->id_index.nr = n; n 1435 tools/perf/util/synthetic-events.c if (i >= n) { n 1439 tools/perf/util/synthetic-events.c nr -= n; n 93 tools/perf/util/trigger.h #define DEFINE_TRIGGER(n) \ n 94 tools/perf/util/trigger.h struct trigger n = {.state = TRIGGER_OFF, .name = #n} n 58 tools/perf/util/units.c int unit_number__scnprintf(char *buf, size_t size, u64 n) n 63 tools/perf/util/units.c while (((n / 1024) > 1) && (i < 3)) { n 64 tools/perf/util/units.c n /= 1024; n 68 tools/perf/util/units.c return scnprintf(buf, size, "%" PRIu64 "%c", n, unit[i]); n 16 tools/perf/util/units.h int unit_number__scnprintf(char *buf, size_t size, u64 n); n 237 tools/perf/util/util.c size_t n = 1; n 240 tools/perf/util/util.c ++n; n 242 tools/perf/util/util.c return n; n 376 tools/perf/util/util.c int n = readlink("/proc/self/exe", buf, len); n 377 tools/perf/util/util.c if (n > 0) { n 378 tools/perf/util/util.c buf[n] = 0; n 25 tools/perf/util/xyarray.c size_t n = xy->entries * xy->entry_size; n 27 tools/perf/util/xyarray.c memset(xy->contents, 0, n); n 15 tools/power/cpupower/utils/helpers/bitmask.c #define longsperbits(n) howmany(n, bitsperlong) n 24 tools/power/cpupower/utils/helpers/bitmask.c struct bitmask *bitmask_alloc(unsigned int n) n 31 tools/power/cpupower/utils/helpers/bitmask.c bmp->size = n; n 32 tools/power/cpupower/utils/helpers/bitmask.c bmp->maskp = calloc(longsperbits(n), sizeof(unsigned long)); n 62 tools/power/cpupower/utils/helpers/bitmask.c static unsigned int _getbit(const struct bitmask *bmp, unsigned int n) n 64 tools/power/cpupower/utils/helpers/bitmask.c if (n < bmp->size) n 65 tools/power/cpupower/utils/helpers/bitmask.c return (bmp->maskp[n/bitsperlong] >> (n % bitsperlong)) & 1; n 71 tools/power/cpupower/utils/helpers/bitmask.c static void _setbit(struct bitmask *bmp, unsigned int n, unsigned int v) n 73 tools/power/cpupower/utils/helpers/bitmask.c if (n < bmp->size) { n 75 tools/power/cpupower/utils/helpers/bitmask.c bmp->maskp[n/bitsperlong] |= 1UL << (n % bitsperlong); n 77 tools/power/cpupower/utils/helpers/bitmask.c bmp->maskp[n/bitsperlong] &= n 78 tools/power/cpupower/utils/helpers/bitmask.c ~(1UL << (n % bitsperlong)); n 175 tools/power/cpupower/utils/helpers/bitmask.c unsigned int n; n 176 tools/power/cpupower/utils/helpers/bitmask.c for (n = i; n < bmp->size; n++) n 177 tools/power/cpupower/utils/helpers/bitmask.c if (_getbit(bmp, n)) n 179 tools/power/cpupower/utils/helpers/bitmask.c return n; n 16 tools/power/cpupower/utils/helpers/bitmask.h struct bitmask *bitmask_alloc(unsigned int n); n 66 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c void print_n_spaces(int n) n 69 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c for (x = 0; x < n; x++) n 76 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c int fill_string_with_spaces(char *s, int n) n 81 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c if (len >= n) n 84 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c temp = malloc(sizeof(char) * (n+1)); n 85 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c for (; len < n; len++) n 88 tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c snprintf(temp, n+1, " %s", s); n 223 tools/testing/nvdimm/test/iomap.c resource_size_t n); n 243 tools/testing/nvdimm/test/iomap.c resource_size_t n) n 267 tools/testing/nvdimm/test/iomap.c WARN(!res || 
resource_size(res) != n, n 269 tools/testing/nvdimm/test/iomap.c __func__, start, n, res); n 280 tools/testing/nvdimm/test/iomap.c resource_size_t n, const char *name, int flags) n 290 tools/testing/nvdimm/test/iomap.c if (start + n > nfit_res->res.start n 293 tools/testing/nvdimm/test/iomap.c __func__, start, n, n 318 tools/testing/nvdimm/test/iomap.c res->end = start + n - 1; n 343 tools/testing/nvdimm/test/iomap.c return __devm_request_region(dev, parent, start, n, name); n 344 tools/testing/nvdimm/test/iomap.c return __request_region(parent, start, n, name, flags); n 348 tools/testing/nvdimm/test/iomap.c resource_size_t start, resource_size_t n, const char *name, n 351 tools/testing/nvdimm/test/iomap.c return nfit_test_request_region(NULL, parent, start, n, name, flags); n 373 tools/testing/nvdimm/test/iomap.c resource_size_t n, const char *name) n 377 tools/testing/nvdimm/test/iomap.c return nfit_test_request_region(dev, parent, start, n, name, 0); n 382 tools/testing/nvdimm/test/iomap.c resource_size_t n) n 384 tools/testing/nvdimm/test/iomap.c if (!nfit_test_release_region(NULL, parent, start, n)) n 385 tools/testing/nvdimm/test/iomap.c __release_region(parent, start, n); n 390 tools/testing/nvdimm/test/iomap.c resource_size_t start, resource_size_t n) n 392 tools/testing/nvdimm/test/iomap.c if (!nfit_test_release_region(dev, parent, start, n)) n 393 tools/testing/nvdimm/test/iomap.c __devm_release_region(dev, parent, start, n); n 1484 tools/testing/nvdimm/test/nfit.c struct nfit_test_resource *n, *nfit_res = NULL; n 1490 tools/testing/nvdimm/test/nfit.c list_for_each_entry(n, &t->resources, list) { n 1491 tools/testing/nvdimm/test/nfit.c if (addr >= n->res.start && (addr < n->res.start n 1492 tools/testing/nvdimm/test/nfit.c + resource_size(&n->res))) { n 1493 tools/testing/nvdimm/test/nfit.c nfit_res = n; n 1495 tools/testing/nvdimm/test/nfit.c } else if (addr >= (unsigned long) n->buf n 1496 tools/testing/nvdimm/test/nfit.c && (addr < (unsigned long) n->buf n 1497 tools/testing/nvdimm/test/nfit.c + resource_size(&n->res))) { n 1498 tools/testing/nvdimm/test/nfit.c nfit_res = n; n 53 tools/testing/scatterlist/linux/mm.h #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) n 53 tools/testing/selftests/bpf/progs/btf_dump_test_case_ordering.c struct hlist_node n; n 198 tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c char n[16]; n 58 tools/testing/selftests/bpf/test_btf_dump.c int test_btf_dump_case(int n, struct btf_dump_test_case *test_case) n 65 tools/testing/selftests/bpf/test_btf_dump.c fprintf(stderr, "Test case #%d (%s): ", n, test_case->name); n 32 tools/testing/selftests/bpf/test_hashmap.c static inline size_t next_pow_2(size_t n) n 36 tools/testing/selftests/bpf/test_hashmap.c while (r < n) n 46 tools/testing/selftests/bpf/test_lpm_map.c size_t n; n 48 tools/testing/selftests/bpf/test_lpm_map.c n = (n_bits + 7) / 8; n 53 tools/testing/selftests/bpf/test_lpm_map.c memcpy(node->key, key, n); n 59 tools/testing/selftests/bpf/test_lpm_map.c node = malloc(sizeof(*node) + n); n 64 tools/testing/selftests/bpf/test_lpm_map.c memcpy(node->key, key, n); n 1438 tools/testing/selftests/bpf/test_maps.c unsigned int n) n 1457 tools/testing/selftests/bpf/test_maps.c for (i = 0; i < n; i++) { n 42 tools/testing/selftests/breakpoints/breakpoint_test.c static void set_breakpoint_addr(void *addr, int n) n 47 tools/testing/selftests/breakpoints/breakpoint_test.c offsetof(struct user, u_debugreg[n]), addr); n 53 tools/testing/selftests/breakpoints/breakpoint_test.c 
static void toggle_breakpoint(int n, int type, int len, n 92 tools/testing/selftests/breakpoints/breakpoint_test.c vdr7 <<= 4 * n; n 95 tools/testing/selftests/breakpoints/breakpoint_test.c vdr7 |= 1 << (2 * n); n 99 tools/testing/selftests/breakpoints/breakpoint_test.c vdr7 |= 2 << (2 * n); n 66 tools/testing/selftests/ia64/aliasing-test.c int i, n, r, rc = 0, result = 0; n 69 tools/testing/selftests/ia64/aliasing-test.c n = scandir(path, &namelist, 0, alphasort); n 70 tools/testing/selftests/ia64/aliasing-test.c if (n < 0) { n 75 tools/testing/selftests/ia64/aliasing-test.c for (i = 0; i < n; i++) { n 151 tools/testing/selftests/ia64/aliasing-test.c int i, n, r, rc = 0, result = 0; n 154 tools/testing/selftests/ia64/aliasing-test.c n = scandir(path, &namelist, 0, alphasort); n 155 tools/testing/selftests/ia64/aliasing-test.c if (n < 0) { n 160 tools/testing/selftests/ia64/aliasing-test.c for (i = 0; i < n; i++) { n 98 tools/testing/selftests/ir/ir_loopback.c int rlircfd, wlircfd, protocolfd, i, n; n 133 tools/testing/selftests/ir/ir_loopback.c for (n = 0; n < TEST_SCANCODES; n++) { n 240 tools/testing/selftests/kvm/include/x86_64/processor.h static inline void set_xmm(int n, unsigned long val) n 242 tools/testing/selftests/kvm/include/x86_64/processor.h switch (n) { n 271 tools/testing/selftests/kvm/include/x86_64/processor.h static inline unsigned long get_xmm(int n) n 273 tools/testing/selftests/kvm/include/x86_64/processor.h assert(n >= 0 && n <= 7); n 283 tools/testing/selftests/kvm/include/x86_64/processor.h switch (n) { n 30 tools/testing/selftests/kvm/lib/assert.c size_t n = 20; n 31 tools/testing/selftests/kvm/lib/assert.c void *stack[n]; n 36 tools/testing/selftests/kvm/lib/assert.c n * (((sizeof(void *)) * 2) + 1) + n 41 tools/testing/selftests/kvm/lib/assert.c n = backtrace(stack, n); n 49 tools/testing/selftests/kvm/lib/assert.c for (i = 2; i < n; i++) n 23 tools/testing/selftests/kvm/lib/kvm_util_internal.h #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) n 1377 tools/testing/selftests/kvm/lib/sparsebit.c sparsebit_num_t n; n 1403 tools/testing/selftests/kvm/lib/sparsebit.c for (idx = start, n = num; n > 0 && idx % MASK_BITS != 0; idx++, n--) n 1408 tools/testing/selftests/kvm/lib/sparsebit.c middle_end = middle_start + (n & -MASK_BITS) - 1; n 1409 tools/testing/selftests/kvm/lib/sparsebit.c if (n >= MASK_BITS) { n 1444 tools/testing/selftests/kvm/lib/sparsebit.c n -= middle_end - middle_start + 1; n 1447 tools/testing/selftests/kvm/lib/sparsebit.c assert(n < MASK_BITS); n 1448 tools/testing/selftests/kvm/lib/sparsebit.c for (; n > 0; idx++, n--) n 1459 tools/testing/selftests/kvm/lib/sparsebit.c sparsebit_num_t n; n 1466 tools/testing/selftests/kvm/lib/sparsebit.c for (idx = start, n = num; n > 0 && idx % MASK_BITS != 0; idx++, n--) n 1471 tools/testing/selftests/kvm/lib/sparsebit.c middle_end = middle_start + (n & -MASK_BITS) - 1; n 1472 tools/testing/selftests/kvm/lib/sparsebit.c if (n >= MASK_BITS) { n 1513 tools/testing/selftests/kvm/lib/sparsebit.c n -= middle_end - middle_start + 1; n 1516 tools/testing/selftests/kvm/lib/sparsebit.c assert(n < MASK_BITS); n 1517 tools/testing/selftests/kvm/lib/sparsebit.c for (; n > 0; idx++, n--) n 1504 tools/testing/selftests/net/nettest.c int i, n = 0, olen = len + 1; n 1515 tools/testing/selftests/net/nettest.c i = snprintf(m + n, olen - n, "%.26s", n 1517 tools/testing/selftests/net/nettest.c n += i; n 1520 tools/testing/selftests/net/nettest.c i = snprintf(m + n, olen - n, "%.*s", len, n 354 
tools/testing/selftests/net/psock_tpacket.c static inline void *get_next_frame(struct ring *ring, int n) n 361 tools/testing/selftests/net/psock_tpacket.c return ring->rd[n].iov_base; n 363 tools/testing/selftests/net/psock_tpacket.c return f0 + (n * ring->req3.tp_frame_size); n 92 tools/testing/selftests/pidfd/pidfd_open_test.c size_t n = 0; n 102 tools/testing/selftests/pidfd/pidfd_open_test.c while (getline(&line, &n, f) != -1) { n 73 tools/testing/selftests/powerpc/alignment/alignment_handler.c #define XFORM(reg, n) " " #reg " ,%"#n",%2 ;" n 74 tools/testing/selftests/powerpc/alignment/alignment_handler.c #define DFORM(reg, n) " " #reg " ,0(%"#n") ;" n 152 tools/testing/selftests/powerpc/alignment/alignment_handler.c void dumpdata(char *s1, char *s2, int n, char *test_name) n 158 tools/testing/selftests/powerpc/alignment/alignment_handler.c for (i = 0; i < n; i++) n 162 tools/testing/selftests/powerpc/alignment/alignment_handler.c for (i = 0; i < n; i++) n 167 tools/testing/selftests/powerpc/alignment/alignment_handler.c int test_memcmp(void *s1, void *s2, int n, int offset, char *test_name) n 176 tools/testing/selftests/powerpc/alignment/alignment_handler.c if (memcmp(s1c, s2c, n)) { n 179 tools/testing/selftests/powerpc/alignment/alignment_handler.c offset, n); n 180 tools/testing/selftests/powerpc/alignment/alignment_handler.c dumpdata(s1c, s2c, n, test_name); n 25 tools/testing/selftests/powerpc/mm/segv_errors.c static void segv_handler(int n, siginfo_t *info, void *ctxt_v) n 51 tools/testing/selftests/powerpc/mm/wild_bctr.c #define POISONED_REG(n) ((((unsigned long)REG_POISON) << 48) | ((n) << 32) | \ n 52 tools/testing/selftests/powerpc/mm/wild_bctr.c (((unsigned long)REG_POISON) << 16) | (n)) n 56 tools/testing/selftests/powerpc/mm/wild_bctr.c #define POISON_REG(n) \ n 57 tools/testing/selftests/powerpc/mm/wild_bctr.c "lis " __stringify(n) "," __stringify(REG_POISON) ";" \ n 58 tools/testing/selftests/powerpc/mm/wild_bctr.c "addi " __stringify(n) "," __stringify(n) "," __stringify(n) ";" \ n 59 tools/testing/selftests/powerpc/mm/wild_bctr.c "sldi " __stringify(n) "," __stringify(n) ", 32 ;" \ n 60 tools/testing/selftests/powerpc/mm/wild_bctr.c "oris " __stringify(n) "," __stringify(n) "," __stringify(REG_POISON) ";" \ n 61 tools/testing/selftests/powerpc/mm/wild_bctr.c "addi " __stringify(n) "," __stringify(n) "," __stringify(n) ";" n 77 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_GPR(n, base) std n,GPR0+8*(n)(base) n 78 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_GPR(n, base) ld n,GPR0+8*(n)(base) n 82 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_GPR(n, base) stw n,GPR0+4*(n)(base) n 83 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_GPR(n, base) lwz n,GPR0+4*(n)(base) n 88 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base) n 89 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_4GPRS(n, base) SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base) n 90 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_8GPRS(n, base) SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base) n 91 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_10GPRS(n, base) SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base) n 92 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_2GPRS(n, base) REST_GPR(n, base); REST_GPR(n+1, base) n 93 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h 
#define REST_4GPRS(n, base) REST_2GPRS(n, base); REST_2GPRS(n+2, base) n 94 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base) n 95 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base) n 97 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_FPR(n, base) stfd n,8*TS_FPRWIDTH*(n)(base) n 98 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base) n 99 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base) n 100 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_8FPRS(n, base) SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base) n 101 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_16FPRS(n, base) SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base) n 102 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_32FPRS(n, base) SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base) n 103 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_FPR(n, base) lfd n,8*TS_FPRWIDTH*(n)(base) n 104 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_2FPRS(n, base) REST_FPR(n, base); REST_FPR(n+1, base) n 105 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_4FPRS(n, base) REST_2FPRS(n, base); REST_2FPRS(n+2, base) n 106 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_8FPRS(n, base) REST_4FPRS(n, base); REST_4FPRS(n+4, base) n 107 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_16FPRS(n, base) REST_8FPRS(n, base); REST_8FPRS(n+8, base) n 108 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_32FPRS(n, base) REST_16FPRS(n, base); REST_16FPRS(n+16, base) n 110 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_VR(n,b,base) li b,16*(n); stvx n,base,b n 111 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_2VRS(n,b,base) SAVE_VR(n,b,base); SAVE_VR(n+1,b,base) n 112 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_4VRS(n,b,base) SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base) n 113 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_8VRS(n,b,base) SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base) n 114 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_16VRS(n,b,base) SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base) n 115 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_32VRS(n,b,base) SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base) n 116 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_VR(n,b,base) li b,16*(n); lvx n,base,b n 117 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_2VRS(n,b,base) REST_VR(n,b,base); REST_VR(n+1,b,base) n 118 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_4VRS(n,b,base) REST_2VRS(n,b,base); REST_2VRS(n+2,b,base) n 119 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_8VRS(n,b,base) REST_4VRS(n,b,base); REST_4VRS(n+4,b,base) n 120 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_16VRS(n,b,base) REST_8VRS(n,b,base); REST_8VRS(n+8,b,base) n 121 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base) n 124 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h 
#define STXVD2X_ROT(n,b,base) STXVD2X(n,b,base) n 125 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define LXVD2X_ROT(n,b,base) LXVD2X(n,b,base) n 127 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define STXVD2X_ROT(n,b,base) XXSWAPD(n,n); \ n 128 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h STXVD2X(n,b,base); \ n 129 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h XXSWAPD(n,n) n 131 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define LXVD2X_ROT(n,b,base) LXVD2X(n,b,base); \ n 132 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h XXSWAPD(n,n) n 135 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_VSR(n,b,base) li b,16*(n); STXVD2X_ROT(n,R##base,R##b) n 136 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_2VSRS(n,b,base) SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base) n 137 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_4VSRS(n,b,base) SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base) n 138 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_8VSRS(n,b,base) SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base) n 139 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_16VSRS(n,b,base) SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base) n 140 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_32VSRS(n,b,base) SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base) n 141 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_VSR(n,b,base) li b,16*(n); LXVD2X_ROT(n,R##base,R##b) n 142 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_2VSRS(n,b,base) REST_VSR(n,b,base); REST_VSR(n+1,b,base) n 143 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_4VSRS(n,b,base) REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base) n 144 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_8VSRS(n,b,base) REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base) n 145 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_16VSRS(n,b,base) REST_8VSRS(n,b,base); REST_8VSRS(n+8,b,base) n 146 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_32VSRS(n,b,base) REST_16VSRS(n,b,base); REST_16VSRS(n+16,b,base) n 152 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_EVR(n,s,b,o) evmergehi s,s,n; stw s,o+4*(n)(b) n 153 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_2EVRS(n,s,b,o) SAVE_EVR(n,s,b,o); SAVE_EVR(n+1,s,b,o) n 154 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_4EVRS(n,s,b,o) SAVE_2EVRS(n,s,b,o); SAVE_2EVRS(n+2,s,b,o) n 155 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_8EVRS(n,s,b,o) SAVE_4EVRS(n,s,b,o); SAVE_4EVRS(n+4,s,b,o) n 156 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_16EVRS(n,s,b,o) SAVE_8EVRS(n,s,b,o); SAVE_8EVRS(n+8,s,b,o) n 157 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define SAVE_32EVRS(n,s,b,o) SAVE_16EVRS(n,s,b,o); SAVE_16EVRS(n+16,s,b,o) n 158 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_EVR(n,s,b,o) lwz s,o+4*(n)(b); evmergelo n,s,n n 159 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_2EVRS(n,s,b,o) REST_EVR(n,s,b,o); REST_EVR(n+1,s,b,o) n 160 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_4EVRS(n,s,b,o) REST_2EVRS(n,s,b,o); REST_2EVRS(n+2,s,b,o) n 161 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_8EVRS(n,s,b,o) REST_4EVRS(n,s,b,o); 
REST_4EVRS(n+4,s,b,o) n 162 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_16EVRS(n,s,b,o) REST_8EVRS(n,s,b,o); REST_8EVRS(n+8,s,b,o) n 163 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define REST_32EVRS(n,s,b,o) REST_16EVRS(n,s,b,o); REST_16EVRS(n+16,s,b,o) n 179 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define __VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE)) n 180 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define VCPU_GPR(n) __VCPU_GPR(__REG_##n) n 243 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define _ENTRY(n) \ n 244 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h .globl n; \ n 245 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h n: n 247 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define _GLOBAL(n) \ n 248 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h .stabs __stringify(n:F-1),N_FUN,0,0,n;\ n 249 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h .globl n; \ n 250 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h n: n 102 tools/testing/selftests/powerpc/ptrace/ptrace.h int n) n 110 tools/testing/selftests/powerpc/ptrace/ptrace.h iov.iov_len = n * sizeof(unsigned long); n 122 tools/testing/selftests/powerpc/ptrace/ptrace.h int n) n 130 tools/testing/selftests/powerpc/ptrace/ptrace.h iov.iov_len = n * sizeof(unsigned long); n 29 tools/testing/selftests/powerpc/stringloops/memcmp.c int test_memcmp(const void *s1, const void *s2, size_t n); n 33 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/percpu.h #define __this_cpu_sub(pcp, n) __this_cpu_add(pcp, -(typeof(pcp)) (n)) n 37 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/percpu.h #define this_cpu_sub(pcp, n) this_cpu_add(pcp, -(typeof(pcp)) (n)) n 66 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/percpu.h #define __this_cpu_add(pcp, n) \ n 70 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/percpu.h (typeof(pcp)) (n)); \ n 73 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/percpu.h #define this_cpu_add(pcp, n) \ n 77 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/percpu.h (typeof(pcp)) (n)); \ n 84 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h #define __WORK_INITIALIZER(n, f) { \ n 86 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h .entry = { &(n).entry, &(n).entry }, \ n 91 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h #define __DELAYED_WORK_INITIALIZER(n, f, tflags) { \ n 92 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h .work = __WORK_INITIALIZER((n).work, (f)), \ n 95 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h #define DECLARE_WORK(n, f) \ n 96 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h struct workqueue_struct n = __WORK_INITIALIZER n 98 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h #define DECLARE_DELAYED_WORK(n, f) \ n 99 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0) n 66 tools/testing/selftests/rseq/param_test.c #define RSEQ_INJECT_ASM(n) \ n 67 tools/testing/selftests/rseq/param_test.c "mov asm_loop_cnt_" #n ", %%" INJECT_ASM_REG "\n\t" \ n 84 tools/testing/selftests/rseq/param_test.c #define RSEQ_INJECT_ASM(n) \ n 85 tools/testing/selftests/rseq/param_test.c "lea asm_loop_cnt_" #n "(%%rip), %%" INJECT_ASM_REG_P "\n\t" \ n 109 tools/testing/selftests/rseq/param_test.c #define RSEQ_INJECT_ASM(n) \ n 110 
tools/testing/selftests/rseq/param_test.c "l %%" INJECT_ASM_REG ", %[loop_cnt_" #n "]\n\t" \ n 133 tools/testing/selftests/rseq/param_test.c #define RSEQ_INJECT_ASM(n) \ n 134 tools/testing/selftests/rseq/param_test.c "ldr " INJECT_ASM_REG ", %[loop_cnt_" #n "]\n\t" \ n 154 tools/testing/selftests/rseq/param_test.c #define RSEQ_INJECT_ASM(n) \ n 155 tools/testing/selftests/rseq/param_test.c " ldr " INJECT_ASM_REG ", %[loop_cnt_" #n "]\n" \ n 177 tools/testing/selftests/rseq/param_test.c #define RSEQ_INJECT_ASM(n) \ n 178 tools/testing/selftests/rseq/param_test.c "lwz %%" INJECT_ASM_REG ", %[loop_cnt_" #n "]\n\t" \ n 201 tools/testing/selftests/rseq/param_test.c #define RSEQ_INJECT_ASM(n) \ n 202 tools/testing/selftests/rseq/param_test.c "lw " INJECT_ASM_REG ", %[loop_cnt_" #n "]\n\t" \ n 216 tools/testing/selftests/rseq/param_test.c #define RSEQ_INJECT_C(n) \ n 218 tools/testing/selftests/rseq/param_test.c int loc_i, loc_nr_loops = loop_cnt[n]; \ n 27 tools/testing/selftests/rseq/rseq.h #define RSEQ_INJECT_ASM(n) n 31 tools/testing/selftests/rseq/rseq.h #define RSEQ_INJECT_C(n) n 112 tools/testing/selftests/sync/sync_stress_consumer.c int n = test_data_mpsc.threads; n 116 tools/testing/selftests/sync/sync_stress_consumer.c for (i = 1; i < n; i++) { n 140 tools/testing/selftests/sync/sync_stress_consumer.c ASSERT(test_data_mpsc.counter == n * it, n 156 tools/testing/selftests/sync/sync_stress_consumer.c int n = 5; n 158 tools/testing/selftests/sync/sync_stress_consumer.c int producer_timelines[n]; n 160 tools/testing/selftests/sync/sync_stress_consumer.c pthread_t threads[n]; n 163 tools/testing/selftests/sync/sync_stress_consumer.c for (i = 0; i < n; i++) n 169 tools/testing/selftests/sync/sync_stress_consumer.c test_data_mpsc.threads = n; n 173 tools/testing/selftests/sync/sync_stress_consumer.c for (i = 0; i < n; i++) { n 181 tools/testing/selftests/sync/sync_stress_consumer.c for (i = 0; i < n; i++) n 111 tools/testing/selftests/timers/freq-step.c static void regress(struct sample *samples, int n, double *intercept, n 119 tools/testing/selftests/timers/freq-step.c for (i = 0; i < n; i++) { n 129 tools/testing/selftests/timers/freq-step.c *slope = (xy_sum - x_sum * y_sum / n) / (x2_sum - x_sum * x_sum / n); n 130 tools/testing/selftests/timers/freq-step.c *intercept = (y_sum - *slope * x_sum) / n; n 134 tools/testing/selftests/timers/freq-step.c for (i = 0; i < n; i++) { n 143 tools/testing/selftests/timers/freq-step.c *r_stddev = sqrt(r2_sum / n); n 66 tools/testing/selftests/vDSO/vdso_standalone_test_x86.c void to_base10(char *lastdig, time_t n) n 68 tools/testing/selftests/vDSO/vdso_standalone_test_x86.c while (n) { n 69 tools/testing/selftests/vDSO/vdso_standalone_test_x86.c *lastdig = (n % 10) + '0'; n 70 tools/testing/selftests/vDSO/vdso_standalone_test_x86.c n /= 10; n 275 tools/testing/selftests/vm/userfaultfd.c static int my_bcmp(char *str1, char *str2, size_t n) n 278 tools/testing/selftests/vm/userfaultfd.c for (i = 0; i < n; i++) n 152 tools/testing/vsock/vsock_diag_test.c int n = 0; n 155 tools/testing/vsock/vsock_diag_test.c n++; n 157 tools/testing/vsock/vsock_diag_test.c if (n != expected) { n 159 tools/testing/vsock/vsock_diag_test.c expected, n); n 540 tools/testing/vsock/vsock_diag_test.c unsigned long int n; n 543 tools/testing/vsock/vsock_diag_test.c n = strtoul(str, &endptr, 10); n 548 tools/testing/vsock/vsock_diag_test.c return n; n 283 tools/thermal/tmon/sysfs.c int i, j, n, k = 0; n 298 tools/thermal/tmon/sysfs.c n = scandir(tz_name, &namelist, 0, alphasort); n 
299 tools/thermal/tmon/sysfs.c if (n < 0) n 308 tools/thermal/tmon/sysfs.c while (n--) { n 311 tools/thermal/tmon/sysfs.c if (find_tzone_tp(tz_name, namelist[n]->d_name, n 314 tools/thermal/tmon/sysfs.c temp_str = strstr(namelist[n]->d_name, "cdev"); n 316 tools/thermal/tmon/sysfs.c free(namelist[n]); n 319 tools/thermal/tmon/sysfs.c if (!find_tzone_cdev(namelist[n], tz_name, n 322 tools/thermal/tmon/sysfs.c free(namelist[n]); n 341 tools/thermal/tmon/sysfs.c int i, n, k = 0; n 360 tools/thermal/tmon/sysfs.c n = scandir(cdev_name, &namelist, 0, alphasort); n 361 tools/thermal/tmon/sysfs.c if (n < 0) n 370 tools/thermal/tmon/sysfs.c while (n--) n 371 tools/thermal/tmon/sysfs.c free(namelist[n]); n 385 tools/thermal/tmon/sysfs.c int n; n 392 tools/thermal/tmon/sysfs.c n = scandir(THERMAL_SYSFS, &namelist, 0, alphasort); n 393 tools/thermal/tmon/sysfs.c if (n < 0) n 397 tools/thermal/tmon/sysfs.c while (n--) { n 400 tools/thermal/tmon/sysfs.c if (strstr(namelist[n]->d_name, CDEV)) { n 401 tools/thermal/tmon/sysfs.c inst = get_instance_id(namelist[n]->d_name, 1, n 410 tools/thermal/tmon/sysfs.c namelist[n]->d_name, n 414 tools/thermal/tmon/sysfs.c } else if (strstr(namelist[n]->d_name, TZONE)) { n 415 tools/thermal/tmon/sysfs.c inst = get_instance_id(namelist[n]->d_name, 1, n 421 tools/thermal/tmon/sysfs.c namelist[n]->d_name, n 426 tools/thermal/tmon/sysfs.c free(namelist[n]); n 208 tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c void init_bufs(struct io_buffer *iobuf, unsigned n, unsigned len) n 211 tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c iobuf->buf = malloc(n*sizeof(*iobuf->buf)); n 212 tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c iobuf->iocb = malloc(n*sizeof(*iobuf->iocb)); n 213 tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c iobuf->cnt = n; n 216 tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c for (i = 0; i < n; ++i) { n 220 tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c iobuf->cnt = n; n 601 tools/usb/ffs-test.c size_t n; n 605 tools/usb/ffs-test.c for (n = nbytes / sizeof *event; n; --n, ++event) n 120 tools/usb/usbip/libsrc/list.h #define list_for_each_safe(pos, n, head) \ n 121 tools/usb/usbip/libsrc/list.h for (pos = (head)->next, n = pos->next; pos != (head); \ n 122 tools/usb/usbip/libsrc/list.h pos = n, n = pos->next) n 475 tools/usb/usbip/libsrc/names.c int names_init(char *n) n 479 tools/usb/usbip/libsrc/names.c f = fopen(n, "r"); n 23 tools/usb/usbip/libsrc/names.h extern int names_init(char *n); n 160 tools/usb/usbip/libsrc/vhci_driver.c int n; n 166 tools/usb/usbip/libsrc/vhci_driver.c n = scandir(udev_device_get_syspath(platform), &namelist, vhci_hcd_filter, NULL); n 167 tools/usb/usbip/libsrc/vhci_driver.c if (n < 0) n 170 tools/usb/usbip/libsrc/vhci_driver.c for (int i = 0; i < n; i++) n 175 tools/usb/usbip/libsrc/vhci_driver.c return n; n 59 tools/virtio/linux/kernel.h static inline void *kmalloc_array(unsigned n, size_t s, gfp_t gfp) n 61 tools/virtio/linux/kernel.h return kmalloc(n * s, gfp); n 31 tools/virtio/linux/uaccess.h unsigned long n) n 33 tools/virtio/linux/uaccess.h while (n--) n 38 tools/virtio/linux/uaccess.h unsigned long n) n 40 tools/virtio/linux/uaccess.h __chk_user_ptr(from, n); n 41 tools/virtio/linux/uaccess.h volatile_memcpy(to, from, n); n 46 tools/virtio/linux/uaccess.h unsigned long n) n 48 tools/virtio/linux/uaccess.h __chk_user_ptr(to, n); n 49 tools/virtio/linux/uaccess.h volatile_memcpy(to, from, n); n 43 tools/virtio/ringtest/ptr_ring.c 
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) n 45 tools/virtio/ringtest/ptr_ring.c if (size != 0 && n > SIZE_MAX / size) n 47 tools/virtio/ringtest/ptr_ring.c return kmalloc(n * size, flags); n 50 tools/virtio/ringtest/ptr_ring.c static inline void *kcalloc(size_t n, size_t size, gfp_t flags) n 52 tools/virtio/ringtest/ptr_ring.c return kmalloc_array(n, size, flags | __GFP_ZERO); n 349 tools/vm/page-types.c size_t i, n; n 351 tools/vm/page-types.c for (i = 0, n = 0; i < ARRAY_SIZE(page_flag_names); i++) { n 355 tools/vm/page-types.c n += snprintf(buf + n, sizeof(buf) - n, "%s,", n 358 tools/vm/page-types.c if (n) n 359 tools/vm/page-types.c n--; n 360 tools/vm/page-types.c buf[n] = '\0'; n 876 tools/vm/page-types.c unsigned long long n; n 878 tools/vm/page-types.c n = strtoll(str, NULL, 0); n 880 tools/vm/page-types.c if (n == 0 && str[0] != '0') n 883 tools/vm/page-types.c return n; n 910 tools/vm/page-types.c int n; n 912 tools/vm/page-types.c n = sscanf(buf, "%lx-%lx %c%c%c%c %llx %x:%x %lu", n 919 tools/vm/page-types.c if (n < 10) { n 203 tools/vm/slabinfo.c static void set_obj(struct slabinfo *s, const char *name, int n) n 213 tools/vm/slabinfo.c fprintf(f, "%d\n", n); n 244 tools/vm/slabinfo.c int n; n 260 tools/vm/slabinfo.c n = sprintf(buffer, "%ld",value); n 262 tools/vm/slabinfo.c buffer[n] = trailer; n 263 tools/vm/slabinfo.c n++; n 264 tools/vm/slabinfo.c buffer[n] = 0; n 267 tools/vm/slabinfo.c memmove(buffer + n - 2, buffer + n - 3, 4); n 268 tools/vm/slabinfo.c buffer[n-2] = '.'; n 269 tools/vm/slabinfo.c n++; n 271 tools/vm/slabinfo.c return n; n 1145 virt/kvm/arm/arm.c unsigned n; n 1158 virt/kvm/arm/arm.c n = reg_list.n; n 1159 virt/kvm/arm/arm.c reg_list.n = kvm_arm_num_regs(vcpu); n 1163 virt/kvm/arm/arm.c if (n < reg_list.n) n 113 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_write_ap0rn(u32 val, int n) n 115 virt/kvm/arm/hyp/vgic-v3-sr.c switch (n) { n 131 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_write_ap1rn(u32 val, int n) n 133 virt/kvm/arm/hyp/vgic-v3-sr.c switch (n) { n 149 virt/kvm/arm/hyp/vgic-v3-sr.c static u32 __hyp_text __vgic_v3_read_ap0rn(int n) n 153 virt/kvm/arm/hyp/vgic-v3-sr.c switch (n) { n 173 virt/kvm/arm/hyp/vgic-v3-sr.c static u32 __hyp_text __vgic_v3_read_ap1rn(int n) n 177 virt/kvm/arm/hyp/vgic-v3-sr.c switch (n) { n 843 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_read_apxrn(struct kvm_vcpu *vcpu, int rt, int n) n 848 virt/kvm/arm/hyp/vgic-v3-sr.c val = __vgic_v3_read_ap0rn(n); n 850 virt/kvm/arm/hyp/vgic-v3-sr.c val = __vgic_v3_read_ap1rn(n); n 855 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_write_apxrn(struct kvm_vcpu *vcpu, int rt, int n) n 860 virt/kvm/arm/hyp/vgic-v3-sr.c __vgic_v3_write_ap0rn(val, n); n 862 virt/kvm/arm/hyp/vgic-v3-sr.c __vgic_v3_write_ap1rn(val, n); n 354 virt/kvm/arm/vgic/vgic-mmio-v2.c int n; /* which APRn is this */ n 356 virt/kvm/arm/vgic/vgic-mmio-v2.c n = (addr >> 2) & 0x3; n 360 virt/kvm/arm/vgic/vgic-mmio-v2.c if (n != 0) n 366 virt/kvm/arm/vgic/vgic-mmio-v2.c if (n > vgic_v3_max_apr_idx(vcpu)) n 369 virt/kvm/arm/vgic/vgic-mmio-v2.c n = array_index_nospec(n, 4); n 372 virt/kvm/arm/vgic/vgic-mmio-v2.c return vgicv3->vgic_ap1r[n]; n 380 virt/kvm/arm/vgic/vgic-mmio-v2.c int n; /* which APRn is this */ n 382 virt/kvm/arm/vgic/vgic-mmio-v2.c n = (addr >> 2) & 0x3; n 386 virt/kvm/arm/vgic/vgic-mmio-v2.c if (n != 0) n 392 virt/kvm/arm/vgic/vgic-mmio-v2.c if (n > vgic_v3_max_apr_idx(vcpu)) n 395 
virt/kvm/arm/vgic/vgic-mmio-v2.c n = array_index_nospec(n, 4); n 398 virt/kvm/arm/vgic/vgic-mmio-v2.c vgicv3->vgic_ap1r[n] = val; n 27 virt/kvm/irqchip.c int n = 0; n 33 virt/kvm/irqchip.c entries[n] = *e; n 34 virt/kvm/irqchip.c ++n; n 38 virt/kvm/irqchip.c return n; n 109 virt/kvm/irqchip.c struct hlist_node *n; n 111 virt/kvm/irqchip.c hlist_for_each_entry_safe(e, n, &rt->map[i], link) { n 1181 virt/kvm/kvm_main.c unsigned long n; n 1194 virt/kvm/kvm_main.c n = kvm_dirty_bitmap_bytes(memslot); n 1196 virt/kvm/kvm_main.c for (i = 0; !any && i < n/sizeof(long); ++i) n 1199 virt/kvm/kvm_main.c if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n)) n 1237 virt/kvm/kvm_main.c unsigned long n; n 1253 virt/kvm/kvm_main.c n = kvm_dirty_bitmap_bytes(memslot); n 1267 virt/kvm/kvm_main.c memset(dirty_bitmap_buffer, 0, n); n 1270 virt/kvm/kvm_main.c for (i = 0; i < n / sizeof(long); i++) { n 1288 virt/kvm/kvm_main.c if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n)) n 1308 virt/kvm/kvm_main.c unsigned long i, n; n 1327 virt/kvm/kvm_main.c n = ALIGN(log->num_pages, BITS_PER_LONG) / 8; n 1336 virt/kvm/kvm_main.c if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n)) n 1341 virt/kvm/kvm_main.c n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
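
Two of the `n`-heavy fragments quoted above recur often enough to be worth a standalone illustration. The first is the running-statistics pattern visible in the tools/perf/util/stat.c and stat.h entries (a floating-point `n` counter, an incrementally updated `mean`, and an `M2` accumulator divided by `n - 1`). The sketch below is a reconstruction from those fragments only, not the actual perf code: the struct fields `n`, `mean` and `M2` and the `stats->n < 2` guard come from the listing, while the function bodies, the `update_stats`/`stddev_stats` pairing and the demo `main()` are assumptions made to keep the example self-contained.

```c
/*
 * Minimal sketch of the running-stats pattern suggested by the
 * tools/perf/util/stat.c fragments in the listing above (n++,
 * mean += delta / n, M2-based variance).  Reconstructed for
 * illustration; not the verbatim perf implementation.
 */
#include <math.h>
#include <stdio.h>

struct stats {
	double n, mean, M2;	/* field names as seen in the listing */
};

static void update_stats(struct stats *stats, double val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;	 /* incremental mean */
	stats->M2 += delta * (val - stats->mean); /* Welford accumulator */
}

static double stddev_mean(const struct stats *stats)
{
	double variance, variance_mean;

	if (stats->n < 2)	/* too few samples for a variance */
		return 0.0;

	variance = stats->M2 / (stats->n - 1);
	variance_mean = variance / stats->n;
	return sqrt(variance_mean);	/* std. error of the mean */
}

int main(void)
{
	struct stats s = { .n = 0.0 };	/* init_stats() zeroes n, per the listing */
	double samples[] = { 10.0, 12.0, 9.0, 11.0 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		update_stats(&s, samples[i]);

	printf("mean=%f stddev(mean)=%f\n", s.mean, stddev_mean(&s));
	return 0;
}
```

The single-pass update avoids storing the samples, which is why the same shape shows up wherever perf aggregates counter values across runs.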
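
The second recurring shape is the size-humanizing loop from the tools/perf/util/units.c entries: divide `n` by 1024 at most three times, then print the remainder with a unit suffix. The sketch below follows those fragments under stated assumptions: the unit table and the use of plain `snprintf()` are stand-ins chosen so the example compiles outside the perf tree (the listed code uses `scnprintf()` and its own unit array, which is not shown here).

```c
/*
 * Sketch of the unit_number__scnprintf() loop suggested by the
 * tools/perf/util/units.c fragments above.  The unit table and the
 * plain snprintf() are assumptions for a standalone example.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static int unit_number_snprintf(char *buf, size_t size, uint64_t n)
{
	const char unit[] = { 'B', 'K', 'M', 'G' };	/* assumed table */
	int i = 0;

	/* Scale down by 1024 at most three times, as in the listing. */
	while (((n / 1024) > 1) && (i < 3)) {
		n /= 1024;
		i++;
	}

	return snprintf(buf, size, "%" PRIu64 "%c", n, unit[i]);
}

int main(void)
{
	char buf[32];

	unit_number_snprintf(buf, sizeof(buf), (uint64_t)3 * 1024 * 1024);
	printf("%s\n", buf);	/* prints "3M" */
	return 0;
}
```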