c 59 Documentation/usb/usbdevfs-drop-permissions.c int c, fd;
c 90 Documentation/usb/usbdevfs-drop-permissions.c while (scanf("%d", &c) == 1) {
c 91 Documentation/usb/usbdevfs-drop-permissions.c switch (c) {
c 69 arch/alpha/boot/misc.c # define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
c 70 arch/alpha/boot/misc.c # define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
c 76 arch/alpha/boot/misc.c # define Tracec(c,x)
c 77 arch/alpha/boot/misc.c # define Tracecv(c,x)
c 125 arch/alpha/boot/misc.c ulg c = crc;
c 133 arch/alpha/boot/misc.c c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
c 135 arch/alpha/boot/misc.c crc = c;
c 28 arch/alpha/boot/stdio.c int i, c;
c 30 arch/alpha/boot/stdio.c for (i = 0; '0' <= (c = **s) && c <= '9'; ++*s)
c 31 arch/alpha/boot/stdio.c i = i*10 + c - '0';
c 45 arch/alpha/boot/stdio.c char c,sign,tmp[66];
c 55 arch/alpha/boot/stdio.c c = (type & ZEROPAD) ? '0' : ' ';
c 100 arch/alpha/boot/stdio.c *str++ = c;
c 219 arch/alpha/include/asm/atomic.h int c, new, old;
c 232 arch/alpha/include/asm/atomic.h : [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
c 251 arch/alpha/include/asm/atomic.h s64 c, new, old;
c 264 arch/alpha/include/asm/atomic.h : [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
c 149 arch/alpha/include/asm/core_lca.h #define LCA_SET_PRIMARY_CLOCK(r, c) ((r) = (((r) & LCA_PMR_PRIMARY_MASK)|(c)))
c 196 arch/alpha/include/asm/core_lca.h struct el_common * c;
c 518 arch/alpha/include/asm/io.h static inline void memset_io(volatile void __iomem *addr, u8 c, long len)
c 520 arch/alpha/include/asm/io.h _memset_c_io(addr, 0x0101010101010101UL * c, len);
c 524 arch/alpha/include/asm/io.h static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len)
c 526 arch/alpha/include/asm/io.h _memset_c_io(addr, 0x0001000100010001UL * c, len);
c 70 arch/alpha/include/asm/local.h long c, old; \
c 71 arch/alpha/include/asm/local.h c = local_read(l); \
c 73 arch/alpha/include/asm/local.h if (unlikely(c == (u))) \
c 75 arch/alpha/include/asm/local.h old = local_cmpxchg((l), c, c + (a)); \
c 76 arch/alpha/include/asm/local.h if (likely(old == c)) \
c 78 arch/alpha/include/asm/local.h c = old; \
c 80 arch/alpha/include/asm/local.h c != (u); \
c 220 arch/alpha/include/asm/mmu_context.h # define switch_mm(a,b,c) alpha_mv.mv_switch_mm((a),(b),(c))
c 224 arch/alpha/include/asm/mmu_context.h # define switch_mm(a,b,c) ev4_switch_mm((a),(b),(c))
c 227 arch/alpha/include/asm/mmu_context.h # define switch_mm(a,b,c) ev5_switch_mm((a),(b),(c))
c 33 arch/alpha/include/asm/string.h extern inline void *__memset(void *s, int c, size_t n)
c 35 arch/alpha/include/asm/string.h if (__builtin_constant_p(c)) {
c 37 arch/alpha/include/asm/string.h return __builtin_memset(s, c, n);
c 39 arch/alpha/include/asm/string.h unsigned long c8 = (c & 0xff) * 0x0101010101010101UL;
c 43 arch/alpha/include/asm/string.h return ___memset(s, c, n);
c 33 arch/alpha/include/asm/vga.h static inline void scr_memsetw(u16 *s, u16 c, unsigned int count)
c 36 arch/alpha/include/asm/vga.h memsetw_io((u16 __iomem *) s, c, count);
c 38 arch/alpha/include/asm/vga.h memset16(s, c, count / 2);
c 22 arch/alpha/include/asm/word-at-a-time.h static inline unsigned long has_zero(unsigned long val, unsigned long *bits, const struct word_at_a_time *c)
c 29 arch/alpha/include/asm/word-at-a-time.h static inline unsigned long prep_zero_mask(unsigned long val, unsigned long bits, const struct word_at_a_time *c)
c 397 arch/alpha/kernel/core_lca.c el.c = (struct el_common *) la_ptr;
c 402 arch/alpha/kernel/core_lca.c vector, get_irq_regs()->pc, (unsigned int) el.c->code);
c 411 arch/alpha/kernel/core_lca.c switch ((unsigned int) el.c->code) {
c 429 arch/alpha/kernel/core_lca.c switch (el.c->size) {
c 433 arch/alpha/kernel/core_lca.c reason, el.c->retry ? ", retryable" : "",
c 445 arch/alpha/kernel/core_lca.c reason, el.c->retry ? ", retryable" : "");
c 459 arch/alpha/kernel/core_lca.c printk(KERN_CRIT " Unknown errorlog size %d\n", el.c->size);
c 467 arch/alpha/kernel/core_lca.c for (i = 0; i < el.c->size / sizeof(long); i += 2) {
c 25 arch/alpha/kernel/err_impl.h #define SUBPACKET_ANNOTATION(c, t, r, d, a) {NULL, (c), (t), (r), (d), (a)}
c 32 arch/alpha/kernel/err_impl.h #define SUBPACKET_HANDLER_INIT(c, h) {NULL, (c), (h)}
c 529 arch/alpha/kernel/io.c void _memset_c_io(volatile void __iomem *to, unsigned long c, long count)
c 533 arch/alpha/kernel/io.c __raw_writeb(c, to);
c 540 arch/alpha/kernel/io.c __raw_writew(c, to);
c 547 arch/alpha/kernel/io.c __raw_writel(c, to);
c 557 arch/alpha/kernel/io.c __raw_writeq(c, to);
c 566 arch/alpha/kernel/io.c __raw_writel(c, to);
c 573 arch/alpha/kernel/io.c __raw_writew(c, to);
c 580 arch/alpha/kernel/io.c __raw_writeb(c, to);
c 40 arch/alpha/kernel/srmcons.c unsigned long c :61;
c 56 arch/alpha/kernel/srmcons.c tty_insert_flip_char(port, (char)result.bits.c, 0);
c 95 arch/alpha/kernel/srmcons.c long c, remaining = count;
c 106 arch/alpha/kernel/srmcons.c for (c = 0; c < min_t(long, 128L, remaining) && !need_cr; c++)
c 107 arch/alpha/kernel/srmcons.c if (cur[c] == '\n')
c 110 arch/alpha/kernel/srmcons.c while (c > 0) {
c 111 arch/alpha/kernel/srmcons.c result.as_long = callback_puts(0, cur, c);
c 112 arch/alpha/kernel/srmcons.c c -= result.bits.c;
c 113 arch/alpha/kernel/srmcons.c remaining -= result.bits.c;
c 114 arch/alpha/kernel/srmcons.c cur += result.bits.c;
c 125 arch/alpha/kernel/srmcons.c if (result.bits.c > 0)
c 204 arch/arc/include/asm/io.h #define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
c 205 arch/arc/include/asm/io.h #define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
c 206 arch/arc/include/asm/io.h #define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
c 211 arch/arc/include/asm/io.h #define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); })
c 212 arch/arc/include/asm/io.h #define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); })
c 213 arch/arc/include/asm/io.h #define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); })
c 227 arch/arc/include/asm/io.h #define readb_relaxed(c) __raw_readb(c)
c 228 arch/arc/include/asm/io.h #define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
c 229 arch/arc/include/asm/io.h __raw_readw(c)); __r; })
c 230 arch/arc/include/asm/io.h #define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
c 231 arch/arc/include/asm/io.h __raw_readl(c)); __r; })
c 233 arch/arc/include/asm/io.h #define writeb_relaxed(v,c) __raw_writeb(v,c)
c 234 arch/arc/include/asm/io.h #define writew_relaxed(v,c) __raw_writew((__force u16) cpu_to_le16(v),c)
c 235 arch/arc/include/asm/io.h #define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c)
c 41 arch/arc/include/asm/perf_event.h unsigned int m:8, c:8, r:5, i:1, s:2, v:8;
c 43 arch/arc/include/asm/perf_event.h unsigned int v:8, s:2, i:1, r:5, c:8, m:8;
c 49 arch/arc/include/asm/perf_event.h unsigned int c:16, r:8, v:8;
c 51 arch/arc/include/asm/perf_event.h unsigned int v:8, r:8, c:16;
c 29 arch/arc/include/asm/string.h extern char *strchr(const char *s, int c);
c 151 arch/arc/include/asm/unwind.h #define unwind_add_table(a, b, c)
c 126 arch/arc/kernel/mcip.c unsigned int cpu, c;
c 147 arch/arc/kernel/mcip.c c = __ffs(cpu); /* 0,1,2,3 */
c 148 arch/arc/kernel/mcip.c __mcip_cmd(CMD_INTRPT_GENERATE_ACK, c);
c 149 arch/arc/kernel/mcip.c cpu &= ~(1U << c);
c 583 arch/arc/kernel/perf_event.c if (WARN_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS))
c 594 arch/arc/kernel/perf_event.c arc_pmu->n_events = cc_bcr.c;
c 601 arch/arc/kernel/perf_event.c arc_pmu->n_counters = pct_bcr.c;
c 607 arch/arc/kernel/perf_event.c arc_pmu->n_counters, counter_size, cc_bcr.c,
c 615 arch/arc/kernel/perf_event.c for (i = 0; i < cc_bcr.c; i++) {
c 613 arch/arc/kernel/setup.c #define cpu_to_ptr(c) ((void *)(0xFFFF0000 | (unsigned int)(c)))
c 38 arch/arc/mm/cache.c char *arc_cache_mumbojumbo(int c, char *buf, int len)
c 54 arch/arc/mm/cache.c PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
c 55 arch/arc/mm/cache.c PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
c 57 arch/arc/mm/cache.c p = &cpuinfo_arc700[c].slc;
c 90 arch/arc/mm/cache.c unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
c 92 arch/arc/mm/cache.c unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
c 113 arch/arc/mm/cache.c if (cbcr.c) {
c 20 arch/arm/boot/compressed/decompress.c # define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
c 21 arch/arm/boot/compressed/decompress.c # define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
c 27 arch/arm/boot/compressed/decompress.c # define Tracec(c,x)
c 28 arch/arm/boot/compressed/decompress.c # define Tracecv(c,x)
c 89 arch/arm/boot/compressed/misc.c char c;
c 91 arch/arm/boot/compressed/misc.c while ((c = *ptr++) != '\0') {
c 92 arch/arm/boot/compressed/misc.c if (c == '\n')
c 94 arch/arm/boot/compressed/misc.c putc(c);
c 106 arch/arm/boot/compressed/string.c void *memchr(const void *s, int c, size_t count)
c 111 arch/arm/boot/compressed/string.c if ((unsigned char)c == *p++)
c 116 arch/arm/boot/compressed/string.c char *strchr(const char *s, int c)
c 118 arch/arm/boot/compressed/string.c while (*s != (char)c)
c 124 arch/arm/boot/compressed/string.c char *strrchr(const char *s, int c)
c 128 arch/arm/boot/compressed/string.c if (*s == (char)c)
c 136 arch/arm/boot/compressed/string.c void *memset(void *s, int c, size_t count)
c 140 arch/arm/boot/compressed/string.c *xs++ = c;
c 85 arch/arm/common/mcpm_entry.c struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];
c 88 arch/arm/common/mcpm_entry.c c->cluster = CLUSTER_GOING_DOWN;
c 89 arch/arm/common/mcpm_entry.c sync_cache_w(&c->cluster);
c 92 arch/arm/common/mcpm_entry.c sync_cache_r(&c->inbound);
c 93 arch/arm/common/mcpm_entry.c if (c->inbound == INBOUND_COMING_UP)
c 103 arch/arm/common/mcpm_entry.c sync_cache_r(&c->cpus);
c 111 arch/arm/common/mcpm_entry.c cpustate = c->cpus[i].cpu;
c 116 arch/arm/common/mcpm_entry.c sync_cache_r(&c->cpus[i].cpu);
c 30 arch/arm/common/sharpsl_param.c #define MAGIC_CHG(a,b,c,d) ( ( d << 24 ) | ( c << 16 ) | ( b << 8 ) | a )
c 283 arch/arm/include/asm/arch_gicv3.h #define gic_write_irouter(v, c) __gic_writeq_nonatomic(v, c)
c 288 arch/arm/include/asm/arch_gicv3.h #define gic_read_typer(c) __gic_readq_nonatomic(c)
c 293 arch/arm/include/asm/arch_gicv3.h #define gits_read_baser(c) __gic_readq_nonatomic(c)
c 294 arch/arm/include/asm/arch_gicv3.h #define gits_write_baser(v, c) __gic_writeq_nonatomic(v, c)
c 300 arch/arm/include/asm/arch_gicv3.h #define gicr_read_propbaser(c) __gic_readq_nonatomic(c)
c 301 arch/arm/include/asm/arch_gicv3.h #define gicr_write_propbaser(v, c)
__gic_writeq_nonatomic(v, c) c 302 arch/arm/include/asm/arch_gicv3.h #define gicr_read_pendbaser(c) __gic_readq_nonatomic(c) c 303 arch/arm/include/asm/arch_gicv3.h #define gicr_write_pendbaser(v, c) __gic_writeq_nonatomic(v, c) c 308 arch/arm/include/asm/arch_gicv3.h #define gic_read_lpir(c) readl_relaxed(c) c 309 arch/arm/include/asm/arch_gicv3.h #define gic_write_lpir(v, c) writel_relaxed(lower_32_bits(v), c) c 314 arch/arm/include/asm/arch_gicv3.h #define gits_read_typer(c) __gic_readq_nonatomic(c) c 319 arch/arm/include/asm/arch_gicv3.h #define gits_read_cbaser(c) __gic_readq_nonatomic(c) c 320 arch/arm/include/asm/arch_gicv3.h #define gits_write_cbaser(v, c) __gic_writeq_nonatomic(v, c) c 325 arch/arm/include/asm/arch_gicv3.h #define gits_write_cwriter(v, c) __gic_writeq_nonatomic(v, c) c 330 arch/arm/include/asm/arch_gicv3.h #define gits_write_vpropbaser(v, c) __gic_writeq_nonatomic(v, c) c 353 arch/arm/include/asm/arch_gicv3.h #define gits_read_vpendbaser(c) __gic_readq_nonatomic(c) c 192 arch/arm/include/asm/assembler.h .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo c 449 arch/arm/include/asm/assembler.h .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo c 58 arch/arm/include/asm/cp15.h #define __read_sysreg(r, w, c, t) ({ \ c 60 arch/arm/include/asm/cp15.h asm volatile(r " " c : "=r" (__val)); \ c 65 arch/arm/include/asm/cp15.h #define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v))) c 27 arch/arm/include/asm/dcc.h static inline void __dcc_putchar(char c) c 31 arch/arm/include/asm/dcc.h : "r" (c)); c 135 arch/arm/include/asm/glue-cache.h unsigned long b, unsigned int c) { } c 291 arch/arm/include/asm/io.h #define readb_relaxed(c) ({ u8 __r = __raw_readb(c); __r; }) c 292 arch/arm/include/asm/io.h #define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \ c 293 arch/arm/include/asm/io.h __raw_readw(c)); __r; }) c 294 arch/arm/include/asm/io.h #define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \ c 295 arch/arm/include/asm/io.h __raw_readl(c)); __r; }) c 297 arch/arm/include/asm/io.h #define writeb_relaxed(v,c) __raw_writeb(v,c) c 298 arch/arm/include/asm/io.h #define writew_relaxed(v,c) __raw_writew((__force u16) cpu_to_le16(v),c) c 299 arch/arm/include/asm/io.h #define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c) c 301 arch/arm/include/asm/io.h #define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) c 302 arch/arm/include/asm/io.h #define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) c 303 arch/arm/include/asm/io.h #define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) c 305 arch/arm/include/asm/io.h #define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); }) c 306 arch/arm/include/asm/io.h #define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); }) c 307 arch/arm/include/asm/io.h #define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); }) c 318 arch/arm/include/asm/io.h static inline void memset_io(volatile void __iomem *dst, unsigned c, c 322 arch/arm/include/asm/io.h mmioset((void __force *)dst, c, count); c 324 arch/arm/include/asm/io.h #define memset_io(dst,c,count) memset_io(dst,c,count) c 343 arch/arm/include/asm/io.h #define memset_io(c,v,l) _memset_io(c,(v),(l)) c 344 arch/arm/include/asm/io.h #define memcpy_fromio(a,c,l) _memcpy_fromio((a),c,(l)) c 345 arch/arm/include/asm/io.h #define memcpy_toio(c,a,l) _memcpy_toio(c,(a),(l)) c 61 arch/arm/include/asm/mach/map.h #define vm_reserve_area_early(a,s,c) do { } while (0) c 11 arch/arm/include/asm/string.h extern char 
* strrchr(const char * s, int c); c 14 arch/arm/include/asm/string.h extern char * strchr(const char * s, int c); c 13 arch/arm/include/asm/timex.h #define get_cycles() ({ cycles_t c; read_current_timer(&c) ? 0 : c; }) c 20 arch/arm/include/asm/word-at-a-time.h const struct word_at_a_time *c) c 22 arch/arm/include/asm/word-at-a-time.h unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; c 27 arch/arm/include/asm/word-at-a-time.h #define prep_zero_mask(a, bits, c) (bits) c 3 arch/arm/include/debug/uncompress.h extern void putc(int c); c 5 arch/arm/include/debug/uncompress.h static inline void putc(int c) {} c 50 arch/arm/kernel/hw_breakpoint.c ARM_DBG_READ(c0, c ## M, OP2, VAL); \ c 55 arch/arm/kernel/hw_breakpoint.c ARM_DBG_WRITE(c0, c ## M, OP2, VAL); \ c 77 arch/arm/kernel/io.c void _memset_io(volatile void __iomem *dst, int c, size_t count) c 81 arch/arm/kernel/io.c writeb(c, dst); c 160 arch/arm/kernel/setup.c static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } }; c 141 arch/arm/kernel/vdso.c char name[MAX_SYMNAME], *c; c 147 arch/arm/kernel/vdso.c c = strchr(name, '@'); c 148 arch/arm/kernel/vdso.c if (c) c 149 arch/arm/kernel/vdso.c *c = 0; c 464 arch/arm/mach-davinci/board-da850-evm.c unsigned ngpio, void *c) c 523 arch/arm/mach-davinci/board-da850-evm.c unsigned gpio, unsigned ngpio, void *c) c 673 arch/arm/mach-davinci/board-da850-evm.c void *c) c 704 arch/arm/mach-davinci/board-da850-evm.c unsigned gpio, unsigned ngpio, void *c) c 341 arch/arm/mach-davinci/board-dm644x-evm.c evm_led_setup(struct i2c_client *client, int gpio, unsigned ngpio, void *c) c 368 arch/arm/mach-davinci/board-dm644x-evm.c evm_led_teardown(struct i2c_client *client, int gpio, unsigned ngpio, void *c) c 400 arch/arm/mach-davinci/board-dm644x-evm.c evm_u18_setup(struct i2c_client *client, int gpio, unsigned ngpio, void *c) c 430 arch/arm/mach-davinci/board-dm644x-evm.c evm_u18_teardown(struct i2c_client *client, int gpio, unsigned ngpio, void *c) c 454 arch/arm/mach-davinci/board-dm644x-evm.c evm_u35_setup(struct i2c_client *client, int gpio, unsigned ngpio, void *c) c 490 arch/arm/mach-davinci/board-dm644x-evm.c evm_u35_teardown(struct i2c_client *client, int gpio, unsigned ngpio, void *c) c 220 arch/arm/mach-davinci/board-dm646x-evm.c unsigned int ngpio, void *c) c 244 arch/arm/mach-davinci/board-dm646x-evm.c unsigned ngpio, void *c) c 256 arch/arm/mach-davinci/board-dm646x-evm.c unsigned ngpio, void *c) c 295 arch/arm/mach-davinci/board-dm646x-evm.c unsigned ngpio, void *c) c 310 arch/arm/mach-davinci/board-dm646x-evm.c unsigned int ngpio, void *c) c 317 arch/arm/mach-davinci/board-dm646x-evm.c status = evm_sw_setup(client, gpio, 4, c); c 321 arch/arm/mach-davinci/board-dm646x-evm.c return evm_led_setup(client, gpio+4, 4, c); c 325 arch/arm/mach-davinci/board-dm646x-evm.c unsigned int ngpio, void *c) c 329 arch/arm/mach-davinci/board-dm646x-evm.c evm_sw_teardown(client, gpio, 4, c); c 330 arch/arm/mach-davinci/board-dm646x-evm.c evm_led_teardown(client, gpio+4, 4, c); c 33 arch/arm/mach-davinci/include/mach/uncompress.h static inline void putc(char c) c 40 arch/arm/mach-davinci/include/mach/uncompress.h uart[UART_TX] = c; c 14 arch/arm/mach-dove/include/mach/uncompress.h static void putc(const char c) c 24 arch/arm/mach-dove/include/mach/uncompress.h *UART_THR = c; c 15 arch/arm/mach-ebsa110/include/mach/uncompress.h static inline void putc(int c) c 24 arch/arm/mach-ebsa110/include/mach/uncompress.h base[UART_TX << 2] = c; c 35 
arch/arm/mach-ep93xx/include/mach/uncompress.h static inline void putc(int c) c 45 arch/arm/mach-ep93xx/include/mach/uncompress.h __raw_writeb(c, PHYS_UART_DATA); c 63 arch/arm/mach-ep93xx/timer-ep93xx.c u64 ep93xx_clocksource_read(struct clocksource *c) c 52 arch/arm/mach-footbridge/dc21285-timer.c struct clock_event_device *c) c 61 arch/arm/mach-footbridge/dc21285-timer.c static int ckevt_dc21285_shutdown(struct clock_event_device *c) c 67 arch/arm/mach-footbridge/dc21285-timer.c static int ckevt_dc21285_set_periodic(struct clock_event_device *c) c 15 arch/arm/mach-footbridge/include/mach/uncompress.h static inline void putc(char c) c 20 arch/arm/mach-footbridge/include/mach/uncompress.h SER0_BASE[0] = c; c 23 arch/arm/mach-footbridge/include/mach/uncompress.h DC21285_BASE[0] = c; c 25 arch/arm/mach-highbank/sysregs.h #define SREG_CPU_PWR_CTRL(c) (0x200 + ((c) * 4)) c 14 arch/arm/mach-iop32x/include/mach/uncompress.h static inline void putc(char c) c 18 arch/arm/mach-iop32x/include/mach/uncompress.h uart_base[UART_TX] = c; c 225 arch/arm/mach-ixp4xx/include/mach/io.h #define memset_io(c,v,l) _memset_io((c),(v),(l)) c 226 arch/arm/mach-ixp4xx/include/mach/io.h #define memcpy_fromio(a,c,l) _memcpy_fromio((a),(c),(l)) c 227 arch/arm/mach-ixp4xx/include/mach/io.h #define memcpy_toio(c,a,l) _memcpy_toio((c),(a),(l)) c 374 arch/arm/mach-ixp4xx/include/mach/io.h #define ioread8_rep(p, v, c) ioread8_rep(p, v, c) c 402 arch/arm/mach-ixp4xx/include/mach/io.h #define ioread16_rep(p, v, c) ioread16_rep(p, v, c) c 432 arch/arm/mach-ixp4xx/include/mach/io.h #define ioread32_rep(p, v, c) ioread32_rep(p, v, c) c 461 arch/arm/mach-ixp4xx/include/mach/io.h #define iowrite8_rep(p, v, c) iowrite8_rep(p, v, c) c 490 arch/arm/mach-ixp4xx/include/mach/io.h #define iowrite16_rep(p, v, c) iowrite16_rep(p, v, c) c 519 arch/arm/mach-ixp4xx/include/mach/io.h #define iowrite32_rep(p, v, c) iowrite32_rep(p, v, c) c 20 arch/arm/mach-ixp4xx/include/mach/uncompress.h static inline void putc(int c) c 27 arch/arm/mach-ixp4xx/include/mach/uncompress.h *uart_base = c; c 23 arch/arm/mach-meson/platsmp.c #define MESON_SMP_SRAM_CPU_CTRL_ADDR_REG(c) (0x04 + ((c - 1) << 2)) c 29 arch/arm/mach-meson/platsmp.c #define MESON_CPU_PWR_A9_CNTL0_M(c) (0x03 << ((c * 2) + 16)) c 30 arch/arm/mach-meson/platsmp.c #define MESON_CPU_PWR_A9_CNTL1_M(c) (0x03 << ((c + 1) << 1)) c 31 arch/arm/mach-meson/platsmp.c #define MESON_CPU_PWR_A9_MEM_PD0_M(c) (0x0f << (32 - (c * 4))) c 32 arch/arm/mach-meson/platsmp.c #define MESON_CPU_PWR_A9_CNTL1_ST(c) (0x01 << (c + 16)) c 68 arch/arm/mach-mmp/time.c struct clock_event_device *c = dev_id; c 80 arch/arm/mach-mmp/time.c c->event_handler(c); c 849 arch/arm/mach-omap1/clock.c struct clk *c; c 854 arch/arm/mach-omap1/clock.c list_for_each_entry(c, &clocks, node) { c 855 arch/arm/mach-omap1/clock.c if (!strcmp(c->name, name)) { c 856 arch/arm/mach-omap1/clock.c ret = c; c 868 arch/arm/mach-omap1/clock.c struct clk *c; c 873 arch/arm/mach-omap1/clock.c list_for_each_entry(c, &clocks, node) c 874 arch/arm/mach-omap1/clock.c if (c->ops->allow_idle) c 875 arch/arm/mach-omap1/clock.c c->ops->allow_idle(c); c 884 arch/arm/mach-omap1/clock.c struct clk *c; c 889 arch/arm/mach-omap1/clock.c list_for_each_entry(c, &clocks, node) c 890 arch/arm/mach-omap1/clock.c if (c->ops->deny_idle) c 891 arch/arm/mach-omap1/clock.c c->ops->deny_idle(c); c 970 arch/arm/mach-omap1/clock.c struct clk *c; c 977 arch/arm/mach-omap1/clock.c list_for_each_entry(c, &clocks, node) { c 978 arch/arm/mach-omap1/clock.c pa = c->parent; c 980 
arch/arm/mach-omap1/clock.c c->name, pa ? pa->name : "none", c->rate, c 981 arch/arm/mach-omap1/clock.c c->usecount); c 990 arch/arm/mach-omap1/clock.c static void clk_debugfs_register_one(struct clk *c) c 993 arch/arm/mach-omap1/clock.c struct clk *pa = c->parent; c 995 arch/arm/mach-omap1/clock.c d = debugfs_create_dir(c->name, pa ? pa->dent : clk_debugfs_root); c 996 arch/arm/mach-omap1/clock.c c->dent = d; c 998 arch/arm/mach-omap1/clock.c debugfs_create_u8("usecount", S_IRUGO, c->dent, &c->usecount); c 999 arch/arm/mach-omap1/clock.c debugfs_create_ulong("rate", S_IRUGO, c->dent, &c->rate); c 1000 arch/arm/mach-omap1/clock.c debugfs_create_x8("flags", S_IRUGO, c->dent, &c->flags); c 1003 arch/arm/mach-omap1/clock.c static void clk_debugfs_register(struct clk *c) c 1005 arch/arm/mach-omap1/clock.c struct clk *pa = c->parent; c 1010 arch/arm/mach-omap1/clock.c if (!c->dent) c 1011 arch/arm/mach-omap1/clock.c clk_debugfs_register_one(c); c 1016 arch/arm/mach-omap1/clock.c struct clk *c; c 1022 arch/arm/mach-omap1/clock.c list_for_each_entry(c, &clocks, node) c 1023 arch/arm/mach-omap1/clock.c clk_debugfs_register(c); c 765 arch/arm/mach-omap1/clock_data.c struct omap_clk *c; c 786 arch/arm/mach-omap1/clock_data.c for (c = omap_clks; c < omap_clks + ARRAY_SIZE(omap_clks); c++) c 787 arch/arm/mach-omap1/clock_data.c clk_preinit(c->lk.clk); c 801 arch/arm/mach-omap1/clock_data.c for (c = omap_clks; c < omap_clks + ARRAY_SIZE(omap_clks); c++) c 802 arch/arm/mach-omap1/clock_data.c if (c->cpu & cpu_mask) { c 803 arch/arm/mach-omap1/clock_data.c clkdev_add(&c->lk); c 804 arch/arm/mach-omap1/clock_data.c clk_register(c->lk.clk); c 48 arch/arm/mach-omap1/include/mach/uncompress.h static inline void putc(int c) c 59 arch/arm/mach-omap1/include/mach/uncompress.h uart_base[UART_TX << uart_shift] = c; c 198 arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c struct clk *c; c 200 arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c c = clk_get(NULL, "sys_ck"); c 201 arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c if (IS_ERR(c)) { c 204 arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c sys_ck_rate = clk_get_rate(c); c 205 arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c clk_put(c); c 395 arch/arm/mach-omap2/clockdomain.c struct clockdomain **c = NULL; c 403 arch/arm/mach-omap2/clockdomain.c for (c = cs; *c; c++) c 404 arch/arm/mach-omap2/clockdomain.c _clkdm_register(*c); c 184 arch/arm/mach-omap2/clockdomain.h int clkdm_register_clkdms(struct clockdomain **c); c 43 arch/arm/mach-omap2/cm2xxx.c static void _write_clktrctrl(u8 c, s16 module, u32 mask) c 49 arch/arm/mach-omap2/cm2xxx.c v |= c << __ffs(mask); c 129 arch/arm/mach-omap2/cm33xx.c static void _clktrctrl_write(u8 c, u16 inst, u16 cdoffs) c 135 arch/arm/mach-omap2/cm33xx.c v |= c << AM33XX_CLKTRCTRL_SHIFT; c 32 arch/arm/mach-omap2/cm3xxx.c static void _write_clktrctrl(u8 c, s16 module, u32 mask) c 38 arch/arm/mach-omap2/cm3xxx.c v |= c << __ffs(mask); c 180 arch/arm/mach-omap2/cminst44xx.c static void _clktrctrl_write(u8 c, u8 part, u16 inst, u16 cdoffs) c 186 arch/arm/mach-omap2/cminst44xx.c v |= c << OMAP4430_CLKTRCTRL_SHIFT; c 379 arch/arm/mach-omap2/display.c int c = 0; c 407 arch/arm/mach-omap2/display.c MAX_MODULE_SOFTRESET_WAIT, c); c 409 arch/arm/mach-omap2/display.c if (c == MAX_MODULE_SOFTRESET_WAIT) c 418 arch/arm/mach-omap2/display.c r = (c == MAX_MODULE_SOFTRESET_WAIT) ? 
-ETIMEDOUT : 0; c 40 arch/arm/mach-omap2/hdq1w.c int c = 0; c 54 arch/arm/mach-omap2/hdq1w.c MAX_MODULE_SOFTRESET_WAIT, c); c 56 arch/arm/mach-omap2/hdq1w.c if (c == MAX_MODULE_SOFTRESET_WAIT) c 61 arch/arm/mach-omap2/hdq1w.c oh->name, c); c 29 arch/arm/mach-omap2/hsmmc.c static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c, c 38 arch/arm/mach-omap2/hsmmc.c snprintf(hc_name, (HSMMC_NAME_LEN + 1), "mmc%islot%i", c->mmc, 1); c 40 arch/arm/mach-omap2/hsmmc.c mmc->caps = c->caps; c 48 arch/arm/mach-omap2/hsmmc.c void omap_hsmmc_late_init(struct omap2_hsmmc_info *c) c 58 arch/arm/mach-omap2/hsmmc.c for (; c->mmc; c++) { c 59 arch/arm/mach-omap2/hsmmc.c pdev = c->pdev; c 40 arch/arm/mach-omap2/i2c.c int c = 0; c 64 arch/arm/mach-omap2/i2c.c MAX_MODULE_SOFTRESET_WAIT, c); c 66 arch/arm/mach-omap2/i2c.c if (c == MAX_MODULE_SOFTRESET_WAIT) c 71 arch/arm/mach-omap2/i2c.c oh->name, c); c 48 arch/arm/mach-omap2/msdi.c int c = 0; c 61 arch/arm/mach-omap2/msdi.c MAX_MODULE_SOFTRESET_WAIT, c); c 63 arch/arm/mach-omap2/msdi.c if (c == MAX_MODULE_SOFTRESET_WAIT) c 68 arch/arm/mach-omap2/msdi.c oh->name, c); c 307 arch/arm/mach-omap2/omap-smp.c static void __init omap4_smp_maybe_reset_cpu1(struct omap_smp_config *c) c 344 arch/arm/mach-omap2/omap-smp.c if (!needs_reset || !c->cpu1_rstctrl_va) c 350 arch/arm/mach-omap2/omap-smp.c writel_relaxed(1, c->cpu1_rstctrl_va); c 351 arch/arm/mach-omap2/omap-smp.c readl_relaxed(c->cpu1_rstctrl_va); c 352 arch/arm/mach-omap2/omap-smp.c writel_relaxed(0, c->cpu1_rstctrl_va); c 357 arch/arm/mach-omap2/omap-smp.c const struct omap_smp_config *c = NULL; c 360 arch/arm/mach-omap2/omap-smp.c c = &omap443x_cfg; c 362 arch/arm/mach-omap2/omap-smp.c c = &omap446x_cfg; c 364 arch/arm/mach-omap2/omap-smp.c c = &omap5_cfg; c 366 arch/arm/mach-omap2/omap-smp.c if (!c) { c 372 arch/arm/mach-omap2/omap-smp.c cfg.cpu1_rstctrl_pa = c->cpu1_rstctrl_pa; c 373 arch/arm/mach-omap2/omap-smp.c cfg.startup_addr = c->startup_addr; c 495 arch/arm/mach-omap2/omap_hwmod.c int c = 0; c 502 arch/arm/mach-omap2/omap_hwmod.c MAX_MODULE_SOFTRESET_WAIT, c); c 507 arch/arm/mach-omap2/omap_hwmod.c MAX_MODULE_SOFTRESET_WAIT, c); c 510 arch/arm/mach-omap2/omap_hwmod.c return c; c 904 arch/arm/mach-omap2/omap_hwmod.c struct clk *c; c 911 arch/arm/mach-omap2/omap_hwmod.c c = clk_get(NULL, os->clk); c 912 arch/arm/mach-omap2/omap_hwmod.c if (IS_ERR(c)) { c 918 arch/arm/mach-omap2/omap_hwmod.c os->_clk = c; c 943 arch/arm/mach-omap2/omap_hwmod.c struct clk *c; c 948 arch/arm/mach-omap2/omap_hwmod.c c = clk_get(NULL, oc->clk); c 949 arch/arm/mach-omap2/omap_hwmod.c if (IS_ERR(c)) { c 955 arch/arm/mach-omap2/omap_hwmod.c oc->_clk = c; c 1726 arch/arm/mach-omap2/omap_hwmod.c int c = 0; c 1756 arch/arm/mach-omap2/omap_hwmod.c c = _wait_softreset_complete(oh); c 1757 arch/arm/mach-omap2/omap_hwmod.c if (c == MAX_MODULE_SOFTRESET_WAIT) { c 1763 arch/arm/mach-omap2/omap_hwmod.c pr_debug("omap_hwmod: %s: softreset in %d usec\n", oh->name, c); c 3814 arch/arm/mach-omap2/omap_hwmod.c struct clk *c; c 3826 arch/arm/mach-omap2/omap_hwmod.c c = oh->_clk; c 3831 arch/arm/mach-omap2/omap_hwmod.c c = oi->_clk; c 3834 arch/arm/mach-omap2/omap_hwmod.c clk = to_clk_hw_omap(__clk_get_hw(c)); c 134 arch/arm/mach-omap2/pm34xx.c int c; c 136 arch/arm/mach-omap2/pm34xx.c c = omap_prm_clear_mod_irqs(WKUP_MOD, 1, OMAP3430_ST_IO_MASK | c 139 arch/arm/mach-omap2/pm34xx.c return c ? 
IRQ_HANDLED : IRQ_NONE; c 144 arch/arm/mach-omap2/pm34xx.c int c; c 151 arch/arm/mach-omap2/pm34xx.c c = omap_prm_clear_mod_irqs(WKUP_MOD, 1, ~(OMAP3430_ST_IO_MASK | c 153 arch/arm/mach-omap2/pm34xx.c c += omap_prm_clear_mod_irqs(CORE_MOD, 1, ~0); c 154 arch/arm/mach-omap2/pm34xx.c c += omap_prm_clear_mod_irqs(OMAP3430_PER_MOD, 1, ~0); c 156 arch/arm/mach-omap2/pm34xx.c c += omap_prm_clear_mod_irqs(CORE_MOD, 3, ~0); c 157 arch/arm/mach-omap2/pm34xx.c c += omap_prm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1, ~0); c 160 arch/arm/mach-omap2/pm34xx.c return c ? IRQ_HANDLED : IRQ_NONE; c 546 arch/arm/mach-omap2/powerdomains3xxx_data.c u32 c = 0; c 552 arch/arm/mach-omap2/powerdomains3xxx_data.c (c++ < PWRDM_TRANSITION_BAILOUT)) c 555 arch/arm/mach-omap2/powerdomains3xxx_data.c if (c > PWRDM_TRANSITION_BAILOUT) { c 561 arch/arm/mach-omap2/powerdomains3xxx_data.c pr_debug("powerdomain: completed transition in %d loops\n", c); c 86 arch/arm/mach-omap2/prm2xxx_3xxx.c int c; c 102 arch/arm/mach-omap2/prm2xxx_3xxx.c MAX_MODULE_HARDRESET_WAIT, c); c 104 arch/arm/mach-omap2/prm2xxx_3xxx.c return (c == MAX_MODULE_HARDRESET_WAIT) ? -EBUSY : 0; c 170 arch/arm/mach-omap2/prm2xxx_3xxx.c u32 c = 0; c 181 arch/arm/mach-omap2/prm2xxx_3xxx.c (c++ < PWRDM_TRANSITION_BAILOUT)) c 184 arch/arm/mach-omap2/prm2xxx_3xxx.c if (c > PWRDM_TRANSITION_BAILOUT) { c 190 arch/arm/mach-omap2/prm2xxx_3xxx.c pr_debug("powerdomain: completed transition in %d loops\n", c); c 126 arch/arm/mach-omap2/prm33xx.c int c; c 144 arch/arm/mach-omap2/prm33xx.c MAX_MODULE_HARDRESET_WAIT, c); c 146 arch/arm/mach-omap2/prm33xx.c return (c == MAX_MODULE_HARDRESET_WAIT) ? -EBUSY : 0; c 297 arch/arm/mach-omap2/prm33xx.c u32 c = 0; c 308 arch/arm/mach-omap2/prm33xx.c (c++ < PWRDM_TRANSITION_BAILOUT)) c 311 arch/arm/mach-omap2/prm33xx.c if (c > PWRDM_TRANSITION_BAILOUT) { c 317 arch/arm/mach-omap2/prm33xx.c pr_debug("powerdomain: completed transition in %d loops\n", c); c 235 arch/arm/mach-omap2/prm3xxx.c int c = 0; c 256 arch/arm/mach-omap2/prm3xxx.c c++; c 262 arch/arm/mach-omap2/prm3xxx.c return c; c 641 arch/arm/mach-omap2/prm44xx.c u32 c = 0; c 654 arch/arm/mach-omap2/prm44xx.c (c++ < PWRDM_TRANSITION_BAILOUT)) c 657 arch/arm/mach-omap2/prm44xx.c if (c > PWRDM_TRANSITION_BAILOUT) { c 663 arch/arm/mach-omap2/prm44xx.c pr_debug("powerdomain: completed transition in %d loops\n", c); c 155 arch/arm/mach-omap2/prminst44xx.c int c; c 172 arch/arm/mach-omap2/prminst44xx.c MAX_MODULE_HARDRESET_WAIT, c); c 174 arch/arm/mach-omap2/prminst44xx.c return (c == MAX_MODULE_HARDRESET_WAIT) ? 
-EBUSY : 0; c 240 arch/arm/mach-omap2/vc.c struct omap3_vc_timings *c = vc.timings; c 253 arch/arm/mach-omap2/vc.c voltsetup2 = c->voltsetup2; c 255 arch/arm/mach-omap2/vc.c voltsetup1 = c->voltsetup1; c 259 arch/arm/mach-omap2/vc.c c++; c 263 arch/arm/mach-omap2/vc.c voltsetup1 = c->voltsetup1; c 272 arch/arm/mach-omap2/vc.c vd->write(c->voltsetup1, c 277 arch/arm/mach-omap2/vc.c vd->write(c->voltsetup2, c 331 arch/arm/mach-omap2/vc.c struct omap3_vc_timings *c, u32 idle) c 338 arch/arm/mach-omap2/vc.c c->voltsetup1 &= ~voltdm->vfsm->voltsetup_mask; c 339 arch/arm/mach-omap2/vc.c c->voltsetup1 |= val; c 360 arch/arm/mach-omap2/vc.c struct omap3_vc_timings *c = vc.timings; c 363 arch/arm/mach-omap2/vc.c omap3_init_voltsetup1(voltdm, c, voltdm->vc_param->off); c 364 arch/arm/mach-omap2/vc.c c++; c 366 arch/arm/mach-omap2/vc.c omap3_init_voltsetup1(voltdm, c, voltdm->vc_param->ret); c 386 arch/arm/mach-omap2/vc.c struct omap3_vc_timings *c = vc.timings; c 389 arch/arm/mach-omap2/vc.c if (c->voltsetup2) c 408 arch/arm/mach-omap2/vc.c c->voltsetup2 = clksetup - voltoffset; c 77 arch/arm/mach-omap2/wd_timer.c int c = 0; c 86 arch/arm/mach-omap2/wd_timer.c MAX_MODULE_SOFTRESET_WAIT, c); c 91 arch/arm/mach-omap2/wd_timer.c if (c == MAX_MODULE_SOFTRESET_WAIT) c 96 arch/arm/mach-omap2/wd_timer.c oh->name, c); c 98 arch/arm/mach-omap2/wd_timer.c return (c == MAX_MODULE_SOFTRESET_WAIT) ? -ETIMEDOUT : c 36 arch/arm/mach-pxa/include/mach/uncompress.h static inline void putc(char c) c 44 arch/arm/mach-pxa/include/mach/uncompress.h uart_write(c, UART_TX); c 52 arch/arm/mach-pxa/mfp-pxa2xx.c static int __mfp_config_gpio(unsigned gpio, unsigned long c) c 58 arch/arm/mach-pxa/mfp-pxa2xx.c int fn = MFP_AF(c); c 59 arch/arm/mach-pxa/mfp-pxa2xx.c int is_out = (c & MFP_DIR_OUT) ? 
1 : 0; c 79 arch/arm/mach-pxa/mfp-pxa2xx.c switch (c & MFP_LPM_STATE_MASK) { c 106 arch/arm/mach-pxa/mfp-pxa2xx.c if ((c & MFP_LPM_CAN_WAKEUP) && !gpio_desc[gpio].can_wakeup) { c 111 arch/arm/mach-pxa/mfp-pxa2xx.c if ((c & MFP_LPM_CAN_WAKEUP) && is_out) { c 134 arch/arm/mach-pxa/mfp-pxa2xx.c unsigned long *c; c 137 arch/arm/mach-pxa/mfp-pxa2xx.c for (i = 0, c = mfp_cfgs; i < num; i++, c++) { c 139 arch/arm/mach-pxa/mfp-pxa2xx.c gpio = __mfp_validate(MFP_PIN(*c)); c 145 arch/arm/mach-pxa/mfp-pxa2xx.c gpio_desc[gpio].config = *c; c 146 arch/arm/mach-pxa/mfp-pxa2xx.c __mfp_config_gpio(gpio, *c); c 154 arch/arm/mach-pxa/mfp-pxa2xx.c unsigned long flags, c; c 163 arch/arm/mach-pxa/mfp-pxa2xx.c c = gpio_desc[gpio].config; c 164 arch/arm/mach-pxa/mfp-pxa2xx.c c = (c & ~MFP_LPM_STATE_MASK) | lpm; c 165 arch/arm/mach-pxa/mfp-pxa2xx.c __mfp_config_gpio(gpio, c); c 173 arch/arm/mach-pxa/mfp-pxa2xx.c unsigned long c, mux_taken; c 179 arch/arm/mach-pxa/mfp-pxa2xx.c c = d->config; c 200 arch/arm/mach-pxa/mfp-pxa2xx.c if (d->can_wakeup && (c & MFP_LPM_CAN_WAKEUP)) { c 204 arch/arm/mach-pxa/mfp-pxa2xx.c if (c & MFP_LPM_EDGE_RISE) c 209 arch/arm/mach-pxa/mfp-pxa2xx.c if (c & MFP_LPM_EDGE_FALL) c 702 arch/arm/mach-pxa/mioa701.c static void mioa701_restart(enum reboot_mode c, const char *cmd) c 76 arch/arm/mach-rpc/include/mach/uncompress.h static inline void putc(int c) c 78 arch/arm/mach-rpc/include/mach/uncompress.h extern void ll_write_char(char *, char c, char white); c 85 arch/arm/mach-rpc/include/mach/uncompress.h if (c == '\n') { c 88 arch/arm/mach-rpc/include/mach/uncompress.h } else if (c == '\r') { c 92 arch/arm/mach-rpc/include/mach/uncompress.h ll_write_char(ptr, c, white); c 23 arch/arm/mach-sa1100/include/mach/uncompress.h static inline void putc(int c) c 42 arch/arm/mach-sa1100/include/mach/uncompress.h UART(UTDR) = c; c 23 arch/arm/mach-spear/include/mach/uncompress.h static inline void putc(int c) c 30 arch/arm/mach-spear/include/mach/uncompress.h writel_relaxed(c, base + UART01x_DR); c 38 arch/arm/mach-sunxi/mc_smp.c #define CPUCFG_CX_CTRL_REG0(c) (0x10 * (c)) c 43 arch/arm/mach-sunxi/mc_smp.c #define CPUCFG_CX_CTRL_REG1(c) (0x10 * (c) + 0x4) c 45 arch/arm/mach-sunxi/mc_smp.c #define CPUCFG_CX_STATUS(c) (0x30 + 0x4 * (c)) c 48 arch/arm/mach-sunxi/mc_smp.c #define CPUCFG_CX_RST_CTRL(c) (0x80 + 0x4 * (c)) c 60 arch/arm/mach-sunxi/mc_smp.c #define PRCM_CPU_PO_RST_CTRL(c) (0x4 + 0x4 * (c)) c 63 arch/arm/mach-sunxi/mc_smp.c #define PRCM_PWROFF_GATING_REG(c) (0x100 + 0x4 * (c)) c 68 arch/arm/mach-sunxi/mc_smp.c #define PRCM_PWR_SWITCH_REG(c, cpu) (0x140 + 0x10 * (c) + 0x4 * (cpu)) c 72 arch/arm/mach-sunxi/mc_smp.c #define R_CPUCFG_CLUSTER_PO_RST_CTRL(c) (0x30 + (c) * 0x4) c 169 arch/arm/plat-pxa/mfp.c unsigned long tmp, c = *mfp_cfgs; c 173 arch/arm/plat-pxa/mfp.c pin = MFP_PIN(c); c 177 arch/arm/plat-pxa/mfp.c af = MFP_AF(c); c 178 arch/arm/plat-pxa/mfp.c drv = MFP_DS(c); c 179 arch/arm/plat-pxa/mfp.c lpm = MFP_LPM_STATE(c); c 180 arch/arm/plat-pxa/mfp.c edge = MFP_LPM_EDGE(c); c 181 arch/arm/plat-pxa/mfp.c pull = MFP_PULL(c); c 197 arch/arm/plat-pxa/mfp.c p->config = c; __mfp_config_run(p); c 56 arch/arm/plat-samsung/adc.c void (*select_cb)(struct s3c_adc_client *c, unsigned selected); c 57 arch/arm/plat-samsung/adc.c void (*convert_cb)(struct s3c_adc_client *c, c 122 arch/arm64/include/asm/arch_gicv3.h #define gic_read_typer(c) readq_relaxed(c) c 123 arch/arm64/include/asm/arch_gicv3.h #define gic_write_irouter(v, c) writeq_relaxed(v, c) c 124 arch/arm64/include/asm/arch_gicv3.h #define 
gic_read_lpir(c) readq_relaxed(c) c 125 arch/arm64/include/asm/arch_gicv3.h #define gic_write_lpir(v, c) writeq_relaxed(v, c) c 129 arch/arm64/include/asm/arch_gicv3.h #define gits_read_baser(c) readq_relaxed(c) c 130 arch/arm64/include/asm/arch_gicv3.h #define gits_write_baser(v, c) writeq_relaxed(v, c) c 132 arch/arm64/include/asm/arch_gicv3.h #define gits_read_cbaser(c) readq_relaxed(c) c 133 arch/arm64/include/asm/arch_gicv3.h #define gits_write_cbaser(v, c) writeq_relaxed(v, c) c 135 arch/arm64/include/asm/arch_gicv3.h #define gits_write_cwriter(v, c) writeq_relaxed(v, c) c 137 arch/arm64/include/asm/arch_gicv3.h #define gicr_read_propbaser(c) readq_relaxed(c) c 138 arch/arm64/include/asm/arch_gicv3.h #define gicr_write_propbaser(v, c) writeq_relaxed(v, c) c 140 arch/arm64/include/asm/arch_gicv3.h #define gicr_write_pendbaser(v, c) writeq_relaxed(v, c) c 141 arch/arm64/include/asm/arch_gicv3.h #define gicr_read_pendbaser(c) readq_relaxed(c) c 143 arch/arm64/include/asm/arch_gicv3.h #define gits_write_vpropbaser(v, c) writeq_relaxed(v, c) c 145 arch/arm64/include/asm/arch_gicv3.h #define gits_write_vpendbaser(v, c) writeq_relaxed(v, c) c 146 arch/arm64/include/asm/arch_gicv3.h #define gits_read_vpendbaser(c) readq_relaxed(c) c 25 arch/arm64/include/asm/dcc.h char c = read_sysreg(dbgdtrrx_el0); c 28 arch/arm64/include/asm/dcc.h return c; c 31 arch/arm64/include/asm/dcc.h static inline void __dcc_putchar(char c) c 37 arch/arm64/include/asm/dcc.h write_sysreg((unsigned char)c, dbgdtrtx_el0); c 119 arch/arm64/include/asm/io.h #define readb_relaxed(c) ({ u8 __r = __raw_readb(c); __r; }) c 120 arch/arm64/include/asm/io.h #define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; }) c 121 arch/arm64/include/asm/io.h #define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; }) c 122 arch/arm64/include/asm/io.h #define readq_relaxed(c) ({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; }) c 124 arch/arm64/include/asm/io.h #define writeb_relaxed(v,c) ((void)__raw_writeb((v),(c))) c 125 arch/arm64/include/asm/io.h #define writew_relaxed(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c))) c 126 arch/arm64/include/asm/io.h #define writel_relaxed(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c))) c 127 arch/arm64/include/asm/io.h #define writeq_relaxed(v,c) ((void)__raw_writeq((__force u64)cpu_to_le64(v),(c))) c 134 arch/arm64/include/asm/io.h #define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(__v); __v; }) c 135 arch/arm64/include/asm/io.h #define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(__v); __v; }) c 136 arch/arm64/include/asm/io.h #define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(__v); __v; }) c 137 arch/arm64/include/asm/io.h #define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(__v); __v; }) c 139 arch/arm64/include/asm/io.h #define writeb(v,c) ({ __iowmb(); writeb_relaxed((v),(c)); }) c 140 arch/arm64/include/asm/io.h #define writew(v,c) ({ __iowmb(); writew_relaxed((v),(c)); }) c 141 arch/arm64/include/asm/io.h #define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c)); }) c 142 arch/arm64/include/asm/io.h #define writeq(v,c) ({ __iowmb(); writeq_relaxed((v),(c)); }) c 158 arch/arm64/include/asm/io.h #define memset_io(c,v,l) __memset_io((c),(v),(l)) c 159 arch/arm64/include/asm/io.h #define memcpy_fromio(a,c,l) __memcpy_fromio((a),(c),(l)) c 160 arch/arm64/include/asm/io.h #define memcpy_toio(c,a,l) __memcpy_toio((c),(a),(l)) c 10 arch/arm64/include/asm/string.h extern char *strrchr(const 
char *, int c); c 13 arch/arm64/include/asm/string.h extern char *strchr(const char *, int c); c 60 arch/arm64/include/asm/string.h #define memset(s, c, n) __memset(s, c, n) c 21 arch/arm64/include/asm/word-at-a-time.h const struct word_at_a_time *c) c 23 arch/arm64/include/asm/word-at-a-time.h unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; c 28 arch/arm64/include/asm/word-at-a-time.h #define prep_zero_mask(a, bits, c) (bits) c 71 arch/arm64/kernel/io.c void __memset_io(volatile void __iomem *dst, int c, size_t count) c 73 arch/arm64/kernel/io.c u64 qc = (u8)c; c 80 arch/arm64/kernel/io.c __raw_writeb(c, dst); c 92 arch/arm64/kernel/io.c __raw_writeb(c, dst); c 346 arch/c6x/platforms/pll.c struct clk_lookup *c; c 350 arch/c6x/platforms/pll.c for (c = clocks; c->clk; c++) { c 351 arch/c6x/platforms/pll.c clk = c->clk; c 137 arch/csky/abiv2/fpu.c #define STW_FPU_REGS(a, b, c, d) \ c 140 arch/csky/abiv2/fpu.c "stw %2, (%4, "#c")\n" \ c 143 arch/csky/abiv2/fpu.c #define LDW_FPU_REGS(a, b, c, d) \ c 146 arch/csky/abiv2/fpu.c "ldw %2, (%4, "#c")\n" \ c 22 arch/csky/include/asm/io.h #define readb(c) ({ u8 __v = readb_relaxed(c); rmb(); __v; }) c 23 arch/csky/include/asm/io.h #define readw(c) ({ u16 __v = readw_relaxed(c); rmb(); __v; }) c 24 arch/csky/include/asm/io.h #define readl(c) ({ u32 __v = readl_relaxed(c); rmb(); __v; }) c 27 arch/csky/include/asm/io.h #define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); }) c 28 arch/csky/include/asm/io.h #define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); }) c 29 arch/csky/include/asm/io.h #define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); }) c 31 arch/csky/include/asm/io.h #define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); mb(); }) c 32 arch/csky/include/asm/io.h #define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); mb(); }) c 33 arch/csky/include/asm/io.h #define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); mb(); }) c 44 arch/csky/include/asm/page.h extern void *memset(void *dest, int c, size_t l); c 44 arch/h8300/boot/compressed/misc.c void *memset(void *s, int c, size_t n) c 50 arch/h8300/boot/compressed/misc.c ss[i] = c; c 11 arch/h8300/include/asm/string.h extern void *memset(void *s, int c, size_t count); c 15 arch/hexagon/include/asm/string.h extern void *memset(void *__to, int c, size_t __n); c 25 arch/hexagon/lib/checksum.c #define VR_NEGATE(a, b, c, d) (SIGN(a, 48) + SIGN(b, 32) + SIGN(c, 16) \ c 27 arch/hexagon/lib/checksum.c #define VR_CARRY(a, b, c, d) (CARRY(a, 48) + CARRY(b, 32) + CARRY(c, 16) \ c 29 arch/hexagon/lib/checksum.c #define VR_SELECT(a, b, c, d) (SELECT(a, 48) + SELECT(b, 32) + SELECT(c, 16) \ c 61 arch/ia64/hp/common/aml_nfw.c static void aml_nfw_execute(struct ia64_nfw_context *c) c 66 arch/ia64/hp/common/aml_nfw.c virt_entry.ip = virt_map(c->ip); c 67 arch/ia64/hp/common/aml_nfw.c virt_entry.gp = virt_map(c->gp); c 71 arch/ia64/hp/common/aml_nfw.c IA64_FW_CALL(entry, c->ret, c 72 arch/ia64/hp/common/aml_nfw.c c->arg[0], c->arg[1], c->arg[2], c->arg[3], c 73 arch/ia64/hp/common/aml_nfw.c c->arg[4], c->arg[5], c->arg[6], c->arg[7]); c 276 arch/ia64/include/asm/io.h extern void memset_io(volatile void __iomem *s, int c, long n); c 35 arch/ia64/include/asm/kprobes.h unsigned long long c : 1; c 157 arch/ia64/kernel/kprobes.c &&(cmp_inst.f.c == 1)) c 161 arch/ia64/kernel/kprobes.c if ((cmp_inst.f.ta == 0) &&(cmp_inst.f.c == 1)) c 1175 arch/ia64/kernel/mca.c int c, i , wait; c 1182 arch/ia64/kernel/mca.c for_each_online_cpu(c) { c 1183 arch/ia64/kernel/mca.c if (c == monarch) c 1185 
arch/ia64/kernel/mca.c if (ia64_mc_info.imi_rendez_checkin[c] c 1201 arch/ia64/kernel/mca.c for_each_online_cpu(c) { c 1202 arch/ia64/kernel/mca.c if (c == monarch) c 1204 arch/ia64/kernel/mca.c if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) c 1205 arch/ia64/kernel/mca.c mprintk(" %d", c); c 1599 arch/ia64/kernel/mca.c int c; c 1617 arch/ia64/kernel/mca.c for_each_online_cpu(c) { c 1619 arch/ia64/kernel/mca.c t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET); c 123 arch/ia64/kernel/perfmon.c #define CTX_OVFL_NOBLOCK(c) ((c)->ctx_fl_block == 0) c 124 arch/ia64/kernel/perfmon.c #define CTX_HAS_SMPL(c) ((c)->ctx_fl_is_sampling) c 131 arch/ia64/kernel/perfmon.c #define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL) c 165 arch/ia64/kernel/perfmon.c #define PROTECT_CTX(c, f) \ c 167 arch/ia64/kernel/perfmon.c DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \ c 168 arch/ia64/kernel/perfmon.c spin_lock_irqsave(&(c)->ctx_lock, f); \ c 169 arch/ia64/kernel/perfmon.c DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \ c 172 arch/ia64/kernel/perfmon.c #define UNPROTECT_CTX(c, f) \ c 174 arch/ia64/kernel/perfmon.c DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \ c 175 arch/ia64/kernel/perfmon.c spin_unlock_irqrestore(&(c)->ctx_lock, f); \ c 178 arch/ia64/kernel/perfmon.c #define PROTECT_CTX_NOPRINT(c, f) \ c 180 arch/ia64/kernel/perfmon.c spin_lock_irqsave(&(c)->ctx_lock, f); \ c 184 arch/ia64/kernel/perfmon.c #define UNPROTECT_CTX_NOPRINT(c, f) \ c 186 arch/ia64/kernel/perfmon.c spin_unlock_irqrestore(&(c)->ctx_lock, f); \ c 190 arch/ia64/kernel/perfmon.c #define PROTECT_CTX_NOIRQ(c) \ c 192 arch/ia64/kernel/perfmon.c spin_lock(&(c)->ctx_lock); \ c 195 arch/ia64/kernel/perfmon.c #define UNPROTECT_CTX_NOIRQ(c) \ c 197 arch/ia64/kernel/perfmon.c spin_unlock(&(c)->ctx_lock); \ c 205 arch/ia64/kernel/perfmon.c #define SET_ACTIVATION(c) (c)->ctx_last_activation = GET_ACTIVATION() c 213 arch/ia64/kernel/perfmon.c #define SET_PMU_OWNER(t, c) do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0) c 658 arch/ia64/kernel/setup.c # define lpj c->loops_per_jiffy c 659 arch/ia64/kernel/setup.c # define cpunum c->cpu c 673 arch/ia64/kernel/setup.c struct cpuinfo_ia64 *c = v; c 678 arch/ia64/kernel/setup.c mask = c->features; c 701 arch/ia64/kernel/setup.c proc_freq = c->proc_freq / 1000; c 718 arch/ia64/kernel/setup.c cpunum, c->vendor, c->family, c->model, c 719 arch/ia64/kernel/setup.c c->model_name, c->revision, c->archrev, c 720 arch/ia64/kernel/setup.c features, c->ppn, c->number, c 722 arch/ia64/kernel/setup.c c->itc_freq / 1000000, c->itc_freq % 1000000, c 727 arch/ia64/kernel/setup.c if (c->socket_id != -1) c 728 arch/ia64/kernel/setup.c seq_printf(m, "physical id: %u\n", c->socket_id); c 729 arch/ia64/kernel/setup.c if (c->threads_per_core > 1 || c->cores_per_socket > 1) c 733 arch/ia64/kernel/setup.c c->core_id, c->thread_id); c 803 arch/ia64/kernel/setup.c identify_cpu (struct cpuinfo_ia64 *c) c 834 arch/ia64/kernel/setup.c memcpy(c->vendor, cpuid.field.vendor, 16); c 836 arch/ia64/kernel/setup.c c->cpu = smp_processor_id(); c 841 arch/ia64/kernel/setup.c c->threads_per_core = c->cores_per_socket = c->num_log = 1; c 842 arch/ia64/kernel/setup.c c->socket_id = -1; c 844 arch/ia64/kernel/setup.c identify_siblings(c); c 846 arch/ia64/kernel/setup.c if (c->threads_per_core > smp_num_siblings) c 847 arch/ia64/kernel/setup.c smp_num_siblings = 
c->threads_per_core; c 849 arch/ia64/kernel/setup.c c->ppn = cpuid.field.ppn; c 850 arch/ia64/kernel/setup.c c->number = cpuid.field.number; c 851 arch/ia64/kernel/setup.c c->revision = cpuid.field.revision; c 852 arch/ia64/kernel/setup.c c->model = cpuid.field.model; c 853 arch/ia64/kernel/setup.c c->family = cpuid.field.family; c 854 arch/ia64/kernel/setup.c c->archrev = cpuid.field.archrev; c 855 arch/ia64/kernel/setup.c c->features = cpuid.field.features; c 856 arch/ia64/kernel/setup.c c->model_name = get_model_name(c->family, c->model); c 863 arch/ia64/kernel/setup.c c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1)); c 864 arch/ia64/kernel/setup.c c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1)); c 789 arch/ia64/kernel/smpboot.c void identify_siblings(struct cpuinfo_ia64 *c) c 818 arch/ia64/kernel/smpboot.c c->socket_id = (pltid << 8) | info.overview_ppid; c 823 arch/ia64/kernel/smpboot.c c->cores_per_socket = info.overview_cpp; c 824 arch/ia64/kernel/smpboot.c c->threads_per_core = info.overview_tpc; c 825 arch/ia64/kernel/smpboot.c c->num_log = info.overview_num_log; c 827 arch/ia64/kernel/smpboot.c c->core_id = info.log1_cid; c 828 arch/ia64/kernel/smpboot.c c->thread_id = info.log1_tid; c 1160 arch/ia64/kernel/unwind.c #define UNW_DEC_ABI(fmt,a,c,arg) desc_abi(a,c,arg) c 1184 arch/ia64/kernel/unwind.c #define UNW_DEC_EPILOGUE(fmt,t,c,arg) desc_epilogue(t,c,arg) c 41 arch/ia64/lib/io.c void memset_io(volatile void __iomem *dst, int c, long count) c 43 arch/ia64/lib/io.c unsigned char ch = (char)(c & 0xff); c 674 arch/m68k/amiga/config.c static void amiga_serial_putc(char c) c 676 arch/m68k/amiga/config.c amiga_custom.serdat = (unsigned char)c | 0x100; c 35 arch/m68k/atari/debug.c static inline void ata_mfp_out(char c) c 39 arch/m68k/atari/debug.c st_mfp.usart_dta = c; c 52 arch/m68k/atari/debug.c static inline void ata_scc_out(char c) c 58 arch/m68k/atari/debug.c atari_scc.cha_b_data = c; c 71 arch/m68k/atari/debug.c static inline void ata_midi_out(char c) c 75 arch/m68k/atari/debug.c acia.mid_data = c; c 88 arch/m68k/atari/debug.c static int ata_par_out(char c) c 100 arch/m68k/atari/debug.c sound_ym.wd_data = c; /* put char onto port */ c 43 arch/m68k/atari/nvram.c static void __nvram_write_byte(unsigned char c, int i) c 45 arch/m68k/atari/nvram.c CMOS_WRITE(c, NVRAM_FIRST_BYTE + i); c 139 arch/m68k/include/asm/atomic.h char c; c 140 arch/m68k/include/asm/atomic.h __asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v)); c 141 arch/m68k/include/asm/atomic.h return c != 0; c 147 arch/m68k/include/asm/atomic.h char c; c 150 arch/m68k/include/asm/atomic.h : "=d" (c), "=m" (*v) c 152 arch/m68k/include/asm/atomic.h return c != 0; c 157 arch/m68k/include/asm/atomic.h char c; c 158 arch/m68k/include/asm/atomic.h __asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v)); c 159 arch/m68k/include/asm/atomic.h return c != 0; c 199 arch/m68k/include/asm/atomic.h char c; c 201 arch/m68k/include/asm/atomic.h : "=d" (c), "+m" (*v) c 203 arch/m68k/include/asm/atomic.h return c != 0; c 209 arch/m68k/include/asm/atomic.h char c; c 211 arch/m68k/include/asm/atomic.h : "=d" (c), "+m" (*v) c 213 arch/m68k/include/asm/atomic.h return c != 0; c 85 arch/m68k/include/asm/bootstd.h #define _bsc3(type,name,atype,a,btype,b,ctype,c) \ c 86 arch/m68k/include/asm/bootstd.h type name(atype a, btype b, ctype c) \ c 91 arch/m68k/include/asm/bootstd.h register long __c __asm__ ("%d3") = (long)c; \ c 100 arch/m68k/include/asm/bootstd.h #define 
_bsc4(type,name,atype,a,btype,b,ctype,c,dtype,d) \ c 101 arch/m68k/include/asm/bootstd.h type name(atype a, btype b, ctype c, dtype d) \ c 106 arch/m68k/include/asm/bootstd.h register long __c __asm__ ("%d3") = (long)c; \ c 116 arch/m68k/include/asm/bootstd.h #define _bsc5(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e) \ c 117 arch/m68k/include/asm/bootstd.h type name(atype a, btype b, ctype c, dtype d, etype e) \ c 122 arch/m68k/include/asm/bootstd.h register long __c __asm__ ("%d3") = (long)c; \ c 16 arch/m68k/include/asm/nubus.h #define nubus_memset_io(a,b,c) memset((void *)(a),(b),(c)) c 17 arch/m68k/include/asm/nubus.h #define nubus_memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c)) c 18 arch/m68k/include/asm/nubus.h #define nubus_memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c)) c 68 arch/m68k/include/asm/string.h #define memset(d, c, n) __builtin_memset(d, c, n) c 75 arch/m68k/include/asm/sun3mmu.h unsigned char sfc, c; c 79 arch/m68k/include/asm/sun3mmu.h GET_CONTROL_BYTE (AC_BUS_ERROR, c); c 82 arch/m68k/include/asm/sun3mmu.h return c; c 89 arch/m68k/include/asm/sun3mmu.h unsigned char c, sfc; c 93 arch/m68k/include/asm/sun3mmu.h GET_CONTROL_BYTE (AC_SEGMAP | (addr & SUN3_CONTROL_MASK), c); c 95 arch/m68k/include/asm/sun3mmu.h entry = c; c 143 arch/m68k/include/asm/sun3mmu.h unsigned char sfc, c; c 147 arch/m68k/include/asm/sun3mmu.h GET_CONTROL_BYTE(AC_CONTEXT, c); c 150 arch/m68k/include/asm/sun3mmu.h return c; c 154 arch/m68k/include/asm/sun3mmu.h static inline void sun3_put_context(unsigned char c) c 159 arch/m68k/include/asm/sun3mmu.h SET_CONTROL_BYTE(AC_CONTEXT, c); c 16 arch/m68k/include/asm/zorro.h #define z_memset_io(a,b,c) memset((void *)(a),(b),(c)) c 17 arch/m68k/include/asm/zorro.h #define z_memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c)) c 18 arch/m68k/include/asm/zorro.h #define z_memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c)) c 20 arch/m68k/kernel/early_printk.c static void __ref debug_cons_write(struct console *c, c 26 arch/m68k/kernel/early_printk.c mvme16x_cons_write(c, s, n); c 850 arch/m68k/kernel/traps.c u16 c, *cp; c 925 arch/m68k/kernel/traps.c if (get_user(c, cp + i) && i >= 0) { c 930 arch/m68k/kernel/traps.c pr_cont(" %04x", c); c 932 arch/m68k/kernel/traps.c pr_cont(" <%04x>", c); c 10 arch/m68k/lib/memset.c void *memset(void *s, int c, size_t count) c 17 arch/m68k/lib/memset.c c &= 0xff; c 18 arch/m68k/lib/memset.c c |= c << 8; c 19 arch/m68k/lib/memset.c c |= c << 16; c 22 arch/m68k/lib/memset.c *cs++ = c; c 28 arch/m68k/lib/memset.c *ss++ = c; c 37 arch/m68k/lib/memset.c *ls++ = c; c 59 arch/m68k/lib/memset.c : "d" (c), "0" (ls), "1" (temp)); c 65 arch/m68k/lib/memset.c *ss++ = c; c 70 arch/m68k/lib/memset.c *cs = c; c 35 arch/m68k/sun3/prom/console.c prom_nbputchar(char c) c 41 arch/m68k/sun3/prom/console.c i = (*(romvec->pv_nbputchar))(c); c 57 arch/m68k/sun3/prom/console.c prom_putchar(char c) c 59 arch/m68k/sun3/prom/console.c while(prom_nbputchar(c) == -1) ; c 68 arch/m68k/sun3/prom/misc.c int c; c 69 arch/m68k/sun3/prom/misc.c GET_CONTROL_BYTE(SUN3_IDPROM_BASE + i, c); c 70 arch/m68k/sun3/prom/misc.c idbuf[i] = c; c 88 arch/microblaze/include/asm/delay.h #define muldiv(a, b, c) (((a)*(b))/(c)) c 35 arch/microblaze/include/asm/hash.h unsigned int b, c; c 39 arch/microblaze/include/asm/hash.h c = (a << 19) + a; c 40 arch/microblaze/include/asm/hash.h a = (a << 9) + c; c 47 arch/microblaze/include/asm/hash.h a += c; /* (a << 8) + (b << 3) + c */ c 62 arch/microblaze/include/asm/hash.h unsigned int b, c, d; c 65 arch/microblaze/include/asm/hash.h c 
= b << 1; /* 1 5 */ c 67 arch/microblaze/include/asm/hash.h c += b; /* 1 7 */ c 68 arch/microblaze/include/asm/hash.h c <<= 3; /* 3 10 */ c 69 arch/microblaze/include/asm/hash.h c -= a; /* 1 11 */ c 70 arch/microblaze/include/asm/hash.h d = c << 7; /* 7 18 */ c 77 arch/microblaze/include/asm/hash.h return d + c; /* 1 37 total instructions*/ c 32 arch/microblaze/include/asm/mmu.h unsigned long c:1; /* Changed */ c 23 arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c #define CI(c, p) { ci->c = PVR_##p(pvr); } c 36 arch/microblaze/lib/memcpy.c void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c) c 42 arch/microblaze/lib/memcpy.c while (c--) c 48 arch/microblaze/lib/memcpy.c void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c) c 62 arch/microblaze/lib/memcpy.c if (likely(c >= 4)) { c 70 arch/microblaze/lib/memcpy.c --c; c 73 arch/microblaze/lib/memcpy.c --c; c 76 arch/microblaze/lib/memcpy.c --c; c 87 arch/microblaze/lib/memcpy.c for (; c >= 4; c -= 4) c 99 arch/microblaze/lib/memcpy.c for (; c >= 4; c -= 4) { c 108 arch/microblaze/lib/memcpy.c for (; c >= 4; c -= 4) { c 125 arch/microblaze/lib/memcpy.c for (; c >= 4; c -= 4) { c 134 arch/microblaze/lib/memcpy.c for (; c >= 4; c -= 4) { c 151 arch/microblaze/lib/memcpy.c for (; c >= 4; c -= 4) { c 160 arch/microblaze/lib/memcpy.c for (; c >= 4; c -= 4) { c 176 arch/microblaze/lib/memcpy.c switch (c) { c 35 arch/microblaze/lib/memmove.c void *memmove(void *v_dst, const void *v_src, __kernel_size_t c) c 40 arch/microblaze/lib/memmove.c if (!c) c 45 arch/microblaze/lib/memmove.c return memcpy(v_dst, v_src, c); c 48 arch/microblaze/lib/memmove.c src += c; c 49 arch/microblaze/lib/memmove.c dst += c; c 52 arch/microblaze/lib/memmove.c while (c--) c 58 arch/microblaze/lib/memmove.c void *memmove(void *v_dst, const void *v_src, __kernel_size_t c) c 65 arch/microblaze/lib/memmove.c if (!c) c 70 arch/microblaze/lib/memmove.c return memcpy(v_dst, v_src, c); c 80 arch/microblaze/lib/memmove.c dst += c; c 81 arch/microblaze/lib/memmove.c src += c; c 83 arch/microblaze/lib/memmove.c if (c >= 4) { c 92 arch/microblaze/lib/memmove.c --c; c 95 arch/microblaze/lib/memmove.c --c; c 98 arch/microblaze/lib/memmove.c --c; c 109 arch/microblaze/lib/memmove.c for (; c >= 4; c -= 4) c 121 arch/microblaze/lib/memmove.c for (; c >= 4; c -= 4) { c 130 arch/microblaze/lib/memmove.c for (; c >= 4; c -= 4) { c 148 arch/microblaze/lib/memmove.c for (; c >= 4; c -= 4) { c 157 arch/microblaze/lib/memmove.c for (; c >= 4; c -= 4) { c 175 arch/microblaze/lib/memmove.c for (; c >= 4; c -= 4) { c 184 arch/microblaze/lib/memmove.c for (; c >= 4; c -= 4) { c 201 arch/microblaze/lib/memmove.c switch (c) { c 35 arch/microblaze/lib/memset.c void *memset(void *v_src, int c, __kernel_size_t n) c 40 arch/microblaze/lib/memset.c c = (c & 0xFF); c 44 arch/microblaze/lib/memset.c *src++ = c; c 49 arch/microblaze/lib/memset.c void *memset(void *v_src, int c, __kernel_size_t n) c 56 arch/microblaze/lib/memset.c c = (c & 0xFF); c 58 arch/microblaze/lib/memset.c if (unlikely(c)) { c 60 arch/microblaze/lib/memset.c w32 = c; c 70 arch/microblaze/lib/memset.c *src++ = c; c 73 arch/microblaze/lib/memset.c *src++ = c; c 76 arch/microblaze/lib/memset.c *src++ = c; c 91 arch/microblaze/lib/memset.c *src++ = c; c 51 arch/mips/alchemy/board-gpr.c void prom_putchar(char c) c 53 arch/mips/alchemy/board-gpr.c alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); c 56 arch/mips/alchemy/board-gpr.c static void gpr_reset(char *c) c 50 arch/mips/alchemy/board-mtx1.c void prom_putchar(char c) c 52 
arch/mips/alchemy/board-mtx1.c alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); c 55 arch/mips/alchemy/board-mtx1.c static void mtx1_reset(char *c) c 46 arch/mips/alchemy/board-xxs1500.c void prom_putchar(char c) c 48 arch/mips/alchemy/board-xxs1500.c alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); c 51 arch/mips/alchemy/board-xxs1500.c static void xxs1500_reset(char *c) c 238 arch/mips/alchemy/common/clock.c struct clk *c; c 255 arch/mips/alchemy/common/clock.c c = clk_register(NULL, &a->hw); c 256 arch/mips/alchemy/common/clock.c if (!IS_ERR(c)) c 257 arch/mips/alchemy/common/clock.c clk_register_clkdev(c, name, NULL); c 261 arch/mips/alchemy/common/clock.c return c; c 269 arch/mips/alchemy/common/clock.c struct clk *c; c 271 arch/mips/alchemy/common/clock.c c = clk_register_fixed_factor(NULL, ALCHEMY_SYSBUS_CLK, c 273 arch/mips/alchemy/common/clock.c if (!IS_ERR(c)) c 274 arch/mips/alchemy/common/clock.c clk_register_clkdev(c, ALCHEMY_SYSBUS_CLK, NULL); c 275 arch/mips/alchemy/common/clock.c return c; c 283 arch/mips/alchemy/common/clock.c struct clk *c; c 285 arch/mips/alchemy/common/clock.c c = clk_register_fixed_factor(NULL, ALCHEMY_PERIPH_CLK, c 287 arch/mips/alchemy/common/clock.c if (!IS_ERR(c)) c 288 arch/mips/alchemy/common/clock.c clk_register_clkdev(c, ALCHEMY_PERIPH_CLK, NULL); c 289 arch/mips/alchemy/common/clock.c return c; c 298 arch/mips/alchemy/common/clock.c struct clk *c; c 319 arch/mips/alchemy/common/clock.c c = clk_register_fixed_factor(NULL, ALCHEMY_MEM_CLK, pn, c 321 arch/mips/alchemy/common/clock.c if (!IS_ERR(c)) c 322 arch/mips/alchemy/common/clock.c clk_register_clkdev(c, ALCHEMY_MEM_CLK, NULL); c 323 arch/mips/alchemy/common/clock.c return c; c 337 arch/mips/alchemy/common/clock.c struct clk *c; c 348 arch/mips/alchemy/common/clock.c c = clk_register_fixed_factor(NULL, ALCHEMY_LR_CLK, c 350 arch/mips/alchemy/common/clock.c if (!IS_ERR(c)) c 351 arch/mips/alchemy/common/clock.c clk_register_clkdev(c, ALCHEMY_LR_CLK, NULL); c 352 arch/mips/alchemy/common/clock.c return c; c 486 arch/mips/alchemy/common/clock.c struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); c 489 arch/mips/alchemy/common/clock.c spin_lock_irqsave(c->reglock, flags); c 490 arch/mips/alchemy/common/clock.c v = alchemy_rdsys(c->reg); c 491 arch/mips/alchemy/common/clock.c v |= (1 << 1) << c->shift; c 492 arch/mips/alchemy/common/clock.c alchemy_wrsys(v, c->reg); c 493 arch/mips/alchemy/common/clock.c spin_unlock_irqrestore(c->reglock, flags); c 500 arch/mips/alchemy/common/clock.c struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); c 501 arch/mips/alchemy/common/clock.c unsigned long v = alchemy_rdsys(c->reg) >> (c->shift + 1); c 508 arch/mips/alchemy/common/clock.c struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); c 511 arch/mips/alchemy/common/clock.c spin_lock_irqsave(c->reglock, flags); c 512 arch/mips/alchemy/common/clock.c v = alchemy_rdsys(c->reg); c 513 arch/mips/alchemy/common/clock.c v &= ~((1 << 1) << c->shift); c 514 arch/mips/alchemy/common/clock.c alchemy_wrsys(v, c->reg); c 515 arch/mips/alchemy/common/clock.c spin_unlock_irqrestore(c->reglock, flags); c 520 arch/mips/alchemy/common/clock.c struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); c 523 arch/mips/alchemy/common/clock.c spin_lock_irqsave(c->reglock, flags); c 524 arch/mips/alchemy/common/clock.c v = alchemy_rdsys(c->reg); c 526 arch/mips/alchemy/common/clock.c v |= (1 << c->shift); c 528 arch/mips/alchemy/common/clock.c v &= ~(1 << c->shift); c 529 arch/mips/alchemy/common/clock.c alchemy_wrsys(v, c->reg); c 530 
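The alchemy/common/clock.c hits above show the provider-side common-clock idiom: register a clock, then immediately add a clkdev lookup so consumers can find it by name. A hedged sketch of that sequence for a fictional half-rate bus clock ("sketch_busclk" and the helper name are assumptions, not kernel code):

        #include <linux/clk-provider.h>
        #include <linux/clkdev.h>
        #include <linux/err.h>

        /* register a clock that always runs at parent/2, then add a clkdev
         * lookup so consumers can clk_get(NULL, "sketch_busclk") it by name */
        static struct clk *sketch_register_busclk(const char *parent_name)
        {
                struct clk *c;

                c = clk_register_fixed_factor(NULL, "sketch_busclk", parent_name,
                                              0, 1, 2);   /* mult = 1, div = 2 */
                if (!IS_ERR(c))
                        clk_register_clkdev(c, "sketch_busclk", NULL);
                return c;
        }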
arch/mips/alchemy/common/clock.c spin_unlock_irqrestore(c->reglock, flags); c 537 arch/mips/alchemy/common/clock.c struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); c 539 arch/mips/alchemy/common/clock.c return (alchemy_rdsys(c->reg) >> c->shift) & 1; c 545 arch/mips/alchemy/common/clock.c struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); c 547 arch/mips/alchemy/common/clock.c int sh = c->shift + 2; c 552 arch/mips/alchemy/common/clock.c spin_lock_irqsave(c->reglock, flags); c 553 arch/mips/alchemy/common/clock.c v = alchemy_rdsys(c->reg); c 556 arch/mips/alchemy/common/clock.c alchemy_wrsys(v, c->reg); c 557 arch/mips/alchemy/common/clock.c spin_unlock_irqrestore(c->reglock, flags); c 565 arch/mips/alchemy/common/clock.c struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); c 566 arch/mips/alchemy/common/clock.c unsigned long v = alchemy_rdsys(c->reg) >> (c->shift + 2); c 590 arch/mips/alchemy/common/clock.c static void __alchemy_clk_fgv2_en(struct alchemy_fgcs_clk *c) c 592 arch/mips/alchemy/common/clock.c unsigned long v = alchemy_rdsys(c->reg); c 594 arch/mips/alchemy/common/clock.c v &= ~(3 << c->shift); c 595 arch/mips/alchemy/common/clock.c v |= (c->parent & 3) << c->shift; c 596 arch/mips/alchemy/common/clock.c alchemy_wrsys(v, c->reg); c 597 arch/mips/alchemy/common/clock.c c->isen = 1; c 602 arch/mips/alchemy/common/clock.c struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); c 606 arch/mips/alchemy/common/clock.c spin_lock_irqsave(c->reglock, flags); c 607 arch/mips/alchemy/common/clock.c __alchemy_clk_fgv2_en(c); c 608 arch/mips/alchemy/common/clock.c spin_unlock_irqrestore(c->reglock, flags); c 615 arch/mips/alchemy/common/clock.c struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); c 617 arch/mips/alchemy/common/clock.c return ((alchemy_rdsys(c->reg) >> c->shift) & 3) != 0; c 622 arch/mips/alchemy/common/clock.c struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); c 625 arch/mips/alchemy/common/clock.c spin_lock_irqsave(c->reglock, flags); c 626 arch/mips/alchemy/common/clock.c v = alchemy_rdsys(c->reg); c 627 arch/mips/alchemy/common/clock.c v &= ~(3 << c->shift); /* set input mux to "disabled" state */ c 628 arch/mips/alchemy/common/clock.c alchemy_wrsys(v, c->reg); c 629 arch/mips/alchemy/common/clock.c c->isen = 0; c 630 arch/mips/alchemy/common/clock.c spin_unlock_irqrestore(c->reglock, flags); c 635 arch/mips/alchemy/common/clock.c struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); c 638 arch/mips/alchemy/common/clock.c spin_lock_irqsave(c->reglock, flags); c 639 arch/mips/alchemy/common/clock.c c->parent = index + 1; /* value to write to register */ c 640 arch/mips/alchemy/common/clock.c if (c->isen) c 641 arch/mips/alchemy/common/clock.c __alchemy_clk_fgv2_en(c); c 642 arch/mips/alchemy/common/clock.c spin_unlock_irqrestore(c->reglock, flags); c 649 arch/mips/alchemy/common/clock.c struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); c 652 arch/mips/alchemy/common/clock.c spin_lock_irqsave(c->reglock, flags); c 653 arch/mips/alchemy/common/clock.c v = c->parent - 1; c 654 arch/mips/alchemy/common/clock.c spin_unlock_irqrestore(c->reglock, flags); c 666 arch/mips/alchemy/common/clock.c struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); c 667 arch/mips/alchemy/common/clock.c int sh = c->shift + 2; c 673 arch/mips/alchemy/common/clock.c v = alchemy_rdsys(c->reg) & (1 << 30); /* test "scale" bit */ c 677 arch/mips/alchemy/common/clock.c spin_lock_irqsave(c->reglock, flags); c 678 arch/mips/alchemy/common/clock.c v = alchemy_rdsys(c->reg); c 681 arch/mips/alchemy/common/clock.c alchemy_wrsys(v, c->reg); c 682 
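Every alchemy_fgcs_clk helper above is the same lock / read / modify / write sequence on a shared system register. A minimal sketch of that pattern, assuming a plain variable stands in for the hardware register and a local lock stands in for c->reglock (sketch_sysreg, sketch_lock and sketch_gate_on are illustrative only):

        #include <linux/spinlock.h>

        static DEFINE_SPINLOCK(sketch_lock);
        static unsigned long sketch_sysreg;     /* stand-in for the shared SYS register */

        /* set one gate bit, serialised against other updaters and IRQ-safe */
        static void sketch_gate_on(unsigned int shift)
        {
                unsigned long flags, v;

                spin_lock_irqsave(&sketch_lock, flags);
                v = sketch_sysreg;              /* read   */
                v |= 1UL << shift;              /* modify */
                sketch_sysreg = v;              /* write  */
                spin_unlock_irqrestore(&sketch_lock, flags);
        }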
arch/mips/alchemy/common/clock.c spin_unlock_irqrestore(c->reglock, flags); c 690 arch/mips/alchemy/common/clock.c struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); c 691 arch/mips/alchemy/common/clock.c int sh = c->shift + 2; c 694 arch/mips/alchemy/common/clock.c v = alchemy_rdsys(c->reg); c 705 arch/mips/alchemy/common/clock.c struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); c 708 arch/mips/alchemy/common/clock.c if (alchemy_rdsys(c->reg) & (1 << 30)) { c 745 arch/mips/alchemy/common/clock.c struct clk *c; c 799 arch/mips/alchemy/common/clock.c c = clk_register(NULL, &a->hw); c 800 arch/mips/alchemy/common/clock.c if (IS_ERR(c)) c 803 arch/mips/alchemy/common/clock.c clk_register_clkdev(c, id.name, NULL); c 814 arch/mips/alchemy/common/clock.c struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); c 815 arch/mips/alchemy/common/clock.c unsigned long v = alchemy_rdsys(c->reg); c 817 arch/mips/alchemy/common/clock.c return (((v >> c->shift) >> 2) & 7) != 0; c 820 arch/mips/alchemy/common/clock.c static void __alchemy_clk_csrc_en(struct alchemy_fgcs_clk *c) c 822 arch/mips/alchemy/common/clock.c unsigned long v = alchemy_rdsys(c->reg); c 824 arch/mips/alchemy/common/clock.c v &= ~((7 << 2) << c->shift); c 825 arch/mips/alchemy/common/clock.c v |= ((c->parent & 7) << 2) << c->shift; c 826 arch/mips/alchemy/common/clock.c alchemy_wrsys(v, c->reg); c 827 arch/mips/alchemy/common/clock.c c->isen = 1; c 832 arch/mips/alchemy/common/clock.c struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); c 836 arch/mips/alchemy/common/clock.c spin_lock_irqsave(c->reglock, flags); c 837 arch/mips/alchemy/common/clock.c __alchemy_clk_csrc_en(c); c 838 arch/mips/alchemy/common/clock.c spin_unlock_irqrestore(c->reglock, flags); c 845 arch/mips/alchemy/common/clock.c struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); c 848 arch/mips/alchemy/common/clock.c spin_lock_irqsave(c->reglock, flags); c 849 arch/mips/alchemy/common/clock.c v = alchemy_rdsys(c->reg); c 850 arch/mips/alchemy/common/clock.c v &= ~((3 << 2) << c->shift); /* mux to "disabled" state */ c 851 arch/mips/alchemy/common/clock.c alchemy_wrsys(v, c->reg); c 852 arch/mips/alchemy/common/clock.c c->isen = 0; c 853 arch/mips/alchemy/common/clock.c spin_unlock_irqrestore(c->reglock, flags); c 858 arch/mips/alchemy/common/clock.c struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); c 861 arch/mips/alchemy/common/clock.c spin_lock_irqsave(c->reglock, flags); c 862 arch/mips/alchemy/common/clock.c c->parent = index + 1; /* value to write to register */ c 863 arch/mips/alchemy/common/clock.c if (c->isen) c 864 arch/mips/alchemy/common/clock.c __alchemy_clk_csrc_en(c); c 865 arch/mips/alchemy/common/clock.c spin_unlock_irqrestore(c->reglock, flags); c 872 arch/mips/alchemy/common/clock.c struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); c 874 arch/mips/alchemy/common/clock.c return c->parent - 1; c 880 arch/mips/alchemy/common/clock.c struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); c 881 arch/mips/alchemy/common/clock.c unsigned long v = (alchemy_rdsys(c->reg) >> c->shift) & 3; c 883 arch/mips/alchemy/common/clock.c return parent_rate / c->dt[v]; c 889 arch/mips/alchemy/common/clock.c struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); c 899 arch/mips/alchemy/common/clock.c if ((d == 3) && (c->dt[2] != 3)) c 903 arch/mips/alchemy/common/clock.c if (c->dt[i] == d) c 909 arch/mips/alchemy/common/clock.c spin_lock_irqsave(c->reglock, flags); c 910 arch/mips/alchemy/common/clock.c v = alchemy_rdsys(c->reg); c 911 arch/mips/alchemy/common/clock.c v &= ~(3 << c->shift); c 912 arch/mips/alchemy/common/clock.c v 
|= (i & 3) << c->shift; c 913 arch/mips/alchemy/common/clock.c alchemy_wrsys(v, c->reg); c 914 arch/mips/alchemy/common/clock.c spin_unlock_irqrestore(c->reglock, flags); c 922 arch/mips/alchemy/common/clock.c struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); c 923 arch/mips/alchemy/common/clock.c int scale = c->dt[2] == 3 ? 1 : 2; /* au1300 check */ c 956 arch/mips/alchemy/common/clock.c struct clk *c; c 1017 arch/mips/alchemy/common/clock.c c = clk_register(NULL, &a->hw); c 1018 arch/mips/alchemy/common/clock.c if (IS_ERR(c)) c 1021 arch/mips/alchemy/common/clock.c clk_register_clkdev(c, id.name, NULL); c 1043 arch/mips/alchemy/common/clock.c struct clk *c; c 1046 arch/mips/alchemy/common/clock.c c = clk_register_fixed_rate(NULL, ALCHEMY_ROOT_CLK, NULL, c 1048 arch/mips/alchemy/common/clock.c ERRCK(c) c 1051 arch/mips/alchemy/common/clock.c c = alchemy_clk_setup_cpu(ALCHEMY_ROOT_CLK, ctype); c 1052 arch/mips/alchemy/common/clock.c ERRCK(c) c 1056 arch/mips/alchemy/common/clock.c c = alchemy_clk_setup_aux(ALCHEMY_ROOT_CLK, ALCHEMY_AUXPLL_CLK, c 1058 arch/mips/alchemy/common/clock.c ERRCK(c) c 1061 arch/mips/alchemy/common/clock.c c = alchemy_clk_setup_aux(ALCHEMY_ROOT_CLK, c 1064 arch/mips/alchemy/common/clock.c ERRCK(c) c 1068 arch/mips/alchemy/common/clock.c c = alchemy_clk_setup_sysbus(ALCHEMY_CPU_CLK); c 1069 arch/mips/alchemy/common/clock.c ERRCK(c) c 1072 arch/mips/alchemy/common/clock.c c = alchemy_clk_setup_periph(ALCHEMY_SYSBUS_CLK); c 1073 arch/mips/alchemy/common/clock.c ERRCK(c) c 1076 arch/mips/alchemy/common/clock.c c = alchemy_clk_setup_mem(ALCHEMY_SYSBUS_CLK, ctype); c 1077 arch/mips/alchemy/common/clock.c ERRCK(c) c 1080 arch/mips/alchemy/common/clock.c c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK, ctype); c 1081 arch/mips/alchemy/common/clock.c ERRCK(c) c 105 arch/mips/alchemy/common/platform.c int c = alchemy_get_uarts(ctype); c 118 arch/mips/alchemy/common/platform.c ports = kcalloc(s, (c + 1), GFP_KERNEL); c 123 arch/mips/alchemy/common/platform.c memcpy(ports, au1x00_uart_data[ctype], s * c); c 127 arch/mips/alchemy/common/platform.c for (s = 0; s < c; s++) c 79 arch/mips/alchemy/common/prom.c static inline unsigned char str2hexnum(unsigned char c) c 81 arch/mips/alchemy/common/prom.c if (c >= '0' && c <= '9') c 82 arch/mips/alchemy/common/prom.c return c - '0'; c 83 arch/mips/alchemy/common/prom.c if (c >= 'a' && c <= 'f') c 84 arch/mips/alchemy/common/prom.c return c - 'a' + 10; c 85 arch/mips/alchemy/common/prom.c if (c >= 'A' && c <= 'F') c 86 arch/mips/alchemy/common/prom.c return c - 'A' + 10; c 396 arch/mips/alchemy/common/usb.c struct clk *c; c 399 arch/mips/alchemy/common/usb.c c = clk_get(NULL, "usbh_clk"); c 400 arch/mips/alchemy/common/usb.c if (IS_ERR(c)) c 402 arch/mips/alchemy/common/usb.c if (clk_round_rate(c, 48000000) != 48000000) { c 403 arch/mips/alchemy/common/usb.c clk_put(c); c 406 arch/mips/alchemy/common/usb.c if (clk_set_rate(c, 48000000)) { c 407 arch/mips/alchemy/common/usb.c clk_put(c); c 410 arch/mips/alchemy/common/usb.c clk_put(c); c 429 arch/mips/alchemy/common/usb.c struct clk *c = clk_get(NULL, "usbh_clk"); c 431 arch/mips/alchemy/common/usb.c if (IS_ERR(c)) c 435 arch/mips/alchemy/common/usb.c if (clk_prepare_enable(c)) c 452 arch/mips/alchemy/common/usb.c clk_disable_unprepare(c); c 455 arch/mips/alchemy/common/usb.c clk_put(c); c 450 arch/mips/alchemy/devboards/db1000.c struct clk *c, *p; c 482 arch/mips/alchemy/devboards/db1000.c c = clk_get(NULL, "lcd_intclk"); c 483 arch/mips/alchemy/devboards/db1000.c if (!IS_ERR(c) && 
!IS_ERR(p)) { c 484 arch/mips/alchemy/devboards/db1000.c clk_set_parent(c, p); c 485 arch/mips/alchemy/devboards/db1000.c clk_set_rate(c, clk_get_rate(p)); c 487 arch/mips/alchemy/devboards/db1000.c if (!IS_ERR(c)) c 488 arch/mips/alchemy/devboards/db1000.c clk_put(c); c 813 arch/mips/alchemy/devboards/db1200.c struct clk *c; c 834 arch/mips/alchemy/devboards/db1200.c c = clk_get(NULL, "psc0_intclk"); c 835 arch/mips/alchemy/devboards/db1200.c if (!IS_ERR(c)) { c 836 arch/mips/alchemy/devboards/db1200.c pfc = clk_round_rate(c, 50000000); c 840 arch/mips/alchemy/devboards/db1200.c clk_set_rate(c, pfc); c 841 arch/mips/alchemy/devboards/db1200.c clk_prepare_enable(c); c 842 arch/mips/alchemy/devboards/db1200.c clk_put(c); c 795 arch/mips/alchemy/devboards/db1300.c struct clk *c; c 829 arch/mips/alchemy/devboards/db1300.c c = clk_get(NULL, "psc3_intclk"); c 830 arch/mips/alchemy/devboards/db1300.c if (!IS_ERR(c)) { c 831 arch/mips/alchemy/devboards/db1300.c clk_set_rate(c, 50000000); c 832 arch/mips/alchemy/devboards/db1300.c clk_prepare_enable(c); c 833 arch/mips/alchemy/devboards/db1300.c clk_put(c); c 584 arch/mips/alchemy/devboards/db1550.c struct clk *c; c 593 arch/mips/alchemy/devboards/db1550.c c = clk_get(NULL, "psc0_intclk"); c 594 arch/mips/alchemy/devboards/db1550.c if (!IS_ERR(c)) { c 595 arch/mips/alchemy/devboards/db1550.c clk_set_rate(c, 50000000); c 596 arch/mips/alchemy/devboards/db1550.c clk_prepare_enable(c); c 597 arch/mips/alchemy/devboards/db1550.c clk_put(c); c 599 arch/mips/alchemy/devboards/db1550.c c = clk_get(NULL, "psc2_intclk"); c 600 arch/mips/alchemy/devboards/db1550.c if (!IS_ERR(c)) { c 601 arch/mips/alchemy/devboards/db1550.c clk_set_rate(c, db1550_spi_platdata.mainclk_hz); c 602 arch/mips/alchemy/devboards/db1550.c clk_prepare_enable(c); c 603 arch/mips/alchemy/devboards/db1550.c clk_put(c); c 40 arch/mips/alchemy/devboards/platform.c void prom_putchar(char c) c 43 arch/mips/alchemy/devboards/platform.c alchemy_uart_putchar(AU1300_UART2_PHYS_ADDR, c); c 45 arch/mips/alchemy/devboards/platform.c alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); c 63 arch/mips/alchemy/devboards/platform.c static void db1x_reset(char *c) c 251 arch/mips/ar7/prom.c void prom_putchar(char c) c 255 arch/mips/ar7/prom.c serial_out(UART_TX, c); c 60 arch/mips/bcm47xx/prom.c struct cpuinfo_mips *c = &current_cpu_data; c 98 arch/mips/bcm47xx/prom.c if (c->cputype == CPU_74K && (mem == (128 << 20))) c 149 arch/mips/bcm47xx/setup.c struct cpuinfo_mips *c = &current_cpu_data; c 151 arch/mips/bcm47xx/setup.c if ((c->cputype == CPU_74K) || (c->cputype == CPU_1074K)) { c 25 arch/mips/bcm63xx/early_printk.c void prom_putchar(char c) c 28 arch/mips/bcm63xx/early_printk.c bcm_uart0_writel(c, UART_FIFO_REG); c 12 arch/mips/boot/compressed/dbg.c void __weak putc(char c) c 18 arch/mips/boot/compressed/dbg.c char c; c 19 arch/mips/boot/compressed/dbg.c while ((c = *s++) != '\0') { c 20 arch/mips/boot/compressed/dbg.c putc(c); c 21 arch/mips/boot/compressed/dbg.c if (c == '\n') c 21 arch/mips/boot/compressed/string.c void *memset(void *s, int c, size_t n) c 27 arch/mips/boot/compressed/string.c ss[i] = c; c 56 arch/mips/boot/compressed/uart-16550.c void putc(char c) c 63 arch/mips/boot/compressed/uart-16550.c serial_out(UART_TX, c); c 4 arch/mips/boot/compressed/uart-alchemy.c void putc(char c) c 6 arch/mips/boot/compressed/uart-alchemy.c alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c); c 4 arch/mips/boot/compressed/uart-prom.c void putc(char c) c 6 arch/mips/boot/compressed/uart-prom.c prom_putchar(c); c 1111
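The db1200/db1300/db1550 hits above all use the same consumer-side clk sequence: get, round and set the rate, prepare+enable, put. A minimal sketch of that sequence, assuming a clock named "psc0_intclk" exists on the target board (sketch_enable_psc_clock is not a kernel function):

        #include <linux/clk.h>
        #include <linux/err.h>

        static int sketch_enable_psc_clock(unsigned long hz)
        {
                struct clk *c = clk_get(NULL, "psc0_intclk");
                long rounded;

                if (IS_ERR(c))
                        return PTR_ERR(c);

                rounded = clk_round_rate(c, hz);    /* what can the clock actually do? */
                if (rounded > 0)
                        clk_set_rate(c, rounded);
                clk_prepare_enable(c);              /* ungate it */
                clk_put(c);                         /* drop the reference; it stays enabled */
                return 0;
        }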
arch/mips/cavium-octeon/octeon-platform.c enum cvmx_helper_board_usb_clock_types c; c 1113 arch/mips/cavium-octeon/octeon-platform.c c = __cvmx_helper_board_usb_get_clock_type(); c 1114 arch/mips/cavium-octeon/octeon-platform.c switch (c) { c 1113 arch/mips/cavium-octeon/setup.c void prom_putchar(char c) c 1123 arch/mips/cavium-octeon/setup.c cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c & 0xffull); c 15 arch/mips/dec/prom/console.c unsigned int c) c 20 arch/mips/dec/prom/console.c while (c > 0) { c 21 arch/mips/dec/prom/console.c if (chunk > c) c 22 arch/mips/dec/prom/console.c chunk = c; c 27 arch/mips/dec/prom/console.c c -= chunk; c 49 arch/mips/fw/arc/init.c ArcRead(0, &c, 1, &cnt); c 24 arch/mips/fw/arc/promlib.c void prom_putchar(char c) c 27 arch/mips/fw/arc/promlib.c CHAR it = c; c 37 arch/mips/fw/arc/promlib.c CHAR c; c 40 arch/mips/fw/arc/promlib.c ArcRead(0, &c, 1, &cnt); c 43 arch/mips/fw/arc/promlib.c return c; c 68 arch/mips/fw/sni/sniprom.c void prom_putchar(char c) c 70 arch/mips/fw/sni/sniprom.c _prom_putchar(c); c 139 arch/mips/include/asm/local.h long c, old; \ c 140 arch/mips/include/asm/local.h c = local_read(l); \ c 141 arch/mips/include/asm/local.h while (c != (u) && (old = local_cmpxchg((l), c, c + (a))) != c) \ c 142 arch/mips/include/asm/local.h c = old; \ c 143 arch/mips/include/asm/local.h c != (u); \ c 747 arch/mips/include/asm/mach-au1x00/au1000.h static inline void alchemy_uart_putchar(u32 uart_phys, u8 c) c 762 arch/mips/include/asm/mach-au1x00/au1000.h __raw_writel(c, base + 0x04); /* tx */ c 22 arch/mips/include/asm/mach-jazz/floppy.h unsigned char c; c 24 arch/mips/include/asm/mach-jazz/floppy.h c = *(volatile unsigned char *) port; c 27 arch/mips/include/asm/mach-jazz/floppy.h return c; c 121 arch/mips/include/asm/netlogic/xlp-hal/uart.h nlm_uart_outbyte(uint64_t base, char c) c 131 arch/mips/include/asm/netlogic/xlp-hal/uart.h nlm_write_uart_reg(base, UART_TX_DATA, (int)c); c 16 arch/mips/include/asm/octeon/cvmx-ciu-defs.h #define CVMX_CIU_EN2_PPX_IP4(c) CVMX_CIU_ADDR(0xA400, c, 0x0F, 8) c 17 arch/mips/include/asm/octeon/cvmx-ciu-defs.h #define CVMX_CIU_EN2_PPX_IP4_W1C(c) CVMX_CIU_ADDR(0xCC00, c, 0x0F, 8) c 18 arch/mips/include/asm/octeon/cvmx-ciu-defs.h #define CVMX_CIU_EN2_PPX_IP4_W1S(c) CVMX_CIU_ADDR(0xAC00, c, 0x0F, 8) c 21 arch/mips/include/asm/octeon/cvmx-ciu-defs.h #define CVMX_CIU_INTX_EN0(c) CVMX_CIU_ADDR(0x0200, c, 0x3F, 16) c 22 arch/mips/include/asm/octeon/cvmx-ciu-defs.h #define CVMX_CIU_INTX_EN0_W1C(c) CVMX_CIU_ADDR(0x2200, c, 0x3F, 16) c 23 arch/mips/include/asm/octeon/cvmx-ciu-defs.h #define CVMX_CIU_INTX_EN0_W1S(c) CVMX_CIU_ADDR(0x6200, c, 0x3F, 16) c 24 arch/mips/include/asm/octeon/cvmx-ciu-defs.h #define CVMX_CIU_INTX_EN1(c) CVMX_CIU_ADDR(0x0208, c, 0x3F, 16) c 25 arch/mips/include/asm/octeon/cvmx-ciu-defs.h #define CVMX_CIU_INTX_EN1_W1C(c) CVMX_CIU_ADDR(0x2208, c, 0x3F, 16) c 26 arch/mips/include/asm/octeon/cvmx-ciu-defs.h #define CVMX_CIU_INTX_EN1_W1S(c) CVMX_CIU_ADDR(0x6208, c, 0x3F, 16) c 27 arch/mips/include/asm/octeon/cvmx-ciu-defs.h #define CVMX_CIU_INTX_SUM0(c) CVMX_CIU_ADDR(0x0000, c, 0x3F, 8) c 41 arch/mips/include/asm/octeon/cvmx-ciu-defs.h #define CVMX_CIU_SUM2_PPX_IP4(c) CVMX_CIU_ADDR(0x8C00, c, 0x0F, 8) c 43 arch/mips/include/asm/octeon/cvmx-ciu-defs.h #define CVMX_CIU_TIMX(c) CVMX_CIU_ADDR(0x0480, c, 0x0F, 8) c 463 arch/mips/include/asm/octeon/cvmx.h type c; \ c 465 arch/mips/include/asm/octeon/cvmx.h c.u64 = cvmx_read_csr(address); \ c 466 arch/mips/include/asm/octeon/cvmx.h if ((c.s.field) op(value)) { \ c 206 
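The asm/local.h excerpt above is the classic "add unless equal" compare-and-swap loop. A hypothetical userspace rendering of the same loop using the GCC/Clang __sync builtin (sketch_add_unless is an illustrative name, not the kernel's local_add_unless):

        /* returns non-zero if *l was changed, i.e. if it was not equal to u */
        static int sketch_add_unless(long *l, long a, long u)
        {
                long c = *l, old;

                while (c != u &&
                       (old = __sync_val_compare_and_swap(l, c, c + a)) != c)
                        c = old;        /* lost the race: retry from the observed value */

                return c != u;
        }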
arch/mips/include/asm/pci/bridge.h u8 c[0x1000 / 1]; c 211 arch/mips/include/asm/pci/bridge.h u8 c[0x100 / 1]; c 220 arch/mips/include/asm/pci/bridge.h u8 c[0x1000 / 1]; c 230 arch/mips/include/asm/pci/bridge.h u8 c[8 / 1]; c 246 arch/mips/include/asm/pci/bridge.h u8 c[0x100000 / 1]; c 260 arch/mips/include/asm/pci/bridge.h u8 c[0x400000 / 1]; /* read-only */ c 53 arch/mips/include/asm/txx9/generic.h extern void (*txx9_prom_putchar)(char c); c 90 arch/mips/include/asm/txx9/generic.h int txx9_7segled_putc(unsigned int pos, char c); c 25 arch/mips/include/asm/uasm.h void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c) c 28 arch/mips/include/asm/uasm.h void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c) c 31 arch/mips/include/asm/uasm.h void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c) c 34 arch/mips/include/asm/uasm.h void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c) c 37 arch/mips/include/asm/uasm.h void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c) c 40 arch/mips/include/asm/uasm.h void uasm_i##op(u32 **buf, unsigned int a, signed int b, unsigned int c) c 43 arch/mips/include/asm/uasm.h void uasm_i##op(u32 **buf, int a, int b, int c) c 46 arch/mips/include/asm/uasm.h void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c) c 49 arch/mips/include/asm/uasm.h void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \ c 50 arch/mips/include/asm/vga.h #define scr_memcpyw(d, s, c) memcpy(d, s, c) c 51 arch/mips/include/asm/vga.h #define scr_memmovew(d, s, c) memmove(d, s, c) c 18 arch/mips/include/asm/watch.h void mips_probe_watch_registers(struct cpuinfo_mips *c); c 12 arch/mips/kernel/cacheinfo.c leaf->coherency_line_size = c->cache.linesz; \ c 13 arch/mips/kernel/cacheinfo.c leaf->number_of_sets = c->cache.sets; \ c 14 arch/mips/kernel/cacheinfo.c leaf->ways_of_associativity = c->cache.ways; \ c 15 arch/mips/kernel/cacheinfo.c leaf->size = c->cache.linesz * c->cache.sets * \ c 16 arch/mips/kernel/cacheinfo.c c->cache.ways; \ c 22 arch/mips/kernel/cacheinfo.c struct cpuinfo_mips *c = &current_cpu_data; c 30 arch/mips/kernel/cacheinfo.c if (c->dcache.waysize) c 36 arch/mips/kernel/cacheinfo.c leaves += (c->icache.waysize) ?
2 : 1; c 38 arch/mips/kernel/cacheinfo.c if (c->scache.waysize) { c 43 arch/mips/kernel/cacheinfo.c if (c->tcache.waysize) { c 74 arch/mips/kernel/cacheinfo.c struct cpuinfo_mips *c = &current_cpu_data; c 78 arch/mips/kernel/cacheinfo.c if (c->icache.waysize) { c 88 arch/mips/kernel/cacheinfo.c if (c->scache.waysize) { c 94 arch/mips/kernel/cacheinfo.c if (c->tcache.waysize) c 62 arch/mips/kernel/cpu-probe.c static inline void cpu_set_fpu_fcsr_mask(struct cpuinfo_mips *c) c 66 arch/mips/kernel/cpu-probe.c fcsr = c->fpu_csr31; c 84 arch/mips/kernel/cpu-probe.c c->fpu_msk31 = ~(fcsr0 ^ fcsr1) & ~mask; c 91 arch/mips/kernel/cpu-probe.c static void cpu_set_fpu_2008(struct cpuinfo_mips *c) c 93 arch/mips/kernel/cpu-probe.c if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 | c 116 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_NAN_LEGACY; c 118 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_NAN_2008; c 121 arch/mips/kernel/cpu-probe.c c->fpu_msk31 &= ~FPU_CSR_ABS2008; c 123 arch/mips/kernel/cpu-probe.c c->fpu_csr31 |= fcsr & FPU_CSR_ABS2008; c 126 arch/mips/kernel/cpu-probe.c c->fpu_msk31 &= ~FPU_CSR_NAN2008; c 128 arch/mips/kernel/cpu-probe.c c->fpu_csr31 |= fcsr & FPU_CSR_NAN2008; c 130 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_NAN_LEGACY; c 135 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_NAN_LEGACY; c 151 arch/mips/kernel/cpu-probe.c static void cpu_set_nofpu_2008(struct cpuinfo_mips *c) c 153 arch/mips/kernel/cpu-probe.c c->options &= ~(MIPS_CPU_NAN_2008 | MIPS_CPU_NAN_LEGACY); c 154 arch/mips/kernel/cpu-probe.c c->fpu_csr31 &= ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008); c 155 arch/mips/kernel/cpu-probe.c c->fpu_msk31 &= ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008); c 159 arch/mips/kernel/cpu-probe.c if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 | c 162 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_NAN_2008 | MIPS_CPU_NAN_LEGACY; c 164 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_NAN_LEGACY; c 165 arch/mips/kernel/cpu-probe.c c->fpu_msk31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008; c 169 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_NAN_LEGACY; c 170 arch/mips/kernel/cpu-probe.c c->fpu_msk31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008; c 173 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_NAN_2008; c 174 arch/mips/kernel/cpu-probe.c c->fpu_csr31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008; c 175 arch/mips/kernel/cpu-probe.c c->fpu_msk31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008; c 178 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_NAN_2008 | MIPS_CPU_NAN_LEGACY; c 187 arch/mips/kernel/cpu-probe.c static void cpu_set_nan_2008(struct cpuinfo_mips *c) c 245 arch/mips/kernel/cpu-probe.c static void cpu_set_nofpu_id(struct cpuinfo_mips *c) c 250 arch/mips/kernel/cpu-probe.c if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 | c 254 arch/mips/kernel/cpu-probe.c if (c->isa_level & (MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 | c 257 arch/mips/kernel/cpu-probe.c if (c->options & MIPS_CPU_NAN_2008) c 259 arch/mips/kernel/cpu-probe.c c->fpu_id = value; c 268 arch/mips/kernel/cpu-probe.c static void cpu_set_fpu_opts(struct cpuinfo_mips *c) c 270 arch/mips/kernel/cpu-probe.c c->fpu_id = cpu_get_fpu_id(); c 271 arch/mips/kernel/cpu-probe.c mips_nofpu_msk31 = c->fpu_msk31; c 273 arch/mips/kernel/cpu-probe.c if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 | c 276 arch/mips/kernel/cpu-probe.c if (c->fpu_id & MIPS_FPIR_3D) c 277 arch/mips/kernel/cpu-probe.c c->ases |= MIPS_ASE_MIPS3D; c 278 arch/mips/kernel/cpu-probe.c if (c->fpu_id & MIPS_FPIR_UFRP) c 279
arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_UFR; c 280 arch/mips/kernel/cpu-probe.c if (c->fpu_id & MIPS_FPIR_FREP) c 281 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_FRE; c 284 arch/mips/kernel/cpu-probe.c cpu_set_fpu_fcsr_mask(c); c 285 arch/mips/kernel/cpu-probe.c cpu_set_fpu_2008(c); c 286 arch/mips/kernel/cpu-probe.c cpu_set_nan_2008(c); c 292 arch/mips/kernel/cpu-probe.c static void cpu_set_nofpu_opts(struct cpuinfo_mips *c) c 294 arch/mips/kernel/cpu-probe.c c->options &= ~MIPS_CPU_FPU; c 295 arch/mips/kernel/cpu-probe.c c->fpu_msk31 = mips_nofpu_msk31; c 297 arch/mips/kernel/cpu-probe.c cpu_set_nofpu_2008(c); c 298 arch/mips/kernel/cpu-probe.c cpu_set_nan_2008(c); c 299 arch/mips/kernel/cpu-probe.c cpu_set_nofpu_id(c); c 328 arch/mips/kernel/cpu-probe.c static void cpu_set_fpu_opts(struct cpuinfo_mips *c) c 333 arch/mips/kernel/cpu-probe.c static void cpu_set_nofpu_opts(struct cpuinfo_mips *c) c 387 arch/mips/kernel/cpu-probe.c static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags); c 444 arch/mips/kernel/cpu-probe.c static inline void cpu_set_mt_per_tc_perf(struct cpuinfo_mips *c) c 447 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_MT_PER_TC_PERF_COUNTERS; c 452 arch/mips/kernel/cpu-probe.c struct cpuinfo_mips *c = &current_cpu_data; c 461 arch/mips/kernel/cpu-probe.c if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2) c 502 arch/mips/kernel/cpu-probe.c static inline void cpu_probe_vmbits(struct cpuinfo_mips *c) c 507 arch/mips/kernel/cpu-probe.c c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL); c 511 arch/mips/kernel/cpu-probe.c static void set_isa(struct cpuinfo_mips *c, unsigned int isa) c 515 arch/mips/kernel/cpu-probe.c c->isa_level |= MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2; c 518 arch/mips/kernel/cpu-probe.c c->isa_level |= MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1; c 521 arch/mips/kernel/cpu-probe.c c->isa_level |= MIPS_CPU_ISA_V; c 524 arch/mips/kernel/cpu-probe.c c->isa_level |= MIPS_CPU_ISA_IV; c 527 arch/mips/kernel/cpu-probe.c c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III; c 532 arch/mips/kernel/cpu-probe.c c->isa_level |= MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6; c 535 arch/mips/kernel/cpu-probe.c c->isa_level |= MIPS_CPU_ISA_M32R6; c 539 arch/mips/kernel/cpu-probe.c c->isa_level |= MIPS_CPU_ISA_M32R2; c 542 arch/mips/kernel/cpu-probe.c c->isa_level |= MIPS_CPU_ISA_M32R1; c 545 arch/mips/kernel/cpu-probe.c c->isa_level |= MIPS_CPU_ISA_II; c 553 arch/mips/kernel/cpu-probe.c static unsigned int calculate_ftlb_probability(struct cpuinfo_mips *c) c 556 arch/mips/kernel/cpu-probe.c unsigned int probability = c->tlbsize / c->tlbsizevtlb; c 579 arch/mips/kernel/cpu-probe.c static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags) c 584 arch/mips/kernel/cpu-probe.c switch (c->cputype) { c 598 arch/mips/kernel/cpu-probe.c config |= calculate_ftlb_probability(c) c 631 arch/mips/kernel/cpu-probe.c static inline unsigned int decode_config0(struct cpuinfo_mips *c) c 643 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_TLB; c 645 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_TLB | MIPS_CPU_FTLB; c 652 arch/mips/kernel/cpu-probe.c set_isa(c, MIPS_CPU_ISA_M32R1); c 655 arch/mips/kernel/cpu-probe.c set_isa(c, MIPS_CPU_ISA_M32R2); c 658 arch/mips/kernel/cpu-probe.c set_isa(c, MIPS_CPU_ISA_M32R6); c 667 arch/mips/kernel/cpu-probe.c set_isa(c, MIPS_CPU_ISA_M64R1); c 670 arch/mips/kernel/cpu-probe.c set_isa(c, MIPS_CPU_ISA_M64R2); c 673 arch/mips/kernel/cpu-probe.c set_isa(c, MIPS_CPU_ISA_M64R6); c 689
arch/mips/kernel/cpu-probe.c static inline unsigned int decode_config1(struct cpuinfo_mips *c) c 696 arch/mips/kernel/cpu-probe.c c->ases |= MIPS_ASE_MDMX; c 698 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_PERF; c 700 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_WATCH; c 702 arch/mips/kernel/cpu-probe.c c->ases |= MIPS_ASE_MIPS16; c 704 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_EJTAG; c 706 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_FPU; c 707 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_32FPR; c 710 arch/mips/kernel/cpu-probe.c c->tlbsize = ((config1 & MIPS_CONF1_TLBS) >> 25) + 1; c 711 arch/mips/kernel/cpu-probe.c c->tlbsizevtlb = c->tlbsize; c 712 arch/mips/kernel/cpu-probe.c c->tlbsizeftlbsets = 0; c 718 arch/mips/kernel/cpu-probe.c static inline unsigned int decode_config2(struct cpuinfo_mips *c) c 725 arch/mips/kernel/cpu-probe.c c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT; c 730 arch/mips/kernel/cpu-probe.c static inline unsigned int decode_config3(struct cpuinfo_mips *c) c 737 arch/mips/kernel/cpu-probe.c c->ases |= MIPS_ASE_SMARTMIPS; c 738 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_RIXI | MIPS_CPU_CTXTC; c 741 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_RIXI; c 743 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_CTXTC; c 745 arch/mips/kernel/cpu-probe.c c->ases |= MIPS_ASE_DSP; c 747 arch/mips/kernel/cpu-probe.c c->ases |= MIPS_ASE_DSP2P; c 749 arch/mips/kernel/cpu-probe.c c->ases |= MIPS_ASE_DSP3; c 752 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_VINT; c 754 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_VEIC; c 756 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_LPA; c 758 arch/mips/kernel/cpu-probe.c c->ases |= MIPS_ASE_MIPSMT; c 760 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_ULRI; c 762 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_MICROMIPS; c 764 arch/mips/kernel/cpu-probe.c c->ases |= MIPS_ASE_VZ; c 766 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_SEGMENTS; c 768 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_BADINSTR; c 770 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_BADINSTRP; c 772 arch/mips/kernel/cpu-probe.c c->ases |= MIPS_ASE_MSA; c 774 arch/mips/kernel/cpu-probe.c c->htw_seq = 0; c 775 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_HTW; c 778 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_CDMM; c 780 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_SP; c 785 arch/mips/kernel/cpu-probe.c static inline unsigned int decode_config4(struct cpuinfo_mips *c) c 797 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_TLBINV; c 813 arch/mips/kernel/cpu-probe.c c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40; c 814 arch/mips/kernel/cpu-probe.c c->tlbsizevtlb = c->tlbsize; c 817 arch/mips/kernel/cpu-probe.c c->tlbsizevtlb += c 820 arch/mips/kernel/cpu-probe.c c->tlbsize = c->tlbsizevtlb; c 836 arch/mips/kernel/cpu-probe.c set_ftlb_enable(c, 0); c 840 arch/mips/kernel/cpu-probe.c c->tlbsizeftlbsets = 1 << c 843 arch/mips/kernel/cpu-probe.c c->tlbsizeftlbways = ((config4 & MIPS_CONF4_FTLBWAYS) >> c 845 arch/mips/kernel/cpu-probe.c c->tlbsize += c->tlbsizeftlbways * c->tlbsizeftlbsets; c 851 arch/mips/kernel/cpu-probe.c c->kscratch_mask = (config4 & MIPS_CONF4_KSCREXIST) c 857 arch/mips/kernel/cpu-probe.c set_cpu_asid_mask(c, asid_mask); c 864 arch/mips/kernel/cpu-probe.c WARN_ON(asid_mask != cpu_asid_mask(c)); c 869 arch/mips/kernel/cpu-probe.c static inline unsigned int decode_config5(struct cpuinfo_mips *c) c 887 arch/mips/kernel/cpu-probe.c 
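decode_config1() above extracts packed fields from the MIPS Config1 register with mask-and-shift, e.g. the TLB size as ((config1 & MIPS_CONF1_TLBS) >> 25) + 1. A standalone illustration of the same decoding; the SK_ macros are local stand-ins mirroring that usage, and the register value is invented:

        #include <stdio.h>

        #define SK_CONF1_TLBS_SHIFT     25
        #define SK_CONF1_TLBS           (0x3fu << SK_CONF1_TLBS_SHIFT)  /* MMU size - 1 */
        #define SK_CONF1_FP             (1u << 0)                       /* FPU present  */

        int main(void)
        {
                unsigned int config1 = 0x5e000001u;     /* made-up register value */
                unsigned int tlbsize =
                        ((config1 & SK_CONF1_TLBS) >> SK_CONF1_TLBS_SHIFT) + 1;

                printf("TLB entries: %u, FPU: %s\n",
                       tlbsize, (config1 & SK_CONF1_FP) ? "yes" : "no");
                return 0;
        }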
c->options |= MIPS_CPU_EVA; c 889 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_MAAR; c 891 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_RW_LLB; c 893 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_MVH; c 895 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_VP; c 897 arch/mips/kernel/cpu-probe.c c->ases |= MIPS_ASE_MIPS16E2; c 909 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_MMID; c 942 arch/mips/kernel/cpu-probe.c set_cpu_asid_mask(c, asid_mask); c 949 arch/mips/kernel/cpu-probe.c static void decode_configs(struct cpuinfo_mips *c) c 954 arch/mips/kernel/cpu-probe.c c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER | c 957 arch/mips/kernel/cpu-probe.c c->scache.flags = MIPS_CACHE_NOT_PRESENT; c 960 arch/mips/kernel/cpu-probe.c set_ftlb_enable(c, mips_ftlb_disabled ? 0 : FTLB_EN); c 962 arch/mips/kernel/cpu-probe.c ok = decode_config0(c); /* Read Config registers. */ c 965 arch/mips/kernel/cpu-probe.c ok = decode_config1(c); c 967 arch/mips/kernel/cpu-probe.c ok = decode_config2(c); c 969 arch/mips/kernel/cpu-probe.c ok = decode_config3(c); c 971 arch/mips/kernel/cpu-probe.c ok = decode_config4(c); c 973 arch/mips/kernel/cpu-probe.c ok = decode_config5(c); c 985 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_EBASE_WG; c 1004 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_EBASE_WG; c 1011 arch/mips/kernel/cpu-probe.c set_ftlb_enable(c, (mips_ftlb_disabled ? 0 : FTLB_EN) | FTLB_SET_PROB); c 1013 arch/mips/kernel/cpu-probe.c mips_probe_watch_registers(c); c 1022 arch/mips/kernel/cpu-probe.c cpu_set_core(c, core); c 1055 arch/mips/kernel/cpu-probe.c static inline unsigned int decode_guest_config0(struct cpuinfo_mips *c) c 1062 arch/mips/kernel/cpu-probe.c c->guest.conf |= BIT(1); c 1066 arch/mips/kernel/cpu-probe.c static inline unsigned int decode_guest_config1(struct cpuinfo_mips *c) c 1075 arch/mips/kernel/cpu-probe.c c->guest.options |= MIPS_CPU_FPU; c 1077 arch/mips/kernel/cpu-probe.c c->guest.options_dyn |= MIPS_CPU_FPU; c 1080 arch/mips/kernel/cpu-probe.c c->guest.options |= MIPS_CPU_WATCH; c 1082 arch/mips/kernel/cpu-probe.c c->guest.options_dyn |= MIPS_CPU_WATCH; c 1085 arch/mips/kernel/cpu-probe.c c->guest.options |= MIPS_CPU_PERF; c 1087 arch/mips/kernel/cpu-probe.c c->guest.options_dyn |= MIPS_CPU_PERF; c 1090 arch/mips/kernel/cpu-probe.c c->guest.conf |= BIT(2); c 1094 arch/mips/kernel/cpu-probe.c static inline unsigned int decode_guest_config2(struct cpuinfo_mips *c) c 1101 arch/mips/kernel/cpu-probe.c c->guest.conf |= BIT(3); c 1105 arch/mips/kernel/cpu-probe.c static inline unsigned int decode_guest_config3(struct cpuinfo_mips *c) c 1114 arch/mips/kernel/cpu-probe.c c->guest.options |= MIPS_CPU_CTXTC; c 1116 arch/mips/kernel/cpu-probe.c c->guest.options_dyn |= MIPS_CPU_CTXTC; c 1119 arch/mips/kernel/cpu-probe.c c->guest.options |= MIPS_CPU_HTW; c 1122 arch/mips/kernel/cpu-probe.c c->guest.options |= MIPS_CPU_ULRI; c 1125 arch/mips/kernel/cpu-probe.c c->guest.options |= MIPS_CPU_SEGMENTS; c 1128 arch/mips/kernel/cpu-probe.c c->guest.options |= MIPS_CPU_BADINSTR; c 1130 arch/mips/kernel/cpu-probe.c c->guest.options |= MIPS_CPU_BADINSTRP; c 1133 arch/mips/kernel/cpu-probe.c c->guest.ases |= MIPS_ASE_MSA; c 1135 arch/mips/kernel/cpu-probe.c c->guest.ases_dyn |= MIPS_ASE_MSA; c 1138 arch/mips/kernel/cpu-probe.c c->guest.conf |= BIT(4); c 1142 arch/mips/kernel/cpu-probe.c static inline unsigned int decode_guest_config4(struct cpuinfo_mips *c) c 1149 arch/mips/kernel/cpu-probe.c c->guest.kscratch_mask = (config4 & 
MIPS_CONF4_KSCREXIST) c 1153 arch/mips/kernel/cpu-probe.c c->guest.conf |= BIT(5); c 1157 arch/mips/kernel/cpu-probe.c static inline unsigned int decode_guest_config5(struct cpuinfo_mips *c) c 1165 arch/mips/kernel/cpu-probe.c c->guest.options |= MIPS_CPU_MAAR; c 1167 arch/mips/kernel/cpu-probe.c c->guest.options_dyn |= MIPS_CPU_MAAR; c 1170 arch/mips/kernel/cpu-probe.c c->guest.options |= MIPS_CPU_RW_LLB; c 1173 arch/mips/kernel/cpu-probe.c c->guest.options |= MIPS_CPU_MVH; c 1176 arch/mips/kernel/cpu-probe.c c->guest.conf |= BIT(6); c 1180 arch/mips/kernel/cpu-probe.c static inline void decode_guest_configs(struct cpuinfo_mips *c) c 1184 arch/mips/kernel/cpu-probe.c ok = decode_guest_config0(c); c 1186 arch/mips/kernel/cpu-probe.c ok = decode_guest_config1(c); c 1188 arch/mips/kernel/cpu-probe.c ok = decode_guest_config2(c); c 1190 arch/mips/kernel/cpu-probe.c ok = decode_guest_config3(c); c 1192 arch/mips/kernel/cpu-probe.c ok = decode_guest_config4(c); c 1194 arch/mips/kernel/cpu-probe.c decode_guest_config5(c); c 1197 arch/mips/kernel/cpu-probe.c static inline void cpu_probe_guestctl0(struct cpuinfo_mips *c) c 1204 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_GUESTCTL0EXT; c 1206 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_GUESTCTL1; c 1208 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_GUESTCTL2; c 1210 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_GUESTID; c 1226 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_DRG; c 1231 arch/mips/kernel/cpu-probe.c static inline void cpu_probe_guestctl1(struct cpuinfo_mips *c) c 1237 arch/mips/kernel/cpu-probe.c c->guestid_mask = (read_c0_guestctl1() & MIPS_GCTL1_ID) c 1243 arch/mips/kernel/cpu-probe.c static inline void cpu_probe_gtoffset(struct cpuinfo_mips *c) c 1248 arch/mips/kernel/cpu-probe.c c->gtoffset_mask = read_c0_gtoffset(); c 1252 arch/mips/kernel/cpu-probe.c static inline void cpu_probe_vz(struct cpuinfo_mips *c) c 1254 arch/mips/kernel/cpu-probe.c cpu_probe_guestctl0(c); c 1256 arch/mips/kernel/cpu-probe.c cpu_probe_guestctl1(c); c 1258 arch/mips/kernel/cpu-probe.c cpu_probe_gtoffset(c); c 1260 arch/mips/kernel/cpu-probe.c decode_guest_configs(c); c 1266 arch/mips/kernel/cpu-probe.c static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) c 1268 arch/mips/kernel/cpu-probe.c switch (c->processor_id & PRID_IMP_MASK) { c 1270 arch/mips/kernel/cpu-probe.c c->cputype = CPU_R2000; c 1272 arch/mips/kernel/cpu-probe.c c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS; c 1273 arch/mips/kernel/cpu-probe.c c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE | c 1276 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_FPU; c 1277 arch/mips/kernel/cpu-probe.c c->tlbsize = 64; c 1280 arch/mips/kernel/cpu-probe.c if ((c->processor_id & PRID_REV_MASK) == PRID_REV_R3000A) { c 1282 arch/mips/kernel/cpu-probe.c c->cputype = CPU_R3081E; c 1285 arch/mips/kernel/cpu-probe.c c->cputype = CPU_R3000A; c 1289 arch/mips/kernel/cpu-probe.c c->cputype = CPU_R3000; c 1292 arch/mips/kernel/cpu-probe.c c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS; c 1293 arch/mips/kernel/cpu-probe.c c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE | c 1296 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_FPU; c 1297 arch/mips/kernel/cpu-probe.c c->tlbsize = 64; c 1301 arch/mips/kernel/cpu-probe.c if ((c->processor_id & PRID_REV_MASK) >= c 1303 arch/mips/kernel/cpu-probe.c c->cputype = CPU_R4400PC; c 1306 arch/mips/kernel/cpu-probe.c c->cputype = CPU_R4000PC; c 1330 arch/mips/kernel/cpu-probe.c if ((c->processor_id & PRID_REV_MASK) >= c 1332 
arch/mips/kernel/cpu-probe.c c->cputype = mc ? CPU_R4400MC : CPU_R4400SC; c 1335 arch/mips/kernel/cpu-probe.c c->cputype = mc ? CPU_R4000MC : CPU_R4000SC; c 1340 arch/mips/kernel/cpu-probe.c set_isa(c, MIPS_CPU_ISA_III); c 1341 arch/mips/kernel/cpu-probe.c c->fpu_msk31 |= FPU_CSR_CONDX; c 1342 arch/mips/kernel/cpu-probe.c c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | c 1345 arch/mips/kernel/cpu-probe.c c->tlbsize = 48; c 1348 arch/mips/kernel/cpu-probe.c set_isa(c, MIPS_CPU_ISA_III); c 1349 arch/mips/kernel/cpu-probe.c c->fpu_msk31 |= FPU_CSR_CONDX; c 1350 arch/mips/kernel/cpu-probe.c c->options = R4K_OPTS; c 1351 arch/mips/kernel/cpu-probe.c c->tlbsize = 32; c 1352 arch/mips/kernel/cpu-probe.c switch (c->processor_id & 0xf0) { c 1354 arch/mips/kernel/cpu-probe.c c->cputype = CPU_VR4111; c 1358 arch/mips/kernel/cpu-probe.c c->cputype = CPU_VR4121; c 1362 arch/mips/kernel/cpu-probe.c if ((c->processor_id & 0xf) < 0x3) { c 1363 arch/mips/kernel/cpu-probe.c c->cputype = CPU_VR4122; c 1366 arch/mips/kernel/cpu-probe.c c->cputype = CPU_VR4181A; c 1371 arch/mips/kernel/cpu-probe.c if ((c->processor_id & 0xf) < 0x4) { c 1372 arch/mips/kernel/cpu-probe.c c->cputype = CPU_VR4131; c 1375 arch/mips/kernel/cpu-probe.c c->cputype = CPU_VR4133; c 1376 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_LLSC; c 1382 arch/mips/kernel/cpu-probe.c c->cputype = CPU_VR41XX; c 1388 arch/mips/kernel/cpu-probe.c c->cputype = CPU_R4600; c 1390 arch/mips/kernel/cpu-probe.c set_isa(c, MIPS_CPU_ISA_III); c 1391 arch/mips/kernel/cpu-probe.c c->fpu_msk31 |= FPU_CSR_CONDX; c 1392 arch/mips/kernel/cpu-probe.c c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | c 1394 arch/mips/kernel/cpu-probe.c c->tlbsize = 48; c 1404 arch/mips/kernel/cpu-probe.c c->cputype = CPU_R4650; c 1406 arch/mips/kernel/cpu-probe.c set_isa(c, MIPS_CPU_ISA_III); c 1407 arch/mips/kernel/cpu-probe.c c->fpu_msk31 |= FPU_CSR_CONDX; c 1408 arch/mips/kernel/cpu-probe.c c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC; c 1409 arch/mips/kernel/cpu-probe.c c->tlbsize = 48; c 1413 arch/mips/kernel/cpu-probe.c c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS; c 1414 arch/mips/kernel/cpu-probe.c c->options = MIPS_CPU_TLB | MIPS_CPU_TX39_CACHE; c 1416 arch/mips/kernel/cpu-probe.c if ((c->processor_id & 0xf0) == (PRID_REV_TX3927 & 0xf0)) { c 1417 arch/mips/kernel/cpu-probe.c c->cputype = CPU_TX3927; c 1419 arch/mips/kernel/cpu-probe.c c->tlbsize = 64; c 1421 arch/mips/kernel/cpu-probe.c switch (c->processor_id & PRID_REV_MASK) { c 1423 arch/mips/kernel/cpu-probe.c c->cputype = CPU_TX3912; c 1425 arch/mips/kernel/cpu-probe.c c->tlbsize = 32; c 1428 arch/mips/kernel/cpu-probe.c c->cputype = CPU_TX3922; c 1430 arch/mips/kernel/cpu-probe.c c->tlbsize = 64; c 1436 arch/mips/kernel/cpu-probe.c c->cputype = CPU_R4700; c 1438 arch/mips/kernel/cpu-probe.c set_isa(c, MIPS_CPU_ISA_III); c 1439 arch/mips/kernel/cpu-probe.c c->fpu_msk31 |= FPU_CSR_CONDX; c 1440 arch/mips/kernel/cpu-probe.c c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | c 1442 arch/mips/kernel/cpu-probe.c c->tlbsize = 48; c 1445 arch/mips/kernel/cpu-probe.c c->cputype = CPU_TX49XX; c 1447 arch/mips/kernel/cpu-probe.c set_isa(c, MIPS_CPU_ISA_III); c 1448 arch/mips/kernel/cpu-probe.c c->fpu_msk31 |= FPU_CSR_CONDX; c 1449 arch/mips/kernel/cpu-probe.c c->options = R4K_OPTS | MIPS_CPU_LLSC; c 1450 arch/mips/kernel/cpu-probe.c if (!(c->processor_id & 0x08)) c 1451 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_FPU | MIPS_CPU_32FPR; c 1452 arch/mips/kernel/cpu-probe.c c->tlbsize = 48; c 
1455 arch/mips/kernel/cpu-probe.c c->cputype = CPU_R5000; c 1457 arch/mips/kernel/cpu-probe.c set_isa(c, MIPS_CPU_ISA_IV); c 1458 arch/mips/kernel/cpu-probe.c c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | c 1460 arch/mips/kernel/cpu-probe.c c->tlbsize = 48; c 1463 arch/mips/kernel/cpu-probe.c c->cputype = CPU_R5500; c 1465 arch/mips/kernel/cpu-probe.c set_isa(c, MIPS_CPU_ISA_IV); c 1466 arch/mips/kernel/cpu-probe.c c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | c 1468 arch/mips/kernel/cpu-probe.c c->tlbsize = 48; c 1471 arch/mips/kernel/cpu-probe.c c->cputype = CPU_NEVADA; c 1473 arch/mips/kernel/cpu-probe.c set_isa(c, MIPS_CPU_ISA_IV); c 1474 arch/mips/kernel/cpu-probe.c c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | c 1476 arch/mips/kernel/cpu-probe.c c->tlbsize = 48; c 1479 arch/mips/kernel/cpu-probe.c c->cputype = CPU_RM7000; c 1481 arch/mips/kernel/cpu-probe.c set_isa(c, MIPS_CPU_ISA_IV); c 1482 arch/mips/kernel/cpu-probe.c c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | c 1492 arch/mips/kernel/cpu-probe.c c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48; c 1495 arch/mips/kernel/cpu-probe.c c->cputype = CPU_R10000; c 1497 arch/mips/kernel/cpu-probe.c set_isa(c, MIPS_CPU_ISA_IV); c 1498 arch/mips/kernel/cpu-probe.c c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX | c 1502 arch/mips/kernel/cpu-probe.c c->tlbsize = 64; c 1505 arch/mips/kernel/cpu-probe.c c->cputype = CPU_R12000; c 1507 arch/mips/kernel/cpu-probe.c set_isa(c, MIPS_CPU_ISA_IV); c 1508 arch/mips/kernel/cpu-probe.c c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX | c 1512 arch/mips/kernel/cpu-probe.c c->tlbsize = 64; c 1515 arch/mips/kernel/cpu-probe.c if (((c->processor_id >> 4) & 0x0f) > 2) { c 1516 arch/mips/kernel/cpu-probe.c c->cputype = CPU_R16000; c 1519 arch/mips/kernel/cpu-probe.c c->cputype = CPU_R14000; c 1522 arch/mips/kernel/cpu-probe.c set_isa(c, MIPS_CPU_ISA_IV); c 1523 arch/mips/kernel/cpu-probe.c c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX | c 1527 arch/mips/kernel/cpu-probe.c c->tlbsize = 64; c 1530 arch/mips/kernel/cpu-probe.c switch (c->processor_id & PRID_REV_MASK) { c 1532 arch/mips/kernel/cpu-probe.c c->cputype = CPU_LOONGSON2; c 1535 arch/mips/kernel/cpu-probe.c set_isa(c, MIPS_CPU_ISA_III); c 1536 arch/mips/kernel/cpu-probe.c c->fpu_msk31 |= FPU_CSR_CONDX; c 1539 arch/mips/kernel/cpu-probe.c c->cputype = CPU_LOONGSON2; c 1542 arch/mips/kernel/cpu-probe.c set_isa(c, MIPS_CPU_ISA_III); c 1543 arch/mips/kernel/cpu-probe.c c->fpu_msk31 |= FPU_CSR_CONDX; c 1546 arch/mips/kernel/cpu-probe.c c->cputype = CPU_LOONGSON3; c 1549 arch/mips/kernel/cpu-probe.c set_isa(c, MIPS_CPU_ISA_M64R1); c 1550 arch/mips/kernel/cpu-probe.c c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM | c 1555 arch/mips/kernel/cpu-probe.c c->cputype = CPU_LOONGSON3; c 1558 arch/mips/kernel/cpu-probe.c set_isa(c, MIPS_CPU_ISA_M64R1); c 1559 arch/mips/kernel/cpu-probe.c c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM | c 1564 arch/mips/kernel/cpu-probe.c c->options = R4K_OPTS | c 1567 arch/mips/kernel/cpu-probe.c c->tlbsize = 64; c 1568 arch/mips/kernel/cpu-probe.c c->writecombine = _CACHE_UNCACHED_ACCELERATED; c 1571 arch/mips/kernel/cpu-probe.c decode_configs(c); c 1573 arch/mips/kernel/cpu-probe.c c->cputype = CPU_LOONGSON1; c 1575 arch/mips/kernel/cpu-probe.c switch (c->processor_id & PRID_REV_MASK) { c 1585 arch/mips/kernel/cpu-probe.c static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) c 1587 
arch/mips/kernel/cpu-probe.c c->writecombine = _CACHE_UNCACHED_ACCELERATED; c 1588 arch/mips/kernel/cpu-probe.c switch (c->processor_id & PRID_IMP_MASK) { c 1590 arch/mips/kernel/cpu-probe.c c->writecombine = _CACHE_UNCACHED; c 1591 arch/mips/kernel/cpu-probe.c c->cputype = CPU_QEMU_GENERIC; c 1595 arch/mips/kernel/cpu-probe.c c->cputype = CPU_4KC; c 1596 arch/mips/kernel/cpu-probe.c c->writecombine = _CACHE_UNCACHED; c 1601 arch/mips/kernel/cpu-probe.c c->cputype = CPU_4KEC; c 1602 arch/mips/kernel/cpu-probe.c c->writecombine = _CACHE_UNCACHED; c 1607 arch/mips/kernel/cpu-probe.c c->cputype = CPU_4KSC; c 1608 arch/mips/kernel/cpu-probe.c c->writecombine = _CACHE_UNCACHED; c 1612 arch/mips/kernel/cpu-probe.c c->cputype = CPU_5KC; c 1613 arch/mips/kernel/cpu-probe.c c->writecombine = _CACHE_UNCACHED; c 1617 arch/mips/kernel/cpu-probe.c c->cputype = CPU_5KE; c 1618 arch/mips/kernel/cpu-probe.c c->writecombine = _CACHE_UNCACHED; c 1622 arch/mips/kernel/cpu-probe.c c->cputype = CPU_20KC; c 1623 arch/mips/kernel/cpu-probe.c c->writecombine = _CACHE_UNCACHED; c 1627 arch/mips/kernel/cpu-probe.c c->cputype = CPU_24K; c 1628 arch/mips/kernel/cpu-probe.c c->writecombine = _CACHE_UNCACHED; c 1632 arch/mips/kernel/cpu-probe.c c->cputype = CPU_24K; c 1633 arch/mips/kernel/cpu-probe.c c->writecombine = _CACHE_UNCACHED; c 1637 arch/mips/kernel/cpu-probe.c c->cputype = CPU_25KF; c 1638 arch/mips/kernel/cpu-probe.c c->writecombine = _CACHE_UNCACHED; c 1642 arch/mips/kernel/cpu-probe.c c->cputype = CPU_34K; c 1643 arch/mips/kernel/cpu-probe.c c->writecombine = _CACHE_UNCACHED; c 1645 arch/mips/kernel/cpu-probe.c cpu_set_mt_per_tc_perf(c); c 1648 arch/mips/kernel/cpu-probe.c c->cputype = CPU_74K; c 1649 arch/mips/kernel/cpu-probe.c c->writecombine = _CACHE_UNCACHED; c 1653 arch/mips/kernel/cpu-probe.c c->cputype = CPU_M14KC; c 1654 arch/mips/kernel/cpu-probe.c c->writecombine = _CACHE_UNCACHED; c 1658 arch/mips/kernel/cpu-probe.c c->cputype = CPU_M14KEC; c 1659 arch/mips/kernel/cpu-probe.c c->writecombine = _CACHE_UNCACHED; c 1663 arch/mips/kernel/cpu-probe.c c->cputype = CPU_1004K; c 1664 arch/mips/kernel/cpu-probe.c c->writecombine = _CACHE_UNCACHED; c 1666 arch/mips/kernel/cpu-probe.c cpu_set_mt_per_tc_perf(c); c 1669 arch/mips/kernel/cpu-probe.c c->cputype = CPU_1074K; c 1670 arch/mips/kernel/cpu-probe.c c->writecombine = _CACHE_UNCACHED; c 1674 arch/mips/kernel/cpu-probe.c c->cputype = CPU_INTERAPTIV; c 1676 arch/mips/kernel/cpu-probe.c cpu_set_mt_per_tc_perf(c); c 1679 arch/mips/kernel/cpu-probe.c c->cputype = CPU_INTERAPTIV; c 1681 arch/mips/kernel/cpu-probe.c cpu_set_mt_per_tc_perf(c); c 1684 arch/mips/kernel/cpu-probe.c c->cputype = CPU_PROAPTIV; c 1688 arch/mips/kernel/cpu-probe.c c->cputype = CPU_PROAPTIV; c 1692 arch/mips/kernel/cpu-probe.c c->cputype = CPU_P5600; c 1696 arch/mips/kernel/cpu-probe.c c->cputype = CPU_P6600; c 1700 arch/mips/kernel/cpu-probe.c c->cputype = CPU_I6400; c 1704 arch/mips/kernel/cpu-probe.c c->cputype = CPU_I6500; c 1708 arch/mips/kernel/cpu-probe.c c->cputype = CPU_M5150; c 1712 arch/mips/kernel/cpu-probe.c c->cputype = CPU_M6250; c 1717 arch/mips/kernel/cpu-probe.c decode_configs(c); c 1721 arch/mips/kernel/cpu-probe.c switch (__get_cpu_type(c->cputype)) { c 1723 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_SHARED_FTLB_ENTRIES; c 1726 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_SHARED_FTLB_RAM; c 1733 arch/mips/kernel/cpu-probe.c static inline void cpu_probe_alchemy(struct cpuinfo_mips *c, unsigned int cpu) c 1735 arch/mips/kernel/cpu-probe.c 
decode_configs(c); c 1736 arch/mips/kernel/cpu-probe.c switch (c->processor_id & PRID_IMP_MASK) { c 1739 arch/mips/kernel/cpu-probe.c c->cputype = CPU_ALCHEMY; c 1740 arch/mips/kernel/cpu-probe.c switch ((c->processor_id >> 24) & 0xff) { c 1755 arch/mips/kernel/cpu-probe.c if ((c->processor_id & PRID_REV_MASK) == 2) c 1769 arch/mips/kernel/cpu-probe.c static inline void cpu_probe_sibyte(struct cpuinfo_mips *c, unsigned int cpu) c 1771 arch/mips/kernel/cpu-probe.c decode_configs(c); c 1773 arch/mips/kernel/cpu-probe.c c->writecombine = _CACHE_UNCACHED_ACCELERATED; c 1774 arch/mips/kernel/cpu-probe.c switch (c->processor_id & PRID_IMP_MASK) { c 1776 arch/mips/kernel/cpu-probe.c c->cputype = CPU_SB1; c 1779 arch/mips/kernel/cpu-probe.c if ((c->processor_id & PRID_REV_MASK) < 0x02) c 1780 arch/mips/kernel/cpu-probe.c c->options &= ~(MIPS_CPU_FPU | MIPS_CPU_32FPR); c 1783 arch/mips/kernel/cpu-probe.c c->cputype = CPU_SB1A; c 1789 arch/mips/kernel/cpu-probe.c static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c, unsigned int cpu) c 1791 arch/mips/kernel/cpu-probe.c decode_configs(c); c 1792 arch/mips/kernel/cpu-probe.c switch (c->processor_id & PRID_IMP_MASK) { c 1794 arch/mips/kernel/cpu-probe.c c->cputype = CPU_SR71000; c 1796 arch/mips/kernel/cpu-probe.c c->scache.ways = 8; c 1797 arch/mips/kernel/cpu-probe.c c->tlbsize = 64; c 1802 arch/mips/kernel/cpu-probe.c static inline void cpu_probe_nxp(struct cpuinfo_mips *c, unsigned int cpu) c 1804 arch/mips/kernel/cpu-probe.c decode_configs(c); c 1805 arch/mips/kernel/cpu-probe.c switch (c->processor_id & PRID_IMP_MASK) { c 1807 arch/mips/kernel/cpu-probe.c c->cputype = CPU_PR4450; c 1809 arch/mips/kernel/cpu-probe.c set_isa(c, MIPS_CPU_ISA_M32R1); c 1814 arch/mips/kernel/cpu-probe.c static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu) c 1816 arch/mips/kernel/cpu-probe.c decode_configs(c); c 1817 arch/mips/kernel/cpu-probe.c switch (c->processor_id & PRID_IMP_MASK) { c 1820 arch/mips/kernel/cpu-probe.c c->cputype = CPU_BMIPS32; c 1827 arch/mips/kernel/cpu-probe.c c->cputype = CPU_BMIPS3300; c 1832 arch/mips/kernel/cpu-probe.c int rev = c->processor_id & PRID_REV_MASK; c 1836 arch/mips/kernel/cpu-probe.c c->cputype = CPU_BMIPS4380; c 1839 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_RIXI; c 1841 arch/mips/kernel/cpu-probe.c c->cputype = CPU_BMIPS4350; c 1849 arch/mips/kernel/cpu-probe.c c->cputype = CPU_BMIPS5000; c 1850 arch/mips/kernel/cpu-probe.c if ((c->processor_id & PRID_IMP_MASK) == PRID_IMP_BMIPS5200) c 1855 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_ULRI | MIPS_CPU_RIXI; c 1860 arch/mips/kernel/cpu-probe.c static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu) c 1862 arch/mips/kernel/cpu-probe.c decode_configs(c); c 1863 arch/mips/kernel/cpu-probe.c switch (c->processor_id & PRID_IMP_MASK) { c 1867 arch/mips/kernel/cpu-probe.c c->cputype = CPU_CAVIUM_OCTEON; c 1874 arch/mips/kernel/cpu-probe.c c->cputype = CPU_CAVIUM_OCTEON_PLUS; c 1884 arch/mips/kernel/cpu-probe.c c->cputype = CPU_CAVIUM_OCTEON2; c 1892 arch/mips/kernel/cpu-probe.c c->cputype = CPU_CAVIUM_OCTEON3; c 1898 arch/mips/kernel/cpu-probe.c c->cputype = CPU_UNKNOWN; c 1903 arch/mips/kernel/cpu-probe.c static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) c 1905 arch/mips/kernel/cpu-probe.c switch (c->processor_id & PRID_IMP_MASK) { c 1907 arch/mips/kernel/cpu-probe.c switch (c->processor_id & PRID_REV_MASK) { c 1910 arch/mips/kernel/cpu-probe.c c->cputype = CPU_LOONGSON3; c 1913 
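The cpu_probe_*() excerpts above all dispatch on masked fields of c->processor_id (the PRId register): company, implementation and revision. A hypothetical standalone illustration of splitting such an ID; the SK_ mask values mirror how PRID_COMP_MASK / PRID_IMP_MASK / PRID_REV_MASK are used above and are defined locally, and the sample value is made up:

        #include <stdio.h>

        #define SK_PRID_COMP_MASK       0x00ff0000u     /* company ID      */
        #define SK_PRID_IMP_MASK        0x0000ff00u     /* implementation  */
        #define SK_PRID_REV_MASK        0x000000ffu     /* revision        */

        int main(void)
        {
                unsigned int prid = 0x0001a800u;        /* invented PRId value */

                printf("company %02x, implementation %02x, revision %02x\n",
                       (prid & SK_PRID_COMP_MASK) >> 16,
                       (prid & SK_PRID_IMP_MASK) >> 8,
                       prid & SK_PRID_REV_MASK);
                return 0;
        }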
arch/mips/kernel/cpu-probe.c set_isa(c, MIPS_CPU_ISA_M64R2); c 1917 arch/mips/kernel/cpu-probe.c c->cputype = CPU_LOONGSON3; c 1920 arch/mips/kernel/cpu-probe.c set_isa(c, MIPS_CPU_ISA_M64R2); c 1924 arch/mips/kernel/cpu-probe.c decode_configs(c); c 1925 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE; c 1926 arch/mips/kernel/cpu-probe.c c->writecombine = _CACHE_UNCACHED_ACCELERATED; c 1927 arch/mips/kernel/cpu-probe.c c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM | c 1936 arch/mips/kernel/cpu-probe.c static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu) c 1938 arch/mips/kernel/cpu-probe.c decode_configs(c); c 1944 arch/mips/kernel/cpu-probe.c decode_config3(c); c 1947 arch/mips/kernel/cpu-probe.c c->options &= ~MIPS_CPU_COUNTER; c 1950 arch/mips/kernel/cpu-probe.c switch (c->processor_id & PRID_IMP_MASK) { c 1952 arch/mips/kernel/cpu-probe.c c->cputype = CPU_XBURST; c 1953 arch/mips/kernel/cpu-probe.c c->writecombine = _CACHE_UNCACHED_ACCELERATED; c 1973 arch/mips/kernel/cpu-probe.c if ((c->processor_id & PRID_COMP_MASK) == PRID_COMP_INGENIC_D0) c 1974 arch/mips/kernel/cpu-probe.c c->isa_level &= ~MIPS_CPU_ISA_M32R2; c 1977 arch/mips/kernel/cpu-probe.c static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu) c 1979 arch/mips/kernel/cpu-probe.c decode_configs(c); c 1981 arch/mips/kernel/cpu-probe.c if ((c->processor_id & PRID_IMP_MASK) == PRID_IMP_NETLOGIC_AU13XX) { c 1982 arch/mips/kernel/cpu-probe.c c->cputype = CPU_ALCHEMY; c 1988 arch/mips/kernel/cpu-probe.c c->options = (MIPS_CPU_TLB | c 1996 arch/mips/kernel/cpu-probe.c switch (c->processor_id & PRID_IMP_MASK) { c 2000 arch/mips/kernel/cpu-probe.c c->cputype = CPU_XLP; c 2006 arch/mips/kernel/cpu-probe.c c->cputype = CPU_XLP; c 2018 arch/mips/kernel/cpu-probe.c c->cputype = CPU_XLR; c 2035 arch/mips/kernel/cpu-probe.c c->cputype = CPU_XLR; c 2041 arch/mips/kernel/cpu-probe.c c->processor_id); c 2042 arch/mips/kernel/cpu-probe.c c->cputype = CPU_XLR; c 2046 arch/mips/kernel/cpu-probe.c if (c->cputype == CPU_XLP) { c 2047 arch/mips/kernel/cpu-probe.c set_isa(c, MIPS_CPU_ISA_M64R2); c 2048 arch/mips/kernel/cpu-probe.c c->options |= (MIPS_CPU_FPU | MIPS_CPU_ULRI | MIPS_CPU_MCHECK); c 2050 arch/mips/kernel/cpu-probe.c c->tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1; c 2052 arch/mips/kernel/cpu-probe.c set_isa(c, MIPS_CPU_ISA_M64R1); c 2053 arch/mips/kernel/cpu-probe.c c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1; c 2055 arch/mips/kernel/cpu-probe.c c->kscratch_mask = 0xf; c 2069 arch/mips/kernel/cpu-probe.c struct cpuinfo_mips *c = &current_cpu_data; c 2078 arch/mips/kernel/cpu-probe.c c->processor_id = PRID_IMP_UNKNOWN; c 2079 arch/mips/kernel/cpu-probe.c c->fpu_id = FPIR_IMP_NONE; c 2080 arch/mips/kernel/cpu-probe.c c->cputype = CPU_UNKNOWN; c 2081 arch/mips/kernel/cpu-probe.c c->writecombine = _CACHE_UNCACHED; c 2083 arch/mips/kernel/cpu-probe.c c->fpu_csr31 = FPU_CSR_RN; c 2084 arch/mips/kernel/cpu-probe.c c->fpu_msk31 = FPU_CSR_RSVD | FPU_CSR_ABS2008 | FPU_CSR_NAN2008; c 2086 arch/mips/kernel/cpu-probe.c c->processor_id = read_c0_prid(); c 2087 arch/mips/kernel/cpu-probe.c switch (c->processor_id & PRID_COMP_MASK) { c 2089 arch/mips/kernel/cpu-probe.c cpu_probe_legacy(c, cpu); c 2092 arch/mips/kernel/cpu-probe.c cpu_probe_mips(c, cpu); c 2095 arch/mips/kernel/cpu-probe.c cpu_probe_alchemy(c, cpu); c 2098 arch/mips/kernel/cpu-probe.c cpu_probe_sibyte(c, cpu); c 2101 arch/mips/kernel/cpu-probe.c cpu_probe_broadcom(c, cpu); c 2104
arch/mips/kernel/cpu-probe.c cpu_probe_sandcraft(c, cpu); c 2107 arch/mips/kernel/cpu-probe.c cpu_probe_nxp(c, cpu); c 2110 arch/mips/kernel/cpu-probe.c cpu_probe_cavium(c, cpu); c 2113 arch/mips/kernel/cpu-probe.c cpu_probe_loongson(c, cpu); c 2118 arch/mips/kernel/cpu-probe.c cpu_probe_ingenic(c, cpu); c 2121 arch/mips/kernel/cpu-probe.c cpu_probe_netlogic(c, cpu); c 2126 arch/mips/kernel/cpu-probe.c BUG_ON(c->cputype == CPU_UNKNOWN); c 2133 arch/mips/kernel/cpu-probe.c BUG_ON(current_cpu_type() != c->cputype); c 2141 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_RIXIEX; c 2145 arch/mips/kernel/cpu-probe.c c->options &= ~MIPS_CPU_FPU; c 2148 arch/mips/kernel/cpu-probe.c c->ases &= ~(MIPS_ASE_DSP | MIPS_ASE_DSP2P); c 2151 arch/mips/kernel/cpu-probe.c c->options &= ~MIPS_CPU_HTW; c 2156 arch/mips/kernel/cpu-probe.c if (c->options & MIPS_CPU_FPU) c 2157 arch/mips/kernel/cpu-probe.c cpu_set_fpu_opts(c); c 2159 arch/mips/kernel/cpu-probe.c cpu_set_nofpu_opts(c); c 2166 arch/mips/kernel/cpu-probe.c c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1; c 2168 arch/mips/kernel/cpu-probe.c c->options |= MIPS_CPU_PCI; c 2171 arch/mips/kernel/cpu-probe.c c->srsets = 1; c 2177 arch/mips/kernel/cpu-probe.c c->msa_id = cpu_get_msa_id(); c 2178 arch/mips/kernel/cpu-probe.c WARN(c->msa_id & MSA_IR_WRPF, c 2217 arch/mips/kernel/cpu-probe.c cpu_probe_vz(c); c 2219 arch/mips/kernel/cpu-probe.c cpu_probe_vmbits(c); c 2229 arch/mips/kernel/cpu-probe.c struct cpuinfo_mips *c = &current_cpu_data; c 2232 arch/mips/kernel/cpu-probe.c smp_processor_id(), c->processor_id, cpu_name_string()); c 2233 arch/mips/kernel/cpu-probe.c if (c->options & MIPS_CPU_FPU) c 2234 arch/mips/kernel/cpu-probe.c printk(KERN_INFO "FPU revision is: %08x\n", c->fpu_id); c 2236 arch/mips/kernel/cpu-probe.c pr_info("MSA revision is: %08x\n", c->msa_id); c 34 arch/mips/kernel/early_printk_8250.c void prom_putchar(char c) c 53 arch/mips/kernel/early_printk_8250.c serial_out(UART_TX, c); c 309 arch/mips/kernel/elf.c struct cpuinfo_mips *c = &boot_cpu_data; c 312 arch/mips/kernel/elf.c t->thread.fpu.fcr31 = c->fpu_csr31; c 317 arch/mips/kernel/elf.c if (!(c->fpu_msk31 & FPU_CSR_NAN2008)) c 319 arch/mips/kernel/elf.c if (!(c->fpu_msk31 & FPU_CSR_ABS2008)) c 128 arch/mips/kernel/idle.c struct cpuinfo_mips *c = &current_cpu_data; c 182 arch/mips/kernel/idle.c if ((c->processor_id & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2_0) c 220 arch/mips/kernel/idle.c if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0)) c 236 arch/mips/kernel/idle.c if ((c->processor_id & 0xff) <= 0x64) c 32 arch/mips/kernel/smp-cmp.c struct cpuinfo_mips *c __maybe_unused = &current_cpu_data; c 42 arch/mips/kernel/smp-cmp.c cpu_set_vpe_id(c, (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & c 50 arch/mips/kernel/smp-cps.c int cl, c, v; c 62 arch/mips/kernel/smp-cps.c for (c = 0; c < ncores; c++) { c 63 arch/mips/kernel/smp-cps.c core_vpes = core_vpe_count(cl, c); c 65 arch/mips/kernel/smp-cps.c if (c > 0) c 70 arch/mips/kernel/smp-cps.c if (!cl && !c) c 75 arch/mips/kernel/smp-cps.c cpu_set_core(&cpu_data[nvpes + v], c); c 120 arch/mips/kernel/smp-cps.c unsigned ncores, core_vpes, c, cca; c 143 arch/mips/kernel/smp-cps.c for_each_present_cpu(c) { c 144 arch/mips/kernel/smp-cps.c if (cpus_are_siblings(smp_processor_id(), c)) c 147 arch/mips/kernel/smp-cps.c set_cpu_present(c, false); c 180 arch/mips/kernel/smp-cps.c for (c = 0; c < ncores; c++) { c 181 arch/mips/kernel/smp-cps.c core_vpes = core_vpe_count(0, c); c 182 arch/mips/kernel/smp-cps.c mips_cps_core_bootcfg[c].vpe_config =
kcalloc(core_vpes, c 183 arch/mips/kernel/smp-cps.c sizeof(*mips_cps_core_bootcfg[c].vpe_config), c 185 arch/mips/kernel/smp-cps.c if (!mips_cps_core_bootcfg[c].vpe_config) { c 200 arch/mips/kernel/smp-cps.c for (c = 0; c < ncores; c++) c 201 arch/mips/kernel/smp-cps.c kfree(mips_cps_core_bootcfg[c].vpe_config); c 207 arch/mips/kernel/smp-cps.c for_each_possible_cpu(c) { c 208 arch/mips/kernel/smp-cps.c if (c == 0) c 210 arch/mips/kernel/smp-cps.c set_cpu_present(c, false); c 21 arch/mips/kernel/topology.c struct cpu *c = &per_cpu(cpu_devices, i); c 23 arch/mips/kernel/topology.c c->hotpluggable = 1; c 24 arch/mips/kernel/topology.c ret = register_cpu(c, i); c 118 arch/mips/kernel/watch.c void mips_probe_watch_registers(struct cpuinfo_mips *c) c 122 arch/mips/kernel/watch.c if ((c->options & MIPS_CPU_WATCH) == 0) c 132 arch/mips/kernel/watch.c c->watch_reg_masks[0] = t & MIPS_WATCHLO_IRW; c 136 arch/mips/kernel/watch.c c->watch_reg_count = 1; c 137 arch/mips/kernel/watch.c c->watch_reg_use_cnt = 1; c 142 arch/mips/kernel/watch.c c->watch_reg_masks[0] |= (t & MIPS_WATCHHI_MASK); c 150 arch/mips/kernel/watch.c c->watch_reg_masks[1] = t & MIPS_WATCHLO_IRW; c 152 arch/mips/kernel/watch.c c->watch_reg_count = 2; c 153 arch/mips/kernel/watch.c c->watch_reg_use_cnt = 2; c 158 arch/mips/kernel/watch.c c->watch_reg_masks[1] |= (t & MIPS_WATCHHI_MASK); c 166 arch/mips/kernel/watch.c c->watch_reg_masks[2] = t & MIPS_WATCHLO_IRW; c 168 arch/mips/kernel/watch.c c->watch_reg_count = 3; c 169 arch/mips/kernel/watch.c c->watch_reg_use_cnt = 3; c 174 arch/mips/kernel/watch.c c->watch_reg_masks[2] |= (t & MIPS_WATCHHI_MASK); c 182 arch/mips/kernel/watch.c c->watch_reg_masks[3] = t & MIPS_WATCHLO_IRW; c 184 arch/mips/kernel/watch.c c->watch_reg_count = 4; c 185 arch/mips/kernel/watch.c c->watch_reg_use_cnt = 4; c 190 arch/mips/kernel/watch.c c->watch_reg_masks[3] |= (t & MIPS_WATCHHI_MASK); c 195 arch/mips/kernel/watch.c c->watch_reg_count = 5; c 200 arch/mips/kernel/watch.c c->watch_reg_count = 6; c 205 arch/mips/kernel/watch.c c->watch_reg_count = 7; c 210 arch/mips/kernel/watch.c c->watch_reg_count = 8; c 21 arch/mips/lantiq/early_printk.c void prom_putchar(char c) c 27 arch/mips/lantiq/early_printk.c if (c == '\n') c 29 arch/mips/lantiq/early_printk.c ltq_w8(c, LTQ_ASC_TBUF); c 83 arch/mips/lasat/lasat_board.c int c; c 236 arch/mips/lasat/lasat_board.c c = lasat_board_info.li_prid; c 237 arch/mips/lasat/lasat_board.c if (c >= i_n_prids) { c 241 arch/mips/lasat/lasat_board.c ppi = &vendor_info_table[0].vi_product_info[c]; c 246 arch/mips/lasat/lasat_board.c sprintf(lasat_board_info.li_typestr, "%d", 10 * c); c 36 arch/mips/lasat/prom.c static void null_prom_putc(char c) c 41 arch/mips/lasat/prom.c static void (*__prom_putc)(char c) = null_prom_putc; c 43 arch/mips/lasat/prom.c void prom_putchar(char c) c 45 arch/mips/lasat/prom.c __prom_putc(c); c 84 arch/mips/lasat/sysctl.c char *p, c; c 98 arch/mips/lasat/sysctl.c if (get_user(c, p++)) c 100 arch/mips/lasat/sysctl.c if (c == 0 || c == '\n') c 42 arch/mips/loongson32/common/prom.c char *c = &(arcs_cmdline[0]); c 46 arch/mips/loongson32/common/prom.c strcpy(c, prom_argv[i]); c 47 arch/mips/loongson32/common/prom.c c += strlen(prom_argv[i]); c 49 arch/mips/loongson32/common/prom.c *c++ = ' '; c 51 arch/mips/loongson32/common/prom.c *c = 0; c 25 arch/mips/loongson64/common/early_printk.c void prom_putchar(char c) c 37 arch/mips/loongson64/common/early_printk.c serial_out(uart_base, UART_TX, c); c 18 arch/mips/loongson64/common/platform.c struct cpuinfo_mips 
*c = &current_cpu_data; c 21 arch/mips/loongson64/common/platform.c if ((c->processor_id & PRID_REV_MASK) >= PRID_REV_LOONGSON2F) c 144 arch/mips/math-emu/ieee754.h __BITFIELD_FIELD(unsigned c:1, /* condition[0] */ c 173 arch/mips/mm/c-octeon.c struct cpuinfo_mips *c = &current_cpu_data; c 180 arch/mips/mm/c-octeon.c c->icache.linesz = 2 << ((config1 >> 19) & 7); c 181 arch/mips/mm/c-octeon.c c->icache.sets = 64 << ((config1 >> 22) & 7); c 182 arch/mips/mm/c-octeon.c c->icache.ways = 1 + ((config1 >> 16) & 7); c 183 arch/mips/mm/c-octeon.c c->icache.flags |= MIPS_CACHE_VTAG; c 185 arch/mips/mm/c-octeon.c c->icache.sets * c->icache.ways * c->icache.linesz; c 186 arch/mips/mm/c-octeon.c c->icache.waybit = ffs(icache_size / c->icache.ways) - 1; c 187 arch/mips/mm/c-octeon.c c->dcache.linesz = 128; c 189 arch/mips/mm/c-octeon.c c->dcache.sets = 2; /* CN5XXX has two Dcache sets */ c 191 arch/mips/mm/c-octeon.c c->dcache.sets = 1; /* CN3XXX has one Dcache set */ c 192 arch/mips/mm/c-octeon.c c->dcache.ways = 64; c 194 arch/mips/mm/c-octeon.c c->dcache.sets * c->dcache.ways * c->dcache.linesz; c 195 arch/mips/mm/c-octeon.c c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1; c 196 arch/mips/mm/c-octeon.c c->options |= MIPS_CPU_PREFETCH; c 200 arch/mips/mm/c-octeon.c c->icache.linesz = 2 << ((config1 >> 19) & 7); c 201 arch/mips/mm/c-octeon.c c->icache.sets = 8; c 202 arch/mips/mm/c-octeon.c c->icache.ways = 37; c 203 arch/mips/mm/c-octeon.c c->icache.flags |= MIPS_CACHE_VTAG; c 204 arch/mips/mm/c-octeon.c icache_size = c->icache.sets * c->icache.ways * c->icache.linesz; c 206 arch/mips/mm/c-octeon.c c->dcache.linesz = 128; c 207 arch/mips/mm/c-octeon.c c->dcache.ways = 32; c 208 arch/mips/mm/c-octeon.c c->dcache.sets = 8; c 209 arch/mips/mm/c-octeon.c dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz; c 210 arch/mips/mm/c-octeon.c c->options |= MIPS_CPU_PREFETCH; c 214 arch/mips/mm/c-octeon.c c->icache.linesz = 128; c 215 arch/mips/mm/c-octeon.c c->icache.sets = 16; c 216 arch/mips/mm/c-octeon.c c->icache.ways = 39; c 217 arch/mips/mm/c-octeon.c c->icache.flags |= MIPS_CACHE_VTAG; c 218 arch/mips/mm/c-octeon.c icache_size = c->icache.sets * c->icache.ways * c->icache.linesz; c 220 arch/mips/mm/c-octeon.c c->dcache.linesz = 128; c 221 arch/mips/mm/c-octeon.c c->dcache.ways = 32; c 222 arch/mips/mm/c-octeon.c c->dcache.sets = 8; c 223 arch/mips/mm/c-octeon.c dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz; c 224 arch/mips/mm/c-octeon.c c->options |= MIPS_CPU_PREFETCH; c 233 arch/mips/mm/c-octeon.c c->icache.waysize = icache_size / c->icache.ways; c 234 arch/mips/mm/c-octeon.c c->dcache.waysize = dcache_size / c->dcache.ways; c 236 arch/mips/mm/c-octeon.c c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways); c 237 arch/mips/mm/c-octeon.c c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways); c 245 arch/mips/mm/c-octeon.c c->icache.ways, c->icache.sets, c->icache.linesz); c 249 arch/mips/mm/c-octeon.c dcache_size >> 10, c->dcache.ways, c 250 arch/mips/mm/c-octeon.c c->dcache.sets, c->dcache.linesz); c 1021 arch/mips/mm/c-r4k.c static inline int alias_74k_erratum(struct cpuinfo_mips *c) c 1023 arch/mips/mm/c-r4k.c unsigned int imp = c->processor_id & PRID_IMP_MASK; c 1024 arch/mips/mm/c-r4k.c unsigned int rev = c->processor_id & PRID_REV_MASK; c 1076 arch/mips/mm/c-r4k.c struct cpuinfo_mips *c = &current_cpu_data; c 1089 arch/mips/mm/c-r4k.c c->icache.linesz = 16 << ((config & CONF_IB) >> 5); c 1090 arch/mips/mm/c-r4k.c c->icache.ways = 2; c 1091
arch/mips/mm/c-r4k.c c->icache.waybit = __ffs(icache_size/2); c 1094 arch/mips/mm/c-r4k.c c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); c 1095 arch/mips/mm/c-r4k.c c->dcache.ways = 2; c 1096 arch/mips/mm/c-r4k.c c->dcache.waybit= __ffs(dcache_size/2); c 1098 arch/mips/mm/c-r4k.c c->options |= MIPS_CPU_CACHE_CDEX_P; c 1103 arch/mips/mm/c-r4k.c c->icache.linesz = 16 << ((config & CONF_IB) >> 5); c 1104 arch/mips/mm/c-r4k.c c->icache.ways = 2; c 1105 arch/mips/mm/c-r4k.c c->icache.waybit= 0; c 1108 arch/mips/mm/c-r4k.c c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); c 1109 arch/mips/mm/c-r4k.c c->dcache.ways = 2; c 1110 arch/mips/mm/c-r4k.c c->dcache.waybit = 0; c 1112 arch/mips/mm/c-r4k.c c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH; c 1117 arch/mips/mm/c-r4k.c c->icache.linesz = 16 << ((config & CONF_IB) >> 5); c 1118 arch/mips/mm/c-r4k.c c->icache.ways = 4; c 1119 arch/mips/mm/c-r4k.c c->icache.waybit= 0; c 1122 arch/mips/mm/c-r4k.c c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); c 1123 arch/mips/mm/c-r4k.c c->dcache.ways = 4; c 1124 arch/mips/mm/c-r4k.c c->dcache.waybit = 0; c 1126 arch/mips/mm/c-r4k.c c->options |= MIPS_CPU_CACHE_CDEX_P; c 1127 arch/mips/mm/c-r4k.c c->options |= MIPS_CPU_PREFETCH; c 1137 arch/mips/mm/c-r4k.c c->icache.linesz = 16 << ((config & CONF_IB) >> 5); c 1138 arch/mips/mm/c-r4k.c c->icache.ways = 1; c 1139 arch/mips/mm/c-r4k.c c->icache.waybit = 0; /* doesn't matter */ c 1142 arch/mips/mm/c-r4k.c c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); c 1143 arch/mips/mm/c-r4k.c c->dcache.ways = 1; c 1144 arch/mips/mm/c-r4k.c c->dcache.waybit = 0; /* does not matter */ c 1146 arch/mips/mm/c-r4k.c c->options |= MIPS_CPU_CACHE_CDEX_P; c 1154 arch/mips/mm/c-r4k.c c->icache.linesz = 64; c 1155 arch/mips/mm/c-r4k.c c->icache.ways = 2; c 1156 arch/mips/mm/c-r4k.c c->icache.waybit = 0; c 1159 arch/mips/mm/c-r4k.c c->dcache.linesz = 32; c 1160 arch/mips/mm/c-r4k.c c->dcache.ways = 2; c 1161 arch/mips/mm/c-r4k.c c->dcache.waybit = 0; c 1163 arch/mips/mm/c-r4k.c c->options |= MIPS_CPU_PREFETCH; c 1171 arch/mips/mm/c-r4k.c if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U || c 1172 arch/mips/mm/c-r4k.c c->processor_id == 0x0c82U) { c 1174 arch/mips/mm/c-r4k.c if (c->processor_id == 0x0c80U) c 1178 arch/mips/mm/c-r4k.c c->options |= MIPS_CPU_CACHE_CDEX_P; c 1181 arch/mips/mm/c-r4k.c c->icache.linesz = 16 << ((config & CONF_IB) >> 5); c 1182 arch/mips/mm/c-r4k.c c->icache.ways = 2; c 1183 arch/mips/mm/c-r4k.c c->icache.waybit = __ffs(icache_size/2); c 1186 arch/mips/mm/c-r4k.c c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); c 1187 arch/mips/mm/c-r4k.c c->dcache.ways = 2; c 1188 arch/mips/mm/c-r4k.c c->dcache.waybit = __ffs(dcache_size/2); c 1198 arch/mips/mm/c-r4k.c c->icache.linesz = 16 << ((config & CONF_IB) >> 5); c 1199 arch/mips/mm/c-r4k.c c->icache.ways = 1; c 1200 arch/mips/mm/c-r4k.c c->icache.waybit = 0; /* doesn't matter */ c 1203 arch/mips/mm/c-r4k.c c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); c 1204 arch/mips/mm/c-r4k.c c->dcache.ways = 1; c 1205 arch/mips/mm/c-r4k.c c->dcache.waybit = 0; /* does not matter */ c 1207 arch/mips/mm/c-r4k.c c->options |= MIPS_CPU_CACHE_CDEX_P; c 1214 arch/mips/mm/c-r4k.c c->icache.linesz = 16 << ((config & CONF_IB) >> 5); c 1215 arch/mips/mm/c-r4k.c c->icache.ways = 4; c 1216 arch/mips/mm/c-r4k.c c->icache.waybit = __ffs(icache_size / c->icache.ways); c 1219 arch/mips/mm/c-r4k.c c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); c 1220 arch/mips/mm/c-r4k.c c->dcache.ways = 4; c 1221 
arch/mips/mm/c-r4k.c c->dcache.waybit = __ffs(dcache_size / c->dcache.ways); c 1223 arch/mips/mm/c-r4k.c c->options |= MIPS_CPU_CACHE_CDEX_P; c 1224 arch/mips/mm/c-r4k.c c->options |= MIPS_CPU_PREFETCH; c 1229 arch/mips/mm/c-r4k.c c->icache.linesz = 16 << ((config & CONF_IB) >> 5); c 1231 arch/mips/mm/c-r4k.c c->icache.ways = 4; c 1233 arch/mips/mm/c-r4k.c c->icache.ways = 2; c 1234 arch/mips/mm/c-r4k.c c->icache.waybit = 0; c 1237 arch/mips/mm/c-r4k.c c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); c 1239 arch/mips/mm/c-r4k.c c->dcache.ways = 4; c 1241 arch/mips/mm/c-r4k.c c->dcache.ways = 2; c 1242 arch/mips/mm/c-r4k.c c->dcache.waybit = 0; c 1249 arch/mips/mm/c-r4k.c c->icache.linesz = 2 << lsize; c 1251 arch/mips/mm/c-r4k.c c->icache.linesz = 0; c 1252 arch/mips/mm/c-r4k.c c->icache.sets = 64 << ((config1 >> 22) & 7); c 1253 arch/mips/mm/c-r4k.c c->icache.ways = 1 + ((config1 >> 16) & 7); c 1254 arch/mips/mm/c-r4k.c icache_size = c->icache.sets * c 1255 arch/mips/mm/c-r4k.c c->icache.ways * c 1256 arch/mips/mm/c-r4k.c c->icache.linesz; c 1257 arch/mips/mm/c-r4k.c c->icache.waybit = 0; c 1261 arch/mips/mm/c-r4k.c c->dcache.linesz = 2 << lsize; c 1263 arch/mips/mm/c-r4k.c c->dcache.linesz = 0; c 1264 arch/mips/mm/c-r4k.c c->dcache.sets = 64 << ((config1 >> 13) & 7); c 1265 arch/mips/mm/c-r4k.c c->dcache.ways = 1 + ((config1 >> 7) & 7); c 1266 arch/mips/mm/c-r4k.c dcache_size = c->dcache.sets * c 1267 arch/mips/mm/c-r4k.c c->dcache.ways * c 1268 arch/mips/mm/c-r4k.c c->dcache.linesz; c 1269 arch/mips/mm/c-r4k.c c->dcache.waybit = 0; c 1271 arch/mips/mm/c-r4k.c c->options |= MIPS_CPU_PREFETCH; c 1276 arch/mips/mm/c-r4k.c c->icache.linesz = 128; c 1277 arch/mips/mm/c-r4k.c c->icache.sets = 16; c 1278 arch/mips/mm/c-r4k.c c->icache.ways = 8; c 1279 arch/mips/mm/c-r4k.c c->icache.flags |= MIPS_CACHE_VTAG; c 1280 arch/mips/mm/c-r4k.c icache_size = c->icache.sets * c->icache.ways * c->icache.linesz; c 1282 arch/mips/mm/c-r4k.c c->dcache.linesz = 128; c 1283 arch/mips/mm/c-r4k.c c->dcache.ways = 8; c 1284 arch/mips/mm/c-r4k.c c->dcache.sets = 8; c 1285 arch/mips/mm/c-r4k.c dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz; c 1286 arch/mips/mm/c-r4k.c c->options |= MIPS_CPU_PREFETCH; c 1305 arch/mips/mm/c-r4k.c c->icache.linesz = lsize ? 2 << lsize : 0; c 1307 arch/mips/mm/c-r4k.c c->icache.sets = 32 << (((config1 >> 22) + 1) & 7); c 1308 arch/mips/mm/c-r4k.c c->icache.ways = 1 + ((config1 >> 16) & 7); c 1310 arch/mips/mm/c-r4k.c icache_size = c->icache.sets * c 1311 arch/mips/mm/c-r4k.c c->icache.ways * c 1312 arch/mips/mm/c-r4k.c c->icache.linesz; c 1313 arch/mips/mm/c-r4k.c c->icache.waybit = __ffs(icache_size/c->icache.ways); c 1316 arch/mips/mm/c-r4k.c c->icache.flags |= MIPS_CACHE_VTAG; c 1321 arch/mips/mm/c-r4k.c c->dcache.flags = 0; c 1329 arch/mips/mm/c-r4k.c c->dcache.linesz = lsize ? 
2 << lsize : 0; c 1331 arch/mips/mm/c-r4k.c c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7); c 1332 arch/mips/mm/c-r4k.c c->dcache.ways = 1 + ((config1 >> 7) & 7); c 1334 arch/mips/mm/c-r4k.c dcache_size = c->dcache.sets * c 1335 arch/mips/mm/c-r4k.c c->dcache.ways * c 1336 arch/mips/mm/c-r4k.c c->dcache.linesz; c 1337 arch/mips/mm/c-r4k.c c->dcache.waybit = __ffs(dcache_size/c->dcache.ways); c 1339 arch/mips/mm/c-r4k.c c->options |= MIPS_CPU_PREFETCH; c 1353 arch/mips/mm/c-r4k.c !(config & CONF_SC) && c->icache.linesz != 16 && c 1358 arch/mips/mm/c-r4k.c c->icache.waysize = icache_size / c->icache.ways; c 1359 arch/mips/mm/c-r4k.c c->dcache.waysize = dcache_size / c->dcache.ways; c 1361 arch/mips/mm/c-r4k.c c->icache.sets = c->icache.linesz ? c 1362 arch/mips/mm/c-r4k.c icache_size / (c->icache.linesz * c->icache.ways) : 0; c 1363 arch/mips/mm/c-r4k.c c->dcache.sets = c->dcache.linesz ? c 1364 arch/mips/mm/c-r4k.c dcache_size / (c->dcache.linesz * c->dcache.ways) : 0; c 1380 arch/mips/mm/c-r4k.c c->dcache.flags |= MIPS_CACHE_PINDEX; c 1391 arch/mips/mm/c-r4k.c has_74k_erratum = alias_74k_erratum(c); c 1406 arch/mips/mm/c-r4k.c (c->icache.waysize > PAGE_SIZE)) c 1407 arch/mips/mm/c-r4k.c c->icache.flags |= MIPS_CACHE_ALIASES; c 1413 arch/mips/mm/c-r4k.c c->dcache.flags |= MIPS_CACHE_PINDEX; c 1418 arch/mips/mm/c-r4k.c if (has_74k_erratum || c->dcache.waysize > PAGE_SIZE) c 1419 arch/mips/mm/c-r4k.c c->dcache.flags |= MIPS_CACHE_ALIASES; c 1423 arch/mips/mm/c-r4k.c if (c->dcache.flags & MIPS_CACHE_PINDEX) c 1424 arch/mips/mm/c-r4k.c c->dcache.flags &= ~MIPS_CACHE_ALIASES; c 1432 arch/mips/mm/c-r4k.c c->icache.flags |= MIPS_IC_SNOOPS_REMOTE; c 1440 arch/mips/mm/c-r4k.c c->icache.flags |= MIPS_CACHE_VTAG; c 1446 arch/mips/mm/c-r4k.c c->icache.flags |= MIPS_CACHE_IC_F_DC; c 1450 arch/mips/mm/c-r4k.c c->icache.flags |= MIPS_CACHE_IC_F_DC; c 1452 arch/mips/mm/c-r4k.c c->dcache.flags &= ~MIPS_CACHE_ALIASES; c 1460 arch/mips/mm/c-r4k.c c->icache.ways = 1; c 1465 arch/mips/mm/c-r4k.c c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT", c 1466 arch/mips/mm/c-r4k.c way_string[c->icache.ways], c->icache.linesz); c 1469 arch/mips/mm/c-r4k.c dcache_size >> 10, way_string[c->dcache.ways], c 1470 arch/mips/mm/c-r4k.c (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT", c 1471 arch/mips/mm/c-r4k.c (c->dcache.flags & MIPS_CACHE_ALIASES) ? 
c 1473 arch/mips/mm/c-r4k.c c->dcache.linesz); c 1478 arch/mips/mm/c-r4k.c struct cpuinfo_mips *c = &current_cpu_data; c 1486 arch/mips/mm/c-r4k.c c->vcache.linesz = 2 << lsize; c 1488 arch/mips/mm/c-r4k.c c->vcache.linesz = lsize; c 1490 arch/mips/mm/c-r4k.c c->vcache.sets = 64 << ((config2 >> 24) & 15); c 1491 arch/mips/mm/c-r4k.c c->vcache.ways = 1 + ((config2 >> 16) & 15); c 1493 arch/mips/mm/c-r4k.c vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz; c 1495 arch/mips/mm/c-r4k.c c->vcache.waybit = 0; c 1496 arch/mips/mm/c-r4k.c c->vcache.waysize = vcache_size / c->vcache.ways; c 1499 arch/mips/mm/c-r4k.c vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz); c 1512 arch/mips/mm/c-r4k.c struct cpuinfo_mips *c = &current_cpu_data; c 1556 arch/mips/mm/c-r4k.c c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22); c 1557 arch/mips/mm/c-r4k.c c->scache.ways = 1; c 1558 arch/mips/mm/c-r4k.c c->scache.waybit = 0; /* does not matter */ c 1565 arch/mips/mm/c-r4k.c struct cpuinfo_mips *c = &current_cpu_data; c 1568 arch/mips/mm/c-r4k.c c->scache.linesz = 32; c 1569 arch/mips/mm/c-r4k.c c->scache.ways = 4; c 1570 arch/mips/mm/c-r4k.c c->scache.waybit = 0; c 1571 arch/mips/mm/c-r4k.c c->scache.waysize = scache_size / (c->scache.ways); c 1572 arch/mips/mm/c-r4k.c c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways); c 1574 arch/mips/mm/c-r4k.c scache_size >> 10, way_string[c->scache.ways], c->scache.linesz); c 1576 arch/mips/mm/c-r4k.c c->options |= MIPS_CPU_INCLUSIVE_CACHES; c 1581 arch/mips/mm/c-r4k.c struct cpuinfo_mips *c = &current_cpu_data; c 1587 arch/mips/mm/c-r4k.c c->scache.linesz = 2 << lsize; c 1589 arch/mips/mm/c-r4k.c c->scache.linesz = 0; c 1590 arch/mips/mm/c-r4k.c c->scache.sets = 64 << ((config2 >> 8) & 15); c 1591 arch/mips/mm/c-r4k.c c->scache.ways = 1 + (config2 & 15); c 1593 arch/mips/mm/c-r4k.c scache_size = c->scache.sets * c 1594 arch/mips/mm/c-r4k.c c->scache.ways * c 1595 arch/mips/mm/c-r4k.c c->scache.linesz; c 1598 arch/mips/mm/c-r4k.c c->scache.waybit = 0; c 1599 arch/mips/mm/c-r4k.c c->scache.waysize = scache_size / c->scache.ways; c 1601 arch/mips/mm/c-r4k.c scache_size >> 10, way_string[c->scache.ways], c->scache.linesz); c 1603 arch/mips/mm/c-r4k.c c->options |= MIPS_CPU_INCLUSIVE_CACHES; c 1613 arch/mips/mm/c-r4k.c struct cpuinfo_mips *c = &current_cpu_data; c 1629 arch/mips/mm/c-r4k.c c->options |= MIPS_CPU_CACHE_CDEX_S; c 1637 arch/mips/mm/c-r4k.c c->scache.linesz = 64 << ((config >> 13) & 1); c 1638 arch/mips/mm/c-r4k.c c->scache.ways = 2; c 1639 arch/mips/mm/c-r4k.c c->scache.waybit= 0; c 1670 arch/mips/mm/c-r4k.c if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | c 1675 arch/mips/mm/c-r4k.c scache_size = c->scache.ways * c->scache.sets * c->scache.linesz; c 1678 arch/mips/mm/c-r4k.c way_string[c->scache.ways], c->scache.linesz); c 1681 arch/mips/mm/c-r4k.c if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT)) c 1693 arch/mips/mm/c-r4k.c c->scache.waysize = scache_size / c->scache.ways; c 1695 arch/mips/mm/c-r4k.c c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways); c 1698 arch/mips/mm/c-r4k.c scache_size >> 10, way_string[c->scache.ways], c->scache.linesz); c 1700 arch/mips/mm/c-r4k.c c->options |= MIPS_CPU_INCLUSIVE_CACHES; c 1822 arch/mips/mm/c-r4k.c struct cpuinfo_mips *c = &current_cpu_data; c 1848 arch/mips/mm/c-r4k.c if (c->dcache.linesz && cpu_has_dc_aliases) c 1850 arch/mips/mm/c-r4k.c c->dcache.sets * c->dcache.linesz - 1, c 1914 arch/mips/mm/c-r4k.c if (c->scache.flags & MIPS_CACHE_NOT_PRESENT) c 37 arch/mips/mm/cerr-sb1.c
#define CP0_CERRI_IDX_VALID(c) (!((c) & CP0_CERRI_EXTERNAL)) c 52 arch/mips/mm/cerr-sb1.c #define CP0_CERRD_DPA_VALID(c) (!((c) & CP0_CERRD_EXTERNAL)) c 53 arch/mips/mm/cerr-sb1.c #define CP0_CERRD_IDX_VALID(c) \ c 54 arch/mips/mm/cerr-sb1.c (((c) & (CP0_CERRD_LOAD | CP0_CERRD_STORE)) ? (!((c) & CP0_CERRD_EXTERNAL)) : 0) c 78 arch/mips/mm/page.c #define _uasm_i_pref(a, b, c, d) \ c 81 arch/mips/mm/page.c if (c <= 0xff && c >= -0x100) \ c 82 arch/mips/mm/page.c uasm_i_pref(a, b, c, d);\ c 84 arch/mips/mm/page.c uasm_i_pref(a, b, c, d); \ c 121 arch/mips/mm/sc-mips.c static inline int mips_sc_is_activated(struct cpuinfo_mips *c) c 144 arch/mips/mm/sc-mips.c c->scache.linesz = 2 << tmp; c 152 arch/mips/mm/sc-mips.c struct cpuinfo_mips *c = &current_cpu_data; c 162 arch/mips/mm/sc-mips.c c->scache.sets = 64 << sets; c 167 arch/mips/mm/sc-mips.c c->scache.linesz = 2 << line_sz; c 171 arch/mips/mm/sc-mips.c c->scache.ways = assoc + 1; c 172 arch/mips/mm/sc-mips.c c->scache.waysize = c->scache.sets * c->scache.linesz; c 173 arch/mips/mm/sc-mips.c c->scache.waybit = __ffs(c->scache.waysize); c 175 arch/mips/mm/sc-mips.c if (c->scache.linesz) { c 176 arch/mips/mm/sc-mips.c c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT; c 177 arch/mips/mm/sc-mips.c c->options |= MIPS_CPU_INCLUSIVE_CACHES; c 186 arch/mips/mm/sc-mips.c struct cpuinfo_mips *c = &current_cpu_data; c 191 arch/mips/mm/sc-mips.c c->scache.flags |= MIPS_CACHE_NOT_PRESENT; c 197 arch/mips/mm/sc-mips.c if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | c 209 arch/mips/mm/sc-mips.c if (!mips_sc_is_activated(c)) c 214 arch/mips/mm/sc-mips.c c->scache.sets = 64 << tmp; c 220 arch/mips/mm/sc-mips.c c->scache.ways = tmp + 1; c 231 arch/mips/mm/sc-mips.c c->scache.ways = 4; c 239 arch/mips/mm/sc-mips.c c->scache.sets = 256; c 240 arch/mips/mm/sc-mips.c c->scache.ways = 4; c 245 arch/mips/mm/sc-mips.c c->scache.waysize = c->scache.sets * c->scache.linesz; c 246 arch/mips/mm/sc-mips.c c->scache.waybit = __ffs(c->scache.waysize); c 248 arch/mips/mm/sc-mips.c c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT; c 231 arch/mips/mm/sc-rm7k.c struct cpuinfo_mips *c = &current_cpu_data; c 237 arch/mips/mm/sc-rm7k.c c->scache.linesz = sc_lsize; c 238 arch/mips/mm/sc-rm7k.c c->scache.ways = 4; c 239 arch/mips/mm/sc-rm7k.c c->scache.waybit= __ffs(scache_size / c->scache.ways); c 240 arch/mips/mm/sc-rm7k.c c->scache.waysize = scache_size / c->scache.ways; c 241 arch/mips/mm/sc-rm7k.c c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways); c 267 arch/mips/mm/sc-rm7k.c c->tcache.linesz = tc_lsize; c 268 arch/mips/mm/sc-rm7k.c c->tcache.ways = 1; c 32 arch/mips/mm/uasm-micromips.c #define M(a, b, c, d, e, f) \ c 35 arch/mips/mm/uasm-micromips.c | (c) << RS_SH \ c 32 arch/mips/mm/uasm-mips.c #define M(a, b, c, d, e, f) \ c 35 arch/mips/mm/uasm-mips.c | (c) << RT_SH \ c 41 arch/mips/mm/uasm-mips.c #define M6(a, b, c, d, e) \ c 44 arch/mips/mm/uasm-mips.c | (c) << RT_SH \ c 155 arch/mips/mm/uasm.c build_insn(buf, insn##op, a, b, c); \ c 162 arch/mips/mm/uasm.c build_insn(buf, insn##op, b, c, a); \ c 169 arch/mips/mm/uasm.c build_insn(buf, insn##op, b, a, c); \ c 176 arch/mips/mm/uasm.c build_insn(buf, insn##op, c, b, a); \ c 183 arch/mips/mm/uasm.c build_insn(buf, insn##op, b, c, a); \ c 190 arch/mips/mm/uasm.c build_insn(buf, insn##op, a, b, c); \ c 197 arch/mips/mm/uasm.c build_insn(buf, insn##op, c, a, b); \ c 204 arch/mips/mm/uasm.c build_insn(buf, insn##op, b, a, c); \ c 211 arch/mips/mm/uasm.c build_insn(buf, insn##op, b, a, c+d-1, c); \ c 218 arch/mips/mm/uasm.c
build_insn(buf, insn##op, b, a, c+d-33, c); \ c 225 arch/mips/mm/uasm.c build_insn(buf, insn##op, b, a, c+d-33, c-32); \ c 232 arch/mips/mm/uasm.c build_insn(buf, insn##op, b, a, d-1, c); \ c 395 arch/mips/mm/uasm.c unsigned int c) c 402 arch/mips/mm/uasm.c build_insn(buf, insn_pref, c, 28, b); c 404 arch/mips/mm/uasm.c build_insn(buf, insn_pref, c, a, b); c 51 arch/mips/netlogic/common/earlycons.c void prom_putchar(char c) c 62 arch/mips/netlogic/common/earlycons.c nlm_write_reg(uartbase, UART_TX, c); c 33 arch/mips/oprofile/op_model_mipsxx.c #define oprofile_skip_cpu(c) ((cpu_logical_map(c) & 0x3) != 0) c 35 arch/mips/oprofile/op_model_mipsxx.c #define oprofile_skip_cpu(c) 0 c 17 arch/mips/paravirt/serial.c void prom_putchar(char c) c 20 arch/mips/paravirt/serial.c (unsigned long)&c, 1 /* len == 1 */); c 61 arch/mips/pci/fixup-fuloong2e.c unsigned char c; c 127 arch/mips/pci/fixup-fuloong2e.c pci_read_config_byte(pdev, 0x85, &c); c 128 arch/mips/pci/fixup-fuloong2e.c c &= ~(0x3 << 2); c 129 arch/mips/pci/fixup-fuloong2e.c pci_write_config_byte(pdev, 0x85, c); c 182 arch/mips/pci/fixup-fuloong2e.c unsigned char c; c 194 arch/mips/pci/fixup-fuloong2e.c pci_read_config_byte(pdev, 0x8, &c); c 369 arch/mips/pci/pci-alchemy.c struct clk *c; c 399 arch/mips/pci/pci-alchemy.c c = clk_get(&pdev->dev, "pci_clko"); c 400 arch/mips/pci/pci-alchemy.c if (IS_ERR(c)) { c 402 arch/mips/pci/pci-alchemy.c ret = PTR_ERR(c); c 406 arch/mips/pci/pci-alchemy.c ret = clk_prepare_enable(c); c 486 arch/mips/pci/pci-alchemy.c clk_get_rate(c) / 1000000); c 495 arch/mips/pci/pci-alchemy.c clk_disable_unprepare(c); c 497 arch/mips/pci/pci-alchemy.c clk_put(c); c 104 arch/mips/pci/pci-xtalk-bridge.c addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[PCI_VENDOR_ID]; c 117 arch/mips/pci/pci-xtalk-bridge.c addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)]; c 142 arch/mips/pci/pci-xtalk-bridge.c addr = &bridge->b_type1_cfg.c[(fn << 8) | PCI_VENDOR_ID]; c 151 arch/mips/pci/pci-xtalk-bridge.c addr = &bridge->b_type1_cfg.c[(fn << 8) | (where & ~3)]; c 155 arch/mips/pci/pci-xtalk-bridge.c addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))]; c 187 arch/mips/pci/pci-xtalk-bridge.c addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[PCI_VENDOR_ID]; c 200 arch/mips/pci/pci-xtalk-bridge.c addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)]; c 228 arch/mips/pci/pci-xtalk-bridge.c addr = &bridge->b_type1_cfg.c[(fn << 8) | PCI_VENDOR_ID]; c 241 arch/mips/pci/pci-xtalk-bridge.c addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))]; c 153 arch/mips/pic32/pic32mzda/early_console.c void prom_putchar(char c) c 160 arch/mips/pic32/pic32mzda/early_console.c __raw_writel(c, uart_base + U_TXR(console_port)); c 82 arch/mips/pmcs-msp71xx/msp_prom.c static inline unsigned char str2hexnum(unsigned char c) c 84 arch/mips/pmcs-msp71xx/msp_prom.c if (c >= '0' && c <= '9') c 85 arch/mips/pmcs-msp71xx/msp_prom.c return c - '0'; c 86 arch/mips/pmcs-msp71xx/msp_prom.c if (c >= 'a' && c <= 'f') c 87 arch/mips/pmcs-msp71xx/msp_prom.c return c - 'a' + 10; c 186 arch/mips/pmcs-msp71xx/msp_prom.c static char test_feature(char c) c 191 arch/mips/pmcs-msp71xx/msp_prom.c if (*feature++ == c) c 20 arch/mips/pnx833x/common/prom.c char *c = &(arcs_cmdline[0]); c 24 arch/mips/pnx833x/common/prom.c strcpy(c, argv[i]); c 25 arch/mips/pnx833x/common/prom.c c += strlen(argv[i]); c 27 arch/mips/pnx833x/common/prom.c *c++ = ' '; c 29 arch/mips/pnx833x/common/prom.c *c = 0; c 102 arch/mips/sgi-ip22/ip22-eisa.c int i, c; c 111 
arch/mips/sgi-ip22/ip22-eisa.c for (c = 0, i = 1; i <= IP22_EISA_MAX_SLOTS; i++) { c 115 arch/mips/sgi-ip22/ip22-eisa.c c++; c 118 arch/mips/sgi-ip22/ip22-eisa.c printk(KERN_INFO "EISA: Detected %d card%s.\n", c, c < 2 ? "" : "s"); c 123 arch/mips/sgi-ip22/ip22-time.c char c; c 128 arch/mips/sgi-ip22/ip22-time.c ArcRead(0, &c, 1, &cnt); c 34 arch/mips/sgi-ip27/ip27-console.c void prom_putchar(char c) c 40 arch/mips/sgi-ip27/ip27-console.c writeb(c, &uart->iu_thr); c 40 arch/mips/sgi-ip32/ip32-setup.c static inline unsigned char str2hexnum(unsigned char c) c 42 arch/mips/sgi-ip32/ip32-setup.c if (c >= '0' && c <= '9') c 43 arch/mips/sgi-ip32/ip32-setup.c return c - '0'; c 44 arch/mips/sgi-ip32/ip32-setup.c if (c >= 'a' && c <= 'f') c 45 arch/mips/sgi-ip32/ip32-setup.c return c - 'a' + 10; c 326 arch/mips/sibyte/common/cfe.c void prom_putchar(char c) c 330 arch/mips/sibyte/common/cfe.c while ((ret = cfe_write(cfe_cons_handle, &c, 1)) == 0) c 29 arch/mips/txx9/generic/7segled.c int txx9_7segled_putc(unsigned int pos, char c) c 33 arch/mips/txx9/generic/7segled.c c = map_to_seg7(&txx9_seg7map, c); c 34 arch/mips/txx9/generic/7segled.c if (c < 0) c 35 arch/mips/txx9/generic/7segled.c return c; c 36 arch/mips/txx9/generic/7segled.c tx_7segled_putc(pos, c); c 115 arch/mips/txx9/generic/pci.c struct pci_controller c; c 128 arch/mips/txx9/generic/pci.c new->c.mem_resource = new->r_mem; c 129 arch/mips/txx9/generic/pci.c new->c.io_resource = &new->r_io; c 130 arch/mips/txx9/generic/pci.c pcic = &new->c; c 464 arch/mips/txx9/generic/setup.c static void null_prom_putchar(char c) c 467 arch/mips/txx9/generic/setup.c void (*txx9_prom_putchar)(char c) = null_prom_putchar; c 469 arch/mips/txx9/generic/setup.c void prom_putchar(char c) c 471 arch/mips/txx9/generic/setup.c txx9_prom_putchar(c); c 476 arch/mips/txx9/generic/setup.c static void early_txx9_sio_putchar(char c) c 484 arch/mips/txx9/generic/setup.c __raw_writel(c, early_txx9_sio_port + TXX9_SITFIFO); c 69 arch/mips/txx9/generic/spi_eeprom.c int c = len < AT250X0_PAGE_SIZE ? 
len : AT250X0_PAGE_SIZE; c 72 arch/mips/txx9/generic/spi_eeprom.c stat = spi_write_then_read(spi, cmd, sizeof(cmd), buf, c); c 73 arch/mips/txx9/generic/spi_eeprom.c buf += c; c 74 arch/mips/txx9/generic/spi_eeprom.c len -= c; c 75 arch/mips/txx9/generic/spi_eeprom.c address += c; c 104 arch/mips/txx9/jmr3927/setup.c struct pci_controller *c; c 106 arch/mips/txx9/jmr3927/setup.c c = txx9_alloc_pci_controller(&txx9_primary_pcic, c 109 arch/mips/txx9/jmr3927/setup.c register_pci_controller(c); c 119 arch/mips/txx9/jmr3927/setup.c tx3927_pcic_setup(c, JMR3927_SDRAM_SIZE, extarb); c 65 arch/mips/txx9/rbtx4927/setup.c struct pci_controller *c = &txx9_primary_pcic; c 67 arch/mips/txx9/rbtx4927/setup.c register_pci_controller(c); c 88 arch/mips/txx9/rbtx4927/setup.c tx4927_pcic_setup(tx4927_pcicptr, c, extarb); c 91 arch/mips/txx9/rbtx4927/setup.c txx9_pci66_check(c, 0, 0)) { c 104 arch/mips/txx9/rbtx4927/setup.c tx4927_pcic_setup(tx4927_pcicptr, c, extarb); c 112 arch/mips/txx9/rbtx4927/setup.c struct pci_controller *c = &txx9_primary_pcic; c 114 arch/mips/txx9/rbtx4927/setup.c register_pci_controller(c); c 135 arch/mips/txx9/rbtx4927/setup.c tx4927_pcic_setup(tx4938_pcicptr, c, extarb); c 138 arch/mips/txx9/rbtx4927/setup.c txx9_pci66_check(c, 0, 0)) { c 151 arch/mips/txx9/rbtx4927/setup.c tx4927_pcic_setup(tx4938_pcicptr, c, extarb); c 44 arch/mips/txx9/rbtx4938/setup.c struct pci_controller *c = &txx9_primary_pcic; c 46 arch/mips/txx9/rbtx4938/setup.c register_pci_controller(c); c 67 arch/mips/txx9/rbtx4938/setup.c tx4927_pcic_setup(tx4938_pcicptr, c, extarb); c 70 arch/mips/txx9/rbtx4938/setup.c txx9_pci66_check(c, 0, 0)) { c 83 arch/mips/txx9/rbtx4938/setup.c tx4927_pcic_setup(tx4938_pcicptr, c, extarb); c 100 arch/mips/txx9/rbtx4938/setup.c c = txx9_alloc_pci_controller(NULL, 0, 0x10000, 0, 0x10000); c 101 arch/mips/txx9/rbtx4938/setup.c register_pci_controller(c); c 102 arch/mips/txx9/rbtx4938/setup.c tx4927_pcic_setup(tx4938_pcic1ptr, c, 0); c 61 arch/mips/txx9/rbtx4939/setup.c struct pci_controller *c = &txx9_primary_pcic; c 63 arch/mips/txx9/rbtx4939/setup.c register_pci_controller(c); c 66 arch/mips/txx9/rbtx4939/setup.c tx4927_pcic_setup(tx4939_pcicptr, c, extarb); c 73 arch/mips/txx9/rbtx4939/setup.c c = txx9_alloc_pci_controller(NULL, 0, 0x10000, 0, 0x10000); c 74 arch/mips/txx9/rbtx4939/setup.c register_pci_controller(c); c 75 arch/mips/txx9/rbtx4939/setup.c tx4927_pcic_setup(tx4939_pcic1ptr, c, 0); c 64 arch/nds32/include/asm/io.h #define readb_relaxed(c) ({ u8 __v = __raw_readb(c); __v; }) c 65 arch/nds32/include/asm/io.h #define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16)__raw_readw(c)); __v; }) c 66 arch/nds32/include/asm/io.h #define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32)__raw_readl(c)); __v; }) c 67 arch/nds32/include/asm/io.h #define writeb_relaxed(v,c) ((void)__raw_writeb((v),(c))) c 68 arch/nds32/include/asm/io.h #define writew_relaxed(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c))) c 69 arch/nds32/include/asm/io.h #define writel_relaxed(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c))) c 75 arch/nds32/include/asm/io.h #define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) c 76 arch/nds32/include/asm/io.h #define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) c 77 arch/nds32/include/asm/io.h #define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) c 79 arch/nds32/include/asm/io.h #define writeb(v,c) ({ __iowmb(); writeb_relaxed((v),(c)); }) c 80 arch/nds32/include/asm/io.h #define 
writew(v,c) ({ __iowmb(); writew_relaxed((v),(c)); }) c 81 arch/nds32/include/asm/io.h #define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c)); }) c 371 arch/nds32/math-emu/fpuemu.c char c; c 375 arch/nds32/math-emu/fpuemu.c if (__get_user(c, (unsigned char *)addr++)) c 377 arch/nds32/math-emu/fpuemu.c *pc++ = c; c 59 arch/nios2/boot/compressed/misc.c # define Tracec(c, x) {if (verbose && (c)) fprintf x ; } c 60 arch/nios2/boot/compressed/misc.c # define Tracecv(c, x) {if (verbose > 1 && (c)) fprintf x ; } c 66 arch/nios2/boot/compressed/misc.c # define Tracec(c, x) c 67 arch/nios2/boot/compressed/misc.c # define Tracecv(c, x) c 94 arch/nios2/boot/compressed/misc.c void *memset(void *s, int c, size_t n) c 100 arch/nios2/boot/compressed/misc.c ss[i] = c; c 135 arch/nios2/boot/compressed/misc.c ulg c = crc; /* temporary variable */ c 143 arch/nios2/boot/compressed/misc.c c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8); c 145 arch/nios2/boot/compressed/misc.c crc = c; c 18 arch/nios2/include/asm/string.h extern void *memset(void *s, int c, size_t count); c 13 arch/nios2/lib/memset.c void *memset(void *s, int c, size_t count) c 20 arch/nios2/lib/memset.c c &= 0xFF; c 26 arch/nios2/lib/memset.c *xs++ = c; c 72 arch/nios2/lib/memset.c : "r" (c), /* %5 Input */ c 30 arch/nios2/mm/mmu_context.c #define CTX_VERSION(c) (((c) >> VERSION_SHIFT) & VERSION_MASK) c 33 arch/nios2/mm/mmu_context.c #define CTX_PID(c) (((c) >> PID_SHIFT) & PID_MASK) c 6 arch/openrisc/include/asm/string.h extern void *memset(void *s, int c, __kernel_size_t n); c 35 arch/openrisc/kernel/time.c u32 c; c 41 arch/openrisc/kernel/time.c c = mfspr(SPR_TTCR); c 42 arch/openrisc/kernel/time.c c += delta; c 43 arch/openrisc/kernel/time.c c &= SPR_TTMR_TP; c 48 arch/openrisc/kernel/time.c mtspr(SPR_TTMR, SPR_TTMR_CR | SPR_TTMR_IE | c); c 113 arch/openrisc/kernel/traps.c unsigned char c; c 114 arch/openrisc/kernel/traps.c if (__get_user(c, &((unsigned char *)regs->pc)[i])) { c 121 arch/openrisc/kernel/traps.c printk("(%02x) ", c); c 123 arch/openrisc/kernel/traps.c printk("%02x ", c); c 194 arch/openrisc/kernel/traps.c unsigned char c; c 195 arch/openrisc/kernel/traps.c c = ((unsigned char *)(__pa(regs->pc)))[i]; c 198 arch/openrisc/kernel/traps.c printk("(%02x) ", c); c 200 arch/openrisc/kernel/traps.c printk("%02x ", c); c 82 arch/parisc/boot/compressed/misc.c void *memset(void *s, int c, size_t count) c 87 arch/parisc/boot/compressed/misc.c *xs++ = c; c 110 arch/parisc/boot/compressed/misc.c char *strchr(const char *s, int c) c 113 arch/parisc/boot/compressed/misc.c if (*s == (char)c) c 136 arch/parisc/boot/compressed/misc.c static int putchar(int c) c 140 arch/parisc/boot/compressed/misc.c buf[0] = c; c 143 arch/parisc/boot/compressed/misc.c return c; c 33 arch/parisc/include/asm/hash.h u32 a, b, c; c 41 arch/parisc/include/asm/hash.h c = x << 23; b += a; c 42 arch/parisc/include/asm/hash.h c += b; c 45 arch/parisc/include/asm/hash.h a += c << 3; b -= c; c 120 arch/parisc/include/asm/hash.h u64 b, c, d; c 131 arch/parisc/include/asm/hash.h _ASSIGN(b, a*5); c = a << 13; c 133 arch/parisc/include/asm/hash.h a = b + (a << 1); c += d; c 136 arch/parisc/include/asm/hash.h c += b; a += b; c 137 arch/parisc/include/asm/hash.h d -= c; c += a << 1; c 138 arch/parisc/include/asm/hash.h a += c << 3; _ASSIGN(b, b << (7+31), "X" (c), "X" (d)); c 80 arch/parisc/include/asm/psw.h unsigned int c:1; c 59 arch/parisc/kernel/pdc_cons.c int c; c 63 arch/parisc/kernel/pdc_cons.c c = pdc_iodc_getc(); c 66 arch/parisc/kernel/pdc_cons.c return c; c 198 
arch/parisc/kernel/pdc_cons.c static struct tty_driver * pdc_console_device (struct console *c, int *index) c 200 arch/parisc/kernel/pdc_cons.c *index = c->index; c 42 arch/parisc/kernel/unaligned.c #define OPCODE1(a,b,c) ((a)<<26|(b)<<12|(c)<<6) c 166 arch/powerpc/boot/cpm-serial.c static void cpm_serial_putc(unsigned char c) c 173 arch/powerpc/boot/cpm-serial.c tbdf->addr[0] = c; c 186 arch/powerpc/boot/cpm-serial.c unsigned char c; c 192 arch/powerpc/boot/cpm-serial.c c = rbdf->addr[0]; c 196 arch/powerpc/boot/cpm-serial.c return c; c 36 arch/powerpc/boot/mpc52xx-psc.c static void psc_putc(unsigned char c) c 39 arch/powerpc/boot/mpc52xx-psc.c out_8(psc + MPC52xx_PSC_BUFFER, c); c 39 arch/powerpc/boot/ns16550.c static void ns16550_putc(unsigned char c) c 42 arch/powerpc/boot/ns16550.c out_8(reg_base, c); c 42 arch/powerpc/boot/opal.c static void opal_con_putc(unsigned char c) c 57 arch/powerpc/boot/opal.c opal_console_write(opal_con_id, &olen, &c); c 71 arch/powerpc/boot/ops.h void (*putc)(unsigned char c); c 20 arch/powerpc/boot/stdio.c char *strrchr(const char *s, int c) c 24 arch/powerpc/boot/stdio.c if (*s == (char)c) c 64 arch/powerpc/boot/stdio.c int i, c; c 66 arch/powerpc/boot/stdio.c for (i = 0; '0' <= (c = **s) && c <= '9'; ++*s) c 67 arch/powerpc/boot/stdio.c i = i*10 + c - '0'; c 81 arch/powerpc/boot/stdio.c char c,sign,tmp[66]; c 91 arch/powerpc/boot/stdio.c c = (type & ZEROPAD) ? '0' : ' '; c 136 arch/powerpc/boot/stdio.c *str++ = c; c 9 arch/powerpc/boot/string.h extern char *strchr(const char *s, int c); c 10 arch/powerpc/boot/string.h extern char *strrchr(const char *s, int c); c 16 arch/powerpc/boot/string.h extern void *memset(void *s, int c, size_t n); c 19 arch/powerpc/boot/string.h extern void *memchr(const void *s, int c, size_t n); c 38 arch/powerpc/boot/uartlite.c static void uartlite_putc(unsigned char c) c 43 arch/powerpc/boot/uartlite.c out_be32(reg_base + ULITE_TX, c); c 82 arch/powerpc/include/asm/book3s/32/mmu-hash.h unsigned long c:1; /* Changed */ c 24 arch/powerpc/include/asm/btext.h extern void btext_drawchar(char c); c 27 arch/powerpc/include/asm/btext.h extern void btext_drawtext(const char *c, unsigned int len); c 13 arch/powerpc/include/asm/ide.h #define __ide_mm_insw(p, a, c) readsw((void __iomem *)(p), (a), (c)) c 14 arch/powerpc/include/asm/ide.h #define __ide_mm_insl(p, a, c) readsl((void __iomem *)(p), (a), (c)) c 15 arch/powerpc/include/asm/ide.h #define __ide_mm_outsw(p, a, c) writesw((void __iomem *)(p), (a), (c)) c 16 arch/powerpc/include/asm/ide.h #define __ide_mm_outsl(p, a, c) writesl((void __iomem *)(p), (a), (c)) c 30 arch/powerpc/include/asm/io-defs.h DEF_PCI_AC_NORET(readsb, (const PCI_IO_ADDR a, void *b, unsigned long c), c 31 arch/powerpc/include/asm/io-defs.h (a, b, c), mem, a) c 32 arch/powerpc/include/asm/io-defs.h DEF_PCI_AC_NORET(readsw, (const PCI_IO_ADDR a, void *b, unsigned long c), c 33 arch/powerpc/include/asm/io-defs.h (a, b, c), mem, a) c 34 arch/powerpc/include/asm/io-defs.h DEF_PCI_AC_NORET(readsl, (const PCI_IO_ADDR a, void *b, unsigned long c), c 35 arch/powerpc/include/asm/io-defs.h (a, b, c), mem, a) c 36 arch/powerpc/include/asm/io-defs.h DEF_PCI_AC_NORET(writesb, (PCI_IO_ADDR a, const void *b, unsigned long c), c 37 arch/powerpc/include/asm/io-defs.h (a, b, c), mem, a) c 38 arch/powerpc/include/asm/io-defs.h DEF_PCI_AC_NORET(writesw, (PCI_IO_ADDR a, const void *b, unsigned long c), c 39 arch/powerpc/include/asm/io-defs.h (a, b, c), mem, a) c 40 arch/powerpc/include/asm/io-defs.h DEF_PCI_AC_NORET(writesl, 
(PCI_IO_ADDR a, const void *b, unsigned long c), c 41 arch/powerpc/include/asm/io-defs.h (a, b, c), mem, a) c 43 arch/powerpc/include/asm/io-defs.h DEF_PCI_AC_NORET(insb, (unsigned long p, void *b, unsigned long c), c 44 arch/powerpc/include/asm/io-defs.h (p, b, c), pio, p) c 45 arch/powerpc/include/asm/io-defs.h DEF_PCI_AC_NORET(insw, (unsigned long p, void *b, unsigned long c), c 46 arch/powerpc/include/asm/io-defs.h (p, b, c), pio, p) c 47 arch/powerpc/include/asm/io-defs.h DEF_PCI_AC_NORET(insl, (unsigned long p, void *b, unsigned long c), c 48 arch/powerpc/include/asm/io-defs.h (p, b, c), pio, p) c 49 arch/powerpc/include/asm/io-defs.h DEF_PCI_AC_NORET(outsb, (unsigned long p, const void *b, unsigned long c), c 50 arch/powerpc/include/asm/io-defs.h (p, b, c), pio, p) c 51 arch/powerpc/include/asm/io-defs.h DEF_PCI_AC_NORET(outsw, (unsigned long p, const void *b, unsigned long c), c 52 arch/powerpc/include/asm/io-defs.h (p, b, c), pio, p) c 53 arch/powerpc/include/asm/io-defs.h DEF_PCI_AC_NORET(outsl, (unsigned long p, const void *b, unsigned long c), c 54 arch/powerpc/include/asm/io-defs.h (p, b, c), pio, p) c 56 arch/powerpc/include/asm/io-defs.h DEF_PCI_AC_NORET(memset_io, (PCI_IO_ADDR a, int c, unsigned long n), c 57 arch/powerpc/include/asm/io-defs.h (a, c, n), mem, a) c 221 arch/powerpc/include/asm/io.h extern void _memset_io(volatile void __iomem *addr, int c, unsigned long n); c 549 arch/powerpc/include/asm/io.h #define __do_memset_io(addr, c, n) \ c 550 arch/powerpc/include/asm/io.h _memset_io(PCI_FIX_ADDR(addr), c, n) c 157 arch/powerpc/include/asm/mmu_context.h int c; c 180 arch/powerpc/include/asm/mmu_context.h c = atomic_dec_if_positive(&mm->context.copros); c 182 arch/powerpc/include/asm/mmu_context.h WARN_ON(c < 0); c 184 arch/powerpc/include/asm/mmu_context.h if (c == 0) c 109 arch/powerpc/include/asm/pasemi_dma.h #define PAS_DMA_TXCHAN_TCMDSTA(c) (0x300+(c)*_PAS_DMA_TXCHAN_STRIDE) c 117 arch/powerpc/include/asm/pasemi_dma.h #define PAS_DMA_TXCHAN_CFG(c) (0x304+(c)*_PAS_DMA_TXCHAN_STRIDE) c 137 arch/powerpc/include/asm/pasemi_dma.h #define PAS_DMA_TXCHAN_INCR(c) (0x310+(c)*_PAS_DMA_TXCHAN_STRIDE) c 138 arch/powerpc/include/asm/pasemi_dma.h #define PAS_DMA_TXCHAN_BASEL(c) (0x318+(c)*_PAS_DMA_TXCHAN_STRIDE) c 143 arch/powerpc/include/asm/pasemi_dma.h #define PAS_DMA_TXCHAN_BASEU(c) (0x31c+(c)*_PAS_DMA_TXCHAN_STRIDE) c 161 arch/powerpc/include/asm/pasemi_dma.h #define PAS_DMA_RXCHAN_CCMDSTA(c) (0x800+(c)*_PAS_DMA_RXCHAN_STRIDE) c 169 arch/powerpc/include/asm/pasemi_dma.h #define PAS_DMA_RXCHAN_CFG(c) (0x804+(c)*_PAS_DMA_RXCHAN_STRIDE) c 175 arch/powerpc/include/asm/pasemi_dma.h #define PAS_DMA_RXCHAN_INCR(c) (0x810+(c)*_PAS_DMA_RXCHAN_STRIDE) c 176 arch/powerpc/include/asm/pasemi_dma.h #define PAS_DMA_RXCHAN_BASEL(c) (0x818+(c)*_PAS_DMA_RXCHAN_STRIDE) c 181 arch/powerpc/include/asm/pasemi_dma.h #define PAS_DMA_RXCHAN_BASEU(c) (0x81c+(c)*_PAS_DMA_RXCHAN_STRIDE) c 383 arch/powerpc/include/asm/ppc-opcode.h #define ___PPC_RC(c) (((c) & 0x1f) << 6) c 454 arch/powerpc/include/asm/ppc-opcode.h #define PPC_MADDHD(t, a, b, c) stringify_in_c(.long PPC_INST_MADDHD | \ c 456 arch/powerpc/include/asm/ppc-opcode.h ___PPC_RB(b) | ___PPC_RC(c)) c 457 arch/powerpc/include/asm/ppc-opcode.h #define PPC_MADDHDU(t, a, b, c) stringify_in_c(.long PPC_INST_MADDHDU | \ c 459 arch/powerpc/include/asm/ppc-opcode.h ___PPC_RB(b) | ___PPC_RC(c)) c 460 arch/powerpc/include/asm/ppc-opcode.h #define PPC_MADDLD(t, a, b, c) stringify_in_c(.long PPC_INST_MADDLD | \ c 462 arch/powerpc/include/asm/ppc-opcode.h 
___PPC_RB(b) | ___PPC_RC(c)) c 524 arch/powerpc/include/asm/ppc-opcode.h #define PPC_ICBT(c,a,b) stringify_in_c(.long PPC_INST_ICBT | \ c 525 arch/powerpc/include/asm/ppc-opcode.h __PPC_CT(c) | __PPC_RA0(a) | __PPC_RB(b)) c 33 arch/powerpc/include/asm/string.h void *__memset(void *s, int c, __kernel_size_t count); c 44 arch/powerpc/include/asm/string.h #define memset(s, c, n) __memset(s, c, n) c 13 arch/powerpc/include/asm/udbg.h extern void (*udbg_putc)(char c); c 21 arch/powerpc/include/asm/word-at-a-time.h static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c) c 23 arch/powerpc/include/asm/word-at-a-time.h unsigned long mask = (val & c->low_bits) + c->low_bits; c 37 arch/powerpc/include/asm/word-at-a-time.h static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c) c 39 arch/powerpc/include/asm/word-at-a-time.h unsigned long rhs = val | c->low_bits; c 41 arch/powerpc/include/asm/word-at-a-time.h return (val + c->high_bits) & ~rhs; c 60 arch/powerpc/include/asm/word-at-a-time.h static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) c 71 arch/powerpc/include/asm/word-at-a-time.h static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c) c 139 arch/powerpc/include/asm/word-at-a-time.h static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) c 141 arch/powerpc/include/asm/word-at-a-time.h unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; c 146 arch/powerpc/include/asm/word-at-a-time.h static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c) c 473 arch/powerpc/kernel/btext.c static noinline void draw_byte(unsigned char c, long locX, long locY) c 476 arch/powerpc/kernel/btext.c unsigned char *font = &vga_font[((unsigned int)c) * 16]; c 496 arch/powerpc/kernel/btext.c void btext_drawchar(char c) c 505 arch/powerpc/kernel/btext.c switch (c) { c 522 arch/powerpc/kernel/btext.c draw_byte(c, g_loc_X++, g_loc_Y); c 546 arch/powerpc/kernel/btext.c void btext_drawstring(const char *c) c 550 arch/powerpc/kernel/btext.c while (*c) c 551 arch/powerpc/kernel/btext.c btext_drawchar(*c++); c 554 arch/powerpc/kernel/btext.c void btext_drawtext(const char *c, unsigned int len) c 559 arch/powerpc/kernel/btext.c btext_drawchar(*c++); c 123 arch/powerpc/kernel/io.c _memset_io(volatile void __iomem *addr, int c, unsigned long n) c 126 arch/powerpc/kernel/io.c u32 lc = c; c 132 arch/powerpc/kernel/io.c *((volatile u8 *)p) = c; c 142 arch/powerpc/kernel/io.c *((volatile u8 *)p) = c; c 76 arch/powerpc/kernel/kprobes.c const char *c; c 80 arch/powerpc/kernel/kprobes.c if ((c = strnchr(name, MODULE_NAME_LEN, ':')) != NULL) { c 81 arch/powerpc/kernel/kprobes.c c++; c 82 arch/powerpc/kernel/kprobes.c len = c - name; c 85 arch/powerpc/kernel/kprobes.c c = name; c 87 arch/powerpc/kernel/kprobes.c if (*c != '\0' && *c != '.') { c 91 arch/powerpc/kernel/kprobes.c ret = strscpy(dot_name + len, c, KSYM_NAME_LEN); c 694 arch/powerpc/kernel/prom_init.c #define isxdigit(c) (('0' <= (c) && (c) <= '9') \ c 695 arch/powerpc/kernel/prom_init.c || ('a' <= (c) && (c) <= 'f') \ c 696 arch/powerpc/kernel/prom_init.c || ('A' <= (c) && (c) <= 'F')) c 698 arch/powerpc/kernel/prom_init.c #define isdigit(c) ('0' <= (c) && (c) <= '9') c 699 arch/powerpc/kernel/prom_init.c #define islower(c) ('a' <= (c) && (c) <= 'z') c 700 
arch/powerpc/kernel/prom_init.c #define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c)) c 228 arch/powerpc/kernel/rtas-proc.c static void check_location_string(struct seq_file *m, const char *c); c 229 arch/powerpc/kernel/rtas-proc.c static void check_location(struct seq_file *m, const char *c); c 643 arch/powerpc/kernel/rtas-proc.c static void check_location(struct seq_file *m, const char *c) c 645 arch/powerpc/kernel/rtas-proc.c switch (c[0]) { c 647 arch/powerpc/kernel/rtas-proc.c seq_printf(m, "Planar #%c", c[1]); c 650 arch/powerpc/kernel/rtas-proc.c seq_printf(m, "CPU #%c", c[1]); c 653 arch/powerpc/kernel/rtas-proc.c seq_printf(m, "Fan #%c", c[1]); c 656 arch/powerpc/kernel/rtas-proc.c seq_printf(m, "Rack #%c", c[1]); c 659 arch/powerpc/kernel/rtas-proc.c seq_printf(m, "Voltage #%c", c[1]); c 662 arch/powerpc/kernel/rtas-proc.c seq_printf(m, "LCD #%c", c[1]); c 665 arch/powerpc/kernel/rtas-proc.c seq_printf(m, "- %c", c[1]); c 680 arch/powerpc/kernel/rtas-proc.c static void check_location_string(struct seq_file *m, const char *c) c 682 arch/powerpc/kernel/rtas-proc.c while (*c) { c 683 arch/powerpc/kernel/rtas-proc.c if (isalpha(*c) || *c == '.') c 684 arch/powerpc/kernel/rtas-proc.c check_location(m, c); c 685 arch/powerpc/kernel/rtas-proc.c else if (*c == '/' || *c == '-') c 687 arch/powerpc/kernel/rtas-proc.c c++; c 95 arch/powerpc/kernel/rtas.c static void call_rtas_display_status(unsigned char c) c 103 arch/powerpc/kernel/rtas.c rtas_call_unlocked(&rtas.args, 10, 1, 1, NULL, c); c 107 arch/powerpc/kernel/rtas.c static void call_rtas_display_status_delay(char c) c 112 arch/powerpc/kernel/rtas.c if (c == '\n') { c 125 arch/powerpc/kernel/rtas.c call_rtas_display_status(c); c 145 arch/powerpc/kernel/rtas.c static void udbg_rtascon_putc(char c) c 153 arch/powerpc/kernel/rtas.c if (c == '\n') c 158 arch/powerpc/kernel/rtas.c if (rtas_call(rtas_putchar_token, 1, 1, NULL, c) == 0) c 166 arch/powerpc/kernel/rtas.c int c; c 171 arch/powerpc/kernel/rtas.c if (rtas_call(rtas_getchar_token, 0, 2, &c)) c 174 arch/powerpc/kernel/rtas.c return c; c 179 arch/powerpc/kernel/rtas.c int c; c 181 arch/powerpc/kernel/rtas.c while ((c = udbg_rtascon_getc_poll()) == -1) c 184 arch/powerpc/kernel/rtas.c return c; c 446 arch/powerpc/kernel/smp.c int c; c 448 arch/powerpc/kernel/smp.c for_each_online_cpu(c) { c 449 arch/powerpc/kernel/smp.c if (c == raw_smp_processor_id()) c 451 arch/powerpc/kernel/smp.c do_message_pass(c, PPC_MSG_NMI_IPI); c 991 arch/powerpc/kernel/smp.c int rc, c; c 1041 arch/powerpc/kernel/smp.c for (c = 50000; c && !cpu_callin_map[cpu]; c--) c 1049 arch/powerpc/kernel/smp.c for (c = 5000; c && !cpu_callin_map[cpu]; c--) c 738 arch/powerpc/kernel/sysfs.c struct cpu *c = &per_cpu(cpu_devices, cpu); c 739 arch/powerpc/kernel/sysfs.c struct device *s = &c->dev; c 829 arch/powerpc/kernel/sysfs.c struct cpu *c = &per_cpu(cpu_devices, cpu); c 830 arch/powerpc/kernel/sysfs.c struct device *s = &c->dev; c 834 arch/powerpc/kernel/sysfs.c BUG_ON(!c->hotpluggable); c 1054 arch/powerpc/kernel/sysfs.c struct cpu *c = &per_cpu(cpu_devices, cpu); c 1064 arch/powerpc/kernel/sysfs.c c->hotpluggable = 1; c 1066 arch/powerpc/kernel/sysfs.c if (cpu_online(cpu) || c->hotpluggable) { c 1067 arch/powerpc/kernel/sysfs.c register_cpu(c, cpu); c 1069 arch/powerpc/kernel/sysfs.c device_create_file(&c->dev, &dev_attr_physical_id); c 1154 arch/powerpc/kernel/time.c unsigned long a, b, c, d; c 1160 arch/powerpc/kernel/time.c c = dividend_low >> 32; c 1166 arch/powerpc/kernel/time.c rb = ((u64) do_div(ra, 
divisor) << 32) + c; c 16 arch/powerpc/kernel/udbg.c void (*udbg_putc)(char c); c 83 arch/powerpc/kernel/udbg.c char c; c 86 arch/powerpc/kernel/udbg.c while ((c = *s++) != '\0') c 87 arch/powerpc/kernel/udbg.c udbg_putc(c); c 103 arch/powerpc/kernel/udbg.c char c; c 109 arch/powerpc/kernel/udbg.c while (((c = *s++) != '\0') && (remain-- > 0)) { c 110 arch/powerpc/kernel/udbg.c udbg_putc(c); c 55 arch/powerpc/kernel/udbg_16550.c static void udbg_uart_putc(char c) c 60 arch/powerpc/kernel/udbg_16550.c if (c == '\n') c 63 arch/powerpc/kernel/udbg_16550.c udbg_uart_out(UART_THR, c); c 259 arch/powerpc/kernel/vdso.c char name[MAX_SYMNAME], *c; c 266 arch/powerpc/kernel/vdso.c c = strchr(name, '@'); c 267 arch/powerpc/kernel/vdso.c if (c) c 268 arch/powerpc/kernel/vdso.c *c = 0; c 365 arch/powerpc/kernel/vdso.c char name[MAX_SYMNAME], *c; c 372 arch/powerpc/kernel/vdso.c c = strchr(name, '@'); c 373 arch/powerpc/kernel/vdso.c if (c) c 374 arch/powerpc/kernel/vdso.c *c = 0; c 17 arch/powerpc/kernel/vecemu.c extern void vmaddfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c); c 18 arch/powerpc/kernel/vecemu.c extern void vnmsubfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c); c 150 arch/powerpc/kernel/watchdog.c int c; c 172 arch/powerpc/kernel/watchdog.c for_each_cpu(c, &wd_smp_cpus_pending) { c 173 arch/powerpc/kernel/watchdog.c if (c == cpu) c 175 arch/powerpc/kernel/watchdog.c smp_send_nmi_ipi(c, wd_lockup_ipi, 1000000); c 357 arch/powerpc/kvm/book3s_32_mmu_host.c #define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff) c 769 arch/powerpc/kvm/book3s_hv_rm_xics.c static void icp_eoi(struct irq_chip *c, u32 hwirq, __be32 xirr, bool *again) c 782 arch/powerpc/kvm/book3s_hv_rm_xics.c rc = pnv_opal_pci_msi_eoi(c, hwirq); c 2197 arch/powerpc/kvm/powerpc.c struct h_cpu_char_result c; c 2203 arch/powerpc/kvm/powerpc.c rc = plpar_get_cpu_characteristics(&c); c 2205 arch/powerpc/kvm/powerpc.c cp->character = c.character; c 2206 arch/powerpc/kvm/powerpc.c cp->behaviour = c.behaviour; c 166 arch/powerpc/kvm/timing.c char c; c 172 arch/powerpc/kvm/timing.c if (get_user(c, user_buf)) { c 177 arch/powerpc/kvm/timing.c if (c == 'c') { c 283 arch/powerpc/lib/sstep.c int c; c 285 arch/powerpc/lib/sstep.c for (; nb > 0; nb -= c) { c 286 arch/powerpc/lib/sstep.c c = max_align(ea); c 287 arch/powerpc/lib/sstep.c if (c > nb) c 288 arch/powerpc/lib/sstep.c c = max_align(nb); c 289 arch/powerpc/lib/sstep.c switch (c) { c 312 arch/powerpc/lib/sstep.c dest += c; c 313 arch/powerpc/lib/sstep.c ea += c; c 388 arch/powerpc/lib/sstep.c int c; c 390 arch/powerpc/lib/sstep.c for (; nb > 0; nb -= c) { c 391 arch/powerpc/lib/sstep.c c = max_align(ea); c 392 arch/powerpc/lib/sstep.c if (c > nb) c 393 arch/powerpc/lib/sstep.c c = max_align(nb); c 394 arch/powerpc/lib/sstep.c switch (c) { c 417 arch/powerpc/lib/sstep.c dest += c; c 418 arch/powerpc/lib/sstep.c ea += c; c 229 arch/powerpc/lib/test_emulate_step.c } c; c 238 arch/powerpc/lib/test_emulate_step.c c.a = 123.45; c 239 arch/powerpc/lib/test_emulate_step.c cached_b = c.b; c 241 arch/powerpc/lib/test_emulate_step.c regs.gpr[3] = (unsigned long) &c.a; c 255 arch/powerpc/lib/test_emulate_step.c c.a = 678.91; c 260 arch/powerpc/lib/test_emulate_step.c if (stepped == 1 && c.b == cached_b) c 272 arch/powerpc/lib/test_emulate_step.c } c; c 281 arch/powerpc/lib/test_emulate_step.c c.a = 123456.78; c 282 arch/powerpc/lib/test_emulate_step.c cached_b = c.b; c 284 arch/powerpc/lib/test_emulate_step.c regs.gpr[3] = (unsigned long) &c.a; c 298 
arch/powerpc/lib/test_emulate_step.c c.a = 987654.32; c 303 arch/powerpc/lib/test_emulate_step.c if (stepped == 1 && c.b == cached_b) c 329 arch/powerpc/lib/test_emulate_step.c } c; c 338 arch/powerpc/lib/test_emulate_step.c cached_b[0] = c.b[0] = 923745; c 339 arch/powerpc/lib/test_emulate_step.c cached_b[1] = c.b[1] = 2139478; c 340 arch/powerpc/lib/test_emulate_step.c cached_b[2] = c.b[2] = 9012; c 341 arch/powerpc/lib/test_emulate_step.c cached_b[3] = c.b[3] = 982134; c 343 arch/powerpc/lib/test_emulate_step.c regs.gpr[3] = (unsigned long) &c.a; c 357 arch/powerpc/lib/test_emulate_step.c c.b[0] = 4987513; c 358 arch/powerpc/lib/test_emulate_step.c c.b[1] = 84313948; c 359 arch/powerpc/lib/test_emulate_step.c c.b[2] = 71; c 360 arch/powerpc/lib/test_emulate_step.c c.b[3] = 498532; c 365 arch/powerpc/lib/test_emulate_step.c if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] && c 366 arch/powerpc/lib/test_emulate_step.c cached_b[2] == c.b[2] && cached_b[3] == c.b[3]) c 386 arch/powerpc/lib/test_emulate_step.c } c; c 395 arch/powerpc/lib/test_emulate_step.c cached_b[0] = c.b[0] = 18233; c 396 arch/powerpc/lib/test_emulate_step.c cached_b[1] = c.b[1] = 34863571; c 397 arch/powerpc/lib/test_emulate_step.c cached_b[2] = c.b[2] = 834; c 398 arch/powerpc/lib/test_emulate_step.c cached_b[3] = c.b[3] = 6138911; c 400 arch/powerpc/lib/test_emulate_step.c regs.gpr[3] = (unsigned long) &c.a; c 418 arch/powerpc/lib/test_emulate_step.c c.b[0] = 21379463; c 419 arch/powerpc/lib/test_emulate_step.c c.b[1] = 87; c 420 arch/powerpc/lib/test_emulate_step.c c.b[2] = 374234; c 421 arch/powerpc/lib/test_emulate_step.c c.b[3] = 4; c 426 arch/powerpc/lib/test_emulate_step.c if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] && c 427 arch/powerpc/lib/test_emulate_step.c cached_b[2] == c.b[2] && cached_b[3] == c.b[3] && c 108 arch/powerpc/mm/ptdump/ptdump.c #define pt_dump_seq_putc(m, c) \ c 111 arch/powerpc/mm/ptdump/ptdump.c seq_putc(m, c); \ c 36 arch/powerpc/perf/hv-24x7.c #define DOMAIN(n, v, x, c) \ c 50 arch/powerpc/perf/hv-24x7.c #define DOMAIN(n, v, x, c) \ c 52 arch/powerpc/perf/hv-24x7.c return c; c 30 arch/powerpc/perf/req-gen/perf.h #define CAT3_(a, b, c) a ## b ## c c 31 arch/powerpc/perf/req-gen/perf.h #define CAT3(a, b, c) CAT3_(a, b, c) c 129 arch/powerpc/platforms/85xx/mpc85xx_cds.c u_char c; c 137 arch/powerpc/platforms/85xx/mpc85xx_cds.c pci_read_config_byte(dev, 0x40, &c); c 138 arch/powerpc/platforms/85xx/mpc85xx_cds.c c |= 0x03; /* IDE: Chip Enable Bits */ c 139 arch/powerpc/platforms/85xx/mpc85xx_cds.c pci_write_config_byte(dev, 0x40, c); c 1063 arch/powerpc/platforms/cell/spufs/sched.c int a, b, c; c 1067 arch/powerpc/platforms/cell/spufs/sched.c c = spu_avenrun[2] + (FIXED_1/200); c 1077 arch/powerpc/platforms/cell/spufs/sched.c LOAD_INT(c), LOAD_FRAC(c), c 81 arch/powerpc/platforms/embedded6xx/ls_uart.c void avr_uart_send(const char c) c 86 arch/powerpc/platforms/embedded6xx/ls_uart.c out_8(avr_addr + UART_TX, c); c 87 arch/powerpc/platforms/embedded6xx/ls_uart.c out_8(avr_addr + UART_TX, c); c 88 arch/powerpc/platforms/embedded6xx/ls_uart.c out_8(avr_addr + UART_TX, c); c 89 arch/powerpc/platforms/embedded6xx/ls_uart.c out_8(avr_addr + UART_TX, c); c 176 arch/powerpc/platforms/fsl_uli1575.c unsigned char c; c 183 arch/powerpc/platforms/fsl_uli1575.c pci_read_config_byte(dev, 0x83, &c); c 184 arch/powerpc/platforms/fsl_uli1575.c pci_write_config_byte(dev, 0x83, c|0x80); c 191 arch/powerpc/platforms/fsl_uli1575.c pci_write_config_byte(dev, 0x83, c); c 194 
arch/powerpc/platforms/fsl_uli1575.c pci_read_config_byte(dev, 0x84, &c); c 195 arch/powerpc/platforms/fsl_uli1575.c pci_write_config_byte(dev, 0x84, c & ~0x01); c 270 arch/powerpc/platforms/fsl_uli1575.c unsigned char c; c 275 arch/powerpc/platforms/fsl_uli1575.c pci_read_config_byte(dev, 0x83, &c); c 276 arch/powerpc/platforms/fsl_uli1575.c c |= 0x80; c 277 arch/powerpc/platforms/fsl_uli1575.c pci_write_config_byte(dev, 0x83, c); c 282 arch/powerpc/platforms/fsl_uli1575.c pci_read_config_byte(dev, 0x83, &c); c 283 arch/powerpc/platforms/fsl_uli1575.c c &= 0x7f; c 284 arch/powerpc/platforms/fsl_uli1575.c pci_write_config_byte(dev, 0x83, c); c 296 arch/powerpc/platforms/fsl_uli1575.c unsigned char c; c 301 arch/powerpc/platforms/fsl_uli1575.c pci_read_config_byte(dev, 0x4b, &c); c 302 arch/powerpc/platforms/fsl_uli1575.c c |= 0x10; c 303 arch/powerpc/platforms/fsl_uli1575.c pci_write_config_byte(dev, 0x4b, c); c 22 arch/powerpc/platforms/powermac/pfunc_core.c #define LOG_BLOB(t,b,c) c 30 arch/powerpc/platforms/powermac/udbg_adb.c static void (*udbg_adb_old_putc)(char c); c 143 arch/powerpc/platforms/powermac/udbg_adb.c static void udbg_adb_putc(char c) c 147 arch/powerpc/platforms/powermac/udbg_adb.c btext_drawchar(c); c 150 arch/powerpc/platforms/powermac/udbg_adb.c return udbg_adb_old_putc(c); c 23 arch/powerpc/platforms/powermac/udbg_scc.c static void udbg_scc_putc(char c) c 28 arch/powerpc/platforms/powermac/udbg_scc.c out_8(sccd, c); c 29 arch/powerpc/platforms/powermac/udbg_scc.c if (c == '\n') c 162 arch/powerpc/platforms/powermac/udbg_scc.c static void udbg_real_scc_putc(char c) c 166 arch/powerpc/platforms/powermac/udbg_scc.c real_writeb(c, sccd); c 167 arch/powerpc/platforms/powermac/udbg_scc.c if (c == '\n') c 116 arch/powerpc/platforms/powernv/opal-lpc.c static void opal_lpc_insb(unsigned long p, void *b, unsigned long c) c 120 arch/powerpc/platforms/powernv/opal-lpc.c while(c--) c 124 arch/powerpc/platforms/powernv/opal-lpc.c static void opal_lpc_insw(unsigned long p, void *b, unsigned long c) c 128 arch/powerpc/platforms/powernv/opal-lpc.c while(c--) c 132 arch/powerpc/platforms/powernv/opal-lpc.c static void opal_lpc_insl(unsigned long p, void *b, unsigned long c) c 136 arch/powerpc/platforms/powernv/opal-lpc.c while(c--) c 140 arch/powerpc/platforms/powernv/opal-lpc.c static void opal_lpc_outsb(unsigned long p, const void *b, unsigned long c) c 144 arch/powerpc/platforms/powernv/opal-lpc.c while(c--) c 148 arch/powerpc/platforms/powernv/opal-lpc.c static void opal_lpc_outsw(unsigned long p, const void *b, unsigned long c) c 152 arch/powerpc/platforms/powernv/opal-lpc.c while(c--) c 156 arch/powerpc/platforms/powernv/opal-lpc.c static void opal_lpc_outsl(unsigned long p, const void *b, unsigned long c) c 160 arch/powerpc/platforms/powernv/opal-lpc.c while(c--) c 374 arch/powerpc/platforms/powernv/smp.c int c; c 384 arch/powerpc/platforms/powernv/smp.c for_each_online_cpu(c) { c 385 arch/powerpc/platforms/powernv/smp.c if (c == smp_processor_id()) c 389 arch/powerpc/platforms/powernv/smp.c get_hard_smp_processor_id(c)); c 379 arch/powerpc/platforms/ps3/mm.c static void _dma_dump_chunk (const struct dma_chunk* c, const char* func, c 383 arch/powerpc/platforms/ps3/mm.c c->region->dev->bus_id, c->region->dev->dev_id); c 384 arch/powerpc/platforms/ps3/mm.c DBG("%s:%d: r.bus_addr %lxh\n", func, line, c->region->bus_addr); c 385 arch/powerpc/platforms/ps3/mm.c DBG("%s:%d: r.page_size %u\n", func, line, c->region->page_size); c 386 arch/powerpc/platforms/ps3/mm.c DBG("%s:%d: r.len 
%lxh\n", func, line, c->region->len); c 387 arch/powerpc/platforms/ps3/mm.c DBG("%s:%d: r.offset %lxh\n", func, line, c->region->offset); c 388 arch/powerpc/platforms/ps3/mm.c DBG("%s:%d: c.lpar_addr %lxh\n", func, line, c->lpar_addr); c 389 arch/powerpc/platforms/ps3/mm.c DBG("%s:%d: c.bus_addr %lxh\n", func, line, c->bus_addr); c 390 arch/powerpc/platforms/ps3/mm.c DBG("%s:%d: c.len %lxh\n", func, line, c->len); c 396 arch/powerpc/platforms/ps3/mm.c struct dma_chunk *c; c 401 arch/powerpc/platforms/ps3/mm.c list_for_each_entry(c, &r->chunk_list.head, link) { c 403 arch/powerpc/platforms/ps3/mm.c if (aligned_bus >= c->bus_addr && c 404 arch/powerpc/platforms/ps3/mm.c aligned_bus + aligned_len <= c->bus_addr + c->len) c 405 arch/powerpc/platforms/ps3/mm.c return c; c 408 arch/powerpc/platforms/ps3/mm.c if (aligned_bus + aligned_len <= c->bus_addr) c 412 arch/powerpc/platforms/ps3/mm.c if (aligned_bus >= c->bus_addr + c->len) c 416 arch/powerpc/platforms/ps3/mm.c dma_dump_chunk(c); c 425 arch/powerpc/platforms/ps3/mm.c struct dma_chunk *c; c 430 arch/powerpc/platforms/ps3/mm.c list_for_each_entry(c, &r->chunk_list.head, link) { c 432 arch/powerpc/platforms/ps3/mm.c if (c->lpar_addr <= aligned_lpar && c 433 arch/powerpc/platforms/ps3/mm.c aligned_lpar < c->lpar_addr + c->len) { c 434 arch/powerpc/platforms/ps3/mm.c if (aligned_lpar + aligned_len <= c->lpar_addr + c->len) c 435 arch/powerpc/platforms/ps3/mm.c return c; c 437 arch/powerpc/platforms/ps3/mm.c dma_dump_chunk(c); c 442 arch/powerpc/platforms/ps3/mm.c if (aligned_lpar + aligned_len <= c->lpar_addr) { c 446 arch/powerpc/platforms/ps3/mm.c if (c->lpar_addr + c->len <= aligned_lpar) { c 453 arch/powerpc/platforms/ps3/mm.c static int dma_sb_free_chunk(struct dma_chunk *c) c 457 arch/powerpc/platforms/ps3/mm.c if (c->bus_addr) { c 458 arch/powerpc/platforms/ps3/mm.c result = lv1_unmap_device_dma_region(c->region->dev->bus_id, c 459 arch/powerpc/platforms/ps3/mm.c c->region->dev->dev_id, c->bus_addr, c->len); c 463 arch/powerpc/platforms/ps3/mm.c kfree(c); c 467 arch/powerpc/platforms/ps3/mm.c static int dma_ioc0_free_chunk(struct dma_chunk *c) c 472 arch/powerpc/platforms/ps3/mm.c struct ps3_dma_region *r = c->region; c 475 arch/powerpc/platforms/ps3/mm.c for (iopage = 0; iopage < (c->len >> r->page_size); iopage++) { c 479 arch/powerpc/platforms/ps3/mm.c c->bus_addr + offset, c 480 arch/powerpc/platforms/ps3/mm.c c->lpar_addr + offset, c 484 arch/powerpc/platforms/ps3/mm.c c->bus_addr + offset, c 485 arch/powerpc/platforms/ps3/mm.c c->lpar_addr + offset, c 493 arch/powerpc/platforms/ps3/mm.c kfree(c); c 513 arch/powerpc/platforms/ps3/mm.c struct dma_chunk *c; c 515 arch/powerpc/platforms/ps3/mm.c c = kzalloc(sizeof(*c), GFP_ATOMIC); c 516 arch/powerpc/platforms/ps3/mm.c if (!c) { c 521 arch/powerpc/platforms/ps3/mm.c c->region = r; c 522 arch/powerpc/platforms/ps3/mm.c c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr); c 523 arch/powerpc/platforms/ps3/mm.c c->bus_addr = dma_sb_lpar_to_bus(r, c->lpar_addr); c 524 arch/powerpc/platforms/ps3/mm.c c->len = len; c 527 arch/powerpc/platforms/ps3/mm.c result = lv1_map_device_dma_region(c->region->dev->bus_id, c 528 arch/powerpc/platforms/ps3/mm.c c->region->dev->dev_id, c->lpar_addr, c 529 arch/powerpc/platforms/ps3/mm.c c->bus_addr, c->len, iopte_flag); c 536 arch/powerpc/platforms/ps3/mm.c list_add(&c->link, &r->chunk_list.head); c 538 arch/powerpc/platforms/ps3/mm.c *c_out = c; c 542 arch/powerpc/platforms/ps3/mm.c kfree(c); c 554 arch/powerpc/platforms/ps3/mm.c struct dma_chunk *c, *last; c 
560 arch/powerpc/platforms/ps3/mm.c c = kzalloc(sizeof(*c), GFP_ATOMIC); c 561 arch/powerpc/platforms/ps3/mm.c if (!c) { c 566 arch/powerpc/platforms/ps3/mm.c c->region = r; c 567 arch/powerpc/platforms/ps3/mm.c c->len = len; c 568 arch/powerpc/platforms/ps3/mm.c c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr); c 572 arch/powerpc/platforms/ps3/mm.c c->bus_addr = r->bus_addr; c 577 arch/powerpc/platforms/ps3/mm.c c->bus_addr = last->bus_addr + last->len; c 591 arch/powerpc/platforms/ps3/mm.c c->bus_addr + offset, c 592 arch/powerpc/platforms/ps3/mm.c c->lpar_addr + offset, c 601 arch/powerpc/platforms/ps3/mm.c iopage, c->bus_addr + offset, c->lpar_addr + offset, c 606 arch/powerpc/platforms/ps3/mm.c list_add(&c->link, &r->chunk_list.head); c 608 arch/powerpc/platforms/ps3/mm.c *c_out = c; c 615 arch/powerpc/platforms/ps3/mm.c c->bus_addr + offset, c 616 arch/powerpc/platforms/ps3/mm.c c->lpar_addr + offset, c 620 arch/powerpc/platforms/ps3/mm.c kfree(c); c 707 arch/powerpc/platforms/ps3/mm.c struct dma_chunk *c; c 718 arch/powerpc/platforms/ps3/mm.c list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) { c 719 arch/powerpc/platforms/ps3/mm.c list_del(&c->link); c 720 arch/powerpc/platforms/ps3/mm.c dma_sb_free_chunk(c); c 738 arch/powerpc/platforms/ps3/mm.c struct dma_chunk *c, *n; c 741 arch/powerpc/platforms/ps3/mm.c list_for_each_entry_safe(c, n, &r->chunk_list.head, link) { c 742 arch/powerpc/platforms/ps3/mm.c list_del(&c->link); c 743 arch/powerpc/platforms/ps3/mm.c dma_ioc0_free_chunk(c); c 775 arch/powerpc/platforms/ps3/mm.c struct dma_chunk *c; c 798 arch/powerpc/platforms/ps3/mm.c c = dma_find_chunk(r, *bus_addr, len); c 800 arch/powerpc/platforms/ps3/mm.c if (c) { c 802 arch/powerpc/platforms/ps3/mm.c dma_dump_chunk(c); c 803 arch/powerpc/platforms/ps3/mm.c c->usage_count++; c 808 arch/powerpc/platforms/ps3/mm.c result = dma_sb_map_pages(r, aligned_phys, aligned_len, &c, iopte_flag); c 818 arch/powerpc/platforms/ps3/mm.c c->usage_count = 1; c 830 arch/powerpc/platforms/ps3/mm.c struct dma_chunk *c; c 843 arch/powerpc/platforms/ps3/mm.c c = dma_find_chunk_lpar(r, ps3_mm_phys_to_lpar(phys_addr), len); c 845 arch/powerpc/platforms/ps3/mm.c if (c) { c 848 arch/powerpc/platforms/ps3/mm.c *bus_addr = c->bus_addr + phys_addr - aligned_phys; c 849 arch/powerpc/platforms/ps3/mm.c c->usage_count++; c 854 arch/powerpc/platforms/ps3/mm.c result = dma_ioc0_map_pages(r, aligned_phys, aligned_len, &c, c 864 arch/powerpc/platforms/ps3/mm.c *bus_addr = c->bus_addr + phys_addr - aligned_phys; c 867 arch/powerpc/platforms/ps3/mm.c c->usage_count = 1; c 886 arch/powerpc/platforms/ps3/mm.c struct dma_chunk *c; c 889 arch/powerpc/platforms/ps3/mm.c c = dma_find_chunk(r, bus_addr, len); c 891 arch/powerpc/platforms/ps3/mm.c if (!c) { c 907 arch/powerpc/platforms/ps3/mm.c c->usage_count--; c 909 arch/powerpc/platforms/ps3/mm.c if (!c->usage_count) { c 910 arch/powerpc/platforms/ps3/mm.c list_del(&c->link); c 911 arch/powerpc/platforms/ps3/mm.c dma_sb_free_chunk(c); c 922 arch/powerpc/platforms/ps3/mm.c struct dma_chunk *c; c 926 arch/powerpc/platforms/ps3/mm.c c = dma_find_chunk(r, bus_addr, len); c 928 arch/powerpc/platforms/ps3/mm.c if (!c) { c 945 arch/powerpc/platforms/ps3/mm.c c->usage_count--; c 947 arch/powerpc/platforms/ps3/mm.c if (!c->usage_count) { c 948 arch/powerpc/platforms/ps3/mm.c list_del(&c->link); c 949 arch/powerpc/platforms/ps3/mm.c dma_ioc0_free_chunk(c); c 56 arch/powerpc/sysdev/cpm_common.c static void udbg_putc_cpm(char c) c 58 arch/powerpc/sysdev/cpm_common.c if (c == '\n') 
c 64 arch/powerpc/sysdev/cpm_common.c out_8(cpm_udbg_txbuf, c); c 44 arch/powerpc/sysdev/udbg_memcons.c void memcons_putc(char c) c 48 arch/powerpc/sysdev/udbg_memcons.c *memcons.output_pos = c; c 59 arch/powerpc/sysdev/udbg_memcons.c char c; c 63 arch/powerpc/sysdev/udbg_memcons.c c = *memcons.input_pos; c 74 arch/powerpc/sysdev/udbg_memcons.c return c; c 82 arch/powerpc/sysdev/udbg_memcons.c int c; c 85 arch/powerpc/sysdev/udbg_memcons.c c = memcons_getc_poll(); c 86 arch/powerpc/sysdev/udbg_memcons.c if (c == -1) c 92 arch/powerpc/sysdev/udbg_memcons.c return c; c 83 arch/powerpc/xmon/nonstdio.c int xmon_putchar(int c) c 85 arch/powerpc/xmon/nonstdio.c char ch = c; c 87 arch/powerpc/xmon/nonstdio.c if (c == '\n') c 89 arch/powerpc/xmon/nonstdio.c return xmon_write(&ch, 1) == 1? c: -1; c 98 arch/powerpc/xmon/nonstdio.c int c; c 103 arch/powerpc/xmon/nonstdio.c c = xmon_readchar(); c 104 arch/powerpc/xmon/nonstdio.c if (c == -1 || c == 4) c 106 arch/powerpc/xmon/nonstdio.c if (c == '\r' || c == '\n') { c 111 arch/powerpc/xmon/nonstdio.c switch (c) { c 133 arch/powerpc/xmon/nonstdio.c xmon_putchar(c); c 134 arch/powerpc/xmon/nonstdio.c *lineptr++ = c; c 150 arch/powerpc/xmon/nonstdio.c int c; c 153 arch/powerpc/xmon/nonstdio.c c = xmon_getchar(); c 154 arch/powerpc/xmon/nonstdio.c if (c == -1) { c 159 arch/powerpc/xmon/nonstdio.c *p++ = c; c 160 arch/powerpc/xmon/nonstdio.c if (c == '\n') c 7 arch/powerpc/xmon/nonstdio.h extern int xmon_putchar(int c); c 118 arch/powerpc/xmon/spu-insns.h #define _A3(a,b,c) {3,a,b,c} c 119 arch/powerpc/xmon/spu-insns.h #define _A4(a,b,c,d) {4,a,b,c,d} c 1304 arch/powerpc/xmon/xmon.c #define FCS(fcs, c) (((fcs) >> 8) ^ fcstab[((fcs) ^ (c)) & 0xff]) c 2364 arch/powerpc/xmon/xmon.c int c; c 2366 arch/powerpc/xmon/xmon.c c = inchar(); c 2367 arch/powerpc/xmon/xmon.c switch( c ){ c 2368 arch/powerpc/xmon/xmon.c case 'n': c = '\n'; break; c 2369 arch/powerpc/xmon/xmon.c case 'r': c = '\r'; break; c 2370 arch/powerpc/xmon/xmon.c case 'b': c = '\b'; break; c 2371 arch/powerpc/xmon/xmon.c case 't': c = '\t'; break; c 2373 arch/powerpc/xmon/xmon.c return c; c 2400 arch/powerpc/xmon/xmon.c int c; c 2402 arch/powerpc/xmon/xmon.c c = inchar(); c 2403 arch/powerpc/xmon/xmon.c if (c == 'c') c 2564 arch/powerpc/xmon/xmon.c int c; c 2566 arch/powerpc/xmon/xmon.c c = inchar(); c 2567 arch/powerpc/xmon/xmon.c if (c == 'a') { c 2572 arch/powerpc/xmon/xmon.c termch = c; /* Put c back, it wasn't 'a' */ c 2650 arch/powerpc/xmon/xmon.c int c; c 2657 arch/powerpc/xmon/xmon.c c = inchar(); c 2658 arch/powerpc/xmon/xmon.c if (c == 'a') { c 2661 arch/powerpc/xmon/xmon.c } else if (c == 'i') { c 2669 arch/powerpc/xmon/xmon.c termch = c; /* Put c back, it wasn't 'a' */ c 2714 arch/powerpc/xmon/xmon.c int c; c 2716 arch/powerpc/xmon/xmon.c c = inchar(); c 2719 arch/powerpc/xmon/xmon.c if (c == 'p') { c 2727 arch/powerpc/xmon/xmon.c if (c == 'x') { c 2735 arch/powerpc/xmon/xmon.c if (c == 't') { c 2740 arch/powerpc/xmon/xmon.c if (c == '\n') c 2741 arch/powerpc/xmon/xmon.c termch = c; c 2746 arch/powerpc/xmon/xmon.c if (c == 'i') { c 2754 arch/powerpc/xmon/xmon.c } else if (c == 'l') { c 2756 arch/powerpc/xmon/xmon.c } else if (c == 'o') { c 2758 arch/powerpc/xmon/xmon.c } else if (c == 'v') { c 2761 arch/powerpc/xmon/xmon.c } else if (c == 'r') { c 2775 arch/powerpc/xmon/xmon.c switch (c) { c 2781 arch/powerpc/xmon/xmon.c dump_by_size(adrs, ndump, c - '0'); c 2782 arch/powerpc/xmon/xmon.c last[1] = c; c 2797 arch/powerpc/xmon/xmon.c long n, m, c, r, nr; c 2822 arch/powerpc/xmon/xmon.c c 
= temp[m]; c 2823 arch/powerpc/xmon/xmon.c putchar(' ' <= c && c <= '~'? c: '.'); c 3284 arch/powerpc/xmon/xmon.c int c; c 3287 arch/powerpc/xmon/xmon.c c = termch; c 3290 arch/powerpc/xmon/xmon.c c = inchar(); c 3291 arch/powerpc/xmon/xmon.c while( c == ' ' || c == '\t' ) c 3292 arch/powerpc/xmon/xmon.c c = inchar(); c 3293 arch/powerpc/xmon/xmon.c return c; c 3314 arch/powerpc/xmon/xmon.c int c, d; c 3317 arch/powerpc/xmon/xmon.c c = skipbl(); c 3318 arch/powerpc/xmon/xmon.c if (c == '%') { c 3324 arch/powerpc/xmon/xmon.c c = inchar(); c 3325 arch/powerpc/xmon/xmon.c if (!isalnum(c)) { c 3326 arch/powerpc/xmon/xmon.c termch = c; c 3329 arch/powerpc/xmon/xmon.c regname[i] = c; c 3347 arch/powerpc/xmon/xmon.c if (c == '0') { c 3348 arch/powerpc/xmon/xmon.c c = inchar(); c 3349 arch/powerpc/xmon/xmon.c if (c == 'x') { c 3350 arch/powerpc/xmon/xmon.c c = inchar(); c 3352 arch/powerpc/xmon/xmon.c d = hexdigit(c); c 3354 arch/powerpc/xmon/xmon.c termch = c; c 3359 arch/powerpc/xmon/xmon.c } else if (c == '$') { c 3362 arch/powerpc/xmon/xmon.c c = inchar(); c 3363 arch/powerpc/xmon/xmon.c if (isspace(c) || c == '\0') { c 3364 arch/powerpc/xmon/xmon.c termch = c; c 3367 arch/powerpc/xmon/xmon.c tmpstr[i] = c; c 3385 arch/powerpc/xmon/xmon.c d = hexdigit(c); c 3387 arch/powerpc/xmon/xmon.c termch = c; c 3393 arch/powerpc/xmon/xmon.c c = inchar(); c 3394 arch/powerpc/xmon/xmon.c d = hexdigit(c); c 3396 arch/powerpc/xmon/xmon.c termch = c; c 3404 arch/powerpc/xmon/xmon.c int c; c 3406 arch/powerpc/xmon/xmon.c c = termch; c 3408 arch/powerpc/xmon/xmon.c while( c != '\n' ) c 3409 arch/powerpc/xmon/xmon.c c = inchar(); c 3412 arch/powerpc/xmon/xmon.c static int hexdigit(int c) c 3414 arch/powerpc/xmon/xmon.c if( '0' <= c && c <= '9' ) c 3415 arch/powerpc/xmon/xmon.c return c - '0'; c 3416 arch/powerpc/xmon/xmon.c if( 'A' <= c && c <= 'F' ) c 3417 arch/powerpc/xmon/xmon.c return c - ('A' - 10); c 3418 arch/powerpc/xmon/xmon.c if( 'a' <= c && c <= 'f' ) c 3419 arch/powerpc/xmon/xmon.c return c - ('a' - 10); c 3426 arch/powerpc/xmon/xmon.c int c; c 3428 arch/powerpc/xmon/xmon.c c = skipbl(); c 3431 arch/powerpc/xmon/xmon.c *s++ = c; c 3434 arch/powerpc/xmon/xmon.c c = inchar(); c 3435 arch/powerpc/xmon/xmon.c } while( c != ' ' && c != '\t' && c != '\n' ); c 3436 arch/powerpc/xmon/xmon.c termch = c; c 214 arch/riscv/include/asm/atomic.h : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) c 235 arch/riscv/include/asm/atomic.h : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) c 327 arch/riscv/include/asm/atomic.h : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) c 349 arch/riscv/include/asm/atomic.h : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) c 101 arch/riscv/include/asm/io.h #define readb_cpu(c) ({ u8 __r = __raw_readb(c); __r; }) c 102 arch/riscv/include/asm/io.h #define readw_cpu(c) ({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; }) c 103 arch/riscv/include/asm/io.h #define readl_cpu(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; }) c 105 arch/riscv/include/asm/io.h #define writeb_cpu(v,c) ((void)__raw_writeb((v),(c))) c 106 arch/riscv/include/asm/io.h #define writew_cpu(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c))) c 107 arch/riscv/include/asm/io.h #define writel_cpu(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c))) c 110 arch/riscv/include/asm/io.h #define readq_cpu(c) ({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; }) c 111 arch/riscv/include/asm/io.h #define writeq_cpu(v,c) ((void)__raw_writeq((__force 
u64)cpu_to_le64(v),(c))) c 128 arch/riscv/include/asm/io.h #define readb_relaxed(c) ({ u8 __v; __io_rbr(); __v = readb_cpu(c); __io_rar(); __v; }) c 129 arch/riscv/include/asm/io.h #define readw_relaxed(c) ({ u16 __v; __io_rbr(); __v = readw_cpu(c); __io_rar(); __v; }) c 130 arch/riscv/include/asm/io.h #define readl_relaxed(c) ({ u32 __v; __io_rbr(); __v = readl_cpu(c); __io_rar(); __v; }) c 132 arch/riscv/include/asm/io.h #define writeb_relaxed(v,c) ({ __io_rbw(); writeb_cpu((v),(c)); __io_raw(); }) c 133 arch/riscv/include/asm/io.h #define writew_relaxed(v,c) ({ __io_rbw(); writew_cpu((v),(c)); __io_raw(); }) c 134 arch/riscv/include/asm/io.h #define writel_relaxed(v,c) ({ __io_rbw(); writel_cpu((v),(c)); __io_raw(); }) c 137 arch/riscv/include/asm/io.h #define readq_relaxed(c) ({ u64 __v; __io_rbr(); __v = readq_cpu(c); __io_rar(); __v; }) c 138 arch/riscv/include/asm/io.h #define writeq_relaxed(v,c) ({ __io_rbw(); writeq_cpu((v),(c)); __io_raw(); }) c 152 arch/riscv/include/asm/io.h #define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; }) c 153 arch/riscv/include/asm/io.h #define readw(c) ({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; }) c 154 arch/riscv/include/asm/io.h #define readl(c) ({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(__v); __v; }) c 156 arch/riscv/include/asm/io.h #define writeb(v,c) ({ __io_bw(); writeb_cpu((v),(c)); __io_aw(); }) c 157 arch/riscv/include/asm/io.h #define writew(v,c) ({ __io_bw(); writew_cpu((v),(c)); __io_aw(); }) c 158 arch/riscv/include/asm/io.h #define writel(v,c) ({ __io_bw(); writel_cpu((v),(c)); __io_aw(); }) c 161 arch/riscv/include/asm/io.h #define readq(c) ({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(__v); __v; }) c 162 arch/riscv/include/asm/io.h #define writeq(v,c) ({ __io_bw(); writeq_cpu((v),(c)); __io_aw(); }) c 193 arch/riscv/include/asm/io.h #define inb(c) ({ u8 __v; __io_pbr(); __v = readb_cpu((void*)(PCI_IOBASE + (c))); __io_par(__v); __v; }) c 194 arch/riscv/include/asm/io.h #define inw(c) ({ u16 __v; __io_pbr(); __v = readw_cpu((void*)(PCI_IOBASE + (c))); __io_par(__v); __v; }) c 195 arch/riscv/include/asm/io.h #define inl(c) ({ u32 __v; __io_pbr(); __v = readl_cpu((void*)(PCI_IOBASE + (c))); __io_par(__v); __v; }) c 197 arch/riscv/include/asm/io.h #define outb(v,c) ({ __io_pbw(); writeb_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); }) c 198 arch/riscv/include/asm/io.h #define outw(v,c) ({ __io_pbw(); writew_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); }) c 199 arch/riscv/include/asm/io.h #define outl(v,c) ({ __io_pbw(); writel_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); }) c 202 arch/riscv/include/asm/io.h #define inq(c) ({ u64 __v; __io_pbr(); __v = readq_cpu((void*)(c)); __io_par(__v); __v; }) c 203 arch/riscv/include/asm/io.h #define outq(v,c) ({ __io_pbw(); writeq_cpu((v),(void*)(c)); __io_paw(); }) c 21 arch/riscv/include/asm/word-at-a-time.h unsigned long *bits, const struct word_at_a_time *c) c 23 arch/riscv/include/asm/word-at-a-time.h unsigned long mask = ((val - c->one_bits) & ~val) & c->high_bits; c 29 arch/riscv/include/asm/word-at-a-time.h unsigned long bits, const struct word_at_a_time *c) c 22 arch/s390/include/asm/atomic.h int c; c 26 arch/s390/include/asm/atomic.h : "=d" (c) : "Q" (v->counter)); c 27 arch/s390/include/asm/atomic.h return c; c 89 arch/s390/include/asm/atomic.h s64 c; c 93 arch/s390/include/asm/atomic.h : "=d" (c) : "Q" (v->counter)); c 94 arch/s390/include/asm/atomic.h return c; c 49 arch/s390/include/asm/chpid.h #define chp_id_for_each(c) 
\ c 50 arch/s390/include/asm/chpid.h for (chp_id_init(c); chp_id_is_valid(c); chp_id_next(c)) c 56 arch/s390/include/asm/kvm_host.h __u8 c : 1; c 65 arch/s390/include/asm/kvm_host.h __u8 c : 1; c 776 arch/s390/include/asm/kvm_host.h u8 c : 1; c 790 arch/s390/include/asm/kvm_host.h u8 c : 1; c 33 arch/s390/include/asm/stp.h unsigned int c : 1; c 23 arch/s390/include/asm/string.h void *memset(void *s, int c, size_t n); c 49 arch/s390/include/asm/string.h char *strrchr(const char *s, int c); c 63 arch/s390/include/asm/string.h extern void *__memset(void *s, int c, size_t n); c 73 arch/s390/include/asm/string.h #define memset(s, c, n) __memset(s, c, n) c 108 arch/s390/include/asm/string.h static inline void *memchr(const void * s, int c, size_t n) c 110 arch/s390/include/asm/string.h register int r0 asm("0") = (char) c; c 125 arch/s390/include/asm/string.h static inline void *memscan(void *s, int c, size_t n) c 127 arch/s390/include/asm/string.h register int r0 asm("0") = (char) c; c 200 arch/s390/include/asm/string.h void *memchr(const void * s, int c, size_t n); c 201 arch/s390/include/asm/string.h void *memscan(void *s, int c, size_t n); c 125 arch/s390/include/uapi/asm/chsc.h int c; c 16 arch/s390/include/uapi/asm/clp.h unsigned int c : 1; c 38 arch/s390/include/uapi/asm/runtime_instr.h __u32 c : 1; c 1425 arch/s390/kernel/debug.c unsigned char c = in_buf[i]; c 1427 arch/s390/kernel/debug.c if (isascii(c) && isprint(c)) c 1428 arch/s390/kernel/debug.c rc += sprintf(out_buf + rc, "%c", c); c 546 arch/s390/kernel/perf_cpum_cf_events.c struct attribute **c) c 555 arch/s390/kernel/perf_cpum_cf_events.c for (i = 0; c[i]; i++) c 567 arch/s390/kernel/perf_cpum_cf_events.c for (i = 0; c[i]; i++) c 568 arch/s390/kernel/perf_cpum_cf_events.c new[j++] = c[i]; c 48 arch/s390/kernel/processor.c struct cpu_info *c; c 51 arch/s390/kernel/processor.c c = this_cpu_ptr(&cpu_info); c 52 arch/s390/kernel/processor.c c->cpu_mhz_dynamic = mhz >> 32; c 53 arch/s390/kernel/processor.c c->cpu_mhz_static = mhz & 0xffffffff; c 156 arch/s390/kernel/processor.c struct cpu_info *c = per_cpu_ptr(&cpu_info, n); c 158 arch/s390/kernel/processor.c seq_printf(m, "cpu MHz dynamic : %d\n", c->cpu_mhz_dynamic); c 159 arch/s390/kernel/processor.c seq_printf(m, "cpu MHz static : %d\n", c->cpu_mhz_static); c 1147 arch/s390/kernel/smp.c struct cpu *c; c 1150 arch/s390/kernel/smp.c c = kzalloc(sizeof(*c), GFP_KERNEL); c 1151 arch/s390/kernel/smp.c if (!c) c 1153 arch/s390/kernel/smp.c per_cpu(cpu_device, cpu) = c; c 1154 arch/s390/kernel/smp.c s = &c->dev; c 1155 arch/s390/kernel/smp.c c->hotpluggable = 1; c 1156 arch/s390/kernel/smp.c rc = register_cpu(c, cpu); c 1162 arch/s390/kernel/smp.c rc = topology_cpu_init(c); c 1170 arch/s390/kernel/smp.c unregister_cpu(c); c 662 arch/s390/kernel/time.c if (rc || stp_info.c == 0) c 44 arch/s390/kvm/interrupt.c int c, scn; c 56 arch/s390/kvm/interrupt.c c = sigp_ctrl.c; c 63 arch/s390/kvm/interrupt.c c = sigp_ctrl.c; c 71 arch/s390/kvm/interrupt.c return c; c 87 arch/s390/kvm/interrupt.c new_val.c = 1; c 88 arch/s390/kvm/interrupt.c old_val.c = 0; c 99 arch/s390/kvm/interrupt.c new_val.c = 1; c 100 arch/s390/kvm/interrupt.c old_val.c = 0; c 2639 arch/s390/kvm/kvm-s390.c d->sigp_ctrl.c = s->sigp_ctrl.c; c 247 arch/s390/lib/string.c char *strrchr(const char *s, int c) c 253 arch/s390/lib/string.c if (s[len] == (char) c) c 316 arch/s390/lib/string.c void *memchr(const void *s, int c, size_t n) c 318 arch/s390/lib/string.c register int r0 asm("0") = (char) c; c 361 arch/s390/lib/string.c void 
*memscan(void *s, int c, size_t n) c 363 arch/s390/lib/string.c register int r0 asm("0") = (char) c; c 638 arch/s390/pci/pci_clp.c return req.c ? clp_immediate_command(&req) : clp_normal_command(&req); c 79 arch/sh/boot/compressed/misc.c void* memset(void* s, int c, size_t n) c 84 arch/sh/boot/compressed/misc.c for (i=0;i<n;i++) ss[i] = c; c 40 arch/sh/include/asm/io.h #define readb_relaxed(c) ({ u8 __v = ioswabb(__raw_readb(c)); __v; }) c 41 arch/sh/include/asm/io.h #define readw_relaxed(c) ({ u16 __v = ioswabw(__raw_readw(c)); __v; }) c 42 arch/sh/include/asm/io.h #define readl_relaxed(c) ({ u32 __v = ioswabl(__raw_readl(c)); __v; }) c 43 arch/sh/include/asm/io.h #define readq_relaxed(c) ({ u64 __v = ioswabq(__raw_readq(c)); __v; }) c 45 arch/sh/include/asm/io.h #define writeb_relaxed(v,c) ((void)__raw_writeb((__force u8)ioswabb(v),c)) c 46 arch/sh/include/asm/io.h #define writew_relaxed(v,c) ((void)__raw_writew((__force u16)ioswabw(v),c)) c 47 arch/sh/include/asm/io.h #define writel_relaxed(v,c) ((void)__raw_writel((__force u32)ioswabl(v),c)) c 48 arch/sh/include/asm/io.h #define writeq_relaxed(v,c) ((void)__raw_writeq((__force u64)ioswabq(v),c)) c 56 arch/sh/include/asm/io_noioport.h #define insb(a, b, c) BUG() c 57 arch/sh/include/asm/io_noioport.h #define insw(a, b, c) BUG() c 58 arch/sh/include/asm/io_noioport.h #define insl(a, b, c) BUG() c 60 arch/sh/include/asm/io_noioport.h #define outsb(a, b, c) BUG() c 61 arch/sh/include/asm/io_noioport.h #define outsw(a, b, c) BUG() c 62 arch/sh/include/asm/io_noioport.h #define outsl(a, b, c) BUG() c 130 arch/sh/include/asm/processor.h const char *get_cpu_subtype(struct sh_cpuinfo *c); c 27 arch/sh/include/asm/word-at-a-time.h static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) c 29 arch/sh/include/asm/word-at-a-time.h unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; c 34 arch/sh/include/asm/word-at-a-time.h static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c) c 35 arch/sh/kernel/cpu/proc.c const char *get_cpu_subtype(struct sh_cpuinfo *c) c 37 arch/sh/kernel/cpu/proc.c return cpu_name[c->type]; c 48 arch/sh/kernel/cpu/proc.c static void show_cpuflags(struct seq_file *m, struct sh_cpuinfo *c) c 54 arch/sh/kernel/cpu/proc.c if (!c->flags) { c 60 arch/sh/kernel/cpu/proc.c if ((c->flags & (1 << i))) c 82 arch/sh/kernel/cpu/proc.c struct sh_cpuinfo *c = v; c 83 arch/sh/kernel/cpu/proc.c unsigned int cpu = c - cpu_data; c 95 arch/sh/kernel/cpu/proc.c seq_printf(m, "cpu type\t: %s\n", get_cpu_subtype(c)); c 96 arch/sh/kernel/cpu/proc.c if (c->cut_major == -1) c 98 arch/sh/kernel/cpu/proc.c else if (c->cut_minor == -1) c 99 arch/sh/kernel/cpu/proc.c seq_printf(m, "cut\t\t: %d.x\n", c->cut_major); c 101 arch/sh/kernel/cpu/proc.c seq_printf(m, "cut\t\t: %d.%d\n", c->cut_major, c->cut_minor); c 103 arch/sh/kernel/cpu/proc.c show_cpuflags(m, c); c 112 arch/sh/kernel/cpu/proc.c if (c->icache.flags & SH_CACHE_COMBINED) { c 114 arch/sh/kernel/cpu/proc.c show_cacheinfo(m, "cache", c->icache); c 117 arch/sh/kernel/cpu/proc.c show_cacheinfo(m, "icache", c->icache); c 118 arch/sh/kernel/cpu/proc.c show_cacheinfo(m, "dcache", c->dcache); c 122 arch/sh/kernel/cpu/proc.c if (c->flags & CPU_HAS_L2_CACHE) c 123 arch/sh/kernel/cpu/proc.c show_cacheinfo(m, "scache", c->scache); c 125 arch/sh/kernel/cpu/proc.c seq_printf(m, "address sizes\t: %u bits physical\n", c->phys_bits); c 128 arch/sh/kernel/cpu/proc.c c->loops_per_jiffy/(500000/HZ), c 
129 arch/sh/kernel/cpu/proc.c (c->loops_per_jiffy/(5000/HZ)) % 100); c 103 arch/sh/kernel/io.c void memset_io(volatile void __iomem *dst, int c, unsigned long count) c 107 arch/sh/kernel/io.c writeb(c, dst); c 49 arch/sh/kernel/smp.c struct sh_cpuinfo *c = cpu_data + cpu; c 51 arch/sh/kernel/smp.c memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo)); c 53 arch/sh/kernel/smp.c c->loops_per_jiffy = loops_per_jiffy; c 55 arch/sh/kernel/topology.c struct cpu *c = &per_cpu(cpu_devices, i); c 57 arch/sh/kernel/topology.c c->hotpluggable = 1; c 59 arch/sh/kernel/topology.c ret = register_cpu(c, i); c 247 arch/sh/mm/cache.c static void compute_alias(struct cache_info *c) c 250 arch/sh/mm/cache.c c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1); c 252 arch/sh/mm/cache.c c->alias_mask = 0; c 254 arch/sh/mm/cache.c c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0; c 18 arch/sparc/crypto/opcodes.h #define CRC32C(a,b,c) \ c 19 arch/sparc/crypto/opcodes.h .word (F3F(2,0x36,0x147)|RS1(a)|RS2(b)|RD(c)); c 30 arch/sparc/crypto/opcodes.h #define AES_EROUND01(a,b,c,d) \ c 31 arch/sparc/crypto/opcodes.h .word (F3F(2, 0x19, 0)|RS1(a)|RS2(b)|RS3(c)|RD(d)); c 32 arch/sparc/crypto/opcodes.h #define AES_EROUND23(a,b,c,d) \ c 33 arch/sparc/crypto/opcodes.h .word (F3F(2, 0x19, 1)|RS1(a)|RS2(b)|RS3(c)|RD(d)); c 34 arch/sparc/crypto/opcodes.h #define AES_DROUND01(a,b,c,d) \ c 35 arch/sparc/crypto/opcodes.h .word (F3F(2, 0x19, 2)|RS1(a)|RS2(b)|RS3(c)|RD(d)); c 36 arch/sparc/crypto/opcodes.h #define AES_DROUND23(a,b,c,d) \ c 37 arch/sparc/crypto/opcodes.h .word (F3F(2, 0x19, 3)|RS1(a)|RS2(b)|RS3(c)|RD(d)); c 38 arch/sparc/crypto/opcodes.h #define AES_EROUND01_L(a,b,c,d) \ c 39 arch/sparc/crypto/opcodes.h .word (F3F(2, 0x19, 4)|RS1(a)|RS2(b)|RS3(c)|RD(d)); c 40 arch/sparc/crypto/opcodes.h #define AES_EROUND23_L(a,b,c,d) \ c 41 arch/sparc/crypto/opcodes.h .word (F3F(2, 0x19, 5)|RS1(a)|RS2(b)|RS3(c)|RD(d)); c 42 arch/sparc/crypto/opcodes.h #define AES_DROUND01_L(a,b,c,d) \ c 43 arch/sparc/crypto/opcodes.h .word (F3F(2, 0x19, 6)|RS1(a)|RS2(b)|RS3(c)|RD(d)); c 44 arch/sparc/crypto/opcodes.h #define AES_DROUND23_L(a,b,c,d) \ c 45 arch/sparc/crypto/opcodes.h .word (F3F(2, 0x19, 7)|RS1(a)|RS2(b)|RS3(c)|RD(d)); c 46 arch/sparc/crypto/opcodes.h #define AES_KEXPAND1(a,b,c,d) \ c 47 arch/sparc/crypto/opcodes.h .word (F3F(2, 0x19, 8)|RS1(a)|RS2(b)|IMM5_9(c)|RD(d)); c 48 arch/sparc/crypto/opcodes.h #define AES_KEXPAND0(a,b,c) \ c 49 arch/sparc/crypto/opcodes.h .word (F3F(2, 0x36, 0x130)|RS1(a)|RS2(b)|RD(c)); c 50 arch/sparc/crypto/opcodes.h #define AES_KEXPAND2(a,b,c) \ c 51 arch/sparc/crypto/opcodes.h .word (F3F(2, 0x36, 0x131)|RS1(a)|RS2(b)|RD(c)); c 57 arch/sparc/crypto/opcodes.h #define DES_KEXPAND(a,b,c) \ c 58 arch/sparc/crypto/opcodes.h .word (F3F(2, 0x36, 0x136)|RS1(a)|IMM5_0(b)|RD(c)); c 59 arch/sparc/crypto/opcodes.h #define DES_ROUND(a,b,c,d) \ c 60 arch/sparc/crypto/opcodes.h .word (F3F(2, 0x19, 0x009)|RS1(a)|RS2(b)|RS3(c)|RD(d)); c 62 arch/sparc/crypto/opcodes.h #define CAMELLIA_F(a,b,c,d) \ c 63 arch/sparc/crypto/opcodes.h .word (F3F(2, 0x19, 0x00c)|RS1(a)|RS2(b)|RS3(c)|RD(d)); c 64 arch/sparc/crypto/opcodes.h #define CAMELLIA_FL(a,b,c) \ c 65 arch/sparc/crypto/opcodes.h .word (F3F(2, 0x36, 0x13c)|RS1(a)|RS2(b)|RD(c)); c 66 arch/sparc/crypto/opcodes.h #define CAMELLIA_FLI(a,b,c) \ c 67 arch/sparc/crypto/opcodes.h .word (F3F(2, 0x36, 0x13d)|RS1(a)|RS2(b)|RD(c)); c 23 arch/sparc/include/asm/asm-prototypes.h void *memset(void *s, int c, size_t n); c 1202 
arch/sparc/include/asm/hypervisor.h long sun4v_con_putchar(long c); c 10 arch/sparc/include/asm/io_32.h #define memset_io(d,c,sz) _memset_io(d,c,sz) c 17 arch/sparc/include/asm/io_32.h int c, __kernel_size_t n) c 22 arch/sparc/include/asm/io_32.h writeb(c, d); c 88 arch/sparc/include/asm/io_32.h static inline void sbus_memset_io(volatile void __iomem *__dst, int c, c 92 arch/sparc/include/asm/io_32.h sbus_writeb(c, __dst); c 330 arch/sparc/include/asm/io_64.h static inline void sbus_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n) c 333 arch/sparc/include/asm/io_64.h sbus_writeb(c, dst); c 338 arch/sparc/include/asm/io_64.h static inline void memset_io(volatile void __iomem *dst, int c, __kernel_size_t n) c 343 arch/sparc/include/asm/io_64.h writeb(c, d); c 60 arch/sparc/include/asm/leon.h #define ASI_LEON3_SYSCTRL_CFG_SSIZE(c) (1 << ((c >> 20) & 0xf)) c 89 arch/sparc/include/asm/leon_amba.h #define LEON3_GPTIMER_CONFIG_NRTIMERS(c) ((c)->config & 0x7) c 18 arch/sparc/include/asm/string.h #define memset(s, c, count) __builtin_memset(s, c, count) c 22 arch/sparc/kernel/btext.c static void draw_byte(unsigned char c, long locX, long locY); c 141 arch/sparc/kernel/btext.c static void btext_drawchar(char c) c 147 arch/sparc/kernel/btext.c switch (c) { c 164 arch/sparc/kernel/btext.c draw_byte(c, g_loc_X++, g_loc_Y); c 188 arch/sparc/kernel/btext.c static void btext_drawtext(const char *c, unsigned int len) c 191 arch/sparc/kernel/btext.c btext_drawchar(*c++); c 194 arch/sparc/kernel/btext.c static void draw_byte(unsigned char c, long locX, long locY) c 197 arch/sparc/kernel/btext.c unsigned char *font = &vga_font[((unsigned int)c) * 16]; c 817 arch/sparc/kernel/mdesc.c static void fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_handle *hp, u64 mp) c 830 arch/sparc/kernel/mdesc.c c->icache_size = *size; c 831 arch/sparc/kernel/mdesc.c c->icache_line_size = *line_size; c 833 arch/sparc/kernel/mdesc.c c->dcache_size = *size; c 834 arch/sparc/kernel/mdesc.c c->dcache_line_size = *line_size; c 839 arch/sparc/kernel/mdesc.c c->ecache_size = *size; c 840 arch/sparc/kernel/mdesc.c c->ecache_line_size = *line_size; c 855 arch/sparc/kernel/mdesc.c fill_in_one_cache(c, hp, target); c 1184 arch/sparc/kernel/mdesc.c cpuinfo_sparc *c; c 1197 arch/sparc/kernel/mdesc.c c = &cpu_data(cpuid); c 1198 arch/sparc/kernel/mdesc.c c->clock_tick = *cfreq; c 1209 arch/sparc/kernel/mdesc.c fill_in_one_cache(c, hp, t); c 1219 arch/sparc/kernel/mdesc.c fill_in_one_cache(c, hp, n); c 1223 arch/sparc/kernel/mdesc.c c->core_id = 0; c 1224 arch/sparc/kernel/mdesc.c c->proc_id = -1; c 128 arch/sparc/kernel/setup_32.c static void __init process_switch(char c) c 130 arch/sparc/kernel/setup_32.c switch (c) { c 144 arch/sparc/kernel/setup_32.c printk("Unknown boot switch (-%c)\n", c); c 104 arch/sparc/kernel/setup_64.c static void __init process_switch(char c) c 106 arch/sparc/kernel/setup_64.c switch (c) { c 129 arch/sparc/kernel/setup_64.c printk("Unknown boot switch (-%c)\n", c); c 1377 arch/sparc/kernel/smp_64.c cpuinfo_sparc *c; c 1388 arch/sparc/kernel/smp_64.c c = &cpu_data(cpu); c 1390 arch/sparc/kernel/smp_64.c c->core_id = 0; c 1391 arch/sparc/kernel/smp_64.c c->proc_id = -1; c 176 arch/sparc/kernel/sysfs.c cpuinfo_sparc *c = &cpu_data(dev->id); \ c 177 arch/sparc/kernel/sysfs.c return sprintf(buf, "%lu\n", c->MEMBER); \ c 184 arch/sparc/kernel/sysfs.c cpuinfo_sparc *c = &cpu_data(dev->id); \ c 185 arch/sparc/kernel/sysfs.c return sprintf(buf, "%u\n", c->MEMBER); \ c 210 arch/sparc/kernel/sysfs.c struct cpu 
*c = &per_cpu(cpu_devices, cpu); c 211 arch/sparc/kernel/sysfs.c struct device *s = &c->dev; c 224 arch/sparc/kernel/sysfs.c struct cpu *c = &per_cpu(cpu_devices, cpu); c 225 arch/sparc/kernel/sysfs.c struct device *s = &c->dev; c 266 arch/sparc/kernel/sysfs.c struct cpu *c = &per_cpu(cpu_devices, cpu); c 268 arch/sparc/kernel/sysfs.c register_cpu(c, cpu); c 293 arch/sparc/net/bpf_jit_comp_64.c static void emit_alu3(u32 opcode, u32 a, u32 b, u32 c, struct jit_ctx *ctx) c 295 arch/sparc/net/bpf_jit_comp_64.c emit(opcode | RS1(a) | RS2(b) | RD(c), ctx); c 543 arch/um/drivers/chan_kern.c char c; c 553 arch/um/drivers/chan_kern.c err = chan->ops->read(chan->fd, &c, chan->data); c 555 arch/um/drivers/chan_kern.c tty_insert_flip_char(port, c, TTY_NORMAL); c 150 arch/um/drivers/chan_user.c char c = 1; c 154 arch/um/drivers/chan_user.c count = write(pipe_fd, &c, sizeof(c)); c 155 arch/um/drivers/chan_user.c if (count != sizeof(c)) c 200 arch/um/drivers/chan_user.c count = read(pipe_fd, &c, sizeof(c)); c 201 arch/um/drivers/chan_user.c if (count != sizeof(c)) c 212 arch/um/drivers/chan_user.c count = write(pipe_fd, &c, sizeof(c)); c 213 arch/um/drivers/chan_user.c if (count != sizeof(c)) c 224 arch/um/drivers/chan_user.c char c; c 249 arch/um/drivers/chan_user.c n = read(fds[0], &c, sizeof(c)); c 250 arch/um/drivers/chan_user.c if (n != sizeof(c)) { c 278 arch/um/drivers/chan_user.c char c = 1; c 296 arch/um/drivers/chan_user.c count = write(thread_fd, &c, sizeof(c)); c 297 arch/um/drivers/chan_user.c if (count != sizeof(c)) c 34 arch/um/drivers/harddog_user.c char pid_buf[sizeof("nnnnnnn\0")], c; c 78 arch/um/drivers/harddog_user.c n = read(in_fds[0], &c, sizeof(c)); c 115 arch/um/drivers/harddog_user.c char c = '\n'; c 117 arch/um/drivers/harddog_user.c n = write(fd, &c, sizeof(c)); c 118 arch/um/drivers/harddog_user.c if (n != sizeof(c)) { c 627 arch/um/drivers/line.c char c; c 631 arch/um/drivers/line.c err = generic_read(fd, &c, NULL); c 53 arch/um/drivers/net_user.c char c; c 57 arch/um/drivers/net_user.c output = &c; c 58 arch/um/drivers/net_user.c len = sizeof(c); c 16 arch/um/drivers/slip_common.h static inline int slip_unesc(unsigned char c, unsigned char *buf, int *pos, c 21 arch/um/drivers/slip_common.h switch(c){ c 33 arch/um/drivers/slip_common.h c = SLIP_ESC; c 39 arch/um/drivers/slip_common.h c = SLIP_END; c 43 arch/um/drivers/slip_common.h buf[(*pos)++] = c; c 50 arch/um/drivers/slip_common.h unsigned char c; c 66 arch/um/drivers/slip_common.h switch(c = *s++) { c 76 arch/um/drivers/slip_common.h *ptr++ = c; c 115 arch/um/drivers/ssl.c static void ssl_console_write(struct console *c, const char *string, c 118 arch/um/drivers/ssl.c struct line *line = &serial_lines[c->index]; c 126 arch/um/drivers/ssl.c static struct tty_driver *ssl_console_device(struct console *c, int *index) c 128 arch/um/drivers/ssl.c *index = c->index; c 127 arch/um/drivers/stdio_console.c static struct tty_driver *uml_console_device(struct console *c, int *index) c 129 arch/um/drivers/stdio_console.c *index = c->index; c 125 arch/um/drivers/ubd_kern.c #define OPEN_FLAGS ((struct openflags) { .r = 1, .w = 1, .s = 1, .c = 0, \ c 128 arch/um/drivers/ubd_kern.c #define OPEN_FLAGS ((struct openflags) { .r = 1, .w = 1, .s = 0, .c = 0, \ c 755 arch/um/drivers/ubd_kern.c flags.c = 1; c 64 arch/um/include/shared/os.h unsigned int c : 1; /* O_CREAT */ c 71 arch/um/include/shared/os.h #define OPENFLAGS() ((struct openflags) { .r = 0, .w = 0, .s = 0, .c = 0, \ c 106 arch/um/include/shared/os.h flags.c = 1; c 16 
arch/um/kernel/sigio.c char c; c 18 arch/um/kernel/sigio.c os_read_file(sigio_irq_fd, &c, sizeof(c)); c 103 arch/um/os-Linux/drivers/ethertap_user.c char **args, c; c 123 arch/um/os-Linux/drivers/ethertap_user.c CATCH_EINTR(n = read(control_me, &c, sizeof(c))); c 124 arch/um/os-Linux/drivers/ethertap_user.c if (n != sizeof(c)) { c 130 arch/um/os-Linux/drivers/ethertap_user.c if (c != 1) { c 190 arch/um/os-Linux/file.c if (flags.c) c 56 arch/um/os-Linux/sigio.c char c; c 73 arch/um/os-Linux/sigio.c CATCH_EINTR(n = read(sigio_private[1], &c, c 74 arch/um/os-Linux/sigio.c sizeof(c))); c 75 arch/um/os-Linux/sigio.c if (n != sizeof(c)) c 92 arch/um/os-Linux/sigio.c CATCH_EINTR(n = write(respond_fd, &c, sizeof(c))); c 93 arch/um/os-Linux/sigio.c if (n != sizeof(c)) c 133 arch/um/os-Linux/sigio.c char c; c 136 arch/um/os-Linux/sigio.c CATCH_EINTR(n = write(sigio_private[0], &c, sizeof(c))); c 137 arch/um/os-Linux/sigio.c if (n != sizeof(c)) { c 143 arch/um/os-Linux/sigio.c CATCH_EINTR(n = read(sigio_private[0], &c, sizeof(c))); c 144 arch/um/os-Linux/sigio.c if (n != sizeof(c)) { c 18 arch/unicore32/include/asm/string.h extern char *strrchr(const char *s, int c); c 21 arch/unicore32/include/asm/string.h extern char *strchr(const char *s, int c); c 14 arch/unicore32/include/mach/ocd.h static inline void ocd_putc(unsigned int c) c 25 arch/unicore32/include/mach/ocd.h asm("movc p1.c1, %0, #1" : : "r" (c)); c 21 arch/unicore32/include/mach/uncompress.h char c; c 23 arch/unicore32/include/mach/uncompress.h while ((c = *ptr++) != '\0') { c 24 arch/unicore32/include/mach/uncompress.h if (c == '\n') c 26 arch/unicore32/include/mach/uncompress.h putc(c); c 23 arch/unicore32/kernel/time.c struct clock_event_device *c = dev_id; c 28 arch/unicore32/kernel/time.c c->event_handler(c); c 34 arch/unicore32/kernel/time.c puv3_osmr0_set_next_event(unsigned long delta, struct clock_event_device *c) c 30 arch/x86/boot/bitops.h asm("btl %2,%1" CC_SET(c) : CC_OUT(c) (v) : "m" (*p), "Ir" (nr)); c 332 arch/x86/boot/boot.h char *strchr(const char *s, int c); c 15 arch/x86/boot/cmdline.c static inline int myisspace(u8 c) c 17 arch/x86/boot/cmdline.c return c <= ' '; /* Close enough approximation */ c 31 arch/x86/boot/cmdline.c char c; c 48 arch/x86/boot/cmdline.c while (cptr < 0x10000 && (c = rdfs8(cptr++))) { c 51 arch/x86/boot/cmdline.c if (myisspace(c)) c 60 arch/x86/boot/cmdline.c if (c == '=' && !*opptr) { c 64 arch/x86/boot/cmdline.c } else if (myisspace(c)) { c 66 arch/x86/boot/cmdline.c } else if (c != *opptr++) { c 72 arch/x86/boot/cmdline.c if (myisspace(c)) c 77 arch/x86/boot/cmdline.c if (myisspace(c)) { c 81 arch/x86/boot/cmdline.c *bufptr++ = c; c 103 arch/x86/boot/cmdline.c char c; c 119 arch/x86/boot/cmdline.c c = rdfs8(cptr++); c 124 arch/x86/boot/cmdline.c if (!c) c 126 arch/x86/boot/cmdline.c else if (myisspace(c)) c 136 arch/x86/boot/cmdline.c if (!c || myisspace(c)) c 140 arch/x86/boot/cmdline.c else if (!c) c 142 arch/x86/boot/cmdline.c else if (c != *opptr++) c 147 arch/x86/boot/cmdline.c if (!c) c 149 arch/x86/boot/cmdline.c else if (myisspace(c)) c 31 arch/x86/boot/compressed/eboot.c static void setup_boot_services##bits(struct efi_config *c) \ c 37 arch/x86/boot/compressed/eboot.c c->runtime_services = table->runtime; \ c 38 arch/x86/boot/compressed/eboot.c c->boot_services = table->boottime; \ c 39 arch/x86/boot/compressed/eboot.c c->text_output = table->con_out; \ c 382 arch/x86/boot/compressed/eboot.c struct boot_params *make_boot_params(struct efi_config *c) c 396 
arch/x86/boot/compressed/eboot.c efi_early = c; c 741 arch/x86/boot/compressed/eboot.c efi_main(struct efi_config *c, struct boot_params *boot_params) c 751 arch/x86/boot/compressed/eboot.c efi_early = c; c 111 arch/x86/boot/compressed/misc.c char c; c 128 arch/x86/boot/compressed/misc.c while ((c = *s++) != '\0') { c 129 arch/x86/boot/compressed/misc.c if (c == '\n') { c 136 arch/x86/boot/compressed/misc.c vidmem[(x + cols * y) * 2] = c; c 43 arch/x86/boot/compressed/string.c void *memset(void *s, int c, size_t n) c 49 arch/x86/boot/compressed/string.c ss[i] = c; c 78 arch/x86/boot/compressed/string.c extern void *__memset(void *s, int c, size_t n) __alias(memset); c 56 arch/x86/boot/cpucheck.c #define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a)) c 75 arch/x86/boot/cpuflags.c u32 *a, u32 *b, u32 *c, u32 *d) c 80 arch/x86/boot/cpuflags.c : "=a" (*a), "=c" (*c), "=d" (*d), EBX_REG (*b) c 85 arch/x86/boot/cpuflags.c #define cpuid(id, a, b, c, d) cpuid_count(id, 0, a, b, c, d) c 28 arch/x86/boot/early_serial_console.c unsigned char c; c 37 arch/x86/boot/early_serial_console.c c = inb(port + LCR); c 38 arch/x86/boot/early_serial_console.c outb(c | DLAB, port + LCR); c 41 arch/x86/boot/early_serial_console.c outb(c & ~DLAB, port + LCR); c 48 arch/x86/boot/printf.c char c, sign, locase; c 58 arch/x86/boot/printf.c c = (type & ZEROPAD) ? '0' : ' '; c 103 arch/x86/boot/printf.c *str++ = c; c 194 arch/x86/boot/string.c char *strchr(const char *s, int c) c 196 arch/x86/boot/string.c while (*s != (char)c) c 228 arch/x86/boot/string.c static inline char _tolower(const char c) c 230 arch/x86/boot/string.c return c | 0x20; c 267 arch/x86/boot/string.c unsigned int c = *s; c 268 arch/x86/boot/string.c unsigned int lc = c | 0x20; /* don't tolower() this line */ c 271 arch/x86/boot/string.c if ('0' <= c && c <= '9') c 272 arch/x86/boot/string.c val = c - '0'; c 11 arch/x86/boot/string.h void *memset(void *dst, int c, size_t len); c 19 arch/x86/boot/string.h #define memset(d,c,l) __builtin_memset(d,c,l) c 26 arch/x86/boot/string.h extern char *strchr(const char *s, int c); c 118 arch/x86/boot/tools/build.c static u32 partial_crc32_one(u8 c, u32 crc) c 120 arch/x86/boot/tools/build.c return crctab32[(crc ^ c) & 0xff] ^ (crc >> 8); c 242 arch/x86/boot/tools/build.c static int reserve_pecoff_reloc_section(int c) c 245 arch/x86/boot/tools/build.c memset(buf+c, 0, PECOFF_RELOC_RESERVE); c 286 arch/x86/boot/tools/build.c static inline int reserve_pecoff_reloc_section(int c) c 307 arch/x86/boot/tools/build.c int c; c 312 arch/x86/boot/tools/build.c c = fread(buf, 1, sizeof(buf) - 1, file); c 316 arch/x86/boot/tools/build.c buf[c] = 0; c 335 arch/x86/boot/tools/build.c int c; c 357 arch/x86/boot/tools/build.c c = fread(buf, 1, sizeof(buf), file); c 360 arch/x86/boot/tools/build.c if (c < 1024) c 366 arch/x86/boot/tools/build.c c += reserve_pecoff_reloc_section(c); c 369 arch/x86/boot/tools/build.c setup_sectors = (c + 511) / 512; c 373 arch/x86/boot/tools/build.c memset(buf+c, 0, i-c); c 380 arch/x86/boot/tools/build.c printf("Setup is %d bytes (padded to %d bytes).\n", c, i); c 433 arch/x86/events/amd/core.c struct event_constraint *c) c 440 arch/x86/events/amd/core.c if (!c) c 441 arch/x86/events/amd/core.c c = &unconstrained; c 444 arch/x86/events/amd/core.c return c; c 456 arch/x86/events/amd/core.c for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) { c 770 arch/x86/events/core.c struct event_constraint *c; c 779 arch/x86/events/core.c c = sched->constraints[sched->state.event]; c 781 
arch/x86/events/core.c if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) { c 783 arch/x86/events/core.c for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) { c 791 arch/x86/events/core.c for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) { c 805 arch/x86/events/core.c if (c->overlap) c 827 arch/x86/events/core.c struct event_constraint *c; c 842 arch/x86/events/core.c c = sched->constraints[sched->state.event]; c 843 arch/x86/events/core.c } while (c->weight != sched->state.weight); c 873 arch/x86/events/core.c struct event_constraint *c; c 896 arch/x86/events/core.c c = cpuc->event_constraint[i]; c 902 arch/x86/events/core.c WARN_ON_ONCE((c && i >= n0) || (!c && i < n0)); c 909 arch/x86/events/core.c if (!c || (c->flags & PERF_X86_EVENT_DYNAMIC)) { c 910 arch/x86/events/core.c c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]); c 911 arch/x86/events/core.c cpuc->event_constraint[i] = c; c 914 arch/x86/events/core.c wmin = min(wmin, c->weight); c 915 arch/x86/events/core.c wmax = max(wmax, c->weight); c 923 arch/x86/events/core.c c = cpuc->event_constraint[i]; c 930 arch/x86/events/core.c if (!test_bit(hwc->idx, c->idxmsk)) c 2013 arch/x86/events/core.c struct event_constraint *c; c 2020 arch/x86/events/core.c c = x86_pmu.get_event_constraints(fake_cpuc, 0, event); c 2022 arch/x86/events/core.c if (!c || !c->weight) c 2644 arch/x86/events/intel/core.c struct event_constraint *c = &emptyconstraint; c 2701 arch/x86/events/intel/core.c c = NULL; c 2711 arch/x86/events/intel/core.c return c; c 2744 arch/x86/events/intel/core.c struct event_constraint *c = NULL, *d; c 2749 arch/x86/events/intel/core.c c = __intel_shared_reg_get_constraints(cpuc, event, xreg); c 2750 arch/x86/events/intel/core.c if (c == &emptyconstraint) c 2751 arch/x86/events/intel/core.c return c; c 2758 arch/x86/events/intel/core.c c = d; c 2761 arch/x86/events/intel/core.c return c; c 2768 arch/x86/events/intel/core.c struct event_constraint *c; c 2771 arch/x86/events/intel/core.c for_each_event_constraint(c, x86_pmu.event_constraints) { c 2772 arch/x86/events/intel/core.c if (constraint_match(c, event->hw.config)) { c 2773 arch/x86/events/intel/core.c event->hw.flags |= c->flags; c 2774 arch/x86/events/intel/core.c return c; c 2786 arch/x86/events/intel/core.c struct event_constraint *c; c 2788 arch/x86/events/intel/core.c c = intel_bts_constraints(event); c 2789 arch/x86/events/intel/core.c if (c) c 2790 arch/x86/events/intel/core.c return c; c 2792 arch/x86/events/intel/core.c c = intel_shared_regs_constraints(cpuc, event); c 2793 arch/x86/events/intel/core.c if (c) c 2794 arch/x86/events/intel/core.c return c; c 2796 arch/x86/events/intel/core.c c = intel_pebs_constraints(event); c 2797 arch/x86/events/intel/core.c if (c) c 2798 arch/x86/events/intel/core.c return c; c 2836 arch/x86/events/intel/core.c struct event_constraint *c = cpuc->event_constraint[idx]; c 2846 arch/x86/events/intel/core.c if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) c 2853 arch/x86/events/intel/core.c if (c->flags & PERF_X86_EVENT_EXCL) c 2887 arch/x86/events/intel/core.c dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx) c 2891 arch/x86/events/intel/core.c if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) { c 2903 arch/x86/events/intel/core.c *cx = *c; c 2909 arch/x86/events/intel/core.c c = cx; c 2912 arch/x86/events/intel/core.c return c; c 2917 arch/x86/events/intel/core.c int idx, struct event_constraint *c) c 2929 arch/x86/events/intel/core.c return c; c 2935 arch/x86/events/intel/core.c return c; c 
2945 arch/x86/events/intel/core.c c = dyn_constraint(cpuc, c, idx); c 2963 arch/x86/events/intel/core.c is_excl = c->flags & PERF_X86_EVENT_EXCL; c 2978 arch/x86/events/intel/core.c w = c->weight; c 2979 arch/x86/events/intel/core.c for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) { c 2986 arch/x86/events/intel/core.c __clear_bit(i, c->idxmsk); c 2996 arch/x86/events/intel/core.c __clear_bit(i, c->idxmsk); c 3008 arch/x86/events/intel/core.c c = &emptyconstraint; c 3010 arch/x86/events/intel/core.c c->weight = w; c 3012 arch/x86/events/intel/core.c return c; c 3470 arch/x86/events/intel/core.c struct event_constraint *c; c 3472 arch/x86/events/intel/core.c c = intel_get_event_constraints(cpuc, idx, event); c 3476 arch/x86/events/intel/core.c if (c->idxmsk64 & (1U << 2)) c 3481 arch/x86/events/intel/core.c return c; c 3503 arch/x86/events/intel/core.c struct event_constraint *c; c 3509 arch/x86/events/intel/core.c c = intel_get_event_constraints(cpuc, idx, event); c 3511 arch/x86/events/intel/core.c return c; c 3518 arch/x86/events/intel/core.c struct event_constraint *c; c 3532 arch/x86/events/intel/core.c c = intel_get_event_constraints(cpuc, idx, event); c 3534 arch/x86/events/intel/core.c return c; c 3543 arch/x86/events/intel/core.c struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event); c 3548 arch/x86/events/intel/core.c if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) { c 3549 arch/x86/events/intel/core.c c = dyn_constraint(cpuc, c, idx); c 3550 arch/x86/events/intel/core.c c->idxmsk64 &= ~(1ULL << 3); c 3551 arch/x86/events/intel/core.c c->weight--; c 3554 arch/x86/events/intel/core.c return c; c 3636 arch/x86/events/intel/core.c struct intel_excl_cntrs *c; c 3638 arch/x86/events/intel/core.c c = kzalloc_node(sizeof(struct intel_excl_cntrs), c 3640 arch/x86/events/intel/core.c if (c) { c 3641 arch/x86/events/intel/core.c raw_spin_lock_init(&c->lock); c 3642 arch/x86/events/intel/core.c c->core_id = -1; c 3644 arch/x86/events/intel/core.c return c; c 3756 arch/x86/events/intel/core.c struct intel_excl_cntrs *c; c 3759 arch/x86/events/intel/core.c c = sibling->excl_cntrs; c 3760 arch/x86/events/intel/core.c if (c && c->core_id == core_id) { c 3762 arch/x86/events/intel/core.c cpuc->excl_cntrs = c; c 3775 arch/x86/events/intel/core.c struct intel_excl_cntrs *c; c 3777 arch/x86/events/intel/core.c c = cpuc->excl_cntrs; c 3778 arch/x86/events/intel/core.c if (c) { c 3779 arch/x86/events/intel/core.c if (c->core_id == -1 || --c->refcnt == 0) c 3780 arch/x86/events/intel/core.c kfree(c); c 4523 arch/x86/events/intel/core.c struct event_constraint *c; c 5117 arch/x86/events/intel/core.c for_each_event_constraint(c, x86_pmu.event_constraints) { c 5118 arch/x86/events/intel/core.c if (c->cmask == FIXED_EVENT_FLAGS c 5119 arch/x86/events/intel/core.c && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) { c 5120 arch/x86/events/intel/core.c c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1; c 5122 arch/x86/events/intel/core.c c->idxmsk64 &= c 5124 arch/x86/events/intel/core.c c->weight = hweight64(c->idxmsk64); c 5184 arch/x86/events/intel/core.c int c; c 5208 arch/x86/events/intel/core.c for_each_online_cpu(c) c 5209 arch/x86/events/intel/core.c free_excl_cntrs(&per_cpu(cpu_hw_events, c)); c 874 arch/x86/events/intel/ds.c struct event_constraint *c; c 880 arch/x86/events/intel/ds.c for_each_event_constraint(c, x86_pmu.pebs_constraints) { c 881 arch/x86/events/intel/ds.c if (constraint_match(c, event->hw.config)) { c 882 arch/x86/events/intel/ds.c event->hw.flags |= 
c->flags; c 883 arch/x86/events/intel/ds.c return c; c 74 arch/x86/events/intel/pt.c u32 c = caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg]; c 77 arch/x86/events/intel/pt.c return (c & cd->mask) >> shift; c 394 arch/x86/events/intel/uncore.c struct event_constraint *c; c 397 arch/x86/events/intel/uncore.c c = type->ops->get_constraint(box, event); c 398 arch/x86/events/intel/uncore.c if (c) c 399 arch/x86/events/intel/uncore.c return c; c 406 arch/x86/events/intel/uncore.c for_each_event_constraint(c, type->constraints) { c 407 arch/x86/events/intel/uncore.c if ((event->hw.config & c->cmask) == c->code) c 408 arch/x86/events/intel/uncore.c return c; c 425 arch/x86/events/intel/uncore.c struct event_constraint *c; c 432 arch/x86/events/intel/uncore.c c = uncore_get_event_constraint(box, box->event_list[i]); c 433 arch/x86/events/intel/uncore.c box->event_constraint[i] = c; c 434 arch/x86/events/intel/uncore.c wmin = min(wmin, c->weight); c 435 arch/x86/events/intel/uncore.c wmax = max(wmax, c->weight); c 441 arch/x86/events/intel/uncore.c c = box->event_constraint[i]; c 448 arch/x86/events/intel/uncore.c if (!test_bit(hwc->idx, c->idxmsk)) c 34 arch/x86/events/intel/uncore.h #define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff) c 138 arch/x86/events/intel/uncore_nhmex.c #define MBOX_INC_SEL_EXTAR_REG(c, r) \ c 139 arch/x86/events/intel/uncore_nhmex.c EVENT_EXTRA_REG(MBOX_INC_SEL(c), NHMEX_M0_MSR_PMU_##r, \ c 141 arch/x86/events/intel/uncore_nhmex.c #define MBOX_SET_FLAG_SEL_EXTRA_REG(c, r) \ c 142 arch/x86/events/intel/uncore_nhmex.c EVENT_EXTRA_REG(MBOX_SET_FLAG_SEL(c), NHMEX_M0_MSR_PMU_##r, \ c 60 arch/x86/events/perf_event.h static inline bool constraint_match(struct event_constraint *c, u64 ecode) c 62 arch/x86/events/perf_event.h return ((ecode & c->cmask) - c->code) <= (u64)c->size; c 280 arch/x86/events/perf_event.h #define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) { \ c 282 arch/x86/events/perf_event.h .code = (c), \ c 283 arch/x86/events/perf_event.h .size = (e) - (c), \ c 290 arch/x86/events/perf_event.h #define __EVENT_CONSTRAINT(c, n, m, w, o, f) \ c 291 arch/x86/events/perf_event.h __EVENT_CONSTRAINT_RANGE(c, c, n, m, w, o, f) c 293 arch/x86/events/perf_event.h #define EVENT_CONSTRAINT(c, n, m) \ c 294 arch/x86/events/perf_event.h __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0) c 300 arch/x86/events/perf_event.h #define EVENT_CONSTRAINT_RANGE(c, e, n, m) \ c 301 arch/x86/events/perf_event.h __EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0) c 303 arch/x86/events/perf_event.h #define INTEL_EXCLEVT_CONSTRAINT(c, n) \ c 304 arch/x86/events/perf_event.h __EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\ c 328 arch/x86/events/perf_event.h #define EVENT_CONSTRAINT_OVERLAP(c, n, m) \ c 329 arch/x86/events/perf_event.h __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0) c 334 arch/x86/events/perf_event.h #define INTEL_EVENT_CONSTRAINT(c, n) \ c 335 arch/x86/events/perf_event.h EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT) c 340 arch/x86/events/perf_event.h #define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n) \ c 341 arch/x86/events/perf_event.h EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT) c 357 arch/x86/events/perf_event.h #define FIXED_EVENT_CONSTRAINT(c, n) \ c 358 arch/x86/events/perf_event.h EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS) c 363 arch/x86/events/perf_event.h #define INTEL_UEVENT_CONSTRAINT(c, n) \ c 364 arch/x86/events/perf_event.h EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK) c 367 arch/x86/events/perf_event.h 
#define INTEL_UBIT_EVENT_CONSTRAINT(c, n) \ c 368 arch/x86/events/perf_event.h EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c)) c 371 arch/x86/events/perf_event.h #define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n) \ c 372 arch/x86/events/perf_event.h EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS) c 374 arch/x86/events/perf_event.h #define INTEL_EXCLUEVT_CONSTRAINT(c, n) \ c 375 arch/x86/events/perf_event.h __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \ c 378 arch/x86/events/perf_event.h #define INTEL_PLD_CONSTRAINT(c, n) \ c 379 arch/x86/events/perf_event.h __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \ c 382 arch/x86/events/perf_event.h #define INTEL_PST_CONSTRAINT(c, n) \ c 383 arch/x86/events/perf_event.h __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \ c 387 arch/x86/events/perf_event.h #define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \ c 388 arch/x86/events/perf_event.h EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS) c 390 arch/x86/events/perf_event.h #define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n) \ c 391 arch/x86/events/perf_event.h EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS) c 463 arch/x86/events/perf_event.h #define for_each_event_constraint(e, c) \ c 464 arch/x86/events/perf_event.h for ((e) = (c); (e)->weight != -1; (e)++) c 501 arch/x86/events/perf_event.h #define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \ c 502 arch/x86/events/perf_event.h INTEL_UEVENT_EXTRA_REG(c, \ c 79 arch/x86/ia32/ia32_aout.c char c; c 83 arch/x86/ia32/ia32_aout.c get_user(c, p++); c 84 arch/x86/ia32/ia32_aout.c } while (c); c 89 arch/x86/ia32/ia32_aout.c char c; c 93 arch/x86/ia32/ia32_aout.c get_user(c, p++); c 94 arch/x86/ia32/ia32_aout.c } while (c); c 91 arch/x86/include/asm/acpi.h struct cpuinfo_x86 *c = &cpu_data(0); c 92 arch/x86/include/asm/acpi.h return (c->x86_vendor == X86_VENDOR_INTEL || c 93 arch/x86/include/asm/acpi.h c->x86_vendor == X86_VENDOR_CENTAUR); c 98 arch/x86/include/asm/acpi.h struct cpuinfo_x86 *c = &cpu_data(0); c 102 arch/x86/include/asm/acpi.h if (cpu_has(c, X86_FEATURE_EST)) c 105 arch/x86/include/asm/acpi.h if (cpu_has(c, X86_FEATURE_ACPI)) c 111 arch/x86/include/asm/acpi.h if (!cpu_has(c, X86_FEATURE_MWAIT)) c 36 arch/x86/include/asm/archrandom.h CC_SET(c) c 37 arch/x86/include/asm/archrandom.h : CC_OUT(c) (ok), "=a" (*v)); c 50 arch/x86/include/asm/archrandom.h CC_SET(c) c 51 arch/x86/include/asm/archrandom.h : CC_OUT(c) (ok), "=a" (*v)); c 62 arch/x86/include/asm/archrandom.h CC_SET(c) c 63 arch/x86/include/asm/archrandom.h : CC_OUT(c) (ok), "=a" (*v)); c 71 arch/x86/include/asm/archrandom.h CC_SET(c) c 72 arch/x86/include/asm/archrandom.h : CC_OUT(c) (ok), "=a" (*v)); c 107 arch/x86/include/asm/archrandom.h extern void x86_init_rdrand(struct cpuinfo_x86 *c); c 111 arch/x86/include/asm/archrandom.h static inline void x86_init_rdrand(struct cpuinfo_x86 *c) { } c 113 arch/x86/include/asm/asm.h # define CC_SET(c) "\n\t/* output condition code " #c "*/\n" c 114 arch/x86/include/asm/asm.h # define CC_OUT(c) "=@cc" #c c 116 arch/x86/include/asm/asm.h # define CC_SET(c) "\n\tset" #c " %[_cc_" #c "]\n" c 117 arch/x86/include/asm/asm.h # define CC_OUT(c) [_cc_ ## c] "=qm" c 269 arch/x86/include/asm/atomic64_32.h s64 old, c = 0; c 271 arch/x86/include/asm/atomic64_32.h while ((old = arch_atomic64_cmpxchg(v, c, c & i)) != c) c 272 arch/x86/include/asm/atomic64_32.h c = old; c 277 arch/x86/include/asm/atomic64_32.h s64 old, c = 0; c 279 
arch/x86/include/asm/atomic64_32.h while ((old = arch_atomic64_cmpxchg(v, c, c & i)) != c) c 280 arch/x86/include/asm/atomic64_32.h c = old; c 287 arch/x86/include/asm/atomic64_32.h s64 old, c = 0; c 289 arch/x86/include/asm/atomic64_32.h while ((old = arch_atomic64_cmpxchg(v, c, c | i)) != c) c 290 arch/x86/include/asm/atomic64_32.h c = old; c 295 arch/x86/include/asm/atomic64_32.h s64 old, c = 0; c 297 arch/x86/include/asm/atomic64_32.h while ((old = arch_atomic64_cmpxchg(v, c, c | i)) != c) c 298 arch/x86/include/asm/atomic64_32.h c = old; c 305 arch/x86/include/asm/atomic64_32.h s64 old, c = 0; c 307 arch/x86/include/asm/atomic64_32.h while ((old = arch_atomic64_cmpxchg(v, c, c ^ i)) != c) c 308 arch/x86/include/asm/atomic64_32.h c = old; c 313 arch/x86/include/asm/atomic64_32.h s64 old, c = 0; c 315 arch/x86/include/asm/atomic64_32.h while ((old = arch_atomic64_cmpxchg(v, c, c ^ i)) != c) c 316 arch/x86/include/asm/atomic64_32.h c = old; c 323 arch/x86/include/asm/atomic64_32.h s64 old, c = 0; c 325 arch/x86/include/asm/atomic64_32.h while ((old = arch_atomic64_cmpxchg(v, c, c + i)) != c) c 326 arch/x86/include/asm/atomic64_32.h c = old; c 138 arch/x86/include/asm/bitops.h return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, c, "Ir", nr); c 153 arch/x86/include/asm/bitops.h CC_SET(c) c 154 arch/x86/include/asm/bitops.h : CC_OUT(c) (oldbit) c 162 arch/x86/include/asm/bitops.h return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), *addr, c, "Ir", nr); c 179 arch/x86/include/asm/bitops.h CC_SET(c) c 180 arch/x86/include/asm/bitops.h : CC_OUT(c) (oldbit) c 191 arch/x86/include/asm/bitops.h CC_SET(c) c 192 arch/x86/include/asm/bitops.h : CC_OUT(c) (oldbit) c 201 arch/x86/include/asm/bitops.h return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr); c 215 arch/x86/include/asm/bitops.h CC_SET(c) c 216 arch/x86/include/asm/bitops.h : CC_OUT(c) (oldbit) c 10 arch/x86/include/asm/bugs.h void check_mpx_erratum(struct cpuinfo_x86 *c); c 12 arch/x86/include/asm/bugs.h static inline void check_mpx_erratum(struct cpuinfo_x86 *c) {} c 5 arch/x86/include/asm/cacheinfo.h void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id); c 6 arch/x86/include/asm/cacheinfo.h void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id); c 51 arch/x86/include/asm/cpufeature.h #define test_cpu_cap(c, bit) \ c 52 arch/x86/include/asm/cpufeature.h test_bit(bit, (unsigned long *)((c)->x86_capability)) c 117 arch/x86/include/asm/cpufeature.h #define cpu_has(c, bit) \ c 119 arch/x86/include/asm/cpufeature.h test_cpu_cap(c, bit)) c 139 arch/x86/include/asm/cpufeature.h #define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability)) c 142 arch/x86/include/asm/cpufeature.h extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit); c 225 arch/x86/include/asm/cpufeature.h #define cpu_has_bug(c, bit) cpu_has(c, (bit)) c 226 arch/x86/include/asm/cpufeature.h #define set_cpu_bug(c, bit) set_cpu_cap(c, (bit)) c 227 arch/x86/include/asm/cpufeature.h #define clear_cpu_bug(c, bit) clear_cpu_cap(c, (bit)) c 64 arch/x86/include/asm/dwarf2.h .macro cfi_ignore a=0, b=0, c=0, d=0 c 44 arch/x86/include/asm/fpu/internal.h extern void fpu__init_system(struct cpuinfo_x86 *c); c 132 arch/x86/include/asm/hw_irq.h static inline void send_cleanup_vector(struct irq_cfg *c) { } c 133 arch/x86/include/asm/hw_irq.h static inline void irq_complete_move(struct irq_cfg *c) { } c 139 arch/x86/include/asm/local.h long c, old; \ c 140 arch/x86/include/asm/local.h c = 
local_read((l)); \ c 142 arch/x86/include/asm/local.h if (unlikely(c == (u))) \ c 144 arch/x86/include/asm/local.h old = local_cmpxchg((l), c, c + (a)); \ c 145 arch/x86/include/asm/local.h if (likely(old == c)) \ c 147 arch/x86/include/asm/local.h c = old; \ c 149 arch/x86/include/asm/local.h c != (u); \ c 18 arch/x86/include/asm/mce.h #define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT) c 44 arch/x86/include/asm/mce.h #define MCI_STATUS_CEC(c) (((c) & MCI_STATUS_CEC_MASK) >> MCI_STATUS_CEC_SHIFT) c 166 arch/x86/include/asm/mce.h void mcheck_cpu_init(struct cpuinfo_x86 *c); c 167 arch/x86/include/asm/mce.h void mcheck_cpu_clear(struct cpuinfo_x86 *c); c 171 arch/x86/include/asm/mce.h static inline void mcheck_cpu_init(struct cpuinfo_x86 *c) {} c 172 arch/x86/include/asm/mce.h static inline void mcheck_cpu_clear(struct cpuinfo_x86 *c) {} c 177 arch/x86/include/asm/mce.h void intel_p5_mcheck_init(struct cpuinfo_x86 *c); c 178 arch/x86/include/asm/mce.h void winchip_mcheck_init(struct cpuinfo_x86 *c); c 181 arch/x86/include/asm/mce.h static inline void intel_p5_mcheck_init(struct cpuinfo_x86 *c) {} c 182 arch/x86/include/asm/mce.h static inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {} c 198 arch/x86/include/asm/mce.h void mce_intel_feature_init(struct cpuinfo_x86 *c); c 199 arch/x86/include/asm/mce.h void mce_intel_feature_clear(struct cpuinfo_x86 *c); c 205 arch/x86/include/asm/mce.h static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { } c 206 arch/x86/include/asm/mce.h static inline void mce_intel_feature_clear(struct cpuinfo_x86 *c) { } c 213 arch/x86/include/asm/mce.h int mce_available(struct cpuinfo_x86 *c); c 258 arch/x86/include/asm/mce.h void intel_init_thermal(struct cpuinfo_x86 *c); c 338 arch/x86/include/asm/mce.h void mce_amd_feature_init(struct cpuinfo_x86 *c); c 346 arch/x86/include/asm/mce.h static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { } c 351 arch/x86/include/asm/mce.h static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c) { return mce_amd_feature_init(c); } c 83 arch/x86/include/asm/microcode.h #define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24)) c 91 arch/x86/include/asm/microcode.h #define CPUID_IS(a, b, c, ebx, ecx, edx) \ c 92 arch/x86/include/asm/microcode.h (!((ebx ^ (a))|(edx ^ (b))|(ecx ^ (c)))) c 265 arch/x86/include/asm/nospec-branch.h asm volatile(ALTERNATIVE("", "wrmsr", %c[feature]) c 543 arch/x86/include/asm/percpu.h CC_SET(c) c 544 arch/x86/include/asm/percpu.h : CC_OUT(c) (oldbit) c 176 arch/x86/include/asm/processor.h extern void cpu_detect(struct cpuinfo_x86 *c); c 724 arch/x86/include/asm/processor.h extern void select_idle_routine(const struct cpuinfo_x86 *c); c 23 arch/x86/include/asm/qspinlock.h val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c, c 99 arch/x86/include/asm/refcount.h int c, result; c 101 arch/x86/include/asm/refcount.h c = atomic_read(&(r->refs)); c 103 arch/x86/include/asm/refcount.h if (unlikely(c == 0)) c 106 arch/x86/include/asm/refcount.h result = c + i; c 109 arch/x86/include/asm/refcount.h if (unlikely(c < 0 || c == INT_MAX || result < c)) { c 116 arch/x86/include/asm/refcount.h } while (!atomic_try_cmpxchg(&(r->refs), &c, result)); c 118 arch/x86/include/asm/refcount.h return c != 0; c 20 arch/x86/include/asm/rmwcc.h bool c = false; \ c 25 arch/x86/include/asm/rmwcc.h cc_label: c = true; \ c 27 arch/x86/include/asm/rmwcc.h c; \ c 36 arch/x86/include/asm/rmwcc.h bool c; \ c 38 arch/x86/include/asm/rmwcc.h : [var] "+m" (_var), 
CC_OUT(cc) (c) \ c 40 arch/x86/include/asm/rmwcc.h c; \ c 91 arch/x86/include/asm/signal.h asm("btl %2,%1" CC_SET(c) c 92 arch/x86/include/asm/signal.h : CC_OUT(c) (ret) : "m"(*set), "Ir"(_sig-1)); c 28 arch/x86/include/asm/string_32.h extern char *strchr(const char *s, int c); c 196 arch/x86/include/asm/string_32.h extern void *memchr(const void *cs, int c, size_t count); c 198 arch/x86/include/asm/string_32.h static inline void *__memset_generic(void *s, char c, size_t count) c 204 arch/x86/include/asm/string_32.h : "a" (c), "1" (s), "0" (count) c 210 arch/x86/include/asm/string_32.h #define __constant_count_memset(s, c, count) __memset_generic((s), (c), (count)) c 220 arch/x86/include/asm/string_32.h #define __memset(s, c, count) \ c 222 arch/x86/include/asm/string_32.h ? __constant_count_memset((s), (c), (count)) \ c 223 arch/x86/include/asm/string_32.h : __memset_generic((s), (c), (count))) c 228 arch/x86/include/asm/string_32.h #define memset(s, c, count) __builtin_memset(s, c, count) c 259 arch/x86/include/asm/string_32.h extern void *memscan(void *addr, int c, size_t size); c 18 arch/x86/include/asm/string_64.h void *memset(void *s, int c, size_t n); c 19 arch/x86/include/asm/string_64.h void *__memset(void *s, int c, size_t n); c 77 arch/x86/include/asm/string_64.h #define memset(s, c, n) __memset(s, c, n) c 85 arch/x86/include/asm/sync_bitops.h return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(bts), *addr, c, "Ir", nr); c 98 arch/x86/include/asm/sync_bitops.h return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(btr), *addr, c, "Ir", nr); c 111 arch/x86/include/asm/sync_bitops.h return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(btc), *addr, c, "Ir", nr); c 81 arch/x86/include/asm/vm86.h static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c) c 47 arch/x86/include/asm/word-at-a-time.h static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) c 49 arch/x86/include/asm/word-at-a-time.h unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; c 54 arch/x86/include/asm/word-at-a-time.h static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c) c 174 arch/x86/include/asm/x86_init.h void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node); c 373 arch/x86/include/asm/xen/interface.h } c; c 32 arch/x86/kernel/acpi/cstate.c struct cpuinfo_x86 *c = &cpu_data(cpu); c 37 arch/x86/kernel/acpi/cstate.c else if (c->x86_vendor == X86_VENDOR_INTEL) { c 52 arch/x86/kernel/acpi/cstate.c if (c->x86_vendor == X86_VENDOR_INTEL && c 53 arch/x86/kernel/acpi/cstate.c (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 0x0f))) c 62 arch/x86/kernel/acpi/cstate.c if (c->x86_vendor == X86_VENDOR_CENTAUR) { c 63 arch/x86/kernel/acpi/cstate.c if (c->x86 > 6 || (c->x86 == 6 && c->x86_model == 0x0f && c 64 arch/x86/kernel/acpi/cstate.c c->x86_stepping >= 0x0e)) c 68 arch/x86/kernel/acpi/cstate.c if (c->x86_vendor == X86_VENDOR_ZHAOXIN) { c 149 arch/x86/kernel/acpi/cstate.c struct cpuinfo_x86 *c = &cpu_data(cpu); c 152 arch/x86/kernel/acpi/cstate.c if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF) c 177 arch/x86/kernel/acpi/cstate.c if ((c->x86_vendor == X86_VENDOR_INTEL) && !(reg->access_size & 0x2)) c 197 arch/x86/kernel/acpi/cstate.c struct cpuinfo_x86 *c = &boot_cpu_data; c 199 arch/x86/kernel/acpi/cstate.c if (c->x86_vendor != X86_VENDOR_INTEL && c 200 arch/x86/kernel/acpi/cstate.c c->x86_vendor != X86_VENDOR_AMD) c 170 arch/x86/kernel/apic/apic_numachip.c static void fixup_cpu_id(struct cpuinfo_x86 
*c, int node) c 183 arch/x86/kernel/apic/apic_numachip.c c->phys_proc_id = node / nodes; c 751 arch/x86/kernel/apic/x2apic_uv_x.c static __init void map_gru_distributed(unsigned long c) c 758 arch/x86/kernel/apic/x2apic_uv_x.c gru.v = c; c 97 arch/x86/kernel/cpu/amd.c static void init_amd_k5(struct cpuinfo_x86 *c) c 109 arch/x86/kernel/cpu/amd.c if (c->x86_model == 9 || c->x86_model == 10) { c 116 arch/x86/kernel/cpu/amd.c static void init_amd_k6(struct cpuinfo_x86 *c) c 122 arch/x86/kernel/cpu/amd.c if (c->x86_model < 6) { c 124 arch/x86/kernel/cpu/amd.c if (c->x86_model == 0) { c 125 arch/x86/kernel/cpu/amd.c clear_cpu_cap(c, X86_FEATURE_APIC); c 126 arch/x86/kernel/cpu/amd.c set_cpu_cap(c, X86_FEATURE_PGE); c 131 arch/x86/kernel/cpu/amd.c if (c->x86_model == 6 && c->x86_stepping == 1) { c 160 arch/x86/kernel/cpu/amd.c if (c->x86_model < 8 || c 161 arch/x86/kernel/cpu/amd.c (c->x86_model == 8 && c->x86_stepping < 8)) { c 180 arch/x86/kernel/cpu/amd.c if ((c->x86_model == 8 && c->x86_stepping > 7) || c 181 arch/x86/kernel/cpu/amd.c c->x86_model == 9 || c->x86_model == 13) { c 202 arch/x86/kernel/cpu/amd.c if (c->x86_model == 10) { c 210 arch/x86/kernel/cpu/amd.c static void init_amd_k7(struct cpuinfo_x86 *c) c 220 arch/x86/kernel/cpu/amd.c if (c->x86_model >= 6 && c->x86_model <= 10) { c 221 arch/x86/kernel/cpu/amd.c if (!cpu_has(c, X86_FEATURE_XMM)) { c 224 arch/x86/kernel/cpu/amd.c set_cpu_cap(c, X86_FEATURE_XMM); c 233 arch/x86/kernel/cpu/amd.c if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) { c 243 arch/x86/kernel/cpu/amd.c if (!c->cpu_index) c 251 arch/x86/kernel/cpu/amd.c if ((c->x86_model == 6) && ((c->x86_stepping == 0) || c 252 arch/x86/kernel/cpu/amd.c (c->x86_stepping == 1))) c 256 arch/x86/kernel/cpu/amd.c if ((c->x86_model == 7) && (c->x86_stepping == 0)) c 266 arch/x86/kernel/cpu/amd.c if (((c->x86_model == 6) && (c->x86_stepping >= 2)) || c 267 arch/x86/kernel/cpu/amd.c ((c->x86_model == 7) && (c->x86_stepping >= 1)) || c 268 arch/x86/kernel/cpu/amd.c (c->x86_model > 7)) c 269 arch/x86/kernel/cpu/amd.c if (cpu_has(c, X86_FEATURE_MP)) c 312 arch/x86/kernel/cpu/amd.c static void legacy_fixup_core_id(struct cpuinfo_x86 *c) c 316 arch/x86/kernel/cpu/amd.c if (c->x86 >= 0x17) c 319 arch/x86/kernel/cpu/amd.c cus_per_node = c->x86_max_cores / nodes_per_socket; c 320 arch/x86/kernel/cpu/amd.c c->cpu_core_id %= cus_per_node; c 324 arch/x86/kernel/cpu/amd.c static void amd_get_topology_early(struct cpuinfo_x86 *c) c 326 arch/x86/kernel/cpu/amd.c if (cpu_has(c, X86_FEATURE_TOPOEXT)) c 336 arch/x86/kernel/cpu/amd.c static void amd_get_topology(struct cpuinfo_x86 *c) c 350 arch/x86/kernel/cpu/amd.c if (c->x86 == 0x15) c 351 arch/x86/kernel/cpu/amd.c c->cu_id = ebx & 0xff; c 353 arch/x86/kernel/cpu/amd.c if (c->x86 >= 0x17) { c 354 arch/x86/kernel/cpu/amd.c c->cpu_core_id = ebx & 0xff; c 357 arch/x86/kernel/cpu/amd.c c->x86_max_cores /= smp_num_siblings; c 364 arch/x86/kernel/cpu/amd.c err = detect_extended_topology(c); c 366 arch/x86/kernel/cpu/amd.c c->x86_coreid_bits = get_count_order(c->x86_max_cores); c 368 arch/x86/kernel/cpu/amd.c cacheinfo_amd_init_llc_id(c, cpu, node_id); c 370 arch/x86/kernel/cpu/amd.c } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { c 381 arch/x86/kernel/cpu/amd.c set_cpu_cap(c, X86_FEATURE_AMD_DCM); c 382 arch/x86/kernel/cpu/amd.c legacy_fixup_core_id(c); c 390 arch/x86/kernel/cpu/amd.c static void amd_detect_cmp(struct cpuinfo_x86 *c) c 395 arch/x86/kernel/cpu/amd.c bits = c->x86_coreid_bits; c 397 arch/x86/kernel/cpu/amd.c 
c->cpu_core_id = c->initial_apicid & ((1 << bits)-1); c 399 arch/x86/kernel/cpu/amd.c c->phys_proc_id = c->initial_apicid >> bits; c 401 arch/x86/kernel/cpu/amd.c per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; c 416 arch/x86/kernel/cpu/amd.c static void srat_detect_node(struct cpuinfo_x86 *c) c 421 arch/x86/kernel/cpu/amd.c unsigned apicid = c->apicid; c 433 arch/x86/kernel/cpu/amd.c x86_cpuinit.fixup_cpu_id(c, node); c 455 arch/x86/kernel/cpu/amd.c int ht_nodeid = c->initial_apicid; c 467 arch/x86/kernel/cpu/amd.c static void early_init_amd_mc(struct cpuinfo_x86 *c) c 473 arch/x86/kernel/cpu/amd.c if (c->extended_cpuid_level < 0x80000008) c 478 arch/x86/kernel/cpu/amd.c c->x86_max_cores = (ecx & 0xff) + 1; c 485 arch/x86/kernel/cpu/amd.c while ((1 << bits) < c->x86_max_cores) c 489 arch/x86/kernel/cpu/amd.c c->x86_coreid_bits = bits; c 493 arch/x86/kernel/cpu/amd.c static void bsp_init_amd(struct cpuinfo_x86 *c) c 497 arch/x86/kernel/cpu/amd.c if (c->x86 >= 0xf) { c 515 arch/x86/kernel/cpu/amd.c if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { c 517 arch/x86/kernel/cpu/amd.c if (c->x86 > 0x10 || c 518 arch/x86/kernel/cpu/amd.c (c->x86 == 0x10 && c->x86_model >= 0x2)) { c 527 arch/x86/kernel/cpu/amd.c if (c->x86 == 0x15) { c 542 arch/x86/kernel/cpu/amd.c if (cpu_has(c, X86_FEATURE_MWAITX)) c 559 arch/x86/kernel/cpu/amd.c c->x86 >= 0x15 && c->x86 <= 0x17) { c 562 arch/x86/kernel/cpu/amd.c switch (c->x86) { c 580 arch/x86/kernel/cpu/amd.c static void early_detect_mem_encrypt(struct cpuinfo_x86 *c) c 596 arch/x86/kernel/cpu/amd.c if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) { c 607 arch/x86/kernel/cpu/amd.c c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f; c 625 arch/x86/kernel/cpu/amd.c static void early_init_amd(struct cpuinfo_x86 *c) c 630 arch/x86/kernel/cpu/amd.c early_init_amd_mc(c); c 633 arch/x86/kernel/cpu/amd.c if (c->x86 == 6) c 634 arch/x86/kernel/cpu/amd.c set_cpu_cap(c, X86_FEATURE_K7); c 637 arch/x86/kernel/cpu/amd.c if (c->x86 >= 0xf) c 638 arch/x86/kernel/cpu/amd.c set_cpu_cap(c, X86_FEATURE_K8); c 640 arch/x86/kernel/cpu/amd.c rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); c 646 arch/x86/kernel/cpu/amd.c if (c->x86_power & (1 << 8)) { c 647 arch/x86/kernel/cpu/amd.c set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); c 648 arch/x86/kernel/cpu/amd.c set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); c 652 arch/x86/kernel/cpu/amd.c if (c->x86_power & BIT(12)) c 653 arch/x86/kernel/cpu/amd.c set_cpu_cap(c, X86_FEATURE_ACC_POWER); c 656 arch/x86/kernel/cpu/amd.c set_cpu_cap(c, X86_FEATURE_SYSCALL32); c 659 arch/x86/kernel/cpu/amd.c if (c->x86 == 5) c 660 arch/x86/kernel/cpu/amd.c if (c->x86_model == 13 || c->x86_model == 9 || c 661 arch/x86/kernel/cpu/amd.c (c->x86_model == 8 && c->x86_stepping >= 8)) c 662 arch/x86/kernel/cpu/amd.c set_cpu_cap(c, X86_FEATURE_K6_MTRR); c 672 arch/x86/kernel/cpu/amd.c if (c->x86 > 0x16) c 673 arch/x86/kernel/cpu/amd.c set_cpu_cap(c, X86_FEATURE_EXTD_APICID); c 674 arch/x86/kernel/cpu/amd.c else if (c->x86 >= 0xf) { c 680 arch/x86/kernel/cpu/amd.c set_cpu_cap(c, X86_FEATURE_EXTD_APICID); c 690 arch/x86/kernel/cpu/amd.c set_cpu_cap(c, X86_FEATURE_VMMCALL); c 693 arch/x86/kernel/cpu/amd.c if (c->x86 == 0x16 && c->x86_model <= 0xf) c 702 arch/x86/kernel/cpu/amd.c if (cpu_has_amd_erratum(c, amd_erratum_400)) c 703 arch/x86/kernel/cpu/amd.c set_cpu_bug(c, X86_BUG_AMD_E400); c 705 arch/x86/kernel/cpu/amd.c early_detect_mem_encrypt(c); c 708 arch/x86/kernel/cpu/amd.c if (c->x86 == 0x15 && c 709 arch/x86/kernel/cpu/amd.c (c->x86_model >= 0x10 && 
c->x86_model <= 0x6f) && c 710 arch/x86/kernel/cpu/amd.c !cpu_has(c, X86_FEATURE_TOPOEXT)) { c 715 arch/x86/kernel/cpu/amd.c set_cpu_cap(c, X86_FEATURE_TOPOEXT); c 721 arch/x86/kernel/cpu/amd.c amd_get_topology_early(c); c 724 arch/x86/kernel/cpu/amd.c static void init_amd_k8(struct cpuinfo_x86 *c) c 732 arch/x86/kernel/cpu/amd.c set_cpu_cap(c, X86_FEATURE_REP_GOOD); c 739 arch/x86/kernel/cpu/amd.c if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) { c 740 arch/x86/kernel/cpu/amd.c clear_cpu_cap(c, X86_FEATURE_LAHF_LM); c 747 arch/x86/kernel/cpu/amd.c if (!c->x86_model_id[0]) c 748 arch/x86/kernel/cpu/amd.c strcpy(c->x86_model_id, "Hammer"); c 760 arch/x86/kernel/cpu/amd.c set_cpu_bug(c, X86_BUG_SWAPGS_FENCE); c 763 arch/x86/kernel/cpu/amd.c static void init_amd_gh(struct cpuinfo_x86 *c) c 767 arch/x86/kernel/cpu/amd.c if (c == &boot_cpu_data) c 794 arch/x86/kernel/cpu/amd.c if (cpu_has_amd_erratum(c, amd_erratum_383)) c 795 arch/x86/kernel/cpu/amd.c set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); c 800 arch/x86/kernel/cpu/amd.c static void init_amd_ln(struct cpuinfo_x86 *c) c 825 arch/x86/kernel/cpu/amd.c static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c) c 853 arch/x86/kernel/cpu/amd.c clear_cpu_cap(c, X86_FEATURE_RDRAND); c 857 arch/x86/kernel/cpu/amd.c static void init_amd_jg(struct cpuinfo_x86 *c) c 864 arch/x86/kernel/cpu/amd.c clear_rdrand_cpuid_bit(c); c 867 arch/x86/kernel/cpu/amd.c static void init_amd_bd(struct cpuinfo_x86 *c) c 875 arch/x86/kernel/cpu/amd.c if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) { c 887 arch/x86/kernel/cpu/amd.c clear_rdrand_cpuid_bit(c); c 890 arch/x86/kernel/cpu/amd.c static void init_amd_zn(struct cpuinfo_x86 *c) c 892 arch/x86/kernel/cpu/amd.c set_cpu_cap(c, X86_FEATURE_ZEN); c 902 arch/x86/kernel/cpu/amd.c if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB)) c 903 arch/x86/kernel/cpu/amd.c set_cpu_cap(c, X86_FEATURE_CPB); c 906 arch/x86/kernel/cpu/amd.c static void init_amd(struct cpuinfo_x86 *c) c 908 arch/x86/kernel/cpu/amd.c early_init_amd(c); c 914 arch/x86/kernel/cpu/amd.c clear_cpu_cap(c, 0*32+31); c 916 arch/x86/kernel/cpu/amd.c if (c->x86 >= 0x10) c 917 arch/x86/kernel/cpu/amd.c set_cpu_cap(c, X86_FEATURE_REP_GOOD); c 920 arch/x86/kernel/cpu/amd.c c->apicid = hard_smp_processor_id(); c 923 arch/x86/kernel/cpu/amd.c if (c->x86 < 6) c 924 arch/x86/kernel/cpu/amd.c clear_cpu_cap(c, X86_FEATURE_MCE); c 926 arch/x86/kernel/cpu/amd.c switch (c->x86) { c 927 arch/x86/kernel/cpu/amd.c case 4: init_amd_k5(c); break; c 928 arch/x86/kernel/cpu/amd.c case 5: init_amd_k6(c); break; c 929 arch/x86/kernel/cpu/amd.c case 6: init_amd_k7(c); break; c 930 arch/x86/kernel/cpu/amd.c case 0xf: init_amd_k8(c); break; c 931 arch/x86/kernel/cpu/amd.c case 0x10: init_amd_gh(c); break; c 932 arch/x86/kernel/cpu/amd.c case 0x12: init_amd_ln(c); break; c 933 arch/x86/kernel/cpu/amd.c case 0x15: init_amd_bd(c); break; c 934 arch/x86/kernel/cpu/amd.c case 0x16: init_amd_jg(c); break; c 935 arch/x86/kernel/cpu/amd.c case 0x17: init_amd_zn(c); break; c 942 arch/x86/kernel/cpu/amd.c if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR))) c 943 arch/x86/kernel/cpu/amd.c set_cpu_bug(c, X86_BUG_FXSAVE_LEAK); c 945 arch/x86/kernel/cpu/amd.c cpu_detect_cache_sizes(c); c 947 arch/x86/kernel/cpu/amd.c amd_detect_cmp(c); c 948 arch/x86/kernel/cpu/amd.c amd_get_topology(c); c 949 arch/x86/kernel/cpu/amd.c srat_detect_node(c); c 951 arch/x86/kernel/cpu/amd.c init_amd_cacheinfo(c); c 953 arch/x86/kernel/cpu/amd.c if (cpu_has(c, X86_FEATURE_XMM2)) 
{ c 964 arch/x86/kernel/cpu/amd.c set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); c 971 arch/x86/kernel/cpu/amd.c if (c->x86 > 0x11) c 972 arch/x86/kernel/cpu/amd.c set_cpu_cap(c, X86_FEATURE_ARAT); c 975 arch/x86/kernel/cpu/amd.c if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH)) c 976 arch/x86/kernel/cpu/amd.c if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM)) c 977 arch/x86/kernel/cpu/amd.c set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH); c 980 arch/x86/kernel/cpu/amd.c if (!cpu_has(c, X86_FEATURE_XENPV)) c 981 arch/x86/kernel/cpu/amd.c set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS); c 988 arch/x86/kernel/cpu/amd.c if (cpu_has(c, X86_FEATURE_IRPERF) && c 989 arch/x86/kernel/cpu/amd.c !cpu_has_amd_erratum(c, amd_erratum_1054)) c 994 arch/x86/kernel/cpu/amd.c static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size) c 997 arch/x86/kernel/cpu/amd.c if (c->x86 == 6) { c 999 arch/x86/kernel/cpu/amd.c if (c->x86_model == 3 && c->x86_stepping == 0) c 1002 arch/x86/kernel/cpu/amd.c if (c->x86_model == 4 && c 1003 arch/x86/kernel/cpu/amd.c (c->x86_stepping == 0 || c->x86_stepping == 1)) c 1010 arch/x86/kernel/cpu/amd.c static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c) c 1015 arch/x86/kernel/cpu/amd.c if (c->x86 < 0xf) c 1018 arch/x86/kernel/cpu/amd.c if (c->extended_cpuid_level < 0x80000006) c 1030 arch/x86/kernel/cpu/amd.c if (c->x86 == 0xf) { c 1047 arch/x86/kernel/cpu/amd.c if (c->x86 == 0x15 && c->x86_model <= 0x1f) { c 1419 arch/x86/kernel/cpu/bugs.c static void override_cache_bits(struct cpuinfo_x86 *c) c 1421 arch/x86/kernel/cpu/bugs.c if (c->x86 != 6) c 1424 arch/x86/kernel/cpu/bugs.c switch (c->x86_model) { c 1438 arch/x86/kernel/cpu/bugs.c if (c->x86_cache_bits < 44) c 1439 arch/x86/kernel/cpu/bugs.c c->x86_cache_bits = 44; c 628 arch/x86/kernel/cpu/cacheinfo.c static int find_num_cache_leaves(struct cpuinfo_x86 *c) c 634 arch/x86/kernel/cpu/cacheinfo.c if (c->x86_vendor == X86_VENDOR_AMD || c 635 arch/x86/kernel/cpu/cacheinfo.c c->x86_vendor == X86_VENDOR_HYGON) c 649 arch/x86/kernel/cpu/cacheinfo.c void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id) c 658 arch/x86/kernel/cpu/cacheinfo.c if (c->x86 < 0x17) { c 661 arch/x86/kernel/cpu/cacheinfo.c } else if (c->x86 == 0x17 && c->x86_model <= 0x1F) { c 666 arch/x86/kernel/cpu/cacheinfo.c per_cpu(cpu_llc_id, cpu) = c->apicid >> 3; c 673 arch/x86/kernel/cpu/cacheinfo.c u32 llc_index = find_num_cache_leaves(c) - 1; c 682 arch/x86/kernel/cpu/cacheinfo.c per_cpu(cpu_llc_id, cpu) = c->apicid >> bits; c 687 arch/x86/kernel/cpu/cacheinfo.c void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id) c 700 arch/x86/kernel/cpu/cacheinfo.c per_cpu(cpu_llc_id, cpu) = c->apicid >> 3; c 703 arch/x86/kernel/cpu/cacheinfo.c void init_amd_cacheinfo(struct cpuinfo_x86 *c) c 707 arch/x86/kernel/cpu/cacheinfo.c num_cache_leaves = find_num_cache_leaves(c); c 708 arch/x86/kernel/cpu/cacheinfo.c } else if (c->extended_cpuid_level >= 0x80000006) { c 716 arch/x86/kernel/cpu/cacheinfo.c void init_hygon_cacheinfo(struct cpuinfo_x86 *c) c 718 arch/x86/kernel/cpu/cacheinfo.c num_cache_leaves = find_num_cache_leaves(c); c 721 arch/x86/kernel/cpu/cacheinfo.c void init_intel_cacheinfo(struct cpuinfo_x86 *c) c 729 arch/x86/kernel/cpu/cacheinfo.c unsigned int cpu = c->cpu_index; c 732 arch/x86/kernel/cpu/cacheinfo.c if (c->cpuid_level > 3) { c 737 arch/x86/kernel/cpu/cacheinfo.c num_cache_leaves = find_num_cache_leaves(c); c 764 arch/x86/kernel/cpu/cacheinfo.c l2_id = c->apicid & ~((1 << index_msb) - 1); c 770 
arch/x86/kernel/cpu/cacheinfo.c l3_id = c->apicid & ~((1 << index_msb) - 1); c 781 arch/x86/kernel/cpu/cacheinfo.c if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) { c 788 arch/x86/kernel/cpu/cacheinfo.c if (num_cache_leaves != 0 && c->x86 == 15) c 868 arch/x86/kernel/cpu/cacheinfo.c per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; c 871 arch/x86/kernel/cpu/cacheinfo.c c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d)); c 874 arch/x86/kernel/cpu/cacheinfo.c cpu_detect_cache_sizes(c); c 941 arch/x86/kernel/cpu/cacheinfo.c struct cpuinfo_x86 *c = &cpu_data(cpu); c 943 arch/x86/kernel/cpu/cacheinfo.c if (c->x86_vendor == X86_VENDOR_AMD || c 944 arch/x86/kernel/cpu/cacheinfo.c c->x86_vendor == X86_VENDOR_HYGON) { c 959 arch/x86/kernel/cpu/cacheinfo.c if (cpu_data(i).apicid >> index_msb == c->apicid >> index_msb) { c 1008 arch/x86/kernel/cpu/cacheinfo.c struct cpuinfo_x86 *c = &cpu_data(cpu); c 1014 arch/x86/kernel/cpu/cacheinfo.c id4_regs->id = c->apicid >> index_msb; c 28 arch/x86/kernel/cpu/centaur.c static void init_c3(struct cpuinfo_x86 *c) c 55 arch/x86/kernel/cpu/centaur.c c->x86_capability[CPUID_C000_0001_EDX] = cpuid_edx(0xC0000001); c 59 arch/x86/kernel/cpu/centaur.c if (c->x86_model >= 6 && c->x86_model <= 13) { c 63 arch/x86/kernel/cpu/centaur.c set_cpu_cap(c, X86_FEATURE_CX8); c 67 arch/x86/kernel/cpu/centaur.c if (c->x86_model >= 6 && c->x86_model < 9) c 68 arch/x86/kernel/cpu/centaur.c set_cpu_cap(c, X86_FEATURE_3DNOW); c 70 arch/x86/kernel/cpu/centaur.c if (c->x86 == 0x6 && c->x86_model >= 0xf) { c 71 arch/x86/kernel/cpu/centaur.c c->x86_cache_alignment = c->x86_clflush_size * 2; c 72 arch/x86/kernel/cpu/centaur.c set_cpu_cap(c, X86_FEATURE_REP_GOOD); c 75 arch/x86/kernel/cpu/centaur.c cpu_detect_cache_sizes(c); c 99 arch/x86/kernel/cpu/centaur.c static void early_init_centaur(struct cpuinfo_x86 *c) c 101 arch/x86/kernel/cpu/centaur.c switch (c->x86) { c 105 arch/x86/kernel/cpu/centaur.c set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR); c 109 arch/x86/kernel/cpu/centaur.c if (c->x86_model >= 0xf) c 110 arch/x86/kernel/cpu/centaur.c set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); c 114 arch/x86/kernel/cpu/centaur.c set_cpu_cap(c, X86_FEATURE_SYSENTER32); c 116 arch/x86/kernel/cpu/centaur.c if (c->x86_power & (1 << 8)) { c 117 arch/x86/kernel/cpu/centaur.c set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); c 118 arch/x86/kernel/cpu/centaur.c set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); c 122 arch/x86/kernel/cpu/centaur.c static void centaur_detect_vmx_virtcap(struct cpuinfo_x86 *c) c 130 arch/x86/kernel/cpu/centaur.c set_cpu_cap(c, X86_FEATURE_TPR_SHADOW); c 132 arch/x86/kernel/cpu/centaur.c set_cpu_cap(c, X86_FEATURE_VNMI); c 139 arch/x86/kernel/cpu/centaur.c set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY); c 141 arch/x86/kernel/cpu/centaur.c set_cpu_cap(c, X86_FEATURE_EPT); c 143 arch/x86/kernel/cpu/centaur.c set_cpu_cap(c, X86_FEATURE_VPID); c 147 arch/x86/kernel/cpu/centaur.c static void init_centaur(struct cpuinfo_x86 *c) c 160 arch/x86/kernel/cpu/centaur.c clear_cpu_cap(c, 0*32+31); c 162 arch/x86/kernel/cpu/centaur.c early_init_centaur(c); c 163 arch/x86/kernel/cpu/centaur.c init_intel_cacheinfo(c); c 164 arch/x86/kernel/cpu/centaur.c detect_num_cpu_cores(c); c 166 arch/x86/kernel/cpu/centaur.c detect_ht(c); c 169 arch/x86/kernel/cpu/centaur.c if (c->cpuid_level > 9) { c 178 arch/x86/kernel/cpu/centaur.c set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); c 181 arch/x86/kernel/cpu/centaur.c switch (c->x86) { c 184 arch/x86/kernel/cpu/centaur.c switch (c->x86_model) { c 190 arch/x86/kernel/cpu/centaur.c 
clear_cpu_cap(c, X86_FEATURE_TSC); c 193 arch/x86/kernel/cpu/centaur.c switch (c->x86_stepping) { c 229 arch/x86/kernel/cpu/centaur.c set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR); c 231 arch/x86/kernel/cpu/centaur.c set_cpu_cap(c, X86_FEATURE_CX8); c 233 arch/x86/kernel/cpu/centaur.c if (c->x86_model >= 8) c 234 arch/x86/kernel/cpu/centaur.c set_cpu_cap(c, X86_FEATURE_3DNOW); c 240 arch/x86/kernel/cpu/centaur.c c->x86_cache_size = (cc>>24)+(dd>>24); c 242 arch/x86/kernel/cpu/centaur.c sprintf(c->x86_model_id, "WinChip %s", name); c 246 arch/x86/kernel/cpu/centaur.c init_c3(c); c 250 arch/x86/kernel/cpu/centaur.c set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); c 253 arch/x86/kernel/cpu/centaur.c if (cpu_has(c, X86_FEATURE_VMX)) c 254 arch/x86/kernel/cpu/centaur.c centaur_detect_vmx_virtcap(c); c 259 arch/x86/kernel/cpu/centaur.c centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size) c 262 arch/x86/kernel/cpu/centaur.c if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8))) c 270 arch/x86/kernel/cpu/centaur.c if ((c->x86 == 6) && (c->x86_model == 9) && c 271 arch/x86/kernel/cpu/centaur.c (c->x86_stepping == 1) && (size == 65)) c 89 arch/x86/kernel/cpu/common.c static void default_init(struct cpuinfo_x86 *c) c 92 arch/x86/kernel/cpu/common.c cpu_detect_cache_sizes(c); c 96 arch/x86/kernel/cpu/common.c if (c->cpuid_level == -1) { c 98 arch/x86/kernel/cpu/common.c if (c->x86 == 4) c 99 arch/x86/kernel/cpu/common.c strcpy(c->x86_model_id, "486"); c 100 arch/x86/kernel/cpu/common.c else if (c->x86 == 3) c 101 arch/x86/kernel/cpu/common.c strcpy(c->x86_model_id, "386"); c 271 arch/x86/kernel/cpu/common.c static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c) c 275 arch/x86/kernel/cpu/common.c if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr) c 285 arch/x86/kernel/cpu/common.c clear_cpu_cap(c, X86_FEATURE_PN); c 288 arch/x86/kernel/cpu/common.c c->cpuid_level = cpuid_eax(0); c 302 arch/x86/kernel/cpu/common.c static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c) c 316 arch/x86/kernel/cpu/common.c static __always_inline void setup_smep(struct cpuinfo_x86 *c) c 318 arch/x86/kernel/cpu/common.c if (cpu_has(c, X86_FEATURE_SMEP)) c 329 arch/x86/kernel/cpu/common.c static __always_inline void setup_smap(struct cpuinfo_x86 *c) c 336 arch/x86/kernel/cpu/common.c if (cpu_has(c, X86_FEATURE_SMAP)) { c 345 arch/x86/kernel/cpu/common.c static __always_inline void setup_umip(struct cpuinfo_x86 *c) c 352 arch/x86/kernel/cpu/common.c if (!cpu_has(c, X86_FEATURE_UMIP)) c 445 arch/x86/kernel/cpu/common.c static __always_inline void setup_pku(struct cpuinfo_x86 *c) c 453 arch/x86/kernel/cpu/common.c if (!cpu_has(c, X86_FEATURE_PKU)) c 467 arch/x86/kernel/cpu/common.c set_cpu_cap(c, X86_FEATURE_OSPKE); c 509 arch/x86/kernel/cpu/common.c static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn) c 515 arch/x86/kernel/cpu/common.c if (!cpu_has(c, df->feature)) c 525 arch/x86/kernel/cpu/common.c (u32)df->level > (u32)c->extended_cpuid_level : c 526 arch/x86/kernel/cpu/common.c (s32)df->level > (s32)c->cpuid_level)) c 529 arch/x86/kernel/cpu/common.c clear_cpu_cap(c, df->feature); c 546 arch/x86/kernel/cpu/common.c static const char *table_lookup_model(struct cpuinfo_x86 *c) c 551 arch/x86/kernel/cpu/common.c if (c->x86_model >= 16) c 560 arch/x86/kernel/cpu/common.c if (info->family == c->x86) c 561 arch/x86/kernel/cpu/common.c return info->model_names[c->x86_model]; c 623 arch/x86/kernel/cpu/common.c static void get_model_name(struct cpuinfo_x86 *c) c 628 
arch/x86/kernel/cpu/common.c if (c->extended_cpuid_level < 0x80000004) c 631 arch/x86/kernel/cpu/common.c v = (unsigned int *)c->x86_model_id; c 635 arch/x86/kernel/cpu/common.c c->x86_model_id[48] = 0; c 638 arch/x86/kernel/cpu/common.c p = q = s = &c->x86_model_id[0]; c 654 arch/x86/kernel/cpu/common.c void detect_num_cpu_cores(struct cpuinfo_x86 *c) c 658 arch/x86/kernel/cpu/common.c c->x86_max_cores = 1; c 659 arch/x86/kernel/cpu/common.c if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4) c 664 arch/x86/kernel/cpu/common.c c->x86_max_cores = (eax >> 26) + 1; c 667 arch/x86/kernel/cpu/common.c void cpu_detect_cache_sizes(struct cpuinfo_x86 *c) c 671 arch/x86/kernel/cpu/common.c n = c->extended_cpuid_level; c 675 arch/x86/kernel/cpu/common.c c->x86_cache_size = (ecx>>24) + (edx>>24); c 678 arch/x86/kernel/cpu/common.c c->x86_tlbsize = 0; c 689 arch/x86/kernel/cpu/common.c c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff); c 693 arch/x86/kernel/cpu/common.c l2size = this_cpu->legacy_cache_size(c, l2size); c 703 arch/x86/kernel/cpu/common.c c->x86_cache_size = l2size; c 714 arch/x86/kernel/cpu/common.c static void cpu_detect_tlb(struct cpuinfo_x86 *c) c 717 arch/x86/kernel/cpu/common.c this_cpu->c_detect_tlb(c); c 728 arch/x86/kernel/cpu/common.c int detect_ht_early(struct cpuinfo_x86 *c) c 733 arch/x86/kernel/cpu/common.c if (!cpu_has(c, X86_FEATURE_HT)) c 736 arch/x86/kernel/cpu/common.c if (cpu_has(c, X86_FEATURE_CMP_LEGACY)) c 739 arch/x86/kernel/cpu/common.c if (cpu_has(c, X86_FEATURE_XTOPOLOGY)) c 751 arch/x86/kernel/cpu/common.c void detect_ht(struct cpuinfo_x86 *c) c 756 arch/x86/kernel/cpu/common.c if (detect_ht_early(c) < 0) c 760 arch/x86/kernel/cpu/common.c c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb); c 762 arch/x86/kernel/cpu/common.c smp_num_siblings = smp_num_siblings / c->x86_max_cores; c 766 arch/x86/kernel/cpu/common.c core_bits = get_count_order(c->x86_max_cores); c 768 arch/x86/kernel/cpu/common.c c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) & c 773 arch/x86/kernel/cpu/common.c static void get_cpu_vendor(struct cpuinfo_x86 *c) c 775 arch/x86/kernel/cpu/common.c char *v = c->x86_vendor_id; c 787 arch/x86/kernel/cpu/common.c c->x86_vendor = this_cpu->c_x86_vendor; c 795 arch/x86/kernel/cpu/common.c c->x86_vendor = X86_VENDOR_UNKNOWN; c 799 arch/x86/kernel/cpu/common.c void cpu_detect(struct cpuinfo_x86 *c) c 802 arch/x86/kernel/cpu/common.c cpuid(0x00000000, (unsigned int *)&c->cpuid_level, c 803 arch/x86/kernel/cpu/common.c (unsigned int *)&c->x86_vendor_id[0], c 804 arch/x86/kernel/cpu/common.c (unsigned int *)&c->x86_vendor_id[8], c 805 arch/x86/kernel/cpu/common.c (unsigned int *)&c->x86_vendor_id[4]); c 807 arch/x86/kernel/cpu/common.c c->x86 = 4; c 809 arch/x86/kernel/cpu/common.c if (c->cpuid_level >= 0x00000001) { c 813 arch/x86/kernel/cpu/common.c c->x86 = x86_family(tfms); c 814 arch/x86/kernel/cpu/common.c c->x86_model = x86_model(tfms); c 815 arch/x86/kernel/cpu/common.c c->x86_stepping = x86_stepping(tfms); c 818 arch/x86/kernel/cpu/common.c c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; c 819 arch/x86/kernel/cpu/common.c c->x86_cache_alignment = c->x86_clflush_size; c 824 arch/x86/kernel/cpu/common.c static void apply_forced_caps(struct cpuinfo_x86 *c) c 829 arch/x86/kernel/cpu/common.c c->x86_capability[i] &= ~cpu_caps_cleared[i]; c 830 arch/x86/kernel/cpu/common.c c->x86_capability[i] |= cpu_caps_set[i]; c 834 arch/x86/kernel/cpu/common.c static void init_speculation_control(struct cpuinfo_x86 *c) c 842 
arch/x86/kernel/cpu/common.c if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) { c 843 arch/x86/kernel/cpu/common.c set_cpu_cap(c, X86_FEATURE_IBRS); c 844 arch/x86/kernel/cpu/common.c set_cpu_cap(c, X86_FEATURE_IBPB); c 845 arch/x86/kernel/cpu/common.c set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); c 848 arch/x86/kernel/cpu/common.c if (cpu_has(c, X86_FEATURE_INTEL_STIBP)) c 849 arch/x86/kernel/cpu/common.c set_cpu_cap(c, X86_FEATURE_STIBP); c 851 arch/x86/kernel/cpu/common.c if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) || c 852 arch/x86/kernel/cpu/common.c cpu_has(c, X86_FEATURE_VIRT_SSBD)) c 853 arch/x86/kernel/cpu/common.c set_cpu_cap(c, X86_FEATURE_SSBD); c 855 arch/x86/kernel/cpu/common.c if (cpu_has(c, X86_FEATURE_AMD_IBRS)) { c 856 arch/x86/kernel/cpu/common.c set_cpu_cap(c, X86_FEATURE_IBRS); c 857 arch/x86/kernel/cpu/common.c set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); c 860 arch/x86/kernel/cpu/common.c if (cpu_has(c, X86_FEATURE_AMD_IBPB)) c 861 arch/x86/kernel/cpu/common.c set_cpu_cap(c, X86_FEATURE_IBPB); c 863 arch/x86/kernel/cpu/common.c if (cpu_has(c, X86_FEATURE_AMD_STIBP)) { c 864 arch/x86/kernel/cpu/common.c set_cpu_cap(c, X86_FEATURE_STIBP); c 865 arch/x86/kernel/cpu/common.c set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); c 868 arch/x86/kernel/cpu/common.c if (cpu_has(c, X86_FEATURE_AMD_SSBD)) { c 869 arch/x86/kernel/cpu/common.c set_cpu_cap(c, X86_FEATURE_SSBD); c 870 arch/x86/kernel/cpu/common.c set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); c 871 arch/x86/kernel/cpu/common.c clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD); c 875 arch/x86/kernel/cpu/common.c static void init_cqm(struct cpuinfo_x86 *c) c 877 arch/x86/kernel/cpu/common.c if (!cpu_has(c, X86_FEATURE_CQM_LLC)) { c 878 arch/x86/kernel/cpu/common.c c->x86_cache_max_rmid = -1; c 879 arch/x86/kernel/cpu/common.c c->x86_cache_occ_scale = -1; c 884 arch/x86/kernel/cpu/common.c c->x86_cache_max_rmid = cpuid_ebx(0xf); c 886 arch/x86/kernel/cpu/common.c if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) || c 887 arch/x86/kernel/cpu/common.c cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) || c 888 arch/x86/kernel/cpu/common.c cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) { c 894 arch/x86/kernel/cpu/common.c c->x86_cache_max_rmid = ecx; c 895 arch/x86/kernel/cpu/common.c c->x86_cache_occ_scale = ebx; c 899 arch/x86/kernel/cpu/common.c void get_cpu_cap(struct cpuinfo_x86 *c) c 904 arch/x86/kernel/cpu/common.c if (c->cpuid_level >= 0x00000001) { c 907 arch/x86/kernel/cpu/common.c c->x86_capability[CPUID_1_ECX] = ecx; c 908 arch/x86/kernel/cpu/common.c c->x86_capability[CPUID_1_EDX] = edx; c 912 arch/x86/kernel/cpu/common.c if (c->cpuid_level >= 0x00000006) c 913 arch/x86/kernel/cpu/common.c c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006); c 916 arch/x86/kernel/cpu/common.c if (c->cpuid_level >= 0x00000007) { c 918 arch/x86/kernel/cpu/common.c c->x86_capability[CPUID_7_0_EBX] = ebx; c 919 arch/x86/kernel/cpu/common.c c->x86_capability[CPUID_7_ECX] = ecx; c 920 arch/x86/kernel/cpu/common.c c->x86_capability[CPUID_7_EDX] = edx; c 925 arch/x86/kernel/cpu/common.c c->x86_capability[CPUID_7_1_EAX] = eax; c 930 arch/x86/kernel/cpu/common.c if (c->cpuid_level >= 0x0000000d) { c 933 arch/x86/kernel/cpu/common.c c->x86_capability[CPUID_D_1_EAX] = eax; c 938 arch/x86/kernel/cpu/common.c c->extended_cpuid_level = eax; c 944 arch/x86/kernel/cpu/common.c c->x86_capability[CPUID_8000_0001_ECX] = ecx; c 945 arch/x86/kernel/cpu/common.c c->x86_capability[CPUID_8000_0001_EDX] = edx; c 949 arch/x86/kernel/cpu/common.c if (c->extended_cpuid_level >= 0x80000007) { c 952 
arch/x86/kernel/cpu/common.c c->x86_capability[CPUID_8000_0007_EBX] = ebx; c 953 arch/x86/kernel/cpu/common.c c->x86_power = edx; c 956 arch/x86/kernel/cpu/common.c if (c->extended_cpuid_level >= 0x80000008) { c 958 arch/x86/kernel/cpu/common.c c->x86_capability[CPUID_8000_0008_EBX] = ebx; c 961 arch/x86/kernel/cpu/common.c if (c->extended_cpuid_level >= 0x8000000a) c 962 arch/x86/kernel/cpu/common.c c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a); c 964 arch/x86/kernel/cpu/common.c init_scattered_cpuid_features(c); c 965 arch/x86/kernel/cpu/common.c init_speculation_control(c); c 966 arch/x86/kernel/cpu/common.c init_cqm(c); c 973 arch/x86/kernel/cpu/common.c apply_forced_caps(c); c 976 arch/x86/kernel/cpu/common.c void get_cpu_address_sizes(struct cpuinfo_x86 *c) c 980 arch/x86/kernel/cpu/common.c if (c->extended_cpuid_level >= 0x80000008) { c 983 arch/x86/kernel/cpu/common.c c->x86_virt_bits = (eax >> 8) & 0xff; c 984 arch/x86/kernel/cpu/common.c c->x86_phys_bits = eax & 0xff; c 987 arch/x86/kernel/cpu/common.c else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36)) c 988 arch/x86/kernel/cpu/common.c c->x86_phys_bits = 36; c 990 arch/x86/kernel/cpu/common.c c->x86_cache_bits = c->x86_phys_bits; c 993 arch/x86/kernel/cpu/common.c static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) c 1003 arch/x86/kernel/cpu/common.c c->x86 = 4; c 1005 arch/x86/kernel/cpu/common.c c->x86 = 3; c 1009 arch/x86/kernel/cpu/common.c c->x86_vendor_id[0] = 0; c 1010 arch/x86/kernel/cpu/common.c cpu_devs[i]->c_identify(c); c 1011 arch/x86/kernel/cpu/common.c if (c->x86_vendor_id[0]) { c 1012 arch/x86/kernel/cpu/common.c get_cpu_vendor(c); c 1134 arch/x86/kernel/cpu/common.c static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) c 1153 arch/x86/kernel/cpu/common.c !cpu_has(c, X86_FEATURE_AMD_SSB_NO)) c 1180 arch/x86/kernel/cpu/common.c (cpu_has(c, X86_FEATURE_RTM) || c 1188 arch/x86/kernel/cpu/common.c if ((cpu_has(c, X86_FEATURE_RDRAND) || c 1189 arch/x86/kernel/cpu/common.c cpu_has(c, X86_FEATURE_RDSEED)) && c 1235 arch/x86/kernel/cpu/common.c static void __init early_identify_cpu(struct cpuinfo_x86 *c) c 1238 arch/x86/kernel/cpu/common.c c->x86_clflush_size = 64; c 1239 arch/x86/kernel/cpu/common.c c->x86_phys_bits = 36; c 1240 arch/x86/kernel/cpu/common.c c->x86_virt_bits = 48; c 1242 arch/x86/kernel/cpu/common.c c->x86_clflush_size = 32; c 1243 arch/x86/kernel/cpu/common.c c->x86_phys_bits = 32; c 1244 arch/x86/kernel/cpu/common.c c->x86_virt_bits = 32; c 1246 arch/x86/kernel/cpu/common.c c->x86_cache_alignment = c->x86_clflush_size; c 1248 arch/x86/kernel/cpu/common.c memset(&c->x86_capability, 0, sizeof(c->x86_capability)); c 1249 arch/x86/kernel/cpu/common.c c->extended_cpuid_level = 0; c 1252 arch/x86/kernel/cpu/common.c identify_cpu_without_cpuid(c); c 1256 arch/x86/kernel/cpu/common.c cpu_detect(c); c 1257 arch/x86/kernel/cpu/common.c get_cpu_vendor(c); c 1258 arch/x86/kernel/cpu/common.c get_cpu_cap(c); c 1259 arch/x86/kernel/cpu/common.c get_cpu_address_sizes(c); c 1263 arch/x86/kernel/cpu/common.c this_cpu->c_early_init(c); c 1265 arch/x86/kernel/cpu/common.c c->cpu_index = 0; c 1266 arch/x86/kernel/cpu/common.c filter_cpuid_features(c, false); c 1269 arch/x86/kernel/cpu/common.c this_cpu->c_bsp_init(c); c 1276 arch/x86/kernel/cpu/common.c cpu_set_bug_bits(c); c 1278 arch/x86/kernel/cpu/common.c fpu__init_system(c); c 1339 arch/x86/kernel/cpu/common.c static void detect_null_seg_behavior(struct cpuinfo_x86 *c) c 1363 arch/x86/kernel/cpu/common.c 
set_cpu_bug(c, X86_BUG_NULL_SEG); c 1368 arch/x86/kernel/cpu/common.c static void generic_identify(struct cpuinfo_x86 *c) c 1370 arch/x86/kernel/cpu/common.c c->extended_cpuid_level = 0; c 1373 arch/x86/kernel/cpu/common.c identify_cpu_without_cpuid(c); c 1379 arch/x86/kernel/cpu/common.c cpu_detect(c); c 1381 arch/x86/kernel/cpu/common.c get_cpu_vendor(c); c 1383 arch/x86/kernel/cpu/common.c get_cpu_cap(c); c 1385 arch/x86/kernel/cpu/common.c get_cpu_address_sizes(c); c 1387 arch/x86/kernel/cpu/common.c if (c->cpuid_level >= 0x00000001) { c 1388 arch/x86/kernel/cpu/common.c c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF; c 1391 arch/x86/kernel/cpu/common.c c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); c 1393 arch/x86/kernel/cpu/common.c c->apicid = c->initial_apicid; c 1396 arch/x86/kernel/cpu/common.c c->phys_proc_id = c->initial_apicid; c 1399 arch/x86/kernel/cpu/common.c get_model_name(c); /* Default name */ c 1401 arch/x86/kernel/cpu/common.c detect_null_seg_behavior(c); c 1421 arch/x86/kernel/cpu/common.c set_cpu_bug(c, X86_BUG_ESPFIX); c 1424 arch/x86/kernel/cpu/common.c set_cpu_bug(c, X86_BUG_ESPFIX); c 1429 arch/x86/kernel/cpu/common.c static void x86_init_cache_qos(struct cpuinfo_x86 *c) c 1436 arch/x86/kernel/cpu/common.c if (c != &boot_cpu_data) { c 1439 arch/x86/kernel/cpu/common.c c->x86_cache_max_rmid); c 1447 arch/x86/kernel/cpu/common.c static void validate_apic_and_package_id(struct cpuinfo_x86 *c) c 1454 arch/x86/kernel/cpu/common.c if (apicid != c->apicid) { c 1456 arch/x86/kernel/cpu/common.c cpu, apicid, c->initial_apicid); c 1458 arch/x86/kernel/cpu/common.c BUG_ON(topology_update_package_map(c->phys_proc_id, cpu)); c 1459 arch/x86/kernel/cpu/common.c BUG_ON(topology_update_die_map(c->cpu_die_id, cpu)); c 1461 arch/x86/kernel/cpu/common.c c->logical_proc_id = 0; c 1468 arch/x86/kernel/cpu/common.c static void identify_cpu(struct cpuinfo_x86 *c) c 1472 arch/x86/kernel/cpu/common.c c->loops_per_jiffy = loops_per_jiffy; c 1473 arch/x86/kernel/cpu/common.c c->x86_cache_size = 0; c 1474 arch/x86/kernel/cpu/common.c c->x86_vendor = X86_VENDOR_UNKNOWN; c 1475 arch/x86/kernel/cpu/common.c c->x86_model = c->x86_stepping = 0; /* So far unknown... 
*/ c 1476 arch/x86/kernel/cpu/common.c c->x86_vendor_id[0] = '\0'; /* Unset */ c 1477 arch/x86/kernel/cpu/common.c c->x86_model_id[0] = '\0'; /* Unset */ c 1478 arch/x86/kernel/cpu/common.c c->x86_max_cores = 1; c 1479 arch/x86/kernel/cpu/common.c c->x86_coreid_bits = 0; c 1480 arch/x86/kernel/cpu/common.c c->cu_id = 0xff; c 1482 arch/x86/kernel/cpu/common.c c->x86_clflush_size = 64; c 1483 arch/x86/kernel/cpu/common.c c->x86_phys_bits = 36; c 1484 arch/x86/kernel/cpu/common.c c->x86_virt_bits = 48; c 1486 arch/x86/kernel/cpu/common.c c->cpuid_level = -1; /* CPUID not detected */ c 1487 arch/x86/kernel/cpu/common.c c->x86_clflush_size = 32; c 1488 arch/x86/kernel/cpu/common.c c->x86_phys_bits = 32; c 1489 arch/x86/kernel/cpu/common.c c->x86_virt_bits = 32; c 1491 arch/x86/kernel/cpu/common.c c->x86_cache_alignment = c->x86_clflush_size; c 1492 arch/x86/kernel/cpu/common.c memset(&c->x86_capability, 0, sizeof(c->x86_capability)); c 1494 arch/x86/kernel/cpu/common.c generic_identify(c); c 1497 arch/x86/kernel/cpu/common.c this_cpu->c_identify(c); c 1500 arch/x86/kernel/cpu/common.c apply_forced_caps(c); c 1503 arch/x86/kernel/cpu/common.c c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); c 1517 arch/x86/kernel/cpu/common.c this_cpu->c_init(c); c 1520 arch/x86/kernel/cpu/common.c squash_the_stupid_serial_number(c); c 1523 arch/x86/kernel/cpu/common.c setup_smep(c); c 1524 arch/x86/kernel/cpu/common.c setup_smap(c); c 1525 arch/x86/kernel/cpu/common.c setup_umip(c); c 1533 arch/x86/kernel/cpu/common.c filter_cpuid_features(c, true); c 1536 arch/x86/kernel/cpu/common.c if (!c->x86_model_id[0]) { c 1538 arch/x86/kernel/cpu/common.c p = table_lookup_model(c); c 1540 arch/x86/kernel/cpu/common.c strcpy(c->x86_model_id, p); c 1543 arch/x86/kernel/cpu/common.c sprintf(c->x86_model_id, "%02x/%02x", c 1544 arch/x86/kernel/cpu/common.c c->x86, c->x86_model); c 1548 arch/x86/kernel/cpu/common.c detect_ht(c); c 1551 arch/x86/kernel/cpu/common.c x86_init_rdrand(c); c 1552 arch/x86/kernel/cpu/common.c x86_init_cache_qos(c); c 1553 arch/x86/kernel/cpu/common.c setup_pku(c); c 1559 arch/x86/kernel/cpu/common.c apply_forced_caps(c); c 1567 arch/x86/kernel/cpu/common.c if (c != &boot_cpu_data) { c 1570 arch/x86/kernel/cpu/common.c boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; c 1574 arch/x86/kernel/cpu/common.c c->x86_capability[i] |= boot_cpu_data.x86_capability[i]; c 1578 arch/x86/kernel/cpu/common.c mcheck_cpu_init(c); c 1580 arch/x86/kernel/cpu/common.c select_idle_routine(c); c 1630 arch/x86/kernel/cpu/common.c void identify_secondary_cpu(struct cpuinfo_x86 *c) c 1632 arch/x86/kernel/cpu/common.c BUG_ON(c == &boot_cpu_data); c 1633 arch/x86/kernel/cpu/common.c identify_cpu(c); c 1638 arch/x86/kernel/cpu/common.c validate_apic_and_package_id(c); c 1651 arch/x86/kernel/cpu/common.c void print_cpu_info(struct cpuinfo_x86 *c) c 1655 arch/x86/kernel/cpu/common.c if (c->x86_vendor < X86_VENDOR_NUM) { c 1658 arch/x86/kernel/cpu/common.c if (c->cpuid_level >= 0) c 1659 arch/x86/kernel/cpu/common.c vendor = c->x86_vendor_id; c 1662 arch/x86/kernel/cpu/common.c if (vendor && !strstr(c->x86_model_id, vendor)) c 1665 arch/x86/kernel/cpu/common.c if (c->x86_model_id[0]) c 1666 arch/x86/kernel/cpu/common.c pr_cont("%s", c->x86_model_id); c 1668 arch/x86/kernel/cpu/common.c pr_cont("%d86", c->x86); c 1670 arch/x86/kernel/cpu/common.c pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model); c 1672 arch/x86/kernel/cpu/common.c if (c->x86_stepping || c->cpuid_level >= 0) c 1673 
arch/x86/kernel/cpu/common.c pr_cont(", stepping: 0x%x)\n", c->x86_stepping); c 63 arch/x86/kernel/cpu/cpu.h extern void get_cpu_cap(struct cpuinfo_x86 *c); c 64 arch/x86/kernel/cpu/cpu.h extern void get_cpu_address_sizes(struct cpuinfo_x86 *c); c 65 arch/x86/kernel/cpu/cpu.h extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); c 66 arch/x86/kernel/cpu/cpu.h extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); c 67 arch/x86/kernel/cpu/cpu.h extern void init_intel_cacheinfo(struct cpuinfo_x86 *c); c 68 arch/x86/kernel/cpu/cpu.h extern void init_amd_cacheinfo(struct cpuinfo_x86 *c); c 69 arch/x86/kernel/cpu/cpu.h extern void init_hygon_cacheinfo(struct cpuinfo_x86 *c); c 71 arch/x86/kernel/cpu/cpu.h extern void detect_num_cpu_cores(struct cpuinfo_x86 *c); c 72 arch/x86/kernel/cpu/cpu.h extern int detect_extended_topology_early(struct cpuinfo_x86 *c); c 73 arch/x86/kernel/cpu/cpu.h extern int detect_extended_topology(struct cpuinfo_x86 *c); c 74 arch/x86/kernel/cpu/cpu.h extern int detect_ht_early(struct cpuinfo_x86 *c); c 75 arch/x86/kernel/cpu/cpu.h extern void detect_ht(struct cpuinfo_x86 *c); c 75 arch/x86/kernel/cpu/cpuid-deps.c static inline void clear_feature(struct cpuinfo_x86 *c, unsigned int feature) c 82 arch/x86/kernel/cpu/cpuid-deps.c if (!c) { c 86 arch/x86/kernel/cpu/cpuid-deps.c clear_bit(feature, (unsigned long *)c->x86_capability); c 93 arch/x86/kernel/cpu/cpuid-deps.c static void do_clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int feature) c 102 arch/x86/kernel/cpu/cpuid-deps.c clear_feature(c, feature); c 118 arch/x86/kernel/cpu/cpuid-deps.c clear_feature(c, d->feature); c 123 arch/x86/kernel/cpu/cpuid-deps.c void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int feature) c 125 arch/x86/kernel/cpu/cpuid-deps.c do_clear_cpu_cap(c, feature); c 93 arch/x86/kernel/cpu/cyrix.c static void check_cx686_slop(struct cpuinfo_x86 *c) c 112 arch/x86/kernel/cpu/cyrix.c c->loops_per_jiffy = loops_per_jiffy; c 172 arch/x86/kernel/cpu/cyrix.c static void early_init_cyrix(struct cpuinfo_x86 *c) c 182 arch/x86/kernel/cpu/cyrix.c set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); c 186 arch/x86/kernel/cpu/cyrix.c set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); c 191 arch/x86/kernel/cpu/cyrix.c static void init_cyrix(struct cpuinfo_x86 *c) c 194 arch/x86/kernel/cpu/cyrix.c char *buf = c->x86_model_id; c 201 arch/x86/kernel/cpu/cyrix.c clear_cpu_cap(c, 0*32+31); c 204 arch/x86/kernel/cpu/cyrix.c if (test_cpu_cap(c, 1*32+24)) { c 205 arch/x86/kernel/cpu/cyrix.c clear_cpu_cap(c, 1*32+24); c 206 arch/x86/kernel/cpu/cyrix.c set_cpu_cap(c, X86_FEATURE_CXMMX); c 211 arch/x86/kernel/cpu/cyrix.c check_cx686_slop(c); c 217 arch/x86/kernel/cpu/cyrix.c c->x86_model = (dir1 >> 4) + 1; c 218 arch/x86/kernel/cpu/cyrix.c c->x86_stepping = dir1 & 0xf; c 249 arch/x86/kernel/cpu/cyrix.c (c->x86_model)++; c 253 arch/x86/kernel/cpu/cyrix.c set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); c 255 arch/x86/kernel/cpu/cyrix.c set_cpu_bug(c, X86_BUG_COMA); c 294 arch/x86/kernel/cpu/cyrix.c c->x86_cache_size = 16; /* Yep 16K integrated cache thats it */ c 297 arch/x86/kernel/cpu/cyrix.c if (c->cpuid_level == 2) { c 314 arch/x86/kernel/cpu/cyrix.c c->x86_model = (dir1 & 0x20) ? 
1 : 2; c 325 arch/x86/kernel/cpu/cyrix.c set_cpu_bug(c, X86_BUG_COMA); c 331 arch/x86/kernel/cpu/cyrix.c (c->x86_model)++; c 333 arch/x86/kernel/cpu/cyrix.c set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); c 363 arch/x86/kernel/cpu/cyrix.c static void init_nsc(struct cpuinfo_x86 *c) c 380 arch/x86/kernel/cpu/cyrix.c if (c->x86 == 5 && c->x86_model == 5) c 381 arch/x86/kernel/cpu/cyrix.c cpu_detect_cache_sizes(c); c 383 arch/x86/kernel/cpu/cyrix.c init_cyrix(c); c 412 arch/x86/kernel/cpu/cyrix.c static void cyrix_identify(struct cpuinfo_x86 *c) c 415 arch/x86/kernel/cpu/cyrix.c if (c->x86 == 4 && test_cyrix_52div()) { c 418 arch/x86/kernel/cpu/cyrix.c strcpy(c->x86_vendor_id, "CyrixInstead"); c 419 arch/x86/kernel/cpu/cyrix.c c->x86_vendor = X86_VENDOR_CYRIX; c 53 arch/x86/kernel/cpu/hygon.c static void hygon_get_topology_early(struct cpuinfo_x86 *c) c 55 arch/x86/kernel/cpu/hygon.c if (cpu_has(c, X86_FEATURE_TOPOEXT)) c 65 arch/x86/kernel/cpu/hygon.c static void hygon_get_topology(struct cpuinfo_x86 *c) c 79 arch/x86/kernel/cpu/hygon.c c->cpu_core_id = ebx & 0xff; c 82 arch/x86/kernel/cpu/hygon.c c->x86_max_cores /= smp_num_siblings; c 88 arch/x86/kernel/cpu/hygon.c err = detect_extended_topology(c); c 90 arch/x86/kernel/cpu/hygon.c c->x86_coreid_bits = get_count_order(c->x86_max_cores); c 93 arch/x86/kernel/cpu/hygon.c c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT; c 95 arch/x86/kernel/cpu/hygon.c cacheinfo_hygon_init_llc_id(c, cpu, node_id); c 96 arch/x86/kernel/cpu/hygon.c } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { c 107 arch/x86/kernel/cpu/hygon.c set_cpu_cap(c, X86_FEATURE_AMD_DCM); c 114 arch/x86/kernel/cpu/hygon.c static void hygon_detect_cmp(struct cpuinfo_x86 *c) c 119 arch/x86/kernel/cpu/hygon.c bits = c->x86_coreid_bits; c 121 arch/x86/kernel/cpu/hygon.c c->cpu_core_id = c->initial_apicid & ((1 << bits)-1); c 123 arch/x86/kernel/cpu/hygon.c c->phys_proc_id = c->initial_apicid >> bits; c 125 arch/x86/kernel/cpu/hygon.c per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; c 128 arch/x86/kernel/cpu/hygon.c static void srat_detect_node(struct cpuinfo_x86 *c) c 133 arch/x86/kernel/cpu/hygon.c unsigned int apicid = c->apicid; c 145 arch/x86/kernel/cpu/hygon.c x86_cpuinit.fixup_cpu_id(c, node); c 166 arch/x86/kernel/cpu/hygon.c int ht_nodeid = c->initial_apicid; c 178 arch/x86/kernel/cpu/hygon.c static void early_init_hygon_mc(struct cpuinfo_x86 *c) c 184 arch/x86/kernel/cpu/hygon.c if (c->extended_cpuid_level < 0x80000008) c 189 arch/x86/kernel/cpu/hygon.c c->x86_max_cores = (ecx & 0xff) + 1; c 196 arch/x86/kernel/cpu/hygon.c while ((1 << bits) < c->x86_max_cores) c 200 arch/x86/kernel/cpu/hygon.c c->x86_coreid_bits = bits; c 204 arch/x86/kernel/cpu/hygon.c static void bsp_init_hygon(struct cpuinfo_x86 *c) c 223 arch/x86/kernel/cpu/hygon.c if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { c 231 arch/x86/kernel/cpu/hygon.c if (cpu_has(c, X86_FEATURE_MWAITX)) c 260 arch/x86/kernel/cpu/hygon.c static void early_init_hygon(struct cpuinfo_x86 *c) c 264 arch/x86/kernel/cpu/hygon.c early_init_hygon_mc(c); c 266 arch/x86/kernel/cpu/hygon.c set_cpu_cap(c, X86_FEATURE_K8); c 268 arch/x86/kernel/cpu/hygon.c rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); c 274 arch/x86/kernel/cpu/hygon.c if (c->x86_power & (1 << 8)) { c 275 arch/x86/kernel/cpu/hygon.c set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); c 276 arch/x86/kernel/cpu/hygon.c set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); c 280 arch/x86/kernel/cpu/hygon.c if (c->x86_power & BIT(12)) c 281 arch/x86/kernel/cpu/hygon.c set_cpu_cap(c, X86_FEATURE_ACC_POWER); 
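To illustrate the pattern behind the cpu_has()/set_cpu_cap()/clear_cpu_cap() entries listed above (cpufeature.h, cpuid-deps.c, amd.c, centaur.c, hygon.c), the following is a minimal, self-contained userspace sketch of testing and setting a capability as one bit in a word array. The names fake_cpuinfo and FAKE_FEATURE_* are invented for illustration only; the kernel itself uses test_bit()/set_bit() on cpuinfo_x86.x86_capability.

/*
 * Sketch of the capability-bitmap pattern: a feature is addressed as
 * (word = bit / 32, bit-in-word = bit % 32).  All names are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

#define NCAPINTS 4                                /* number of 32-bit capability words */
#define FAKE_FEATURE_CONSTANT_TSC  (0 * 32 + 8)   /* word 0, bit 8  (invented numbering) */
#define FAKE_FEATURE_NONSTOP_TSC   (0 * 32 + 24)  /* word 0, bit 24 (invented numbering) */

struct fake_cpuinfo {
	uint32_t x86_capability[NCAPINTS];
};

static int fake_cpu_has(const struct fake_cpuinfo *c, int bit)
{
	return (c->x86_capability[bit / 32] >> (bit % 32)) & 1;
}

static void fake_set_cpu_cap(struct fake_cpuinfo *c, int bit)
{
	c->x86_capability[bit / 32] |= 1u << (bit % 32);
}

int main(void)
{
	struct fake_cpuinfo c = { { 0 } };
	uint32_t x86_power = 1u << 8;	/* pretend CPUID reported the invariant-TSC power bit */

	/* mirrors the "if (c->x86_power & (1 << 8)) set_cpu_cap(...)" shape seen above */
	if (x86_power & (1 << 8)) {
		fake_set_cpu_cap(&c, FAKE_FEATURE_CONSTANT_TSC);
		fake_set_cpu_cap(&c, FAKE_FEATURE_NONSTOP_TSC);
	}

	printf("constant_tsc=%d nonstop_tsc=%d\n",
	       fake_cpu_has(&c, FAKE_FEATURE_CONSTANT_TSC),
	       fake_cpu_has(&c, FAKE_FEATURE_NONSTOP_TSC));
	return 0;
}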
c 284 arch/x86/kernel/cpu/hygon.c set_cpu_cap(c, X86_FEATURE_SYSCALL32); c 293 arch/x86/kernel/cpu/hygon.c set_cpu_cap(c, X86_FEATURE_EXTD_APICID); c 301 arch/x86/kernel/cpu/hygon.c set_cpu_cap(c, X86_FEATURE_VMMCALL); c 303 arch/x86/kernel/cpu/hygon.c hygon_get_topology_early(c); c 306 arch/x86/kernel/cpu/hygon.c static void init_hygon(struct cpuinfo_x86 *c) c 308 arch/x86/kernel/cpu/hygon.c early_init_hygon(c); c 314 arch/x86/kernel/cpu/hygon.c clear_cpu_cap(c, 0*32+31); c 316 arch/x86/kernel/cpu/hygon.c set_cpu_cap(c, X86_FEATURE_REP_GOOD); c 319 arch/x86/kernel/cpu/hygon.c c->apicid = hard_smp_processor_id(); c 321 arch/x86/kernel/cpu/hygon.c set_cpu_cap(c, X86_FEATURE_ZEN); c 322 arch/x86/kernel/cpu/hygon.c set_cpu_cap(c, X86_FEATURE_CPB); c 324 arch/x86/kernel/cpu/hygon.c cpu_detect_cache_sizes(c); c 326 arch/x86/kernel/cpu/hygon.c hygon_detect_cmp(c); c 327 arch/x86/kernel/cpu/hygon.c hygon_get_topology(c); c 328 arch/x86/kernel/cpu/hygon.c srat_detect_node(c); c 330 arch/x86/kernel/cpu/hygon.c init_hygon_cacheinfo(c); c 332 arch/x86/kernel/cpu/hygon.c if (cpu_has(c, X86_FEATURE_XMM2)) { c 343 arch/x86/kernel/cpu/hygon.c set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); c 349 arch/x86/kernel/cpu/hygon.c set_cpu_cap(c, X86_FEATURE_ARAT); c 352 arch/x86/kernel/cpu/hygon.c if (!cpu_has(c, X86_FEATURE_XENPV)) c 353 arch/x86/kernel/cpu/hygon.c set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS); c 356 arch/x86/kernel/cpu/hygon.c static void cpu_detect_tlb_hygon(struct cpuinfo_x86 *c) c 361 arch/x86/kernel/cpu/hygon.c if (c->extended_cpuid_level < 0x80000006) c 48 arch/x86/kernel/cpu/intel.c void check_mpx_erratum(struct cpuinfo_x86 *c) c 63 arch/x86/kernel/cpu/intel.c if (cpu_has(c, X86_FEATURE_MPX) && !cpu_has(c, X86_FEATURE_SMEP)) { c 76 arch/x86/kernel/cpu/intel.c static void check_memory_type_self_snoop_errata(struct cpuinfo_x86 *c) c 78 arch/x86/kernel/cpu/intel.c switch (c->x86_model) { c 104 arch/x86/kernel/cpu/intel.c static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c) c 110 arch/x86/kernel/cpu/intel.c if (c->x86 != 6) c 112 arch/x86/kernel/cpu/intel.c switch (c->x86_model) { c 123 arch/x86/kernel/cpu/intel.c set_cpu_cap(c, X86_FEATURE_RING3MWAIT); c 127 arch/x86/kernel/cpu/intel.c if (c == &boot_cpu_data) c 168 arch/x86/kernel/cpu/intel.c static bool bad_spectre_microcode(struct cpuinfo_x86 *c) c 176 arch/x86/kernel/cpu/intel.c if (cpu_has(c, X86_FEATURE_HYPERVISOR)) c 179 arch/x86/kernel/cpu/intel.c if (c->x86 != 6) c 183 arch/x86/kernel/cpu/intel.c if (c->x86_model == spectre_bad_microcodes[i].model && c 184 arch/x86/kernel/cpu/intel.c c->x86_stepping == spectre_bad_microcodes[i].stepping) c 185 arch/x86/kernel/cpu/intel.c return (c->microcode <= spectre_bad_microcodes[i].microcode); c 190 arch/x86/kernel/cpu/intel.c static void early_init_intel(struct cpuinfo_x86 *c) c 195 arch/x86/kernel/cpu/intel.c if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { c 198 arch/x86/kernel/cpu/intel.c c->cpuid_level = cpuid_eax(0); c 199 arch/x86/kernel/cpu/intel.c get_cpu_cap(c); c 203 arch/x86/kernel/cpu/intel.c if ((c->x86 == 0xf && c->x86_model >= 0x03) || c 204 arch/x86/kernel/cpu/intel.c (c->x86 == 0x6 && c->x86_model >= 0x0e)) c 205 arch/x86/kernel/cpu/intel.c set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); c 207 arch/x86/kernel/cpu/intel.c if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) c 208 arch/x86/kernel/cpu/intel.c c->microcode = intel_get_microcode_revision(); c 211 arch/x86/kernel/cpu/intel.c if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) || c 212 arch/x86/kernel/cpu/intel.c cpu_has(c, 
X86_FEATURE_INTEL_STIBP) || c 213 arch/x86/kernel/cpu/intel.c cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) || c 214 arch/x86/kernel/cpu/intel.c cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) { c 234 arch/x86/kernel/cpu/intel.c if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 && c 235 arch/x86/kernel/cpu/intel.c c->microcode < 0x20e) { c 237 arch/x86/kernel/cpu/intel.c clear_cpu_cap(c, X86_FEATURE_PSE); c 241 arch/x86/kernel/cpu/intel.c set_cpu_cap(c, X86_FEATURE_SYSENTER32); c 244 arch/x86/kernel/cpu/intel.c if (c->x86 == 15 && c->x86_cache_alignment == 64) c 245 arch/x86/kernel/cpu/intel.c c->x86_cache_alignment = 128; c 249 arch/x86/kernel/cpu/intel.c if (c->x86 == 0xF && c->x86_model == 0x3 c 250 arch/x86/kernel/cpu/intel.c && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4)) c 251 arch/x86/kernel/cpu/intel.c c->x86_phys_bits = 36; c 260 arch/x86/kernel/cpu/intel.c if (c->x86_power & (1 << 8)) { c 261 arch/x86/kernel/cpu/intel.c set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); c 262 arch/x86/kernel/cpu/intel.c set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); c 266 arch/x86/kernel/cpu/intel.c if (c->x86 == 6) { c 267 arch/x86/kernel/cpu/intel.c switch (c->x86_model) { c 272 arch/x86/kernel/cpu/intel.c set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3); c 289 arch/x86/kernel/cpu/intel.c if (c->x86 == 6 && c->x86_model < 15) c 290 arch/x86/kernel/cpu/intel.c clear_cpu_cap(c, X86_FEATURE_PAT); c 296 arch/x86/kernel/cpu/intel.c if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { c 315 arch/x86/kernel/cpu/intel.c if (c->x86 == 5 && c->x86_model == 9) { c 320 arch/x86/kernel/cpu/intel.c if (c->cpuid_level >= 0x00000001) { c 330 arch/x86/kernel/cpu/intel.c c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff); c 333 arch/x86/kernel/cpu/intel.c check_mpx_erratum(c); c 334 arch/x86/kernel/cpu/intel.c check_memory_type_self_snoop_errata(c); c 340 arch/x86/kernel/cpu/intel.c if (detect_extended_topology_early(c) < 0) c 341 arch/x86/kernel/cpu/intel.c detect_ht_early(c); c 364 arch/x86/kernel/cpu/intel.c static void intel_smp_check(struct cpuinfo_x86 *c) c 367 arch/x86/kernel/cpu/intel.c if (!c->cpu_index) c 373 arch/x86/kernel/cpu/intel.c if (c->x86 == 5 && c 374 arch/x86/kernel/cpu/intel.c c->x86_stepping >= 1 && c->x86_stepping <= 4 && c 375 arch/x86/kernel/cpu/intel.c c->x86_model <= 3) { c 392 arch/x86/kernel/cpu/intel.c static void intel_workarounds(struct cpuinfo_x86 *c) c 401 arch/x86/kernel/cpu/intel.c clear_cpu_bug(c, X86_BUG_F00F); c 402 arch/x86/kernel/cpu/intel.c if (c->x86 == 5 && c->x86_model < 9) { c 405 arch/x86/kernel/cpu/intel.c set_cpu_bug(c, X86_BUG_F00F); c 417 arch/x86/kernel/cpu/intel.c if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633) c 418 arch/x86/kernel/cpu/intel.c clear_cpu_cap(c, X86_FEATURE_SEP); c 427 arch/x86/kernel/cpu/intel.c set_cpu_cap(c, X86_FEATURE_PAE); c 435 arch/x86/kernel/cpu/intel.c if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) { c 449 arch/x86/kernel/cpu/intel.c if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 && c 450 arch/x86/kernel/cpu/intel.c (c->x86_stepping < 0x6 || c->x86_stepping == 0xb)) c 451 arch/x86/kernel/cpu/intel.c set_cpu_bug(c, X86_BUG_11AP); c 458 arch/x86/kernel/cpu/intel.c switch (c->x86) { c 472 arch/x86/kernel/cpu/intel.c intel_smp_check(c); c 475 arch/x86/kernel/cpu/intel.c static void intel_workarounds(struct cpuinfo_x86 *c) c 480 arch/x86/kernel/cpu/intel.c static void srat_detect_node(struct cpuinfo_x86 *c) c 497 
arch/x86/kernel/cpu/intel.c static void detect_vmx_virtcap(struct cpuinfo_x86 *c) c 511 arch/x86/kernel/cpu/intel.c clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW); c 512 arch/x86/kernel/cpu/intel.c clear_cpu_cap(c, X86_FEATURE_VNMI); c 513 arch/x86/kernel/cpu/intel.c clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY); c 514 arch/x86/kernel/cpu/intel.c clear_cpu_cap(c, X86_FEATURE_EPT); c 515 arch/x86/kernel/cpu/intel.c clear_cpu_cap(c, X86_FEATURE_VPID); c 516 arch/x86/kernel/cpu/intel.c clear_cpu_cap(c, X86_FEATURE_EPT_AD); c 521 arch/x86/kernel/cpu/intel.c set_cpu_cap(c, X86_FEATURE_TPR_SHADOW); c 523 arch/x86/kernel/cpu/intel.c set_cpu_cap(c, X86_FEATURE_VNMI); c 530 arch/x86/kernel/cpu/intel.c set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY); c 532 arch/x86/kernel/cpu/intel.c set_cpu_cap(c, X86_FEATURE_EPT); c 536 arch/x86/kernel/cpu/intel.c set_cpu_cap(c, X86_FEATURE_EPT_AD); c 539 arch/x86/kernel/cpu/intel.c set_cpu_cap(c, X86_FEATURE_VPID); c 563 arch/x86/kernel/cpu/intel.c static void detect_tme(struct cpuinfo_x86 *c) c 624 arch/x86/kernel/cpu/intel.c c->x86_phys_bits -= keyid_bits; c 627 arch/x86/kernel/cpu/intel.c static void init_cpuid_fault(struct cpuinfo_x86 *c) c 633 arch/x86/kernel/cpu/intel.c set_cpu_cap(c, X86_FEATURE_CPUID_FAULT); c 637 arch/x86/kernel/cpu/intel.c static void init_intel_misc_features(struct cpuinfo_x86 *c) c 648 arch/x86/kernel/cpu/intel.c init_cpuid_fault(c); c 649 arch/x86/kernel/cpu/intel.c probe_xeon_phi_r3mwait(c); c 655 arch/x86/kernel/cpu/intel.c static void init_intel(struct cpuinfo_x86 *c) c 657 arch/x86/kernel/cpu/intel.c early_init_intel(c); c 659 arch/x86/kernel/cpu/intel.c intel_workarounds(c); c 666 arch/x86/kernel/cpu/intel.c detect_extended_topology(c); c 668 arch/x86/kernel/cpu/intel.c if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) { c 673 arch/x86/kernel/cpu/intel.c detect_num_cpu_cores(c); c 675 arch/x86/kernel/cpu/intel.c detect_ht(c); c 679 arch/x86/kernel/cpu/intel.c init_intel_cacheinfo(c); c 681 arch/x86/kernel/cpu/intel.c if (c->cpuid_level > 9) { c 685 arch/x86/kernel/cpu/intel.c set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); c 688 arch/x86/kernel/cpu/intel.c if (cpu_has(c, X86_FEATURE_XMM2)) c 689 arch/x86/kernel/cpu/intel.c set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); c 696 arch/x86/kernel/cpu/intel.c set_cpu_cap(c, X86_FEATURE_BTS); c 698 arch/x86/kernel/cpu/intel.c set_cpu_cap(c, X86_FEATURE_PEBS); c 701 arch/x86/kernel/cpu/intel.c if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) && c 702 arch/x86/kernel/cpu/intel.c (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47)) c 703 arch/x86/kernel/cpu/intel.c set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR); c 705 arch/x86/kernel/cpu/intel.c if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_MWAIT) && c 706 arch/x86/kernel/cpu/intel.c ((c->x86_model == INTEL_FAM6_ATOM_GOLDMONT))) c 707 arch/x86/kernel/cpu/intel.c set_cpu_bug(c, X86_BUG_MONITOR); c 710 arch/x86/kernel/cpu/intel.c if (c->x86 == 15) c 711 arch/x86/kernel/cpu/intel.c c->x86_cache_alignment = c->x86_clflush_size * 2; c 712 arch/x86/kernel/cpu/intel.c if (c->x86 == 6) c 713 arch/x86/kernel/cpu/intel.c set_cpu_cap(c, X86_FEATURE_REP_GOOD); c 720 arch/x86/kernel/cpu/intel.c if (c->x86 == 6) { c 721 arch/x86/kernel/cpu/intel.c unsigned int l2 = c->x86_cache_size; c 724 arch/x86/kernel/cpu/intel.c switch (c->x86_model) { c 735 arch/x86/kernel/cpu/intel.c else if (c->x86_stepping == 0 || c->x86_stepping == 5) c 746 arch/x86/kernel/cpu/intel.c strcpy(c->x86_model_id, p); c 749 arch/x86/kernel/cpu/intel.c if (c->x86 == 15) c 750 arch/x86/kernel/cpu/intel.c 
set_cpu_cap(c, X86_FEATURE_P4); c 751 arch/x86/kernel/cpu/intel.c if (c->x86 == 6) c 752 arch/x86/kernel/cpu/intel.c set_cpu_cap(c, X86_FEATURE_P3); c 756 arch/x86/kernel/cpu/intel.c srat_detect_node(c); c 758 arch/x86/kernel/cpu/intel.c if (cpu_has(c, X86_FEATURE_VMX)) c 759 arch/x86/kernel/cpu/intel.c detect_vmx_virtcap(c); c 761 arch/x86/kernel/cpu/intel.c if (cpu_has(c, X86_FEATURE_TME)) c 762 arch/x86/kernel/cpu/intel.c detect_tme(c); c 764 arch/x86/kernel/cpu/intel.c init_intel_misc_features(c); c 773 arch/x86/kernel/cpu/intel.c static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) c 781 arch/x86/kernel/cpu/intel.c if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0)) c 788 arch/x86/kernel/cpu/intel.c if ((c->x86 == 5) && (c->x86_model == 9)) c 942 arch/x86/kernel/cpu/intel.c static void intel_detect_tlb(struct cpuinfo_x86 *c) c 948 arch/x86/kernel/cpu/intel.c if (c->cpuid_level < 2) c 35 arch/x86/kernel/cpu/match.c struct cpuinfo_x86 *c = &boot_cpu_data; c 40 arch/x86/kernel/cpu/match.c if (m->vendor != X86_VENDOR_ANY && c->x86_vendor != m->vendor) c 42 arch/x86/kernel/cpu/match.c if (m->family != X86_FAMILY_ANY && c->x86 != m->family) c 44 arch/x86/kernel/cpu/match.c if (m->model != X86_MODEL_ANY && c->x86_model != m->model) c 47 arch/x86/kernel/cpu/match.c !(BIT(c->x86_stepping) & m->steppings)) c 49 arch/x86/kernel/cpu/match.c if (m->feature != X86_FEATURE_ANY && !cpu_has(c, m->feature)) c 60 arch/x86/kernel/cpu/match.c struct cpuinfo_x86 *c = &boot_cpu_data; c 64 arch/x86/kernel/cpu/match.c if (c->x86_vendor != m->x86_vendor) c 66 arch/x86/kernel/cpu/match.c if (c->x86 != m->x86_family) c 68 arch/x86/kernel/cpu/match.c if (c->x86_model != m->x86_model) c 70 arch/x86/kernel/cpu/match.c if (c->x86_stepping != m->x86_stepping) c 452 arch/x86/kernel/cpu/mce/amd.c static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c) c 568 arch/x86/kernel/cpu/mce/amd.c struct cpuinfo_x86 *c = &boot_cpu_data; c 572 arch/x86/kernel/cpu/mce/amd.c if (c->x86 == 0x17 && c 573 arch/x86/kernel/cpu/mce/amd.c c->x86_model >= 0x10 && c->x86_model <= 0x2F && c 586 arch/x86/kernel/cpu/mce/amd.c void disable_err_thresholding(struct cpuinfo_x86 *c, unsigned int bank) c 593 arch/x86/kernel/cpu/mce/amd.c if (c->x86 == 0x15 && bank == 4) { c 597 arch/x86/kernel/cpu/mce/amd.c } else if (c->x86 == 0x17 && c 598 arch/x86/kernel/cpu/mce/amd.c (c->x86_model >= 0x10 && c->x86_model <= 0x2F)) { c 626 arch/x86/kernel/cpu/mce/amd.c void mce_amd_feature_init(struct cpuinfo_x86 *c) c 637 arch/x86/kernel/cpu/mce/amd.c disable_err_thresholding(c, bank); c 659 arch/x86/kernel/cpu/mce/amd.c deferred_error_interrupt_enable(c); c 462 arch/x86/kernel/cpu/mce/core.c int mce_available(struct cpuinfo_x86 *c) c 466 arch/x86/kernel/cpu/mce/core.c return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA); c 1633 arch/x86/kernel/cpu/mce/core.c static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) c 1638 arch/x86/kernel/cpu/mce/core.c if (c->x86_vendor == X86_VENDOR_UNKNOWN) { c 1644 arch/x86/kernel/cpu/mce/core.c if (c->x86_vendor == X86_VENDOR_AMD) { c 1645 arch/x86/kernel/cpu/mce/core.c if (c->x86 == 15 && this_cpu_read(mce_num_banks) > 4) { c 1653 arch/x86/kernel/cpu/mce/core.c if (c->x86 < 0x11 && cfg->bootlog < 0) { c 1664 arch/x86/kernel/cpu/mce/core.c if (c->x86 == 6 && this_cpu_read(mce_num_banks) > 0) c 1671 arch/x86/kernel/cpu/mce/core.c if (c->x86 == 0x15 && c->x86_model <= 0xf) c 1676 arch/x86/kernel/cpu/mce/core.c if (c->x86_vendor == X86_VENDOR_INTEL) { c 1686 
arch/x86/kernel/cpu/mce/core.c if (c->x86 == 6 && c->x86_model < 0x1A && this_cpu_read(mce_num_banks) > 0) c 1693 arch/x86/kernel/cpu/mce/core.c if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) && c 1701 arch/x86/kernel/cpu/mce/core.c if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0) c 1704 arch/x86/kernel/cpu/mce/core.c if (c->x86 == 6 && c->x86_model == 45) c 1715 arch/x86/kernel/cpu/mce/core.c static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c) c 1717 arch/x86/kernel/cpu/mce/core.c if (c->x86 != 5) c 1720 arch/x86/kernel/cpu/mce/core.c switch (c->x86_vendor) { c 1722 arch/x86/kernel/cpu/mce/core.c intel_p5_mcheck_init(c); c 1726 arch/x86/kernel/cpu/mce/core.c winchip_mcheck_init(c); c 1739 arch/x86/kernel/cpu/mce/core.c static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c) c 1741 arch/x86/kernel/cpu/mce/core.c if (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) { c 1742 arch/x86/kernel/cpu/mce/core.c mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV); c 1743 arch/x86/kernel/cpu/mce/core.c mce_flags.succor = !!cpu_has(c, X86_FEATURE_SUCCOR); c 1744 arch/x86/kernel/cpu/mce/core.c mce_flags.smca = !!cpu_has(c, X86_FEATURE_SMCA); c 1755 arch/x86/kernel/cpu/mce/core.c static void mce_centaur_feature_init(struct cpuinfo_x86 *c) c 1763 arch/x86/kernel/cpu/mce/core.c if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) || c 1764 arch/x86/kernel/cpu/mce/core.c c->x86 > 6) { c 1770 arch/x86/kernel/cpu/mce/core.c static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) c 1772 arch/x86/kernel/cpu/mce/core.c switch (c->x86_vendor) { c 1774 arch/x86/kernel/cpu/mce/core.c mce_intel_feature_init(c); c 1779 arch/x86/kernel/cpu/mce/core.c mce_amd_feature_init(c); c 1784 arch/x86/kernel/cpu/mce/core.c mce_hygon_feature_init(c); c 1788 arch/x86/kernel/cpu/mce/core.c mce_centaur_feature_init(c); c 1796 arch/x86/kernel/cpu/mce/core.c static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c) c 1798 arch/x86/kernel/cpu/mce/core.c switch (c->x86_vendor) { c 1800 arch/x86/kernel/cpu/mce/core.c mce_intel_feature_clear(c); c 1861 arch/x86/kernel/cpu/mce/core.c void mcheck_cpu_init(struct cpuinfo_x86 *c) c 1866 arch/x86/kernel/cpu/mce/core.c if (__mcheck_cpu_ancient_init(c)) c 1869 arch/x86/kernel/cpu/mce/core.c if (!mce_available(c)) c 1874 arch/x86/kernel/cpu/mce/core.c if (__mcheck_cpu_apply_quirks(c) < 0) { c 1887 arch/x86/kernel/cpu/mce/core.c __mcheck_cpu_init_early(c); c 1889 arch/x86/kernel/cpu/mce/core.c __mcheck_cpu_init_vendor(c); c 1898 arch/x86/kernel/cpu/mce/core.c void mcheck_cpu_clear(struct cpuinfo_x86 *c) c 1903 arch/x86/kernel/cpu/mce/core.c if (!mce_available(c)) c 1910 arch/x86/kernel/cpu/mce/core.c __mcheck_cpu_clear_vendor(c); c 414 arch/x86/kernel/cpu/mce/inject.c struct cpuinfo_x86 *c = &boot_cpu_data; c 417 arch/x86/kernel/cpu/mce/inject.c cores_per_node = (c->x86_max_cores * smp_num_siblings) / amd_get_nodes_per_socket(); c 470 arch/x86/kernel/cpu/mce/intel.c static void intel_ppin_init(struct cpuinfo_x86 *c) c 479 arch/x86/kernel/cpu/mce/intel.c switch (c->x86_model) { c 504 arch/x86/kernel/cpu/mce/intel.c set_cpu_cap(c, X86_FEATURE_INTEL_PPIN); c 508 arch/x86/kernel/cpu/mce/intel.c void mce_intel_feature_init(struct cpuinfo_x86 *c) c 510 arch/x86/kernel/cpu/mce/intel.c intel_init_thermal(c); c 513 arch/x86/kernel/cpu/mce/intel.c intel_ppin_init(c); c 516 arch/x86/kernel/cpu/mce/intel.c void mce_intel_feature_clear(struct cpuinfo_x86 *c) c 46 arch/x86/kernel/cpu/mce/p5.c void 
intel_p5_mcheck_init(struct cpuinfo_x86 *c) c 55 arch/x86/kernel/cpu/mce/p5.c if (!cpu_has(c, X86_FEATURE_MCE)) c 45 arch/x86/kernel/cpu/mce/severity.c #define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c } c 241 arch/x86/kernel/cpu/mce/therm_throt.c struct cpuinfo_x86 *c = &cpu_data(cpu); c 247 arch/x86/kernel/cpu/mce/therm_throt.c if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable) c 251 arch/x86/kernel/cpu/mce/therm_throt.c if (cpu_has(c, X86_FEATURE_PTS)) { c 255 arch/x86/kernel/cpu/mce/therm_throt.c if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable) c 408 arch/x86/kernel/cpu/mce/therm_throt.c static int intel_thermal_supported(struct cpuinfo_x86 *c) c 412 arch/x86/kernel/cpu/mce/therm_throt.c if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC)) c 428 arch/x86/kernel/cpu/mce/therm_throt.c void intel_init_thermal(struct cpuinfo_x86 *c) c 434 arch/x86/kernel/cpu/mce/therm_throt.c if (!intel_thermal_supported(c)) c 466 arch/x86/kernel/cpu/mce/therm_throt.c if (cpu_has(c, X86_FEATURE_TM2)) { c 467 arch/x86/kernel/cpu/mce/therm_throt.c if (c->x86 == 6 && (c->x86_model == 9 || c->x86_model == 13)) { c 480 arch/x86/kernel/cpu/mce/therm_throt.c if (cpu_has(c, X86_FEATURE_PLN) && !int_pln_enable) c 484 arch/x86/kernel/cpu/mce/therm_throt.c else if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable) c 492 arch/x86/kernel/cpu/mce/therm_throt.c if (cpu_has(c, X86_FEATURE_PTS)) { c 494 arch/x86/kernel/cpu/mce/therm_throt.c if (cpu_has(c, X86_FEATURE_PLN) && !int_pln_enable) c 499 arch/x86/kernel/cpu/mce/therm_throt.c else if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable) c 30 arch/x86/kernel/cpu/mce/winchip.c void winchip_mcheck_init(struct cpuinfo_x86 *c) c 649 arch/x86/kernel/cpu/microcode/amd.c struct cpuinfo_x86 *c = &cpu_data(cpu); c 654 arch/x86/kernel/cpu/microcode/amd.c csig->rev = c->microcode; c 671 arch/x86/kernel/cpu/microcode/amd.c struct cpuinfo_x86 *c = &cpu_data(cpu); c 710 arch/x86/kernel/cpu/microcode/amd.c c->microcode = rev; c 713 arch/x86/kernel/cpu/microcode/amd.c if (c->cpu_index == boot_cpu_data.cpu_index) c 892 arch/x86/kernel/cpu/microcode/amd.c struct cpuinfo_x86 *c = &cpu_data(cpu); c 893 arch/x86/kernel/cpu/microcode/amd.c bool bsp = c->cpu_index == boot_cpu_data.cpu_index; c 901 arch/x86/kernel/cpu/microcode/amd.c if (c->x86 >= 0x15) c 902 arch/x86/kernel/cpu/microcode/amd.c snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86); c 913 arch/x86/kernel/cpu/microcode/amd.c ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size); c 945 arch/x86/kernel/cpu/microcode/amd.c struct cpuinfo_x86 *c = &boot_cpu_data; c 947 arch/x86/kernel/cpu/microcode/amd.c if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) { c 948 arch/x86/kernel/cpu/microcode/amd.c pr_warn("AMD CPU family 0x%x not supported\n", c->x86); c 236 arch/x86/kernel/cpu/microcode/core.c struct cpuinfo_x86 *c = &boot_cpu_data; c 239 arch/x86/kernel/cpu/microcode/core.c switch (c->x86_vendor) { c 241 arch/x86/kernel/cpu/microcode/core.c if (c->x86 >= 6) c 245 arch/x86/kernel/cpu/microcode/core.c if (c->x86 >= 0x10) c 832 arch/x86/kernel/cpu/microcode/core.c struct cpuinfo_x86 *c = &boot_cpu_data; c 838 arch/x86/kernel/cpu/microcode/core.c if (c->x86_vendor == X86_VENDOR_INTEL) c 840 arch/x86/kernel/cpu/microcode/core.c else if (c->x86_vendor == X86_VENDOR_AMD) c 765 arch/x86/kernel/cpu/microcode/intel.c struct cpuinfo_x86 *c = &cpu_data(cpu_num); c 772 arch/x86/kernel/cpu/microcode/intel.c if ((c->x86_model >= 5) || (c->x86 > 6)) { c 778 
arch/x86/kernel/cpu/microcode/intel.c csig->rev = c->microcode; c 793 arch/x86/kernel/cpu/microcode/intel.c struct cpuinfo_x86 *c = &cpu_data(cpu); c 852 arch/x86/kernel/cpu/microcode/intel.c c->microcode = rev; c 855 arch/x86/kernel/cpu/microcode/intel.c if (c->cpu_index == boot_cpu_data.cpu_index) c 947 arch/x86/kernel/cpu/microcode/intel.c struct cpuinfo_x86 *c = &cpu_data(cpu); c 955 arch/x86/kernel/cpu/microcode/intel.c if (c->x86 == 6 && c 956 arch/x86/kernel/cpu/microcode/intel.c c->x86_model == INTEL_FAM6_BROADWELL_X && c 957 arch/x86/kernel/cpu/microcode/intel.c c->x86_stepping == 0x01 && c 959 arch/x86/kernel/cpu/microcode/intel.c c->microcode < 0x0b000021) { c 960 arch/x86/kernel/cpu/microcode/intel.c pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode); c 971 arch/x86/kernel/cpu/microcode/intel.c struct cpuinfo_x86 *c = &cpu_data(cpu); c 982 arch/x86/kernel/cpu/microcode/intel.c c->x86, c->x86_model, c->x86_stepping); c 1022 arch/x86/kernel/cpu/microcode/intel.c static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c) c 1024 arch/x86/kernel/cpu/microcode/intel.c u64 llc_size = c->x86_cache_size * 1024ULL; c 1026 arch/x86/kernel/cpu/microcode/intel.c do_div(llc_size, c->x86_max_cores); c 1033 arch/x86/kernel/cpu/microcode/intel.c struct cpuinfo_x86 *c = &boot_cpu_data; c 1035 arch/x86/kernel/cpu/microcode/intel.c if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 || c 1036 arch/x86/kernel/cpu/microcode/intel.c cpu_has(c, X86_FEATURE_IA64)) { c 1037 arch/x86/kernel/cpu/microcode/intel.c pr_err("Intel CPU family 0x%x not supported\n", c->x86); c 1041 arch/x86/kernel/cpu/microcode/intel.c llc_size_per_core = calc_llc_size_per_core(c); c 431 arch/x86/kernel/cpu/mtrr/if.c struct cpuinfo_x86 *c = &boot_cpu_data; c 433 arch/x86/kernel/cpu/mtrr/if.c if ((!cpu_has(c, X86_FEATURE_MTRR)) && c 434 arch/x86/kernel/cpu/mtrr/if.c (!cpu_has(c, X86_FEATURE_K6_MTRR)) && c 435 arch/x86/kernel/cpu/mtrr/if.c (!cpu_has(c, X86_FEATURE_CYRIX_ARR)) && c 436 arch/x86/kernel/cpu/mtrr/if.c (!cpu_has(c, X86_FEATURE_CENTAUR_MCR))) c 13 arch/x86/kernel/cpu/proc.c static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, c 17 arch/x86/kernel/cpu/proc.c seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); c 20 arch/x86/kernel/cpu/proc.c seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); c 21 arch/x86/kernel/cpu/proc.c seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); c 22 arch/x86/kernel/cpu/proc.c seq_printf(m, "apicid\t\t: %d\n", c->apicid); c 23 arch/x86/kernel/cpu/proc.c seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid); c 28 arch/x86/kernel/cpu/proc.c static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) c 43 arch/x86/kernel/cpu/proc.c c->cpuid_level); c 46 arch/x86/kernel/cpu/proc.c static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) c 53 arch/x86/kernel/cpu/proc.c c->cpuid_level); c 59 arch/x86/kernel/cpu/proc.c struct cpuinfo_x86 *c = v; c 63 arch/x86/kernel/cpu/proc.c cpu = c->cpu_index; c 70 arch/x86/kernel/cpu/proc.c c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown", c 71 arch/x86/kernel/cpu/proc.c c->x86, c 72 arch/x86/kernel/cpu/proc.c c->x86_model, c 73 arch/x86/kernel/cpu/proc.c c->x86_model_id[0] ? 
c->x86_model_id : "unknown"); c 75 arch/x86/kernel/cpu/proc.c if (c->x86_stepping || c->cpuid_level >= 0) c 76 arch/x86/kernel/cpu/proc.c seq_printf(m, "stepping\t: %d\n", c->x86_stepping); c 79 arch/x86/kernel/cpu/proc.c if (c->microcode) c 80 arch/x86/kernel/cpu/proc.c seq_printf(m, "microcode\t: 0x%x\n", c->microcode); c 82 arch/x86/kernel/cpu/proc.c if (cpu_has(c, X86_FEATURE_TSC)) { c 94 arch/x86/kernel/cpu/proc.c if (c->x86_cache_size) c 95 arch/x86/kernel/cpu/proc.c seq_printf(m, "cache size\t: %u KB\n", c->x86_cache_size); c 97 arch/x86/kernel/cpu/proc.c show_cpuinfo_core(m, c, cpu); c 98 arch/x86/kernel/cpu/proc.c show_cpuinfo_misc(m, c); c 102 arch/x86/kernel/cpu/proc.c if (cpu_has(c, i) && x86_cap_flags[i] != NULL) c 109 arch/x86/kernel/cpu/proc.c if (cpu_has_bug(c, bug_bit) && x86_bug_flags[i]) c 114 arch/x86/kernel/cpu/proc.c c->loops_per_jiffy/(500000/HZ), c 115 arch/x86/kernel/cpu/proc.c (c->loops_per_jiffy/(5000/HZ)) % 100); c 118 arch/x86/kernel/cpu/proc.c if (c->x86_tlbsize > 0) c 119 arch/x86/kernel/cpu/proc.c seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize); c 121 arch/x86/kernel/cpu/proc.c seq_printf(m, "clflush size\t: %u\n", c->x86_clflush_size); c 122 arch/x86/kernel/cpu/proc.c seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment); c 124 arch/x86/kernel/cpu/proc.c c->x86_phys_bits, c->x86_virt_bits); c 128 arch/x86/kernel/cpu/proc.c if (c->x86_power & (1 << i)) { c 30 arch/x86/kernel/cpu/rdrand.c void x86_init_rdrand(struct cpuinfo_x86 *c) c 35 arch/x86/kernel/cpu/rdrand.c if (!cpu_has(c, X86_FEATURE_RDRAND)) c 40 arch/x86/kernel/cpu/rdrand.c clear_cpu_cap(c, X86_FEATURE_RDRAND); c 47 arch/x86/kernel/cpu/scattered.c void init_scattered_cpuid_features(struct cpuinfo_x86 *c) c 66 arch/x86/kernel/cpu/scattered.c set_cpu_cap(c, cb->feature); c 49 arch/x86/kernel/cpu/topology.c static int detect_extended_topology_leaf(struct cpuinfo_x86 *c) c 51 arch/x86/kernel/cpu/topology.c if (c->cpuid_level >= 0x1f) { c 56 arch/x86/kernel/cpu/topology.c if (c->cpuid_level >= 0xb) { c 65 arch/x86/kernel/cpu/topology.c int detect_extended_topology_early(struct cpuinfo_x86 *c) c 71 arch/x86/kernel/cpu/topology.c leaf = detect_extended_topology_leaf(c); c 75 arch/x86/kernel/cpu/topology.c set_cpu_cap(c, X86_FEATURE_XTOPOLOGY); c 81 arch/x86/kernel/cpu/topology.c c->initial_apicid = edx; c 92 arch/x86/kernel/cpu/topology.c int detect_extended_topology(struct cpuinfo_x86 *c) c 101 arch/x86/kernel/cpu/topology.c leaf = detect_extended_topology_leaf(c); c 109 arch/x86/kernel/cpu/topology.c c->initial_apicid = edx; c 140 arch/x86/kernel/cpu/topology.c c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, c 142 arch/x86/kernel/cpu/topology.c c->cpu_die_id = apic->phys_pkg_id(c->initial_apicid, c 144 arch/x86/kernel/cpu/topology.c c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, c 149 arch/x86/kernel/cpu/topology.c c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); c 151 arch/x86/kernel/cpu/topology.c c->x86_max_cores = (core_level_siblings / smp_num_siblings); c 10 arch/x86/kernel/cpu/transmeta.c static void early_init_transmeta(struct cpuinfo_x86 *c) c 18 arch/x86/kernel/cpu/transmeta.c c->x86_capability[CPUID_8086_0001_EDX] = cpuid_edx(0x80860001); c 22 arch/x86/kernel/cpu/transmeta.c static void init_transmeta(struct cpuinfo_x86 *c) c 29 arch/x86/kernel/cpu/transmeta.c early_init_transmeta(c); c 31 arch/x86/kernel/cpu/transmeta.c cpu_detect_cache_sizes(c); c 88 arch/x86/kernel/cpu/transmeta.c c->x86_capability[CPUID_1_EDX] = cpuid_edx(0x00000001); c 92 
arch/x86/kernel/cpu/transmeta.c set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); c 26 arch/x86/kernel/cpu/zhaoxin.c static void init_zhaoxin_cap(struct cpuinfo_x86 *c) c 56 arch/x86/kernel/cpu/zhaoxin.c c->x86_capability[CPUID_C000_0001_EDX] = cpuid_edx(0xC0000001); c 59 arch/x86/kernel/cpu/zhaoxin.c if (c->x86 >= 0x6) c 60 arch/x86/kernel/cpu/zhaoxin.c set_cpu_cap(c, X86_FEATURE_REP_GOOD); c 62 arch/x86/kernel/cpu/zhaoxin.c cpu_detect_cache_sizes(c); c 65 arch/x86/kernel/cpu/zhaoxin.c static void early_init_zhaoxin(struct cpuinfo_x86 *c) c 67 arch/x86/kernel/cpu/zhaoxin.c if (c->x86 >= 0x6) c 68 arch/x86/kernel/cpu/zhaoxin.c set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); c 70 arch/x86/kernel/cpu/zhaoxin.c set_cpu_cap(c, X86_FEATURE_SYSENTER32); c 72 arch/x86/kernel/cpu/zhaoxin.c if (c->x86_power & (1 << 8)) { c 73 arch/x86/kernel/cpu/zhaoxin.c set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); c 74 arch/x86/kernel/cpu/zhaoxin.c set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); c 77 arch/x86/kernel/cpu/zhaoxin.c if (c->cpuid_level >= 0x00000001) { c 87 arch/x86/kernel/cpu/zhaoxin.c c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff); c 92 arch/x86/kernel/cpu/zhaoxin.c static void zhaoxin_detect_vmx_virtcap(struct cpuinfo_x86 *c) c 100 arch/x86/kernel/cpu/zhaoxin.c set_cpu_cap(c, X86_FEATURE_TPR_SHADOW); c 102 arch/x86/kernel/cpu/zhaoxin.c set_cpu_cap(c, X86_FEATURE_VNMI); c 109 arch/x86/kernel/cpu/zhaoxin.c set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY); c 111 arch/x86/kernel/cpu/zhaoxin.c set_cpu_cap(c, X86_FEATURE_EPT); c 113 arch/x86/kernel/cpu/zhaoxin.c set_cpu_cap(c, X86_FEATURE_VPID); c 117 arch/x86/kernel/cpu/zhaoxin.c static void init_zhaoxin(struct cpuinfo_x86 *c) c 119 arch/x86/kernel/cpu/zhaoxin.c early_init_zhaoxin(c); c 120 arch/x86/kernel/cpu/zhaoxin.c init_intel_cacheinfo(c); c 121 arch/x86/kernel/cpu/zhaoxin.c detect_num_cpu_cores(c); c 123 arch/x86/kernel/cpu/zhaoxin.c detect_ht(c); c 126 arch/x86/kernel/cpu/zhaoxin.c if (c->cpuid_level > 9) { c 135 arch/x86/kernel/cpu/zhaoxin.c set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); c 138 arch/x86/kernel/cpu/zhaoxin.c if (c->x86 >= 0x6) c 139 arch/x86/kernel/cpu/zhaoxin.c init_zhaoxin_cap(c); c 141 arch/x86/kernel/cpu/zhaoxin.c set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); c 144 arch/x86/kernel/cpu/zhaoxin.c if (cpu_has(c, X86_FEATURE_VMX)) c 145 arch/x86/kernel/cpu/zhaoxin.c zhaoxin_detect_vmx_virtcap(c); c 150 arch/x86/kernel/cpu/zhaoxin.c zhaoxin_size_cache(struct cpuinfo_x86 *c, unsigned int size) c 105 arch/x86/kernel/cpuid.c struct cpuinfo_x86 *c; c 111 arch/x86/kernel/cpuid.c c = &cpu_data(cpu); c 112 arch/x86/kernel/cpuid.c if (c->cpuid_level < 0) c 34 arch/x86/kernel/early_printk.c char c; c 37 arch/x86/kernel/early_printk.c while ((c = *str++) != '\0' && n-- > 0) { c 51 arch/x86/kernel/early_printk.c if (c == '\b') { c 54 arch/x86/kernel/early_printk.c } else if (c == '\r') { c 58 arch/x86/kernel/early_printk.c if (c == '\n') { c 61 arch/x86/kernel/early_printk.c } else if (c != '\r') { c 62 arch/x86/kernel/early_printk.c writew(((0x7 << 8) | (unsigned short) c), c 135 arch/x86/kernel/early_printk.c unsigned char c; c 142 arch/x86/kernel/early_printk.c c = serial_in(early_serial_base, LCR); c 143 arch/x86/kernel/early_printk.c serial_out(early_serial_base, LCR, c | DLAB); c 146 arch/x86/kernel/early_printk.c serial_out(early_serial_base, LCR, c & ~DLAB); c 71 arch/x86/kernel/fpu/init.c static void fpu__init_system_early_generic(struct cpuinfo_x86 *c) c 282 arch/x86/kernel/fpu/init.c void __init fpu__init_system(struct cpuinfo_x86 *c) c 285 
arch/x86/kernel/fpu/init.c fpu__init_system_early_generic(c); c 159 arch/x86/kernel/msr.c struct cpuinfo_x86 *c; c 167 arch/x86/kernel/msr.c c = &cpu_data(cpu); c 168 arch/x86/kernel/msr.c if (!cpu_has(c, X86_FEATURE_MSR)) c 191 arch/x86/kernel/probe_roms.c unsigned char sum, c; c 193 arch/x86/kernel/probe_roms.c for (sum = 0; length && probe_kernel_address(rom++, c) == 0; length--) c 194 arch/x86/kernel/probe_roms.c sum += c; c 202 arch/x86/kernel/probe_roms.c unsigned char c; c 214 arch/x86/kernel/probe_roms.c if (probe_kernel_address(rom + 2, c) != 0) c 218 arch/x86/kernel/probe_roms.c length = c * 512; c 252 arch/x86/kernel/probe_roms.c if (probe_kernel_address(rom + 2, c) != 0) c 256 arch/x86/kernel/probe_roms.c length = c * 512; c 660 arch/x86/kernel/process.c static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c) c 662 arch/x86/kernel/process.c if (c->x86_vendor != X86_VENDOR_INTEL) c 665 arch/x86/kernel/process.c if (!cpu_has(c, X86_FEATURE_MWAIT) || boot_cpu_has_bug(X86_BUG_MONITOR)) c 698 arch/x86/kernel/process.c void select_idle_routine(const struct cpuinfo_x86 *c) c 710 arch/x86/kernel/process.c } else if (prefer_mwait_c1_over_halt(c)) { c 302 arch/x86/kernel/smpboot.c struct cpuinfo_x86 *c = &cpu_data(cpu); c 304 arch/x86/kernel/smpboot.c if (c->initialized && c->phys_proc_id == phys_pkg) c 305 arch/x86/kernel/smpboot.c return c->logical_proc_id; c 321 arch/x86/kernel/smpboot.c struct cpuinfo_x86 *c = &cpu_data(cpu); c 323 arch/x86/kernel/smpboot.c if (c->initialized && c->cpu_die_id == die_id && c 324 arch/x86/kernel/smpboot.c c->phys_proc_id == proc_id) c 325 arch/x86/kernel/smpboot.c return c->logical_die_id; c 381 arch/x86/kernel/smpboot.c struct cpuinfo_x86 *c = &cpu_data(id); c 383 arch/x86/kernel/smpboot.c *c = boot_cpu_data; c 384 arch/x86/kernel/smpboot.c c->cpu_index = id; c 385 arch/x86/kernel/smpboot.c topology_update_package_map(c->phys_proc_id, id); c 386 arch/x86/kernel/smpboot.c topology_update_die_map(c->cpu_die_id, id); c 387 arch/x86/kernel/smpboot.c c->initialized = true; c 396 arch/x86/kernel/smpboot.c struct cpuinfo_x86 *c = &cpu_data(id); c 399 arch/x86/kernel/smpboot.c if (!c->initialized) c 400 arch/x86/kernel/smpboot.c *c = boot_cpu_data; c 401 arch/x86/kernel/smpboot.c c->cpu_index = id; c 406 arch/x86/kernel/smpboot.c identify_secondary_cpu(c); c 407 arch/x86/kernel/smpboot.c c->initialized = true; c 411 arch/x86/kernel/smpboot.c topology_same_node(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) c 413 arch/x86/kernel/smpboot.c int cpu1 = c->cpu_index, cpu2 = o->cpu_index; c 419 arch/x86/kernel/smpboot.c topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name) c 421 arch/x86/kernel/smpboot.c int cpu1 = c->cpu_index, cpu2 = o->cpu_index; c 423 arch/x86/kernel/smpboot.c return !WARN_ONCE(!topology_same_node(c, o), c 435 arch/x86/kernel/smpboot.c static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) c 438 arch/x86/kernel/smpboot.c int cpu1 = c->cpu_index, cpu2 = o->cpu_index; c 440 arch/x86/kernel/smpboot.c if (c->phys_proc_id == o->phys_proc_id && c 441 arch/x86/kernel/smpboot.c c->cpu_die_id == o->cpu_die_id && c 443 arch/x86/kernel/smpboot.c if (c->cpu_core_id == o->cpu_core_id) c 444 arch/x86/kernel/smpboot.c return topology_sane(c, o, "smt"); c 446 arch/x86/kernel/smpboot.c if ((c->cu_id != 0xff) && c 448 arch/x86/kernel/smpboot.c (c->cu_id == o->cu_id)) c 449 arch/x86/kernel/smpboot.c return topology_sane(c, o, "smt"); c 452 arch/x86/kernel/smpboot.c } else if (c->phys_proc_id == o->phys_proc_id && c 
453 arch/x86/kernel/smpboot.c c->cpu_die_id == o->cpu_die_id && c 454 arch/x86/kernel/smpboot.c c->cpu_core_id == o->cpu_core_id) { c 455 arch/x86/kernel/smpboot.c return topology_sane(c, o, "smt"); c 481 arch/x86/kernel/smpboot.c static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) c 483 arch/x86/kernel/smpboot.c int cpu1 = c->cpu_index, cpu2 = o->cpu_index; c 498 arch/x86/kernel/smpboot.c if (!topology_same_node(c, o) && x86_match_cpu(snc_cpu)) c 501 arch/x86/kernel/smpboot.c return topology_sane(c, o, "llc"); c 509 arch/x86/kernel/smpboot.c static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) c 511 arch/x86/kernel/smpboot.c if (c->phys_proc_id == o->phys_proc_id) c 516 arch/x86/kernel/smpboot.c static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) c 518 arch/x86/kernel/smpboot.c if ((c->phys_proc_id == o->phys_proc_id) && c 519 arch/x86/kernel/smpboot.c (c->cpu_die_id == o->cpu_die_id)) c 577 arch/x86/kernel/smpboot.c struct cpuinfo_x86 *c = &cpu_data(cpu); c 588 arch/x86/kernel/smpboot.c c->booted_cores = 1; c 595 arch/x86/kernel/smpboot.c if ((i == cpu) || (has_smt && match_smt(c, o))) c 598 arch/x86/kernel/smpboot.c if ((i == cpu) || (has_mp && match_llc(c, o))) c 610 arch/x86/kernel/smpboot.c if ((i == cpu) || (has_mp && match_pkg(c, o))) { c 624 arch/x86/kernel/smpboot.c c->booted_cores++; c 631 arch/x86/kernel/smpboot.c } else if (i != cpu && !c->booted_cores) c 632 arch/x86/kernel/smpboot.c c->booted_cores = cpu_data(i).booted_cores; c 634 arch/x86/kernel/smpboot.c if (match_pkg(c, o) && !topology_same_node(c, o)) c 637 arch/x86/kernel/smpboot.c if ((i == cpu) || (has_mp && match_die(c, o))) c 1296 arch/x86/kernel/smpboot.c struct cpuinfo_x86 *c; c 1299 arch/x86/kernel/smpboot.c c = &cpu_data(i); c 1301 arch/x86/kernel/smpboot.c c->cpu_index = nr_cpu_ids; c 1543 arch/x86/kernel/smpboot.c struct cpuinfo_x86 *c = &cpu_data(cpu); c 1564 arch/x86/kernel/smpboot.c c->cpu_core_id = 0; c 1565 arch/x86/kernel/smpboot.c c->booted_cores = 0; c 109 arch/x86/kernel/topology.c struct cpuinfo_x86 *c = &cpu_data(num); c 116 arch/x86/kernel/topology.c if (c->x86_vendor != X86_VENDOR_INTEL || c 56 arch/x86/kvm/i8254.c struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel]; c 58 arch/x86/kvm/i8254.c switch (c->mode) { c 69 arch/x86/kvm/i8254.c if (c->gate < val) c 70 arch/x86/kvm/i8254.c c->count_load_time = ktime_get(); c 74 arch/x86/kvm/i8254.c c->gate = val; c 106 arch/x86/kvm/i8254.c static s64 kpit_elapsed(struct kvm_pit *pit, struct kvm_kpit_channel_state *c, c 112 arch/x86/kvm/i8254.c return ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time)); c 117 arch/x86/kvm/i8254.c struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel]; c 121 arch/x86/kvm/i8254.c t = kpit_elapsed(pit, c, channel); c 124 arch/x86/kvm/i8254.c switch (c->mode) { c 129 arch/x86/kvm/i8254.c counter = (c->count - d) & 0xffff; c 133 arch/x86/kvm/i8254.c counter = c->count - (mod_64((2 * d), c->count)); c 136 arch/x86/kvm/i8254.c counter = c->count - mod_64(d, c->count); c 144 arch/x86/kvm/i8254.c struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel]; c 148 arch/x86/kvm/i8254.c t = kpit_elapsed(pit, c, channel); c 151 arch/x86/kvm/i8254.c switch (c->mode) { c 154 arch/x86/kvm/i8254.c out = (d >= c->count); c 157 arch/x86/kvm/i8254.c out = (d < c->count); c 160 arch/x86/kvm/i8254.c out = ((mod_64(d, c->count) == 0) && (d != 0)); c 163 arch/x86/kvm/i8254.c out = (mod_64(d, c->count) < ((c->count + 1) >> 1)); c 167 arch/x86/kvm/i8254.c out 
= (d == c->count); c 176 arch/x86/kvm/i8254.c struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel]; c 178 arch/x86/kvm/i8254.c if (!c->count_latched) { c 179 arch/x86/kvm/i8254.c c->latched_count = pit_get_count(pit, channel); c 180 arch/x86/kvm/i8254.c c->count_latched = c->rw_mode; c 186 arch/x86/kvm/i8254.c struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel]; c 188 arch/x86/kvm/i8254.c if (!c->status_latched) { c 190 arch/x86/kvm/i8254.c c->status = ((pit_get_out(pit, channel) << 7) | c 191 arch/x86/kvm/i8254.c (c->rw_mode << 4) | c 192 arch/x86/kvm/i8254.c (c->mode << 1) | c 193 arch/x86/kvm/i8254.c c->bcd); c 194 arch/x86/kvm/i8254.c c->status_latched = 1; c 617 arch/x86/kvm/i8254.c struct kvm_kpit_channel_state *c; c 621 arch/x86/kvm/i8254.c c = &pit->pit_state.channels[i]; c 622 arch/x86/kvm/i8254.c c->mode = 0xff; c 623 arch/x86/kvm/i8254.c c->gate = (i != 2); c 503 arch/x86/kvm/svm.c struct vmcb_control_area *c, *h; c 511 arch/x86/kvm/svm.c c = &svm->vmcb->control; c 515 arch/x86/kvm/svm.c c->intercept_cr = h->intercept_cr | g->intercept_cr; c 516 arch/x86/kvm/svm.c c->intercept_dr = h->intercept_dr | g->intercept_dr; c 517 arch/x86/kvm/svm.c c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions; c 518 arch/x86/kvm/svm.c c->intercept = h->intercept | g->intercept; c 913 arch/x86/kvm/x86.c static u64 kvm_host_cr4_reserved_bits(struct cpuinfo_x86 *c) c 917 arch/x86/kvm/x86.c if (!cpu_has(c, X86_FEATURE_XSAVE)) c 920 arch/x86/kvm/x86.c if (!cpu_has(c, X86_FEATURE_SMEP)) c 923 arch/x86/kvm/x86.c if (!cpu_has(c, X86_FEATURE_SMAP)) c 926 arch/x86/kvm/x86.c if (!cpu_has(c, X86_FEATURE_FSGSBASE)) c 929 arch/x86/kvm/x86.c if (!cpu_has(c, X86_FEATURE_PKU)) c 932 arch/x86/kvm/x86.c if (!cpu_has(c, X86_FEATURE_LA57) && c 936 arch/x86/kvm/x86.c if (!cpu_has(c, X86_FEATURE_UMIP) && !kvm_x86_ops->umip_emulated()) c 11 arch/x86/lib/cmdline.c static inline int myisspace(u8 c) c 13 arch/x86/lib/cmdline.c return c <= ' '; /* Close enough approximation */ c 31 arch/x86/lib/cmdline.c char c; c 48 arch/x86/lib/cmdline.c c = *(char *)cmdline++; c 53 arch/x86/lib/cmdline.c if (!c) c 55 arch/x86/lib/cmdline.c else if (myisspace(c)) c 71 arch/x86/lib/cmdline.c if (!c || myisspace(c)) c 78 arch/x86/lib/cmdline.c } else if (!c) { c 84 arch/x86/lib/cmdline.c } else if (c == *opptr++) { c 95 arch/x86/lib/cmdline.c if (!c) c 97 arch/x86/lib/cmdline.c else if (myisspace(c)) c 124 arch/x86/lib/cmdline.c char c; c 143 arch/x86/lib/cmdline.c c = *(char *)cmdline++; c 144 arch/x86/lib/cmdline.c if (!c) c 149 arch/x86/lib/cmdline.c if (myisspace(c)) c 157 arch/x86/lib/cmdline.c if ((c == '=') && !*opptr) { c 167 arch/x86/lib/cmdline.c } else if (c == *opptr++) { c 178 arch/x86/lib/cmdline.c if (myisspace(c)) c 183 arch/x86/lib/cmdline.c if (myisspace(c)) { c 192 arch/x86/lib/cmdline.c *bufptr++ = c; c 61 arch/x86/lib/iomem.c void memset_io(volatile void __iomem *a, int b, size_t c) c 67 arch/x86/lib/iomem.c memset((void *)a, b, c); c 18 arch/x86/lib/memcpy_32.c __visible void *memset(void *s, int c, size_t count) c 20 arch/x86/lib/memcpy_32.c return __memset(s, c, count); c 143 arch/x86/lib/string_32.c char *strchr(const char *s, int c) c 157 arch/x86/lib/string_32.c : "1" (s), "0" (c) c 180 arch/x86/lib/string_32.c void *memchr(const void *cs, int c, size_t count) c 192 arch/x86/lib/string_32.c : "a" (c), "0" (cs), "1" (count) c 200 arch/x86/lib/string_32.c void *memscan(void *addr, int c, size_t size) c 209 arch/x86/lib/string_32.c : "0" (addr), "1" (size), "a" 
(c) c 91 arch/x86/math-emu/fpu_etc.c int c = 0; c 94 arch/x86/math-emu/fpu_etc.c c = SW_C3 | SW_C0; c 97 arch/x86/math-emu/fpu_etc.c c = SW_C3; c 100 arch/x86/math-emu/fpu_etc.c c = SW_C2; c 105 arch/x86/math-emu/fpu_etc.c c = SW_C2 | SW_C3; /* Denormal */ c 111 arch/x86/math-emu/fpu_etc.c c = SW_C0; c 114 arch/x86/math-emu/fpu_etc.c c = SW_C2 | SW_C0; c 119 arch/x86/math-emu/fpu_etc.c c |= SW_C1; c 120 arch/x86/math-emu/fpu_etc.c setcc(c); c 48 arch/x86/math-emu/poly.h #define MK_XSIG(a,b,c) { c, b, a } c 172 arch/x86/math-emu/reg_compare.c int f, c; c 174 arch/x86/math-emu/reg_compare.c c = compare(loaded_data, loaded_tag); c 176 arch/x86/math-emu/reg_compare.c if (c & COMP_NaN) { c 180 arch/x86/math-emu/reg_compare.c switch (c & 7) { c 201 arch/x86/math-emu/reg_compare.c if (c & COMP_Denormal) { c 209 arch/x86/math-emu/reg_compare.c int f, c; c 220 arch/x86/math-emu/reg_compare.c c = compare(st_ptr, FPU_gettagi(nr)); c 221 arch/x86/math-emu/reg_compare.c if (c & COMP_NaN) { c 226 arch/x86/math-emu/reg_compare.c switch (c & 7) { c 247 arch/x86/math-emu/reg_compare.c if (c & COMP_Denormal) { c 255 arch/x86/math-emu/reg_compare.c int f, c; c 267 arch/x86/math-emu/reg_compare.c c = compare(st_ptr, FPU_gettagi(nr)); c 268 arch/x86/math-emu/reg_compare.c if (c & COMP_NaN) { c 274 arch/x86/math-emu/reg_compare.c switch (c & 7) { c 295 arch/x86/math-emu/reg_compare.c if (c & COMP_Denormal) { c 303 arch/x86/math-emu/reg_compare.c int f = 0, c; c 314 arch/x86/math-emu/reg_compare.c c = compare(st_ptr, FPU_gettagi(nr)); c 315 arch/x86/math-emu/reg_compare.c if (c & COMP_NaN) { c 317 arch/x86/math-emu/reg_compare.c if (c & COMP_SNaN) { /* This is the only difference between c 324 arch/x86/math-emu/reg_compare.c switch (c & 7) { c 345 arch/x86/math-emu/reg_compare.c if (c & COMP_Denormal) { c 353 arch/x86/math-emu/reg_compare.c int f = 0, c; c 365 arch/x86/math-emu/reg_compare.c c = compare(st_ptr, FPU_gettagi(nr)); c 366 arch/x86/math-emu/reg_compare.c if (c & COMP_NaN) { c 368 arch/x86/math-emu/reg_compare.c if (c & COMP_SNaN) { /* This is the only difference between c 376 arch/x86/math-emu/reg_compare.c switch (c & 7) { c 397 arch/x86/math-emu/reg_compare.c if (c & COMP_Denormal) { c 53 arch/x86/math-emu/reg_constant.c static void fld_const(FPU_REG const * c, int adj, u_char tag) c 62 arch/x86/math-emu/reg_constant.c reg_copy(c, st_new_ptr); c 165 arch/x86/mm/pat.c #define CM(c) (_PAGE_CACHE_MODE_ ## c) c 306 arch/x86/mm/pat.c struct cpuinfo_x86 *c = &boot_cpu_data; c 311 arch/x86/mm/pat.c if ((c->x86_vendor == X86_VENDOR_INTEL) && c 312 arch/x86/mm/pat.c (((c->x86 == 0x6) && (c->x86_model <= 0xd)) || c 313 arch/x86/mm/pat.c ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) { c 482 arch/x86/pci/common.c struct cpuinfo_x86 *c = &boot_cpu_data; c 490 arch/x86/pci/common.c if (c->x86_clflush_size > 0) { c 491 arch/x86/pci/common.c pci_dfl_cache_line_size = c->x86_clflush_size >> 2; c 184 arch/x86/platform/uv/uv_time.c int c, bcpu = -1; c 187 arch/x86/platform/uv/uv_time.c for (c = 0; c < head->ncpus; c++) { c 188 arch/x86/platform/uv/uv_time.c u64 exp = head->cpu[c].expires; c 190 arch/x86/platform/uv/uv_time.c bcpu = c; c 196 arch/x86/platform/uv/uv_time.c c = head->cpu[bcpu].lcpu; c 197 arch/x86/platform/uv/uv_time.c if (uv_setup_intr(c, lowest)) c 199 arch/x86/platform/uv/uv_time.c uv_rtc_send_IPI(c); c 465 arch/x86/power/cpu.c static int msr_save_cpuid_features(const struct x86_cpu_id *c) c 472 arch/x86/power/cpu.c c->family); c 496 arch/x86/power/cpu.c static int pm_cpu_check(const struct x86_cpu_id 
*c) c 90 arch/x86/tools/insn_decoder_test.c int c; c 92 arch/x86/tools/insn_decoder_test.c while ((c = getopt(argc, argv, "ynv")) != -1) { c 93 arch/x86/tools/insn_decoder_test.c switch (c) { c 164 arch/x86/tools/insn_sanity.c int c; c 169 arch/x86/tools/insn_sanity.c while ((c = getopt(argc, argv, "ynvs:m:i:")) != -1) { c 170 arch/x86/tools/insn_sanity.c switch (c) { c 203 arch/x86/xen/pmu.c ctxt = &xenpmu_data->pmu.c.intel; c 268 arch/x86/xen/pmu.c ctxt = &xenpmu_data->pmu.c.amd; c 352 arch/x86/xen/pmu.c ctxt = &xenpmu_data->pmu.c.amd; c 377 arch/x86/xen/pmu.c ctxt = &xenpmu_data->pmu.c.intel; c 133 arch/xtensa/include/asm/string.h #define memset(s, c, n) __memset(s, c, n) c 99 arch/xtensa/platforms/iss/console.c unsigned char c; c 104 arch/xtensa/platforms/iss/console.c rd = simc_read(0, &c, 1); c 107 arch/xtensa/platforms/iss/console.c tty_insert_flip_char(port, c, TTY_NORMAL); c 233 arch/xtensa/platforms/iss/console.c static struct tty_driver* iss_console_device(struct console *c, int *index) c 235 arch/xtensa/platforms/iss/console.c *index = c->index; c 67 arch/xtensa/platforms/iss/include/platform/simcall.h static inline int __simc(int a, int b, int c, int d) c 72 arch/xtensa/platforms/iss/include/platform/simcall.h register int c1 asm("a4") = c; c 292 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h .macro xchal_cp0_store p a b c d continue=0 ofs=-1 select=-1 ; .endm c 293 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h .macro xchal_cp0_load p a b c d continue=0 ofs=-1 select=-1 ; .endm c 294 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h .macro xchal_cp2_store p a b c d continue=0 ofs=-1 select=-1 ; .endm c 295 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h .macro xchal_cp2_load p a b c d continue=0 ofs=-1 select=-1 ; .endm c 296 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h .macro xchal_cp3_store p a b c d continue=0 ofs=-1 select=-1 ; .endm c 297 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h .macro xchal_cp3_load p a b c d continue=0 ofs=-1 select=-1 ; .endm c 298 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h .macro xchal_cp4_store p a b c d continue=0 ofs=-1 select=-1 ; .endm c 299 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h .macro xchal_cp4_load p a b c d continue=0 ofs=-1 select=-1 ; .endm c 300 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h .macro xchal_cp5_store p a b c d continue=0 ofs=-1 select=-1 ; .endm c 301 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h .macro xchal_cp5_load p a b c d continue=0 ofs=-1 select=-1 ; .endm c 302 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h .macro xchal_cp6_store p a b c d continue=0 ofs=-1 select=-1 ; .endm c 303 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h .macro xchal_cp6_load p a b c d continue=0 ofs=-1 select=-1 ; .endm c 304 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h .macro xchal_cp7_store p a b c d continue=0 ofs=-1 select=-1 ; .endm c 305 arch/xtensa/variants/test_kc705_be/include/variant/tie-asm.h .macro xchal_cp7_load p a b c d continue=0 ofs=-1 select=-1 ; .endm c 313 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h .macro xchal_cp0_store p a b c d continue=0 ofs=-1 select=-1 ; .endm c 314 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h .macro xchal_cp0_load p a b c d continue=0 ofs=-1 select=-1 ; .endm c 315 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h .macro xchal_cp2_store p a b c d 
continue=0 ofs=-1 select=-1 ; .endm c 316 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h .macro xchal_cp2_load p a b c d continue=0 ofs=-1 select=-1 ; .endm c 317 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h .macro xchal_cp3_store p a b c d continue=0 ofs=-1 select=-1 ; .endm c 318 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h .macro xchal_cp3_load p a b c d continue=0 ofs=-1 select=-1 ; .endm c 319 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h .macro xchal_cp4_store p a b c d continue=0 ofs=-1 select=-1 ; .endm c 320 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h .macro xchal_cp4_load p a b c d continue=0 ofs=-1 select=-1 ; .endm c 321 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h .macro xchal_cp5_store p a b c d continue=0 ofs=-1 select=-1 ; .endm c 322 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h .macro xchal_cp5_load p a b c d continue=0 ofs=-1 select=-1 ; .endm c 323 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h .macro xchal_cp6_store p a b c d continue=0 ofs=-1 select=-1 ; .endm c 324 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h .macro xchal_cp6_load p a b c d continue=0 ofs=-1 select=-1 ; .endm c 325 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h .macro xchal_cp7_store p a b c d continue=0 ofs=-1 select=-1 ; .endm c 326 arch/xtensa/variants/test_kc705_hifi/include/variant/tie-asm.h .macro xchal_cp7_load p a b c d continue=0 ofs=-1 select=-1 ; .endm c 167 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h .macro xchal_cp0_store p a b c d continue=0 ofs=-1 select=-1 ; .endm c 168 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h .macro xchal_cp0_load p a b c d continue=0 ofs=-1 select=-1 ; .endm c 169 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h .macro xchal_cp2_store p a b c d continue=0 ofs=-1 select=-1 ; .endm c 170 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h .macro xchal_cp2_load p a b c d continue=0 ofs=-1 select=-1 ; .endm c 171 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h .macro xchal_cp3_store p a b c d continue=0 ofs=-1 select=-1 ; .endm c 172 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h .macro xchal_cp3_load p a b c d continue=0 ofs=-1 select=-1 ; .endm c 173 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h .macro xchal_cp4_store p a b c d continue=0 ofs=-1 select=-1 ; .endm c 174 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h .macro xchal_cp4_load p a b c d continue=0 ofs=-1 select=-1 ; .endm c 175 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h .macro xchal_cp5_store p a b c d continue=0 ofs=-1 select=-1 ; .endm c 176 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h .macro xchal_cp5_load p a b c d continue=0 ofs=-1 select=-1 ; .endm c 177 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h .macro xchal_cp6_store p a b c d continue=0 ofs=-1 select=-1 ; .endm c 178 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h .macro xchal_cp6_load p a b c d continue=0 ofs=-1 select=-1 ; .endm c 179 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h .macro xchal_cp7_store p a b c d continue=0 ofs=-1 select=-1 ; .endm c 180 arch/xtensa/variants/test_mmuhifi_c3/include/variant/tie-asm.h .macro xchal_cp7_load p a b c d continue=0 ofs=-1 select=-1 ; .endm c 808 block/blk-iocost.c u64 *c = ioc->params.lcoefs; c 811 block/blk-iocost.c 
&c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]); c 813 block/blk-iocost.c &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]); c 391 block/ioctl.c struct pr_clear c; c 397 block/ioctl.c if (copy_from_user(&c, arg, sizeof(c))) c 400 block/ioctl.c if (c.flags) c 402 block/ioctl.c return ops->pr_clear(bdev, c.key); c 718 block/partitions/efi.c u8 c = ptes[i].partition_name[label_count] & 0xff; c 719 block/partitions/efi.c if (c && !isprint(c)) c 720 block/partitions/efi.c c = '!'; c 721 block/partitions/efi.c info->volname[label_count] = c; c 23 crypto/aegis128-neon-inner.c void *memset(void *s, int c, size_t n); c 949 crypto/algapi.c u8 c; c 952 crypto/algapi.c c = *--b + 1; c 953 crypto/algapi.c *b = c; c 954 crypto/algapi.c if (c) c 962 crypto/algapi.c u32 c; c 967 crypto/algapi.c c = be32_to_cpu(*--b) + 1; c 968 crypto/algapi.c *b = cpu_to_be32(c); c 969 crypto/algapi.c if (likely(c)) c 27 crypto/async_tx/async_raid6_recov.c u8 *a, *b, *c; c 71 crypto/async_tx/async_raid6_recov.c c = page_address(dest); c 76 crypto/async_tx/async_raid6_recov.c *c++ = ax ^ bx; c 303 crypto/cast5_generic.c void __cast5_encrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf) c 312 crypto/cast5_generic.c Km = c->Km; c 313 crypto/cast5_generic.c Kr = c->Kr; c 341 crypto/cast5_generic.c if (!(c->rr)) { c 360 crypto/cast5_generic.c void __cast5_decrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf) c 369 crypto/cast5_generic.c Km = c->Km; c 370 crypto/cast5_generic.c Kr = c->Kr; c 375 crypto/cast5_generic.c if (!(c->rr)) { c 481 crypto/cast5_generic.c struct cast5_ctx *c = crypto_tfm_ctx(tfm); c 488 crypto/cast5_generic.c c->rr = key_len <= 10 ? 1 : 0; c 501 crypto/cast5_generic.c c->Km[i] = k[i]; c 504 crypto/cast5_generic.c c->Kr[i] = k[i] & 0x1f; c 106 crypto/cast6_generic.c int __cast6_setkey(struct cast6_ctx *c, const u8 *in_key, c 134 crypto/cast6_generic.c c->Kr[i][0] = key[0] & 0x1f; c 135 crypto/cast6_generic.c c->Kr[i][1] = key[2] & 0x1f; c 136 crypto/cast6_generic.c c->Kr[i][2] = key[4] & 0x1f; c 137 crypto/cast6_generic.c c->Kr[i][3] = key[6] & 0x1f; c 139 crypto/cast6_generic.c c->Km[i][0] = key[7]; c 140 crypto/cast6_generic.c c->Km[i][1] = key[5]; c 141 crypto/cast6_generic.c c->Km[i][2] = key[3]; c 142 crypto/cast6_generic.c c->Km[i][3] = key[1]; c 176 crypto/cast6_generic.c void __cast6_encrypt(struct cast6_ctx *c, u8 *outbuf, const u8 *inbuf) c 189 crypto/cast6_generic.c Km = c->Km[0]; Kr = c->Kr[0]; Q(block, Kr, Km); c 190 crypto/cast6_generic.c Km = c->Km[1]; Kr = c->Kr[1]; Q(block, Kr, Km); c 191 crypto/cast6_generic.c Km = c->Km[2]; Kr = c->Kr[2]; Q(block, Kr, Km); c 192 crypto/cast6_generic.c Km = c->Km[3]; Kr = c->Kr[3]; Q(block, Kr, Km); c 193 crypto/cast6_generic.c Km = c->Km[4]; Kr = c->Kr[4]; Q(block, Kr, Km); c 194 crypto/cast6_generic.c Km = c->Km[5]; Kr = c->Kr[5]; Q(block, Kr, Km); c 195 crypto/cast6_generic.c Km = c->Km[6]; Kr = c->Kr[6]; QBAR(block, Kr, Km); c 196 crypto/cast6_generic.c Km = c->Km[7]; Kr = c->Kr[7]; QBAR(block, Kr, Km); c 197 crypto/cast6_generic.c Km = c->Km[8]; Kr = c->Kr[8]; QBAR(block, Kr, Km); c 198 crypto/cast6_generic.c Km = c->Km[9]; Kr = c->Kr[9]; QBAR(block, Kr, Km); c 199 crypto/cast6_generic.c Km = c->Km[10]; Kr = c->Kr[10]; QBAR(block, Kr, Km); c 200 crypto/cast6_generic.c Km = c->Km[11]; Kr = c->Kr[11]; QBAR(block, Kr, Km); c 214 crypto/cast6_generic.c void __cast6_decrypt(struct cast6_ctx *c, u8 *outbuf, const u8 *inbuf) c 227 crypto/cast6_generic.c Km = c->Km[11]; Kr = c->Kr[11]; Q(block, Kr, Km); c 228 crypto/cast6_generic.c Km = 
c->Km[10]; Kr = c->Kr[10]; Q(block, Kr, Km); c 229 crypto/cast6_generic.c Km = c->Km[9]; Kr = c->Kr[9]; Q(block, Kr, Km); c 230 crypto/cast6_generic.c Km = c->Km[8]; Kr = c->Kr[8]; Q(block, Kr, Km); c 231 crypto/cast6_generic.c Km = c->Km[7]; Kr = c->Kr[7]; Q(block, Kr, Km); c 232 crypto/cast6_generic.c Km = c->Km[6]; Kr = c->Kr[6]; Q(block, Kr, Km); c 233 crypto/cast6_generic.c Km = c->Km[5]; Kr = c->Kr[5]; QBAR(block, Kr, Km); c 234 crypto/cast6_generic.c Km = c->Km[4]; Kr = c->Kr[4]; QBAR(block, Kr, Km); c 235 crypto/cast6_generic.c Km = c->Km[3]; Kr = c->Kr[3]; QBAR(block, Kr, Km); c 236 crypto/cast6_generic.c Km = c->Km[2]; Kr = c->Kr[2]; QBAR(block, Kr, Km); c 237 crypto/cast6_generic.c Km = c->Km[1]; Kr = c->Kr[1]; QBAR(block, Kr, Km); c 238 crypto/cast6_generic.c Km = c->Km[0]; Kr = c->Kr[0]; QBAR(block, Kr, Km); c 450 crypto/crypto_user_base.c struct netlink_dump_control c = { c 455 crypto/crypto_user_base.c err = netlink_dump_start(net->crypto_nlsk, skb, nlh, &c); c 519 crypto/ecc.c u64 c = -mod[0]; c 525 crypto/ecc.c vli_umult(t, r + ndigits, c, ndigits); c 228 crypto/fcrypt.c union lc4 { __be32 l; u8 c[4]; } u; \ c 230 crypto/fcrypt.c L ^= sbox0[u.c[0]] ^ sbox1[u.c[1]] ^ sbox2[u.c[2]] ^ sbox3[u.c[3]]; \ c 750 crypto/khazad.c static const u64 c[KHAZAD_ROUNDS + 1] = { c 779 crypto/khazad.c c[r] ^ K2; c 63 crypto/md4.c #define ROUND1(a,b,c,d,k,s) (a = lshift(a + F(b,c,d) + k, s)) c 64 crypto/md4.c #define ROUND2(a,b,c,d,k,s) (a = lshift(a + G(b,c,d) + k + (u32)0x5A827999,s)) c 65 crypto/md4.c #define ROUND3(a,b,c,d,k,s) (a = lshift(a + H(b,c,d) + k + (u32)0x6ED9EBA1,s)) c 69 crypto/md4.c u32 a, b, c, d; c 73 crypto/md4.c c = hash[2]; c 76 crypto/md4.c ROUND1(a, b, c, d, in[0], 3); c 77 crypto/md4.c ROUND1(d, a, b, c, in[1], 7); c 78 crypto/md4.c ROUND1(c, d, a, b, in[2], 11); c 79 crypto/md4.c ROUND1(b, c, d, a, in[3], 19); c 80 crypto/md4.c ROUND1(a, b, c, d, in[4], 3); c 81 crypto/md4.c ROUND1(d, a, b, c, in[5], 7); c 82 crypto/md4.c ROUND1(c, d, a, b, in[6], 11); c 83 crypto/md4.c ROUND1(b, c, d, a, in[7], 19); c 84 crypto/md4.c ROUND1(a, b, c, d, in[8], 3); c 85 crypto/md4.c ROUND1(d, a, b, c, in[9], 7); c 86 crypto/md4.c ROUND1(c, d, a, b, in[10], 11); c 87 crypto/md4.c ROUND1(b, c, d, a, in[11], 19); c 88 crypto/md4.c ROUND1(a, b, c, d, in[12], 3); c 89 crypto/md4.c ROUND1(d, a, b, c, in[13], 7); c 90 crypto/md4.c ROUND1(c, d, a, b, in[14], 11); c 91 crypto/md4.c ROUND1(b, c, d, a, in[15], 19); c 93 crypto/md4.c ROUND2(a, b, c, d,in[ 0], 3); c 94 crypto/md4.c ROUND2(d, a, b, c, in[4], 5); c 95 crypto/md4.c ROUND2(c, d, a, b, in[8], 9); c 96 crypto/md4.c ROUND2(b, c, d, a, in[12], 13); c 97 crypto/md4.c ROUND2(a, b, c, d, in[1], 3); c 98 crypto/md4.c ROUND2(d, a, b, c, in[5], 5); c 99 crypto/md4.c ROUND2(c, d, a, b, in[9], 9); c 100 crypto/md4.c ROUND2(b, c, d, a, in[13], 13); c 101 crypto/md4.c ROUND2(a, b, c, d, in[2], 3); c 102 crypto/md4.c ROUND2(d, a, b, c, in[6], 5); c 103 crypto/md4.c ROUND2(c, d, a, b, in[10], 9); c 104 crypto/md4.c ROUND2(b, c, d, a, in[14], 13); c 105 crypto/md4.c ROUND2(a, b, c, d, in[3], 3); c 106 crypto/md4.c ROUND2(d, a, b, c, in[7], 5); c 107 crypto/md4.c ROUND2(c, d, a, b, in[11], 9); c 108 crypto/md4.c ROUND2(b, c, d, a, in[15], 13); c 110 crypto/md4.c ROUND3(a, b, c, d,in[ 0], 3); c 111 crypto/md4.c ROUND3(d, a, b, c, in[8], 9); c 112 crypto/md4.c ROUND3(c, d, a, b, in[4], 11); c 113 crypto/md4.c ROUND3(b, c, d, a, in[12], 15); c 114 crypto/md4.c ROUND3(a, b, c, d, in[2], 3); c 115 crypto/md4.c ROUND3(d, a, b, c, in[10], 9); c 116 
crypto/md4.c ROUND3(c, d, a, b, in[6], 11); c 117 crypto/md4.c ROUND3(b, c, d, a, in[14], 15); c 118 crypto/md4.c ROUND3(a, b, c, d, in[1], 3); c 119 crypto/md4.c ROUND3(d, a, b, c, in[9], 9); c 120 crypto/md4.c ROUND3(c, d, a, b, in[5], 11); c 121 crypto/md4.c ROUND3(b, c, d, a, in[13], 15); c 122 crypto/md4.c ROUND3(a, b, c, d, in[3], 3); c 123 crypto/md4.c ROUND3(d, a, b, c, in[11], 9); c 124 crypto/md4.c ROUND3(c, d, a, b, in[7], 11); c 125 crypto/md4.c ROUND3(b, c, d, a, in[15], 15); c 129 crypto/md4.c hash[2] += c; c 45 crypto/md5.c u32 a, b, c, d; c 49 crypto/md5.c c = hash[2]; c 52 crypto/md5.c MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7); c 53 crypto/md5.c MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12); c 54 crypto/md5.c MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17); c 55 crypto/md5.c MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22); c 56 crypto/md5.c MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7); c 57 crypto/md5.c MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12); c 58 crypto/md5.c MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17); c 59 crypto/md5.c MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22); c 60 crypto/md5.c MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7); c 61 crypto/md5.c MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12); c 62 crypto/md5.c MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17); c 63 crypto/md5.c MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22); c 64 crypto/md5.c MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7); c 65 crypto/md5.c MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12); c 66 crypto/md5.c MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17); c 67 crypto/md5.c MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22); c 69 crypto/md5.c MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5); c 70 crypto/md5.c MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9); c 71 crypto/md5.c MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14); c 72 crypto/md5.c MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20); c 73 crypto/md5.c MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5); c 74 crypto/md5.c MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9); c 75 crypto/md5.c MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14); c 76 crypto/md5.c MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20); c 77 crypto/md5.c MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5); c 78 crypto/md5.c MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9); c 79 crypto/md5.c MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14); c 80 crypto/md5.c MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20); c 81 crypto/md5.c MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5); c 82 crypto/md5.c MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9); c 83 crypto/md5.c MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14); c 84 crypto/md5.c MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20); c 86 crypto/md5.c MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4); c 87 crypto/md5.c MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11); c 88 crypto/md5.c MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16); c 89 crypto/md5.c MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23); c 90 crypto/md5.c MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4); c 91 crypto/md5.c MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11); c 92 crypto/md5.c MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16); c 93 crypto/md5.c MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23); c 94 crypto/md5.c MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4); c 95 crypto/md5.c MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11); c 96 crypto/md5.c MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16); c 97 crypto/md5.c MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23); c 98 
crypto/md5.c MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4); c 99 crypto/md5.c MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11); c 100 crypto/md5.c MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16); c 101 crypto/md5.c MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23); c 103 crypto/md5.c MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6); c 104 crypto/md5.c MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10); c 105 crypto/md5.c MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15); c 106 crypto/md5.c MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21); c 107 crypto/md5.c MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6); c 108 crypto/md5.c MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10); c 109 crypto/md5.c MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15); c 110 crypto/md5.c MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21); c 111 crypto/md5.c MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6); c 112 crypto/md5.c MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10); c 113 crypto/md5.c MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15); c 114 crypto/md5.c MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21); c 115 crypto/md5.c MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6); c 116 crypto/md5.c MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10); c 117 crypto/md5.c MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15); c 118 crypto/md5.c MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21); c 122 crypto/md5.c hash[2] += c; c 40 crypto/rmd128.c #define ROUND(a, b, c, d, f, k, x, s) { \ c 41 crypto/rmd128.c (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \ c 43 crypto/rmd160.c #define ROUND(a, b, c, d, e, f, k, x, s) { \ c 44 crypto/rmd160.c (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \ c 46 crypto/rmd160.c (c) = rol32((c), 10); \ c 40 crypto/rmd256.c #define ROUND(a, b, c, d, f, k, x, s) { \ c 41 crypto/rmd256.c (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \ c 43 crypto/rmd320.c #define ROUND(a, b, c, d, e, f, k, x, s) { \ c 44 crypto/rmd320.c (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \ c 46 crypto/rmd320.c (c) = rol32((c), 10); \ c 25 crypto/rsa.c static int _rsa_enc(const struct rsa_mpi_key *key, MPI c, MPI m) c 32 crypto/rsa.c return mpi_powm(c, m, key->e, key->n); c 39 crypto/rsa.c static int _rsa_dec(const struct rsa_mpi_key *key, MPI m, MPI c) c 42 crypto/rsa.c if (mpi_cmp_ui(c, 0) < 0 || mpi_cmp(c, key->n) >= 0) c 46 crypto/rsa.c return mpi_powm(m, c, key->d, key->n); c 58 crypto/rsa.c MPI m, c = mpi_alloc(0); c 62 crypto/rsa.c if (!c) c 75 crypto/rsa.c ret = _rsa_enc(pkey, c, m); c 79 crypto/rsa.c ret = mpi_write_to_sgl(c, req->dst, req->dst_len, &sign); c 89 crypto/rsa.c mpi_free(c); c 97 crypto/rsa.c MPI c, m = mpi_alloc(0); c 110 crypto/rsa.c c = mpi_read_raw_from_sgl(req->src, req->src_len); c 111 crypto/rsa.c if (!c) c 114 crypto/rsa.c ret = _rsa_dec(pkey, m, c); c 125 crypto/rsa.c mpi_free(c); c 29 crypto/serpent_generic.c #define keyiter(a, b, c, d, i, j) \ c 30 crypto/serpent_generic.c ({ b ^= d; b ^= c; b ^= a; b ^= PHI ^ i; b = rol32(b, 11); k[j] = b; }) c 101 crypto/sha512_generic.c u64 a, b, c, d, e, f, g, h, t1, t2; c 107 crypto/sha512_generic.c a=state[0]; b=state[1]; c=state[2]; d=state[3]; c 127 crypto/sha512_generic.c t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; c 129 crypto/sha512_generic.c t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; c 130 crypto/sha512_generic.c t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[(i & 15) + 2]; c 132 crypto/sha512_generic.c t1 = e + e1(b) + Ch(b,c,d) + sha512_K[i+3] + W[(i & 15) + 3]; c 134 crypto/sha512_generic.c t1 = d + e1(a) + Ch(a,b,c) + sha512_K[i+4] + W[(i & 15) + 
4]; c 136 crypto/sha512_generic.c t1 = c + e1(h) + Ch(h,a,b) + sha512_K[i+5] + W[(i & 15) + 5]; c 137 crypto/sha512_generic.c t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; c 139 crypto/sha512_generic.c t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; c 141 crypto/sha512_generic.c t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2; c 144 crypto/sha512_generic.c state[0] += a; state[1] += b; state[2] += c; state[3] += d; c 148 crypto/sha512_generic.c a = b = c = d = e = f = g = h = t1 = t2 = 0; c 39 crypto/sm3_generic.c static inline u32 ff(unsigned int n, u32 a, u32 b, u32 c) c 41 crypto/sm3_generic.c return (n < 16) ? (a ^ b ^ c) : ((a & b) | (a & c) | (b & c)); c 78 crypto/sm3_generic.c u32 a, b, c, d, e, f, g, h; c 83 crypto/sm3_generic.c c = m[2]; c 96 crypto/sm3_generic.c tt1 = ff(i, a, b, c) + d + ss2 + *wt; c 102 crypto/sm3_generic.c d = c; c 103 crypto/sm3_generic.c c = rol32(b, 9); c 114 crypto/sm3_generic.c m[2] = c ^ m[2]; c 121 crypto/sm3_generic.c a = b = c = d = e = f = g = h = ss1 = ss2 = tt1 = tt2 = 0; c 3668 crypto/testmgr.c const char *m, *c; c 3712 crypto/testmgr.c c = vecs->c; c 3719 crypto/testmgr.c m = vecs->c; /* signature */ c 3721 crypto/testmgr.c c = vecs->m; /* digest */ c 3736 crypto/testmgr.c memcpy(xbuf[1], c, c_size); c 3764 crypto/testmgr.c if (memcmp(c, outbuf_enc, c_size) != 0) { c 3790 crypto/testmgr.c memcpy(xbuf[0], c, c_size); c 147 crypto/testmgr.h const unsigned char *c; c 200 crypto/testmgr.h .c = c 240 crypto/testmgr.h .c = c 298 crypto/testmgr.h .c = c 340 crypto/testmgr.h .c = c 523 crypto/testmgr.h .c = c 579 crypto/testmgr.h .c = c 605 crypto/testmgr.h .c = c 631 crypto/testmgr.h .c = c 660 crypto/testmgr.h .c = c 695 crypto/testmgr.h .c = c 790 crypto/testmgr.h .c = c 34 crypto/tgr192.c u64 a, b, c; c 401 crypto/tgr192.c u64 c = *rc; c 403 crypto/tgr192.c c ^= x; c 404 crypto/tgr192.c a -= sbox1[c & 0xff] ^ sbox2[(c >> 16) & 0xff] c 405 crypto/tgr192.c ^ sbox3[(c >> 32) & 0xff] ^ sbox4[(c >> 48) & 0xff]; c 406 crypto/tgr192.c b += sbox4[(c >> 8) & 0xff] ^ sbox3[(c >> 24) & 0xff] c 407 crypto/tgr192.c ^ sbox2[(c >> 40) & 0xff] ^ sbox1[(c >> 56) & 0xff]; c 412 crypto/tgr192.c *rc = c; c 420 crypto/tgr192.c u64 c = *rc; c 422 crypto/tgr192.c tgr192_round(&a, &b, &c, x[0], mul); c 423 crypto/tgr192.c tgr192_round(&b, &c, &a, x[1], mul); c 424 crypto/tgr192.c tgr192_round(&c, &a, &b, x[2], mul); c 425 crypto/tgr192.c tgr192_round(&a, &b, &c, x[3], mul); c 426 crypto/tgr192.c tgr192_round(&b, &c, &a, x[4], mul); c 427 crypto/tgr192.c tgr192_round(&c, &a, &b, x[5], mul); c 428 crypto/tgr192.c tgr192_round(&a, &b, &c, x[6], mul); c 429 crypto/tgr192.c tgr192_round(&b, &c, &a, x[7], mul); c 433 crypto/tgr192.c *rc = c; c 464 crypto/tgr192.c u64 a, b, c, aa, bb, cc; c 474 crypto/tgr192.c c = cc = tctx->c; c 476 crypto/tgr192.c tgr192_pass(&a, &b, &c, x, 5); c 478 crypto/tgr192.c tgr192_pass(&c, &a, &b, x, 7); c 480 crypto/tgr192.c tgr192_pass(&b, &c, &a, x, 9); c 486 crypto/tgr192.c c += cc; c 490 crypto/tgr192.c tctx->c = c; c 499 crypto/tgr192.c tctx->c = 0xf096a5b4c3b2e187ULL; c 599 crypto/tgr192.c dst[2] = be64p[2] = cpu_to_be64(tctx->c); c 468 crypto/twofish_common.c #define CALC_S(a, b, c, d, i, w, x, y, z) \ c 473 crypto/twofish_common.c (c) ^= exp_to_poly[tmp + (y)]; \ c 529 crypto/twofish_common.c #define CALC_K_2(a, b, c, d, j) \ c 532 crypto/twofish_common.c ^ mds[2][q1[c ^ key[(j) + 10]] ^ key[(j) + 2]] \ c 542 crypto/twofish_common.c #define CALC_K192_2(a, b, c, d, j) \ c 545 crypto/twofish_common.c q0[c ^ key[(j) + 18]], \ c 53 crypto/twofish_generic.c #define 
ENCROUND(n, a, b, c, d) \ c 56 crypto/twofish_generic.c (c) ^= x + ctx->k[2 * (n)]; \ c 57 crypto/twofish_generic.c (c) = ror32((c), 1); \ c 60 crypto/twofish_generic.c #define DECROUND(n, a, b, c, d) \ c 65 crypto/twofish_generic.c (c) = rol32((c), 1); \ c 66 crypto/twofish_generic.c (c) ^= (x + ctx->k[2 * (n)]) c 72 crypto/twofish_generic.c ENCROUND (2 * (n), a, b, c, d); \ c 73 crypto/twofish_generic.c ENCROUND (2 * (n) + 1, c, d, a, b) c 76 crypto/twofish_generic.c DECROUND (2 * (n) + 1, c, d, a, b); \ c 77 crypto/twofish_generic.c DECROUND (2 * (n), a, b, c, d) c 102 crypto/twofish_generic.c u32 a, b, c, d; c 110 crypto/twofish_generic.c INPACK (2, c, 2); c 124 crypto/twofish_generic.c OUTUNPACK (0, c, 4); c 139 crypto/twofish_generic.c u32 a, b, c, d; c 145 crypto/twofish_generic.c INPACK (0, c, 4); c 163 crypto/twofish_generic.c OUTUNPACK (2, c, 2); c 65 drivers/accessibility/braille/braille_console.c unsigned char data[1 + 1 + 2*WIDTH + 2 + 1], csum = 0, *c; c 84 drivers/accessibility/braille/braille_console.c c = &data[2]; c 93 drivers/accessibility/braille/braille_console.c *c++ = SOH; c 96 drivers/accessibility/braille/braille_console.c *c++ = out; c 100 drivers/accessibility/braille/braille_console.c *c++ = SOH; c 103 drivers/accessibility/braille/braille_console.c *c++ = csum; c 104 drivers/accessibility/braille/braille_console.c *c++ = ETX; c 106 drivers/accessibility/braille/braille_console.c braille_co->write(braille_co, data, c - data); c 274 drivers/accessibility/braille/braille_console.c unsigned char c = param->c; c 277 drivers/accessibility/braille/braille_console.c switch (c) { c 292 drivers/accessibility/braille/braille_console.c c = ' '; c 295 drivers/accessibility/braille/braille_console.c if (c < 32) c 308 drivers/accessibility/braille/braille_console.c console_buf[console_cursor-1] = c; c 324 drivers/acpi/acpi_pnp.c char c = toupper(idstr[i]); c 326 drivers/acpi/acpi_pnp.c if (!isxdigit(c) c 327 drivers/acpi/acpi_pnp.c || (list_id[i] != 'X' && c != toupper(list_id[i]))) c 191 drivers/acpi/acpica/acmacros.h #define ACPI_IS_ASCII(c) ((c) < 0x80) c 363 drivers/acpi/acpica/acmacros.h #define ACPI_IS_ROOT_PREFIX(c) ((c) == (u8) 0x5C) /* Backslash */ c 364 drivers/acpi/acpica/acmacros.h #define ACPI_IS_PARENT_PREFIX(c) ((c) == (u8) 0x5E) /* Carat */ c 365 drivers/acpi/acpica/acmacros.h #define ACPI_IS_PATH_SEPARATOR(c) ((c) == (u8) 0x2E) /* Period (dot) */ c 400 drivers/acpi/acpica/acmacros.h #define ARGI_LIST3(a, b, c) (ARG_1(c)|ARG_2(b)|ARG_3(a)) c 401 drivers/acpi/acpica/acmacros.h #define ARGI_LIST4(a, b, c, d) (ARG_1(d)|ARG_2(c)|ARG_3(b)|ARG_4(a)) c 402 drivers/acpi/acpica/acmacros.h #define ARGI_LIST5(a, b, c, d, e) (ARG_1(e)|ARG_2(d)|ARG_3(c)|ARG_4(b)|ARG_5(a)) c 403 drivers/acpi/acpica/acmacros.h #define ARGI_LIST6(a, b, c, d, e, f) (ARG_1(f)|ARG_2(e)|ARG_3(d)|ARG_4(c)|ARG_5(b)|ARG_6(a)) c 407 drivers/acpi/acpica/acmacros.h #define ARGP_LIST3(a, b, c) (ARG_1(a)|ARG_2(b)|ARG_3(c)) c 408 drivers/acpi/acpica/acmacros.h #define ARGP_LIST4(a, b, c, d) (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d)) c 409 drivers/acpi/acpica/acmacros.h #define ARGP_LIST5(a, b, c, d, e) (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d)|ARG_5(e)) c 410 drivers/acpi/acpica/acmacros.h #define ARGP_LIST6(a, b, c, d, e, f) (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d)|ARG_5(e)|ARG_6(f)) c 456 drivers/acpi/acpica/acmacros.h #define ACPI_INIT_UUID(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ c 459 drivers/acpi/acpica/acmacros.h (c) & 0xFF, ((c) >> 8) & 0xFF, \ c 477 drivers/acpi/acpica/acmacros.h #define 
ASL_CV_PRINT_ONE_COMMENT(a,b,c,d) cv_print_one_comment_type (a,b,c,d); c 480 drivers/acpi/acpica/acmacros.h #define ASL_CV_INIT_FILETREE(a,b,c) cv_init_file_tree(a,b,c); c 492 drivers/acpi/acpica/acmacros.h #define ASL_CV_PRINT_ONE_COMMENT(a,b,c,d) c 495 drivers/acpi/acpica/acmacros.h #define ASL_CV_INIT_FILETREE(a,b,c) c 208 drivers/acpi/acpica/acparser.h u8 acpi_ps_is_leading_char(u32 c); c 124 drivers/acpi/acpica/acpredef.h #define PACKAGE_INFO(a,b,c,d,e,f) {{{(a),(b),(c),(d)}, ((((u16)(f)) << 8) | (e)), 0}} c 472 drivers/acpi/acpica/hwgpe.c struct acpi_gpe_block_status_context *c = context; c 497 drivers/acpi/acpica/hwgpe.c if (ret_mask && c->gpe_skip_register_info == gpe_register_info) { c 498 drivers/acpi/acpica/hwgpe.c ret_mask &= ~c->gpe_skip_mask; c 500 drivers/acpi/acpica/hwgpe.c c->retval |= ret_mask; c 199 drivers/acpi/acpica/nsnames.c char c, *left, *right; c 235 drivers/acpi/acpica/nsnames.c c = name[4 - i - 1]; c 236 drivers/acpi/acpica/nsnames.c if (do_no_trailing && c != '_') { c 240 drivers/acpi/acpica/nsnames.c ACPI_PATH_PUT8(full_path, path_size, c, length); c 256 drivers/acpi/acpica/nsnames.c c = *left; c 258 drivers/acpi/acpica/nsnames.c *right-- = c; c 179 drivers/acpi/acpica/psutils.c u8 acpi_ps_is_leading_char(u32 c) c 181 drivers/acpi/acpica/psutils.c return ((u8) (c == '_' || (c >= 'A' && c <= 'Z'))); c 27 drivers/acpi/acpica/utprint.c static char *acpi_ut_bound_string_output(char *string, const char *end, char c); c 77 drivers/acpi/acpica/utprint.c static char *acpi_ut_bound_string_output(char *string, const char *end, char c) c 81 drivers/acpi/acpica/utprint.c *string = c; c 328 drivers/acpi/acpica/utprint.c char c; c 429 drivers/acpi/acpica/utprint.c c = (char)va_arg(args, int); c 430 drivers/acpi/acpica/utprint.c pos = acpi_ut_bound_string_output(pos, end, c); c 126 drivers/acpi/cppc_acpi.c struct attribute *attr, const char *c, ssize_t count); c 200 drivers/acpi/device_sysfs.c char *c; c 207 drivers/acpi/device_sysfs.c for (c = buf.pointer; *c != '\0'; c++) c 208 drivers/acpi/device_sysfs.c *c = tolower(*c); c 2449 drivers/acpi/nfit/core.c unsigned int c; c 2458 drivers/acpi/nfit/core.c c = min_t(size_t, len, mmio->line_size - line_offset); c 2461 drivers/acpi/nfit/core.c c = len; c 2465 drivers/acpi/nfit/core.c memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c); c 2469 drivers/acpi/nfit/core.c mmio->addr.aperture + offset, c); c 2471 drivers/acpi/nfit/core.c memcpy(iobuf + copied, mmio->addr.aperture + offset, c); c 2474 drivers/acpi/nfit/core.c copied += c; c 2475 drivers/acpi/nfit/core.c len -= c; c 2496 drivers/acpi/nfit/core.c u64 c = min(len, mmio->size); c 2499 drivers/acpi/nfit/core.c iobuf + copied, c, rw, lane); c 2503 drivers/acpi/nfit/core.c copied += c; c 2504 drivers/acpi/nfit/core.c len -= c; c 93 drivers/acpi/pci_irq.c #define PCI_INTX_PIN(c) (c - 'A' + 1) c 514 drivers/acpi/resource.c struct res_proc_context *c) c 520 drivers/acpi/resource.c c->error = -ENOMEM; c 525 drivers/acpi/resource.c resource_list_add_tail(rentry, c->list); c 526 drivers/acpi/resource.c c->count++; c 533 drivers/acpi/resource.c struct res_proc_context *c = context; c 538 drivers/acpi/resource.c if (c->preproc) { c 541 drivers/acpi/resource.c ret = c->preproc(ares, c->preproc_data); c 543 drivers/acpi/resource.c c->error = ret; c 556 drivers/acpi/resource.c return acpi_dev_new_resource_entry(&win, c); c 561 drivers/acpi/resource.c status = acpi_dev_new_resource_entry(&win, c); c 574 drivers/acpi/resource.c struct res_proc_context c; c 583 
drivers/acpi/resource.c c.list = list; c 584 drivers/acpi/resource.c c.preproc = preproc; c 585 drivers/acpi/resource.c c.preproc_data = preproc_data; c 586 drivers/acpi/resource.c c.count = 0; c 587 drivers/acpi/resource.c c.error = 0; c 589 drivers/acpi/resource.c acpi_dev_process_resource, &c); c 592 drivers/acpi/resource.c return c.error ? c.error : -EIO; c 595 drivers/acpi/resource.c return c.count; c 109 drivers/ata/libahci_platform.c int c, rc; c 111 drivers/ata/libahci_platform.c for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++) { c 112 drivers/ata/libahci_platform.c rc = clk_prepare_enable(hpriv->clks[c]); c 119 drivers/ata/libahci_platform.c while (--c >= 0) c 120 drivers/ata/libahci_platform.c clk_disable_unprepare(hpriv->clks[c]); c 134 drivers/ata/libahci_platform.c int c; c 136 drivers/ata/libahci_platform.c for (c = AHCI_MAX_CLKS - 1; c >= 0; c--) c 137 drivers/ata/libahci_platform.c if (hpriv->clks[c]) c 138 drivers/ata/libahci_platform.c clk_disable_unprepare(hpriv->clks[c]); c 288 drivers/ata/libahci_platform.c int c; c 295 drivers/ata/libahci_platform.c for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++) c 296 drivers/ata/libahci_platform.c clk_put(hpriv->clks[c]); c 302 drivers/ata/libahci_platform.c for (c = 0; c < hpriv->nports; c++) c 303 drivers/ata/libahci_platform.c if (hpriv->target_pwrs && hpriv->target_pwrs[c]) c 304 drivers/ata/libahci_platform.c regulator_put(hpriv->target_pwrs[c]); c 1116 drivers/ata/libata-core.c unsigned int c; c 1121 drivers/ata/libata-core.c c = id[ofs] >> 8; c 1122 drivers/ata/libata-core.c *s = c; c 1125 drivers/ata/libata-core.c c = id[ofs] & 0xff; c 1126 drivers/ata/libata-core.c *s = c; c 75 drivers/ata/pata_mpc52xx.c #define CALC_CLKCYC(c,v) ((((v)+(c)-1)/(c))) c 226 drivers/ata/pata_octeon_cf.c int c; c 252 drivers/ata/pata_octeon_cf.c c = (cf_port->dma_base & 8) >> 3; c 255 drivers/ata/pata_octeon_cf.c dma_tim.s.dmack_pi = (pin_defs.u64 & (1ull << (11 + c))) ? 
0 : 1; c 1429 drivers/atm/ambassador.c amb_cq * c = &dev->cq; c 1431 drivers/atm/ambassador.c c->pending, c->high, c->maximum); c 149 drivers/atm/eni.c #define NEPMOK(a0,d,b,c) NEPJOK(a0,(a0+d) & (c-1),b) c 150 drivers/atm/eni.c #define EEPMOK(a0,d,b,c) EEPJOK(a0,(a0+d) & (c-1),b) c 606 drivers/atm/firestream.c static int c; c 607 drivers/atm/firestream.c if (!(c++ % 100)) c 734 drivers/atm/firestream.c static int c=0; c 736 drivers/atm/firestream.c if (!(c++ % 100)) { c 2363 drivers/atm/fore200e.c static void fore200e_monitor_putc(struct fore200e *fore200e, char c) c 2368 drivers/atm/fore200e.c printk("%c", c); c 2370 drivers/atm/fore200e.c fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send); c 2378 drivers/atm/fore200e.c int c; c 2382 drivers/atm/fore200e.c c = (int) fore200e->bus->read(&monitor->soft_uart.recv); c 2384 drivers/atm/fore200e.c if (c & FORE200E_CP_MONITOR_UART_AVAIL) { c 2388 drivers/atm/fore200e.c printk("%c", c & 0xFF); c 2390 drivers/atm/fore200e.c return c & 0xFF; c 572 drivers/atm/horizon.c static int make_rate (const hrz_dev * dev, u32 c, rounding r, c 588 drivers/atm/horizon.c PRINTD (DBG_QOS|DBG_FLOW, "make_rate b=%lu, c=%u, %s", br, c, c 592 drivers/atm/horizon.c if (!c) { c 604 drivers/atm/horizon.c if (br_man <= (c << (CR_MAXPEXP+CR_MIND-br_exp))) { c 609 drivers/atm/horizon.c pre = DIV_ROUND_UP(br, c<<div); c 615 drivers/atm/horizon.c pre = DIV_ROUND_CLOSEST(br, c<<div); c 621 drivers/atm/horizon.c pre = br/(c<<div); c 634 drivers/atm/horizon.c if (br_man <= (c << (CR_MAXPEXP+div-br_exp))) { c 642 drivers/atm/horizon.c pre = DIV_ROUND_UP(br, c<<div); c 645 drivers/atm/horizon.c pre = DIV_ROUND_CLOSEST(br, c<<div); c 648 drivers/atm/horizon.c pre = br/(c<<div); c 679 drivers/atm/horizon.c static int make_rate_with_tolerance (const hrz_dev * dev, u32 c, rounding r, unsigned int tol, c 684 drivers/atm/horizon.c c, (r == round_up) ? "up" : (r == round_down) ? "down" : "nearest", tol); c 690 drivers/atm/horizon.c if (make_rate (dev, c, round_nearest, bit_pattern, actual)) c 694 drivers/atm/horizon.c if (c - tol <= *actual && *actual <= c + tol) c 699 drivers/atm/horizon.c return make_rate (dev, c, r, bit_pattern, actual); c 1406 drivers/atm/iphase.h u_short c = cmd; \ c 1410 drivers/atm/iphase.h NVRAM_CLKOUT((c & (1 << (CMD_LEN - 1))) ? 1 : 0); \ c 1411 drivers/atm/iphase.h c <<= 1; \ c 135 drivers/atm/midway.h #define MID_TX_PLACE(c) (0x10+4*(c)) c 148 drivers/atm/midway.h #define MID_TX_RDPTR(c) (0x11+4*(c)) c 156 drivers/atm/midway.h #define MID_TX_DESCRSTART(c) (0x12+4*(c)) c 327 drivers/atm/nicstar.c int i, c; c 329 drivers/atm/nicstar.c c = count; c 330 drivers/atm/nicstar.c c <<= 2; /* to use increments of 4 */ c 333 drivers/atm/nicstar.c for (i = 0; i <= c; i += 4) c 793 drivers/atm/zatm.c unsigned long i,m,c; c 802 drivers/atm/zatm.c c = 5; c 836 drivers/atm/zatm.c c = 20; /* @@@ should use max_cdv ! 
*/ c 844 drivers/atm/zatm.c zpokel(zatm_dev,c << uPD98401_PC_C_SHIFT,uPD98401_PC(shaper)); c 259 drivers/auxdisplay/cfag12864b.c unsigned char c; c 272 drivers/auxdisplay/cfag12864b.c for (c = 0, b = 0; b < 8; b++) c 278 drivers/auxdisplay/cfag12864b.c c |= bit(b); c 279 drivers/auxdisplay/cfag12864b.c cfag12864b_writebyte(c); c 180 drivers/auxdisplay/charlcd.c static void charlcd_print(struct charlcd *lcd, char c) c 186 drivers/auxdisplay/charlcd.c c = lcd->char_conv[(unsigned char)c]; c 187 drivers/auxdisplay/charlcd.c lcd->ops->write_data(lcd, c); c 581 drivers/auxdisplay/charlcd.c static void charlcd_write_char(struct charlcd *lcd, char c) c 586 drivers/auxdisplay/charlcd.c if ((c != '\n') && priv->esc_seq.len >= 0) { c 588 drivers/auxdisplay/charlcd.c priv->esc_seq.buf[priv->esc_seq.len++] = c; c 594 drivers/auxdisplay/charlcd.c switch (c) { c 643 drivers/auxdisplay/charlcd.c charlcd_print(lcd, c); c 687 drivers/auxdisplay/charlcd.c char c; c 697 drivers/auxdisplay/charlcd.c if (get_user(c, tmp)) c 700 drivers/auxdisplay/charlcd.c charlcd_write_char(the_charlcd, c); c 323 drivers/base/arch_topology.c struct device_node *c; c 336 drivers/base/arch_topology.c c = of_get_child_by_name(cluster, name); c 337 drivers/base/arch_topology.c if (c) { c 339 drivers/base/arch_topology.c ret = parse_cluster(c, depth + 1); c 340 drivers/base/arch_topology.c of_node_put(c); c 345 drivers/base/arch_topology.c } while (c); c 351 drivers/base/arch_topology.c c = of_get_child_by_name(cluster, name); c 352 drivers/base/arch_topology.c if (c) { c 357 drivers/base/arch_topology.c c); c 358 drivers/base/arch_topology.c of_node_put(c); c 363 drivers/base/arch_topology.c ret = parse_core(c, package_id, core_id++); c 370 drivers/base/arch_topology.c of_node_put(c); c 375 drivers/base/arch_topology.c } while (c); c 165 drivers/base/component.c struct component *c; c 167 drivers/base/component.c list_for_each_entry(c, &component_list, node) { c 168 drivers/base/component.c if (c->master && c->master != master) c 171 drivers/base/component.c if (mc->compare && mc->compare(c->dev, mc->data)) c 172 drivers/base/component.c return c; c 175 drivers/base/component.c mc->compare_typed(c->dev, c->subcomponent, mc->data)) c 176 drivers/base/component.c return c; c 194 drivers/base/component.c struct component *c; c 201 drivers/base/component.c c = find_component(master, mc); c 202 drivers/base/component.c if (!c) { c 207 drivers/base/component.c dev_dbg(master->dev, "found component %s, duplicate %u\n", dev_name(c->dev), !!c->master); c 210 drivers/base/component.c match->compare[i].duplicate = !!c->master; c 211 drivers/base/component.c match->compare[i].component = c; c 212 drivers/base/component.c c->master = master; c 218 drivers/base/component.c static void remove_component(struct master *master, struct component *c) c 224 drivers/base/component.c if (master->match->compare[i].component == c) c 447 drivers/base/component.c struct component *c = match->compare[i].component; c 448 drivers/base/component.c if (c) c 449 drivers/base/component.c c->master = NULL; c 552 drivers/base/component.c struct component *c; c 564 drivers/base/component.c c = master->match->compare[i].component; c 565 drivers/base/component.c component_unbind(c, master, data); c 635 drivers/base/component.c struct component *c; c 648 drivers/base/component.c c = master->match->compare[i].component; c 649 drivers/base/component.c ret = component_bind(c, master, data); c 657 drivers/base/component.c c = master->match->compare[i - 1].component; c 658 
drivers/base/component.c component_unbind(c, master, data); c 759 drivers/base/component.c struct component *c, *component = NULL; c 762 drivers/base/component.c list_for_each_entry(c, &component_list, node) c 763 drivers/base/component.c if (c->dev == dev && c->ops == ops) { c 764 drivers/base/component.c list_del(&c->node); c 765 drivers/base/component.c component = c; c 3275 drivers/base/core.c char c; c 3278 drivers/base/core.c c = 'b'; c 3280 drivers/base/core.c c = 'c'; c 3284 drivers/base/core.c c, MAJOR(dev->devt), MINOR(dev->devt)); c 107 drivers/base/node.c struct node_access_nodes *c, *cnext; c 109 drivers/base/node.c list_for_each_entry_safe(c, cnext, &node->access_list, list_node) { c 110 drivers/base/node.c list_del(&c->list_node); c 111 drivers/base/node.c device_unregister(&c->dev); c 187 drivers/base/node.c struct node_access_nodes *c; c 195 drivers/base/node.c c = node_init_node_access(node, access); c 196 drivers/base/node.c if (!c) c 199 drivers/base/node.c c->hmem_attrs = *hmem_attrs; c 201 drivers/base/node.c if (sysfs_add_file_to_group(&c->dev.kobj, access_attrs[i], c 367 drivers/base/power/clock_ops.c struct pm_clock_entry *ce, *c; c 377 drivers/base/power/clock_ops.c list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node) c 384 drivers/base/power/clock_ops.c list_for_each_entry_safe_reverse(ce, c, &list, node) { c 183 drivers/base/power/qos.c struct pm_qos_constraints *c; c 196 drivers/base/power/qos.c c = &qos->resume_latency; c 197 drivers/base/power/qos.c plist_head_init(&c->list); c 198 drivers/base/power/qos.c c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; c 199 drivers/base/power/qos.c c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; c 200 drivers/base/power/qos.c c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; c 201 drivers/base/power/qos.c c->type = PM_QOS_MIN; c 202 drivers/base/power/qos.c c->notifiers = n; c 205 drivers/base/power/qos.c c = &qos->latency_tolerance; c 206 drivers/base/power/qos.c plist_head_init(&c->list); c 207 drivers/base/power/qos.c c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE; c 208 drivers/base/power/qos.c c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE; c 209 drivers/base/power/qos.c c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; c 210 drivers/base/power/qos.c c->type = PM_QOS_MIN; c 234 drivers/base/power/qos.c struct pm_qos_constraints *c; c 256 drivers/base/power/qos.c c = &qos->resume_latency; c 257 drivers/base/power/qos.c plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) { c 266 drivers/base/power/qos.c c = &qos->latency_tolerance; c 267 drivers/base/power/qos.c plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) { c 140 drivers/base/power/trace.c unsigned char c; c 141 drivers/base/power/trace.c while ((c = *data++) != 0) { c 142 drivers/base/power/trace.c seed = (seed << 16) + (seed << 6) - seed + c; c 70 drivers/base/regmap/regmap-debugfs.c struct regmap_debugfs_off_cache *c; c 73 drivers/base/regmap/regmap-debugfs.c c = list_first_entry(&map->debugfs_off_cache, c 76 drivers/base/regmap/regmap-debugfs.c list_del(&c->list); c 77 drivers/base/regmap/regmap-debugfs.c kfree(c); c 101 drivers/base/regmap/regmap-debugfs.c struct regmap_debugfs_off_cache *c = NULL; c 121 drivers/base/regmap/regmap-debugfs.c if (c) { c 122 drivers/base/regmap/regmap-debugfs.c c->max = p - 1; c 123 drivers/base/regmap/regmap-debugfs.c c->max_reg = i - map->reg_stride; c 124 drivers/base/regmap/regmap-debugfs.c list_add_tail(&c->list, c 126 
drivers/base/regmap/regmap-debugfs.c c = NULL; c 133 drivers/base/regmap/regmap-debugfs.c if (!c) { c 134 drivers/base/regmap/regmap-debugfs.c c = kzalloc(sizeof(*c), GFP_KERNEL); c 135 drivers/base/regmap/regmap-debugfs.c if (!c) { c 140 drivers/base/regmap/regmap-debugfs.c c->min = p; c 141 drivers/base/regmap/regmap-debugfs.c c->base_reg = i; c 149 drivers/base/regmap/regmap-debugfs.c if (c) { c 150 drivers/base/regmap/regmap-debugfs.c c->max = p - 1; c 151 drivers/base/regmap/regmap-debugfs.c c->max_reg = i - map->reg_stride; c 152 drivers/base/regmap/regmap-debugfs.c list_add_tail(&c->list, c 165 drivers/base/regmap/regmap-debugfs.c list_for_each_entry(c, &map->debugfs_off_cache, list) { c 166 drivers/base/regmap/regmap-debugfs.c if (from >= c->min && from <= c->max) { c 167 drivers/base/regmap/regmap-debugfs.c fpos_offset = from - c->min; c 169 drivers/base/regmap/regmap-debugfs.c *pos = c->min + (reg_offset * map->debugfs_tot_len); c 171 drivers/base/regmap/regmap-debugfs.c return c->base_reg + (reg_offset * map->reg_stride); c 174 drivers/base/regmap/regmap-debugfs.c *pos = c->max; c 175 drivers/base/regmap/regmap-debugfs.c ret = c->max_reg; c 196 drivers/base/regmap/regmap-debugfs.c struct regmap_debugfs_off_cache *c; c 203 drivers/base/regmap/regmap-debugfs.c list_for_each_entry(c, &map->debugfs_off_cache, list) { c 204 drivers/base/regmap/regmap-debugfs.c if (reg > c->max_reg) c 206 drivers/base/regmap/regmap-debugfs.c if (reg < c->base_reg) { c 207 drivers/base/regmap/regmap-debugfs.c ret = c->base_reg; c 363 drivers/base/regmap/regmap-debugfs.c struct regmap_debugfs_off_cache *c; c 396 drivers/base/regmap/regmap-debugfs.c list_for_each_entry(c, &map->debugfs_off_cache, list) { c 398 drivers/base/regmap/regmap-debugfs.c c->base_reg, c->max_reg); c 534 drivers/base/swnode.c struct swnode *c = to_swnode(child); c 537 drivers/base/swnode.c (c && list_is_last(&c->entry, &p->children))) c 540 drivers/base/swnode.c if (c) c 541 drivers/base/swnode.c c = list_next_entry(c, entry); c 543 drivers/base/swnode.c c = list_first_entry(&p->children, struct swnode, entry); c 544 drivers/base/swnode.c return &c->fwnode; c 1091 drivers/block/amiflop.c register unsigned char *CRCT1, *CRCT2, *data, c, crch, crcl; c 1099 drivers/block/amiflop.c c = (*data++) ^ crch; c 1100 drivers/block/amiflop.c crch = CRCT1[c] ^ crcl; c 1101 drivers/block/amiflop.c crcl = CRCT2[c]; c 119 drivers/block/aoe/aoeblk.c char c; c 134 drivers/block/aoe/aoeblk.c c = '\t'; c 147 drivers/block/aoe/aoeblk.c seq_printf(s, "%c%s", c, ifp->nd->name); c 148 drivers/block/aoe/aoeblk.c c = ','; c 796 drivers/block/drbd/drbd_actlog.c unsigned long c; c 804 drivers/block/drbd/drbd_actlog.c c = drbd_bm_count_bits(device, sbnr, tbnr); c 806 drivers/block/drbd/drbd_actlog.c c = drbd_bm_clear_bits(device, sbnr, tbnr); c 808 drivers/block/drbd/drbd_actlog.c c = drbd_bm_set_bits(device, sbnr, tbnr); c 810 drivers/block/drbd/drbd_actlog.c if (c) { c 812 drivers/block/drbd/drbd_actlog.c cleared += update_rs_extent(device, BM_BIT_TO_EXT(sbnr), c, mode); c 814 drivers/block/drbd/drbd_actlog.c count += c; c 584 drivers/block/drbd/drbd_bitmap.c static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len) c 606 drivers/block/drbd/drbd_bitmap.c memset(bm, c, do_now * sizeof(long)); c 1358 drivers/block/drbd/drbd_bitmap.c int c = 0; c 1371 drivers/block/drbd/drbd_bitmap.c if (c < 0) c 1373 drivers/block/drbd/drbd_bitmap.c else if (c > 0) c 1375 drivers/block/drbd/drbd_bitmap.c changed_total += c; c 1376 
drivers/block/drbd/drbd_bitmap.c c = 0; c 1381 drivers/block/drbd/drbd_bitmap.c c += (0 == __test_and_set_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr)); c 1383 drivers/block/drbd/drbd_bitmap.c c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr)); c 1387 drivers/block/drbd/drbd_bitmap.c if (c < 0) c 1389 drivers/block/drbd/drbd_bitmap.c else if (c > 0) c 1391 drivers/block/drbd/drbd_bitmap.c changed_total += c; c 1405 drivers/block/drbd/drbd_bitmap.c int c = 0; c 1416 drivers/block/drbd/drbd_bitmap.c c = __bm_change_bits_to(device, s, e, val); c 1419 drivers/block/drbd/drbd_bitmap.c return c; c 1584 drivers/block/drbd/drbd_bitmap.c int c = 0; c 1607 drivers/block/drbd/drbd_bitmap.c c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr)); c 1614 drivers/block/drbd/drbd_bitmap.c return c; c 204 drivers/block/drbd/drbd_int.h const char *direction, struct bm_xfer_ctx *c); c 206 drivers/block/drbd/drbd_int.h static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c) c 216 drivers/block/drbd/drbd_int.h c->word_offset = c->bit_offset >> 6; c 218 drivers/block/drbd/drbd_int.h c->word_offset = c->bit_offset >> 5; c 219 drivers/block/drbd/drbd_int.h c->word_offset &= ~(1UL); c 792 drivers/block/drbd/drbd_int.h #define update_worker_timing_details(c, cb) \ c 793 drivers/block/drbd/drbd_int.h __update_timing_details(c->w_timing_details, &c->w_cb_nr, cb, __func__ , __LINE__ ) c 794 drivers/block/drbd/drbd_int.h #define update_receiver_timing_details(c, cb) \ c 795 drivers/block/drbd/drbd_int.h __update_timing_details(c->r_timing_details, &c->r_cb_nr, cb, __func__ , __LINE__ ) c 1111 drivers/block/drbd/drbd_main.c struct bm_xfer_ctx *c) c 1128 drivers/block/drbd/drbd_main.c if (c->bit_offset >= c->bm_bits) c 1145 drivers/block/drbd/drbd_main.c tmp = (toggle == 0) ? 
_drbd_bm_find_next_zero(device, c->bit_offset) c 1146 drivers/block/drbd/drbd_main.c : _drbd_bm_find_next(device, c->bit_offset); c 1148 drivers/block/drbd/drbd_main.c tmp = c->bm_bits; c 1149 drivers/block/drbd/drbd_main.c rl = tmp - c->bit_offset; c 1167 drivers/block/drbd/drbd_main.c "t:%u bo:%lu\n", toggle, c->bit_offset); c 1181 drivers/block/drbd/drbd_main.c c->bit_offset = tmp; c 1182 drivers/block/drbd/drbd_main.c } while (c->bit_offset < c->bm_bits); c 1189 drivers/block/drbd/drbd_main.c c->bit_offset -= plain_bits; c 1190 drivers/block/drbd/drbd_main.c bm_xfer_ctx_bit_to_word_offset(c); c 1191 drivers/block/drbd/drbd_main.c c->bit_offset = c->word_offset * BITS_PER_LONG; c 1197 drivers/block/drbd/drbd_main.c bm_xfer_ctx_bit_to_word_offset(c); c 1212 drivers/block/drbd/drbd_main.c send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c) c 1220 drivers/block/drbd/drbd_main.c DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c); c 1229 drivers/block/drbd/drbd_main.c c->packets[0]++; c 1230 drivers/block/drbd/drbd_main.c c->bytes[0] += header_size + sizeof(*p) + len; c 1232 drivers/block/drbd/drbd_main.c if (c->bit_offset >= c->bm_bits) c 1243 drivers/block/drbd/drbd_main.c c->bm_words - c->word_offset); c 1246 drivers/block/drbd/drbd_main.c drbd_bm_get_lel(device, c->word_offset, num_words, p); c 1248 drivers/block/drbd/drbd_main.c c->word_offset += num_words; c 1249 drivers/block/drbd/drbd_main.c c->bit_offset = c->word_offset * BITS_PER_LONG; c 1251 drivers/block/drbd/drbd_main.c c->packets[1]++; c 1252 drivers/block/drbd/drbd_main.c c->bytes[1] += header_size + len; c 1254 drivers/block/drbd/drbd_main.c if (c->bit_offset > c->bm_bits) c 1255 drivers/block/drbd/drbd_main.c c->bit_offset = c->bm_bits; c 1259 drivers/block/drbd/drbd_main.c INFO_bm_xfer_stats(device, "send", c); c 1270 drivers/block/drbd/drbd_main.c struct bm_xfer_ctx c; c 1293 drivers/block/drbd/drbd_main.c c = (struct bm_xfer_ctx) { c 1299 drivers/block/drbd/drbd_main.c err = send_bitmap_rle_or_plain(device, &c); c 4680 drivers/block/drbd/drbd_receiver.c unsigned long *p, struct bm_xfer_ctx *c) c 4685 drivers/block/drbd/drbd_receiver.c c->bm_words - c->word_offset); c 4699 drivers/block/drbd/drbd_receiver.c drbd_bm_merge_lel(peer_device->device, c->word_offset, num_words, p); c 4701 drivers/block/drbd/drbd_receiver.c c->word_offset += num_words; c 4702 drivers/block/drbd/drbd_receiver.c c->bit_offset = c->word_offset * BITS_PER_LONG; c 4703 drivers/block/drbd/drbd_receiver.c if (c->bit_offset > c->bm_bits) c 4704 drivers/block/drbd/drbd_receiver.c c->bit_offset = c->bm_bits; c 4733 drivers/block/drbd/drbd_receiver.c struct bm_xfer_ctx *c, c 4740 drivers/block/drbd/drbd_receiver.c unsigned long s = c->bit_offset; c 4759 drivers/block/drbd/drbd_receiver.c if (e >= c->bm_bits) { c 4787 drivers/block/drbd/drbd_receiver.c c->bit_offset = s; c 4788 drivers/block/drbd/drbd_receiver.c bm_xfer_ctx_bit_to_word_offset(c); c 4790 drivers/block/drbd/drbd_receiver.c return (s != c->bm_bits); c 4802 drivers/block/drbd/drbd_receiver.c struct bm_xfer_ctx *c, c 4806 drivers/block/drbd/drbd_receiver.c return recv_bm_rle_bits(peer_device, p, c, len - sizeof(*p)); c 4818 drivers/block/drbd/drbd_receiver.c const char *direction, struct bm_xfer_ctx *c) c 4824 drivers/block/drbd/drbd_receiver.c header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) + c 4825 drivers/block/drbd/drbd_receiver.c c->bm_words * sizeof(unsigned long); c 4826 drivers/block/drbd/drbd_receiver.c unsigned int total = c->bytes[0] + c->bytes[1]; 
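[Aside, not part of the generated index: the drbd_int.h entries above (c 204, c 206, c 216-219) show how DRBD converts a bitmap bit offset into a word offset while sending or receiving the bitmap. A minimal, self-contained C sketch of just that conversion follows; the struct is reduced to the two fields visible in these index entries, and the struct name, the standalone BITS_PER_LONG definition, and the use of a runtime if instead of the kernel's preprocessor conditional are assumptions made for illustration only — the real definition is struct bm_xfer_ctx in drivers/block/drbd/drbd_int.h.]

	/* Illustrative sketch only; not the kernel's definition. */
	#include <limits.h>

	#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

	struct bm_xfer_ctx_sketch {            /* hypothetical reduced struct */
		unsigned long bit_offset;      /* current position, in bits  */
		unsigned long word_offset;     /* same position, in words    */
	};

	static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx_sketch *c)
	{
		if (BITS_PER_LONG == 64) {
			c->word_offset = c->bit_offset >> 6;   /* divide by 64 */
		} else {
			c->word_offset = c->bit_offset >> 5;   /* divide by 32 */
			c->word_offset &= ~(1UL);              /* keep 64-bit aligned, as in the index */
		}
	}

[The two branches mirror the snippets at drbd_int.h lines 216-219 above: on 32-bit builds the word offset is additionally rounded down to an even word so that transfers stay aligned to 64-bit units.]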
c 4848 drivers/block/drbd/drbd_receiver.c c->bytes[1], c->packets[1], c 4849 drivers/block/drbd/drbd_receiver.c c->bytes[0], c->packets[0], c 4865 drivers/block/drbd/drbd_receiver.c struct bm_xfer_ctx c; c 4877 drivers/block/drbd/drbd_receiver.c c = (struct bm_xfer_ctx) { c 4884 drivers/block/drbd/drbd_receiver.c err = receive_bitmap_plain(peer_device, pi->size, pi->data, &c); c 4903 drivers/block/drbd/drbd_receiver.c err = decode_bitmap_c(peer_device, p, &c, pi->size); c 4910 drivers/block/drbd/drbd_receiver.c c.packets[pi->cmd == P_BITMAP]++; c 4911 drivers/block/drbd/drbd_receiver.c c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(connection) + pi->size; c 4923 drivers/block/drbd/drbd_receiver.c INFO_bm_xfer_stats(device, "receive", &c); c 72 drivers/block/paride/aten.c { int k, a, b, c, d; c 80 drivers/block/paride/aten.c w2(0); d = r1(); w0(0x48); c = r1(); c 81 drivers/block/paride/aten.c buf[2*k] = j44(c,d); c 66 drivers/block/paride/fit2.c { int k, a, b, c, d; c 74 drivers/block/paride/fit2.c w0(3); c = r1(); w0(2); d = r1(); c 76 drivers/block/paride/fit2.c buf[4*k+1] = j44(d,c); c 80 drivers/block/paride/fit2.c w0(1); c = r1(); w0(0); d = r1(); c 81 drivers/block/paride/fit2.c buf[4*k+2] = j44(d,c); c 97 drivers/block/paride/fit3.c { int k, a, b, c, d; c 105 drivers/block/paride/fit3.c w2(0xc); c = r1(); c 108 drivers/block/paride/fit3.c buf[2*k+1] = j44(c,d); c 488 drivers/block/paride/pcd.c int r, c; c 496 drivers/block/paride/pcd.c c = 2; c 501 drivers/block/paride/pcd.c c = buf[2] & 0xf; c 503 drivers/block/paride/pcd.c c | ((buf[12] & 0xff) << 8) | ((buf[13] & 0xff) << 16); c 505 drivers/block/paride/pcd.c if ((c == 2) || (c == 6)) c 779 drivers/block/paride/pf.c static int pf_start(struct pf_unit *pf, int cmd, int b, int c) c 789 drivers/block/paride/pf.c io_cmd[8] = c & 0xff; c 790 drivers/block/paride/pf.c io_cmd[7] = (c >> 8) & 0xff; c 792 drivers/block/paride/pf.c i = pf_command(pf, io_cmd, c * 512, "start i/o"); c 428 drivers/block/paride/pg.c char c = *buf++; c 429 drivers/block/paride/pg.c if (c != ' ' && c != l) c 430 drivers/block/paride/pg.c l = *targ++ = c; c 353 drivers/block/pktcdvd.c static ssize_t device_map_show(struct class *c, struct class_attribute *attr, c 374 drivers/block/pktcdvd.c static ssize_t add_store(struct class *c, struct class_attribute *attr, c 395 drivers/block/pktcdvd.c static ssize_t remove_store(struct class *c, struct class_attribute *attr, c 320 drivers/block/ps3disk.c unsigned int c; c 323 drivers/block/ps3disk.c c = id[ofs] >> 8; c 324 drivers/block/ps3disk.c *s = c; c 327 drivers/block/ps3disk.c c = id[ofs] & 0xff; c 328 drivers/block/ps3disk.c *s = c; c 879 drivers/block/rbd.c static int parse_rbd_opts_token(char *c, void *private) c 885 drivers/block/rbd.c token = match_token(c, rbd_opts_tokens, argstr); c 889 drivers/block/rbd.c pr_err("bad option arg (not int) at '%s'\n", c); c 117 drivers/bluetooth/hci_bcsp.c static void bcsp_slip_one_byte(struct sk_buff *skb, u8 c) c 122 drivers/bluetooth/hci_bcsp.c switch (c) { c 130 drivers/bluetooth/hci_bcsp.c skb_put_data(skb, &c, 1); c 71 drivers/bluetooth/hci_h5.c int (*rx_func)(struct hci_uart *hu, u8 c); c 406 drivers/bluetooth/hci_h5.c static int h5_rx_crc(struct hci_uart *hu, unsigned char c) c 413 drivers/bluetooth/hci_h5.c static int h5_rx_payload(struct hci_uart *hu, unsigned char c) c 428 drivers/bluetooth/hci_h5.c static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c) c 464 drivers/bluetooth/hci_h5.c static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c) c 468 
drivers/bluetooth/hci_h5.c if (c == SLIP_DELIMITER) c 486 drivers/bluetooth/hci_h5.c static int h5_rx_delimiter(struct hci_uart *hu, unsigned char c) c 490 drivers/bluetooth/hci_h5.c if (c == SLIP_DELIMITER) c 496 drivers/bluetooth/hci_h5.c static void h5_unslip_one_byte(struct h5 *h5, unsigned char c) c 499 drivers/bluetooth/hci_h5.c const u8 *byte = &c; c 501 drivers/bluetooth/hci_h5.c if (!test_bit(H5_RX_ESC, &h5->flags) && c == SLIP_ESC) { c 507 drivers/bluetooth/hci_h5.c switch (c) { c 515 drivers/bluetooth/hci_h5.c BT_ERR("Invalid esc byte 0x%02hhx", c); c 616 drivers/bluetooth/hci_h5.c static void h5_slip_one_byte(struct sk_buff *skb, u8 c) c 621 drivers/bluetooth/hci_h5.c switch (c) { c 629 drivers/bluetooth/hci_h5.c skb_put_data(skb, &c, 1); c 20 drivers/bus/omap-ocp2scp.c static int ocp2scp_remove_devices(struct device *dev, void *c) c 156 drivers/cdrom/gdrom.c int c; c 171 drivers/cdrom/gdrom.c for (c = 0; c < 40; c++) c 172 drivers/cdrom/gdrom.c data[c] = __raw_readw(GDROM_DATA_REG); c 366 drivers/char/applicom.c int c; c 402 drivers/char/applicom.c for (c = 0; c < sizeof(struct st_ram_io);) { c 404 drivers/char/applicom.c printk("\n%5.5X: %2.2X", c, ((unsigned char *) &st_loc)[c]); c 406 drivers/char/applicom.c for (c++; c % 8 && c < sizeof(struct st_ram_io); c++) { c 407 drivers/char/applicom.c printk(" %2.2X", ((unsigned char *) &st_loc)[c]); c 413 drivers/char/applicom.c for (c = 0; c < sizeof(struct mailbox);) { c 414 drivers/char/applicom.c printk("\n%5.5X: %2.2X", c, ((unsigned char *) &tmpmailbox)[c]); c 416 drivers/char/applicom.c for (c++; c % 8 && c < sizeof(struct mailbox); c++) { c 417 drivers/char/applicom.c printk(" %2.2X", ((unsigned char *) &tmpmailbox)[c]); c 468 drivers/char/applicom.c int c; c 470 drivers/char/applicom.c for (c = 0; c < sizeof(struct mailbox); c++) c 491 drivers/char/applicom.c int c; c 499 drivers/char/applicom.c int c; c 501 drivers/char/applicom.c for (c = 0; c < sizeof(struct mailbox); c++) c 517 drivers/char/applicom.c for (c = 0; c < sizeof(struct st_ram_io);) { c 518 drivers/char/applicom.c printk("\n%5.5X: %2.2X", c, ((unsigned char *)st_loc)[c]); c 520 drivers/char/applicom.c for (c++; c % 8 && c < sizeof(struct st_ram_io); c++) { c 521 drivers/char/applicom.c printk(" %2.2X", ((unsigned char *)st_loc)[c]); c 527 drivers/char/applicom.c for (c = 0; c < sizeof(struct mailbox);) { c 528 drivers/char/applicom.c printk("\n%5.5X: %2.2X", c, ((unsigned char *)mailbox)[c]); c 530 drivers/char/applicom.c for (c++; c % 8 && c < sizeof(struct mailbox); c++) { c 531 drivers/char/applicom.c printk(" %2.2X", ((unsigned char *)mailbox)[c]); c 131 drivers/char/hw_random/via-rng.c struct cpuinfo_x86 *c = &cpu_data(0); c 138 drivers/char/hw_random/via-rng.c if (((c->x86 == 6) && (c->x86_model >= 0x0f)) || (c->x86 > 6)){ c 165 drivers/char/hw_random/via-rng.c if ((c->x86_model == 9) && (c->x86_stepping > 7)) c 169 drivers/char/hw_random/via-rng.c if (c->x86_model >= 10) c 143 drivers/char/ipmi/bt-bmc.c static void bt_write(struct bt_bmc *bt_bmc, u8 c) c 145 drivers/char/ipmi/bt-bmc.c bt_outb(bt_bmc, c, BT_BMC2HOST); c 303 drivers/char/ipmi/ipmi_msghandler.c struct ipmi_channel c[IPMI_MAX_CHANNELS]; c 1957 drivers/char/ipmi/ipmi_msghandler.c chans = READ_ONCE(intf->channel_list)->c; c 2096 drivers/char/ipmi/ipmi_msghandler.c chans = READ_ONCE(intf->channel_list)->c; c 3271 drivers/char/ipmi/ipmi_msghandler.c intf->wchannels[set].c[0].medium c 3273 drivers/char/ipmi/ipmi_msghandler.c intf->wchannels[set].c[0].protocol c 3288 
drivers/char/ipmi/ipmi_msghandler.c chans = intf->wchannels[set].c; c 3357 drivers/char/ipmi/ipmi_msghandler.c intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; c 3358 drivers/char/ipmi/ipmi_msghandler.c intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB; c 4265 drivers/char/ipmi/ipmi_msghandler.c chans = READ_ONCE(intf->channel_list)->c; c 4358 drivers/char/ipmi/ipmi_msghandler.c chans = READ_ONCE(intf->channel_list)->c; c 760 drivers/char/ipmi/ipmi_watchdog.c char c; c 762 drivers/char/ipmi/ipmi_watchdog.c if (get_user(c, buf + i)) c 764 drivers/char/ipmi/ipmi_watchdog.c if (c == 'V') c 654 drivers/char/mem.c char c; c 656 drivers/char/mem.c if (__get_user(c, tmp)) { c 661 drivers/char/mem.c outb(c, i); c 103 drivers/char/misc.c struct miscdevice *c; c 109 drivers/char/misc.c list_for_each_entry(c, &misc_list, list) { c 110 drivers/char/misc.c if (c->minor == minor) { c 111 drivers/char/misc.c new_fops = fops_get(c->fops); c 121 drivers/char/misc.c list_for_each_entry(c, &misc_list, list) { c 122 drivers/char/misc.c if (c->minor == minor) { c 123 drivers/char/misc.c new_fops = fops_get(c->fops); c 136 drivers/char/misc.c file->private_data = c; c 193 drivers/char/misc.c struct miscdevice *c; c 195 drivers/char/misc.c list_for_each_entry(c, &misc_list, list) { c 196 drivers/char/misc.c if (c->minor == misc->minor) { c 257 drivers/char/misc.c struct miscdevice *c = dev_get_drvdata(dev); c 259 drivers/char/misc.c if (mode && c->mode) c 260 drivers/char/misc.c *mode = c->mode; c 261 drivers/char/misc.c if (c->nodename) c 262 drivers/char/misc.c return kstrdup(c->nodename, GFP_KERNEL); c 52 drivers/char/nsc_gpio.c char c; c 53 drivers/char/nsc_gpio.c if (get_user(c, data + i)) c 55 drivers/char/nsc_gpio.c switch (c) { c 95 drivers/char/nsc_gpio.c m, (int)c); c 92 drivers/char/nvram.c unsigned char c; c 95 drivers/char/nvram.c c = __nvram_read_byte(i); c 97 drivers/char/nvram.c return c; c 101 drivers/char/nvram.c static void __nvram_write_byte(unsigned char c, int i) c 103 drivers/char/nvram.c CMOS_WRITE(c, NVRAM_FIRST_BYTE + i); c 106 drivers/char/nvram.c static void pc_nvram_write_byte(unsigned char c, int i) c 111 drivers/char/nvram.c __nvram_write_byte(c, i); c 535 drivers/char/nwflash.c char c, c1; c 536 drivers/char/nwflash.c if (__get_user(c, buf)) c 539 drivers/char/nwflash.c if ((c1 = *pWritePtr++) != c) { c 541 drivers/char/nwflash.c pWritePtr - FLASH_BASE, c1, c); c 1001 drivers/char/pcmcia/synclink_cs.c int c; c 1022 drivers/char/pcmcia/synclink_cs.c c = min(2, min_t(int, fifo_count, min(info->tx_count, TXBUFSIZE - info->tx_get))); c 1024 drivers/char/pcmcia/synclink_cs.c if (c == 1) { c 1030 drivers/char/pcmcia/synclink_cs.c info->tx_count -= c; c 1031 drivers/char/pcmcia/synclink_cs.c info->tx_get = (info->tx_get + c) & (TXBUFSIZE - 1); c 1032 drivers/char/pcmcia/synclink_cs.c fifo_count -= c; c 1557 drivers/char/pcmcia/synclink_cs.c int c, ret = 0; c 1581 drivers/char/pcmcia/synclink_cs.c c = min(count, c 1584 drivers/char/pcmcia/synclink_cs.c if (c <= 0) c 1587 drivers/char/pcmcia/synclink_cs.c memcpy(info->tx_buf + info->tx_put, buf, c); c 1590 drivers/char/pcmcia/synclink_cs.c info->tx_put = (info->tx_put + c) & (TXBUFSIZE-1); c 1591 drivers/char/pcmcia/synclink_cs.c info->tx_count += c; c 1594 drivers/char/pcmcia/synclink_cs.c buf += c; c 1595 drivers/char/pcmcia/synclink_cs.c count -= c; c 1596 drivers/char/pcmcia/synclink_cs.c ret += c; c 668 drivers/char/random.c __u32 c = f->pool[2], d = f->pool[3]; c 670 drivers/char/random.c a += b; c += d; c 
672 drivers/char/random.c d ^= a; b ^= c; c 674 drivers/char/random.c a += b; c += d; c 676 drivers/char/random.c d ^= a; b ^= c; c 678 drivers/char/random.c a += b; c += d; c 680 drivers/char/random.c d ^= a; b ^= c; c 682 drivers/char/random.c a += b; c += d; c 684 drivers/char/random.c d ^= a; b ^= c; c 687 drivers/char/random.c f->pool[2] = c; f->pool[3] = d; c 902 drivers/char/sonypi.c unsigned char c; c 914 drivers/char/sonypi.c (kfifo_out_locked(&sonypi_device.fifo, &c, sizeof(c), c 915 drivers/char/sonypi.c &sonypi_device.fifo_lock) == sizeof(c))) { c 916 drivers/char/sonypi.c if (put_user(c, buf++)) c 189 drivers/char/tb0219.c char c; c 204 drivers/char/tb0219.c if (get_user(c, data + i)) c 209 drivers/char/tb0219.c retval = set_led(c); c 212 drivers/char/tb0219.c retval = set_gpio_output_pin(minor - 32, c); c 207 drivers/char/tpm/tpm.h __be32 c; c 371 drivers/char/tpm/tpm1-cmd.c timeout_chip[2] = be32_to_cpu(cap.timeout.c); c 123 drivers/clk/analogbits/wrpll-cln28hpc.c static u8 __wrpll_calc_fbdiv(const struct wrpll_cfg *c) c 125 drivers/clk/analogbits/wrpll-cln28hpc.c return (c->flags & WRPLL_FLAGS_INT_FEEDBACK_MASK) ? 2 : 1; c 183 drivers/clk/analogbits/wrpll-cln28hpc.c static int __wrpll_update_parent_rate(struct wrpll_cfg *c, c 191 drivers/clk/analogbits/wrpll-cln28hpc.c c->parent_rate = parent_rate; c 193 drivers/clk/analogbits/wrpll-cln28hpc.c c->max_r = min_t(u8, MAX_DIVR_DIVISOR, max_r_for_parent); c 195 drivers/clk/analogbits/wrpll-cln28hpc.c c->init_r = DIV_ROUND_UP_ULL(parent_rate, MAX_POST_DIVR_FREQ); c 221 drivers/clk/analogbits/wrpll-cln28hpc.c int wrpll_configure_for_rate(struct wrpll_cfg *c, u32 target_rate, c 230 drivers/clk/analogbits/wrpll-cln28hpc.c if (c->flags == 0) { c 236 drivers/clk/analogbits/wrpll-cln28hpc.c if (parent_rate != c->parent_rate) { c 237 drivers/clk/analogbits/wrpll-cln28hpc.c if (__wrpll_update_parent_rate(c, parent_rate)) { c 244 drivers/clk/analogbits/wrpll-cln28hpc.c c->flags &= ~WRPLL_FLAGS_RESET_MASK; c 248 drivers/clk/analogbits/wrpll-cln28hpc.c c->flags |= WRPLL_FLAGS_BYPASS_MASK; c 252 drivers/clk/analogbits/wrpll-cln28hpc.c c->flags &= ~WRPLL_FLAGS_BYPASS_MASK; c 258 drivers/clk/analogbits/wrpll-cln28hpc.c c->divq = divq; c 263 drivers/clk/analogbits/wrpll-cln28hpc.c fbdiv = __wrpll_calc_fbdiv(c); c 272 drivers/clk/analogbits/wrpll-cln28hpc.c for (r = c->init_r; r <= c->max_r; ++r) { c 298 drivers/clk/analogbits/wrpll-cln28hpc.c c->divr = best_r - 1; c 299 drivers/clk/analogbits/wrpll-cln28hpc.c c->divf = best_f - 1; c 307 drivers/clk/analogbits/wrpll-cln28hpc.c c->range = range; c 330 drivers/clk/analogbits/wrpll-cln28hpc.c unsigned long wrpll_calc_output_rate(const struct wrpll_cfg *c, c 336 drivers/clk/analogbits/wrpll-cln28hpc.c if (c->flags & WRPLL_FLAGS_EXT_FEEDBACK_MASK) { c 341 drivers/clk/analogbits/wrpll-cln28hpc.c fbdiv = __wrpll_calc_fbdiv(c); c 342 drivers/clk/analogbits/wrpll-cln28hpc.c n = parent_rate * fbdiv * (c->divf + 1); c 343 drivers/clk/analogbits/wrpll-cln28hpc.c n = div_u64(n, c->divr + 1); c 344 drivers/clk/analogbits/wrpll-cln28hpc.c n >>= c->divq; c 361 drivers/clk/analogbits/wrpll-cln28hpc.c unsigned int wrpll_calc_max_lock_us(const struct wrpll_cfg *c) c 26 drivers/clk/clk-bd718x7.c struct bd718xx_clk *c = container_of(hw, struct bd718xx_clk, hw); c 28 drivers/clk/clk-bd718x7.c return regmap_update_bits(c->mfd->regmap, c->reg, c->mask, status); c 34 drivers/clk/clk-bd718x7.c struct bd718xx_clk *c = container_of(hw, struct bd718xx_clk, hw); c 38 drivers/clk/clk-bd718x7.c dev_dbg(&c->pdev->dev, "Failed 
to disable 32K clk (%d)\n", rv); c 50 drivers/clk/clk-bd718x7.c struct bd718xx_clk *c = container_of(hw, struct bd718xx_clk, hw); c 52 drivers/clk/clk-bd718x7.c rval = regmap_read(c->mfd->regmap, c->reg, &enabled); c 57 drivers/clk/clk-bd718x7.c return enabled & c->mask; c 68 drivers/clk/clk-bd718x7.c struct bd718xx_clk *c; c 78 drivers/clk/clk-bd718x7.c c = devm_kzalloc(&pdev->dev, sizeof(*c), GFP_KERNEL); c 79 drivers/clk/clk-bd718x7.c if (!c) c 93 drivers/clk/clk-bd718x7.c c->reg = BD718XX_REG_OUT32K; c 94 drivers/clk/clk-bd718x7.c c->mask = BD718XX_OUT32K_EN; c 97 drivers/clk/clk-bd718x7.c c->reg = BD70528_REG_CLK_OUT; c 98 drivers/clk/clk-bd718x7.c c->mask = BD70528_CLK_OUT_EN_MASK; c 104 drivers/clk/clk-bd718x7.c c->mfd = mfd; c 105 drivers/clk/clk-bd718x7.c c->pdev = pdev; c 106 drivers/clk/clk-bd718x7.c c->hw.init = &init; c 111 drivers/clk/clk-bd718x7.c rval = devm_clk_hw_register(&pdev->dev, &c->hw); c 117 drivers/clk/clk-bd718x7.c &c->hw); c 121 drivers/clk/clk-devres.c struct clk **c = res; c 122 drivers/clk/clk-devres.c if (!c || !*c) { c 123 drivers/clk/clk-devres.c WARN_ON(!c || !*c); c 126 drivers/clk/clk-devres.c return *c == data; c 450 drivers/clk/clk-si5351.c unsigned long rfrac, denom, a, b, c; c 474 drivers/clk/clk-si5351.c c = 1; c 477 drivers/clk/clk-si5351.c SI5351_PLL_B_MAX, SI5351_PLL_C_MAX, &b, &c); c 480 drivers/clk/clk-si5351.c hwdata->params.p3 = c; c 481 drivers/clk/clk-si5351.c hwdata->params.p2 = (128 * b) % c; c 483 drivers/clk/clk-si5351.c hwdata->params.p1 += (128 * b / c); c 489 drivers/clk/clk-si5351.c do_div(lltmp, c); c 496 drivers/clk/clk-si5351.c __func__, clk_hw_get_name(hw), a, b, c, c 649 drivers/clk/clk-si5351.c unsigned long a, b, c; c 680 drivers/clk/clk-si5351.c c = 1; c 692 drivers/clk/clk-si5351.c c = 1; c 717 drivers/clk/clk-si5351.c c = 1; c 721 drivers/clk/clk-si5351.c &b, &c); c 726 drivers/clk/clk-si5351.c lltmp *= c; c 727 drivers/clk/clk-si5351.c do_div(lltmp, a * c + b); c 740 drivers/clk/clk-si5351.c hwdata->params.p3 = c; c 741 drivers/clk/clk-si5351.c hwdata->params.p2 = (128 * b) % c; c 743 drivers/clk/clk-si5351.c hwdata->params.p1 += (128 * b / c); c 749 drivers/clk/clk-si5351.c __func__, clk_hw_get_name(hw), a, b, c, divby4, c 2890 drivers/clk/clk.c static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, c 2897 drivers/clk/clk.c 30 - level * 3, c->name, c 2898 drivers/clk/clk.c c->enable_count, c->prepare_count, c->protect_count, c 2899 drivers/clk/clk.c clk_core_get_rate(c), clk_core_get_accuracy(c)); c 2901 drivers/clk/clk.c phase = clk_core_get_phase(c); c 2907 drivers/clk/clk.c seq_printf(s, " %6d\n", clk_core_get_scaled_duty_cycle(c, 100000)); c 2910 drivers/clk/clk.c static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c, c 2915 drivers/clk/clk.c clk_summary_show_one(s, c, level); c 2917 drivers/clk/clk.c hlist_for_each_entry(child, &c->children, child_node) c 2923 drivers/clk/clk.c struct clk_core *c; c 2933 drivers/clk/clk.c hlist_for_each_entry(c, *lists, child_node) c 2934 drivers/clk/clk.c clk_summary_show_subtree(s, c, 0); c 2942 drivers/clk/clk.c static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) c 2947 drivers/clk/clk.c clk_core_get_boundaries(c, &min_rate, &max_rate); c 2950 drivers/clk/clk.c seq_printf(s, "\"%s\": { ", c->name); c 2951 drivers/clk/clk.c seq_printf(s, "\"enable_count\": %d,", c->enable_count); c 2952 drivers/clk/clk.c seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); c 2953 drivers/clk/clk.c seq_printf(s, 
"\"protect_count\": %d,", c->protect_count); c 2954 drivers/clk/clk.c seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c)); c 2957 drivers/clk/clk.c seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c)); c 2958 drivers/clk/clk.c phase = clk_core_get_phase(c); c 2962 drivers/clk/clk.c clk_core_get_scaled_duty_cycle(c, 100000)); c 2965 drivers/clk/clk.c static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level) c 2969 drivers/clk/clk.c clk_dump_one(s, c, level); c 2971 drivers/clk/clk.c hlist_for_each_entry(child, &c->children, child_node) { c 2981 drivers/clk/clk.c struct clk_core *c; c 2989 drivers/clk/clk.c hlist_for_each_entry(c, *lists, child_node) { c 2993 drivers/clk/clk.c clk_dump_subtree(s, c, 0); c 4034 drivers/clk/clk.c struct clk *c = res; c 4035 drivers/clk/clk.c if (WARN_ON(!c)) c 4037 drivers/clk/clk.c return c == data; c 25 drivers/clk/davinci/psc.h #define LPSC_CLKDEV(c, d) { \ c 26 drivers/clk/davinci/psc.h .con_id = (c), \ c 30 drivers/clk/davinci/psc.h #define LPSC_CLKDEV1(n, c, d) \ c 32 drivers/clk/davinci/psc.h LPSC_CLKDEV((c), (d)), \ c 69 drivers/clk/davinci/psc.h #define LPSC(m, d, n, p, c, f) \ c 73 drivers/clk/davinci/psc.h .cdevs = (c), \ c 69 drivers/clk/mediatek/clk-pll.c u8 c = 0; c 79 drivers/clk/mediatek/clk-pll.c c = 1; c 83 drivers/clk/mediatek/clk-pll.c if (c) c 193 drivers/clk/meson/axg-audio.c static AUD_MST_SCLK_PRE_EN(c, AUDIO_MST_C_SCLK_CTRL0); c 229 drivers/clk/meson/axg-audio.c static AUD_MST_SCLK_DIV(c, AUDIO_MST_C_SCLK_CTRL0); c 240 drivers/clk/meson/axg-audio.c static AUD_MST_SCLK_POST_EN(c, AUDIO_MST_C_SCLK_CTRL0); c 280 drivers/clk/meson/axg-audio.c static AUD_MST_SCLK(c, AUDIO_MST_C_SCLK_CTRL1); c 291 drivers/clk/meson/axg-audio.c static AUD_MST_LRCLK_DIV(c, AUDIO_MST_C_SCLK_CTRL0); c 302 drivers/clk/meson/axg-audio.c static AUD_MST_LRCLK(c, AUDIO_MST_C_SCLK_CTRL1); c 140 drivers/clk/qcom/clk-rpmh.c static inline bool has_state_changed(struct clk_rpmh *c, u32 state) c 142 drivers/clk/qcom/clk-rpmh.c return (c->last_sent_aggr_state & BIT(state)) c 143 drivers/clk/qcom/clk-rpmh.c != (c->aggr_state & BIT(state)); c 146 drivers/clk/qcom/clk-rpmh.c static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c) c 153 drivers/clk/qcom/clk-rpmh.c cmd.addr = c->res_addr; c 154 drivers/clk/qcom/clk-rpmh.c cmd_state = c->aggr_state; c 155 drivers/clk/qcom/clk-rpmh.c on_val = c->res_on_val; c 158 drivers/clk/qcom/clk-rpmh.c if (has_state_changed(c, state)) { c 162 drivers/clk/qcom/clk-rpmh.c ret = rpmh_write_async(c->dev, state, &cmd, 1); c 164 drivers/clk/qcom/clk-rpmh.c dev_err(c->dev, "set %s state of %s failed: (%d)\n", c 167 drivers/clk/qcom/clk-rpmh.c "wake" : "active", c->res_name, ret); c 173 drivers/clk/qcom/clk-rpmh.c c->last_sent_aggr_state = c->aggr_state; c 174 drivers/clk/qcom/clk-rpmh.c c->peer->last_sent_aggr_state = c->last_sent_aggr_state; c 182 drivers/clk/qcom/clk-rpmh.c static int clk_rpmh_aggregate_state_send_command(struct clk_rpmh *c, c 188 drivers/clk/qcom/clk-rpmh.c if (enable == c->state) c 191 drivers/clk/qcom/clk-rpmh.c c->state = enable ? 
c->valid_state_mask : 0; c 192 drivers/clk/qcom/clk-rpmh.c c->aggr_state = c->state | c->peer->state; c 193 drivers/clk/qcom/clk-rpmh.c c->peer->aggr_state = c->aggr_state; c 195 drivers/clk/qcom/clk-rpmh.c ret = clk_rpmh_send_aggregate_command(c); c 200 drivers/clk/qcom/clk-rpmh.c c->state = 0; c 202 drivers/clk/qcom/clk-rpmh.c c->state = c->valid_state_mask; c 204 drivers/clk/qcom/clk-rpmh.c WARN(1, "clk: %s failed to %s\n", c->res_name, c 211 drivers/clk/qcom/clk-rpmh.c struct clk_rpmh *c = to_clk_rpmh(hw); c 215 drivers/clk/qcom/clk-rpmh.c ret = clk_rpmh_aggregate_state_send_command(c, true); c 223 drivers/clk/qcom/clk-rpmh.c struct clk_rpmh *c = to_clk_rpmh(hw); c 226 drivers/clk/qcom/clk-rpmh.c clk_rpmh_aggregate_state_send_command(c, false); c 247 drivers/clk/qcom/clk-rpmh.c static int clk_rpmh_bcm_send_cmd(struct clk_rpmh *c, bool enable) c 258 drivers/clk/qcom/clk-rpmh.c if (c->aggr_state) c 259 drivers/clk/qcom/clk-rpmh.c cmd_state = c->aggr_state; c 262 drivers/clk/qcom/clk-rpmh.c if (c->last_sent_aggr_state == cmd_state) { c 267 drivers/clk/qcom/clk-rpmh.c cmd.addr = c->res_addr; c 270 drivers/clk/qcom/clk-rpmh.c ret = rpmh_write_async(c->dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1); c 272 drivers/clk/qcom/clk-rpmh.c dev_err(c->dev, "set active state of %s failed: (%d)\n", c 273 drivers/clk/qcom/clk-rpmh.c c->res_name, ret); c 278 drivers/clk/qcom/clk-rpmh.c c->last_sent_aggr_state = cmd_state; c 287 drivers/clk/qcom/clk-rpmh.c struct clk_rpmh *c = to_clk_rpmh(hw); c 289 drivers/clk/qcom/clk-rpmh.c return clk_rpmh_bcm_send_cmd(c, true); c 294 drivers/clk/qcom/clk-rpmh.c struct clk_rpmh *c = to_clk_rpmh(hw); c 296 drivers/clk/qcom/clk-rpmh.c clk_rpmh_bcm_send_cmd(c, false); c 302 drivers/clk/qcom/clk-rpmh.c struct clk_rpmh *c = to_clk_rpmh(hw); c 304 drivers/clk/qcom/clk-rpmh.c c->aggr_state = rate / c->unit; c 310 drivers/clk/qcom/clk-rpmh.c clk_rpmh_bcm_send_cmd(c, true); c 324 drivers/clk/qcom/clk-rpmh.c struct clk_rpmh *c = to_clk_rpmh(hw); c 326 drivers/clk/qcom/clk-rpmh.c return c->aggr_state * c->unit; c 164 drivers/clk/renesas/clk-r8a73a4.c struct div4_clk *c; c 166 drivers/clk/renesas/clk-r8a73a4.c for (c = div4_clks; c->name; c++) { c 167 drivers/clk/renesas/clk-r8a73a4.c if (!strcmp(name, c->name)) c 170 drivers/clk/renesas/clk-r8a73a4.c if (!c->name) c 175 drivers/clk/renesas/clk-r8a73a4.c reg = c->reg; c 176 drivers/clk/renesas/clk-r8a73a4.c shift = c->shift; c 121 drivers/clk/renesas/clk-r8a7740.c struct div4_clk *c; c 122 drivers/clk/renesas/clk-r8a7740.c for (c = div4_clks; c->name; c++) { c 123 drivers/clk/renesas/clk-r8a7740.c if (!strcmp(name, c->name)) { c 126 drivers/clk/renesas/clk-r8a7740.c reg = c->reg; c 127 drivers/clk/renesas/clk-r8a7740.c shift = c->shift; c 131 drivers/clk/renesas/clk-r8a7740.c if (!c->name) c 136 drivers/clk/renesas/clk-sh73a0.c const struct div4_clk *c; c 138 drivers/clk/renesas/clk-sh73a0.c for (c = div4_clks; c->name; c++) { c 139 drivers/clk/renesas/clk-sh73a0.c if (!strcmp(name, c->name)) { c 140 drivers/clk/renesas/clk-sh73a0.c parent_name = c->parent; c 142 drivers/clk/renesas/clk-sh73a0.c reg = c->reg; c 143 drivers/clk/renesas/clk-sh73a0.c shift = c->shift; c 148 drivers/clk/renesas/clk-sh73a0.c if (!c->name) c 162 drivers/clk/sifive/fu540-prci.c struct wrpll_cfg c; c 234 drivers/clk/sifive/fu540-prci.c static void __prci_wrpll_unpack(struct wrpll_cfg *c, u32 r) c 240 drivers/clk/sifive/fu540-prci.c c->divr = v; c 244 drivers/clk/sifive/fu540-prci.c c->divf = v; c 248 drivers/clk/sifive/fu540-prci.c c->divq = v; c 252 
drivers/clk/sifive/fu540-prci.c c->range = v; c 254 drivers/clk/sifive/fu540-prci.c c->flags &= (WRPLL_FLAGS_INT_FEEDBACK_MASK | c 258 drivers/clk/sifive/fu540-prci.c c->flags |= WRPLL_FLAGS_INT_FEEDBACK_MASK; c 276 drivers/clk/sifive/fu540-prci.c static u32 __prci_wrpll_pack(const struct wrpll_cfg *c) c 280 drivers/clk/sifive/fu540-prci.c r |= c->divr << PRCI_COREPLLCFG0_DIVR_SHIFT; c 281 drivers/clk/sifive/fu540-prci.c r |= c->divf << PRCI_COREPLLCFG0_DIVF_SHIFT; c 282 drivers/clk/sifive/fu540-prci.c r |= c->divq << PRCI_COREPLLCFG0_DIVQ_SHIFT; c 283 drivers/clk/sifive/fu540-prci.c r |= c->range << PRCI_COREPLLCFG0_RANGE_SHIFT; c 306 drivers/clk/sifive/fu540-prci.c __prci_wrpll_unpack(&pwd->c, __prci_readl(pd, pwd->cfg0_offs)); c 325 drivers/clk/sifive/fu540-prci.c struct wrpll_cfg *c) c 327 drivers/clk/sifive/fu540-prci.c __prci_writel(__prci_wrpll_pack(c), pwd->cfg0_offs, pd); c 329 drivers/clk/sifive/fu540-prci.c memcpy(&pwd->c, c, sizeof(*c)); c 387 drivers/clk/sifive/fu540-prci.c return wrpll_calc_output_rate(&pwd->c, parent_rate); c 396 drivers/clk/sifive/fu540-prci.c struct wrpll_cfg c; c 398 drivers/clk/sifive/fu540-prci.c memcpy(&c, &pwd->c, sizeof(c)); c 400 drivers/clk/sifive/fu540-prci.c wrpll_configure_for_rate(&c, rate, *parent_rate); c 402 drivers/clk/sifive/fu540-prci.c return wrpll_calc_output_rate(&c, *parent_rate); c 414 drivers/clk/sifive/fu540-prci.c r = wrpll_configure_for_rate(&pwd->c, rate, parent_rate); c 421 drivers/clk/sifive/fu540-prci.c __prci_wrpll_write_cfg(pd, pwd, &pwd->c); c 423 drivers/clk/sifive/fu540-prci.c udelay(wrpll_calc_max_lock_us(&pwd->c)); c 169 drivers/clk/ti/adpll.c const struct ti_adpll_platform_data *c; c 474 drivers/clk/ti/adpll.c if (d->c->is_type_s) { c 512 drivers/clk/ti/adpll.c if (d->c->output_index < 0) c 517 drivers/clk/ti/adpll.c init.name = ti_adpll_clk_get_name(d, d->c->output_index, postfix); c 522 drivers/clk/ti/adpll.c init.num_parents = d->c->nr_max_inputs; c 527 drivers/clk/ti/adpll.c if (d->c->is_type_s) c 544 drivers/clk/ti/adpll.c return ti_adpll_setup_clock(d, clock, TI_ADPLL_DCO, d->c->output_index, c 651 drivers/clk/ti/adpll.c if (!d->c->is_type_s) c 725 drivers/clk/ti/adpll.c if (d->c->is_type_s) c 805 drivers/clk/ti/adpll.c if (d->c->is_type_s) { c 822 drivers/clk/ti/adpll.c if (nr_inputs < d->c->nr_max_inputs) { c 842 drivers/clk/ti/adpll.c if (d->c->is_type_s) { c 896 drivers/clk/ti/adpll.c d->c = pdata; c 88 drivers/clk/ti/autoidle.c struct clk_hw_omap *c = to_clk_hw_omap(hw); c 90 drivers/clk/ti/autoidle.c return _omap2_clk_deny_idle(c); c 107 drivers/clk/ti/autoidle.c struct clk_hw_omap *c = to_clk_hw_omap(hw); c 109 drivers/clk/ti/autoidle.c return _omap2_clk_allow_idle(c); c 151 drivers/clk/ti/autoidle.c struct clk_ti_autoidle *c; c 153 drivers/clk/ti/autoidle.c list_for_each_entry(c, &autoidle_clks, node) c 154 drivers/clk/ti/autoidle.c _allow_autoidle(c); c 165 drivers/clk/ti/autoidle.c struct clk_ti_autoidle *c; c 167 drivers/clk/ti/autoidle.c list_for_each_entry(c, &autoidle_clks, node) c 168 drivers/clk/ti/autoidle.c _deny_autoidle(c); c 133 drivers/clk/ti/clk.c struct ti_dt_clk *c; c 149 drivers/clk/ti/clk.c for (c = oclks; c->node_name != NULL; c++) { c 150 drivers/clk/ti/clk.c strcpy(buf, c->node_name); c 159 drivers/clk/ti/clk.c c->node_name); c 184 drivers/clk/ti/clk.c c->node_name, i, tags[i]); c 192 drivers/clk/ti/clk.c c->lk.clk = clk; c 193 drivers/clk/ti/clk.c clkdev_add(&c->lk); c 212 drivers/clk/ti/clk.c c->node_name, PTR_ERR(clk)); c 452 drivers/clk/ti/clkctrl.c char *c; c 566 
drivers/clk/ti/clkctrl.c c = provider->clkdm_name; c 568 drivers/clk/ti/clkctrl.c while (*c) { c 569 drivers/clk/ti/clkctrl.c if (*c == '-') c 570 drivers/clk/ti/clkctrl.c *c = '_'; c 571 drivers/clk/ti/clkctrl.c c++; c 15 drivers/clocksource/mmio.c static inline struct clocksource_mmio *to_mmio_clksrc(struct clocksource *c) c 17 drivers/clocksource/mmio.c return container_of(c, struct clocksource_mmio, clksrc); c 20 drivers/clocksource/mmio.c u64 clocksource_mmio_readl_up(struct clocksource *c) c 22 drivers/clocksource/mmio.c return (u64)readl_relaxed(to_mmio_clksrc(c)->reg); c 25 drivers/clocksource/mmio.c u64 clocksource_mmio_readl_down(struct clocksource *c) c 27 drivers/clocksource/mmio.c return ~(u64)readl_relaxed(to_mmio_clksrc(c)->reg) & c->mask; c 30 drivers/clocksource/mmio.c u64 clocksource_mmio_readw_up(struct clocksource *c) c 32 drivers/clocksource/mmio.c return (u64)readw_relaxed(to_mmio_clksrc(c)->reg); c 35 drivers/clocksource/mmio.c u64 clocksource_mmio_readw_down(struct clocksource *c) c 37 drivers/clocksource/mmio.c return ~(u64)readw_relaxed(to_mmio_clksrc(c)->reg) & c->mask; c 44 drivers/clocksource/mps2-timer.c static inline struct clockevent_mps2 *to_mps2_clkevt(struct clock_event_device *c) c 46 drivers/clocksource/mps2-timer.c return container_of(c, struct clockevent_mps2, clkevt); c 49 drivers/clocksource/mps2-timer.c static void clockevent_mps2_writel(u32 val, struct clock_event_device *c, u32 offset) c 51 drivers/clocksource/mps2-timer.c writel_relaxed(val, to_mps2_clkevt(c)->reg + offset); c 200 drivers/clocksource/mxs_timer.c unsigned int c = clk_get_rate(timer_clk); c 203 drivers/clocksource/mxs_timer.c clocksource_register_hz(&clocksource_mxs, c); c 206 drivers/clocksource/mxs_timer.c "mxs_timer", c, 200, 32, clocksource_mmio_readl_down); c 207 drivers/clocksource/mxs_timer.c sched_clock_register(mxs_read_sched_clock_v2, 32, c); c 307 drivers/clocksource/samsung_pwm_timer.c static u64 notrace samsung_clocksource_read(struct clocksource *c) c 37 drivers/clocksource/timer-atcpit100.c #define CH_INT_EN(c, i) ((1<<i)<<(4*c)) c 20 drivers/clocksource/timer-fsl-ftm.c #define FTM_SC_CLK(c) ((c) << FTM_SC_CLK_MASK_SHIFT) c 155 drivers/clocksource/timer-imx-gpt.c unsigned int c = clk_get_rate(imxtm->clk_per); c 160 drivers/clocksource/timer-imx-gpt.c imx_delay_timer.freq = c; c 166 drivers/clocksource/timer-imx-gpt.c sched_clock_register(mxc_read_sched_clock, 32, c); c 167 drivers/clocksource/timer-imx-gpt.c return clocksource_mmio_init(reg, "mxc_timer1", c, 200, 32, c 88 drivers/clocksource/timer-ixp4xx.c static u64 ixp4xx_clocksource_read(struct clocksource *c) c 105 drivers/clocksource/timer-mp-csky.c static u64 clksrc_read(struct clocksource *c) c 68 drivers/clocksource/timer-pxa.c struct clock_event_device *c = dev_id; c 73 drivers/clocksource/timer-pxa.c c->event_handler(c); c 228 drivers/clocksource/timer-ti-dm.c int c; c 234 drivers/clocksource/timer-ti-dm.c c = timer->get_context_loss_count(&timer->pdev->dev); c 235 drivers/clocksource/timer-ti-dm.c if (c != timer->ctx_loss_count) { c 237 drivers/clocksource/timer-ti-dm.c timer->ctx_loss_count = c; c 609 drivers/cpufreq/acpi-cpufreq.c static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c) c 616 drivers/cpufreq/acpi-cpufreq.c if (c->x86_vendor == X86_VENDOR_INTEL) { c 617 drivers/cpufreq/acpi-cpufreq.c if ((c->x86 == 15) && c 618 drivers/cpufreq/acpi-cpufreq.c (c->x86_model == 6) && c 619 drivers/cpufreq/acpi-cpufreq.c (c->x86_stepping == 8)) { c 635 drivers/cpufreq/acpi-cpufreq.c struct cpuinfo_x86 *c = 
&cpu_data(policy->cpu); c 647 drivers/cpufreq/acpi-cpufreq.c blacklisted = acpi_cpufreq_blacklist(c); c 665 drivers/cpufreq/acpi-cpufreq.c if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) c 45 drivers/cpufreq/bmips-cpufreq.c #define BMIPS(c, t, m, f) { \ c 46 drivers/cpufreq/bmips-cpufreq.c .compatible = c, \ c 183 drivers/cpufreq/e_powersaver.c struct cpuinfo_x86 *c = &cpu_data(0); c 197 drivers/cpufreq/e_powersaver.c switch (c->x86_model) { c 146 drivers/cpufreq/elanfreq.c struct cpuinfo_x86 *c = &cpu_data(0); c 150 drivers/cpufreq/elanfreq.c if ((c->x86_vendor != X86_VENDOR_AMD) || c 151 drivers/cpufreq/elanfreq.c (c->x86 != 4) || (c->x86_model != 10)) c 762 drivers/cpufreq/longhaul.c struct cpuinfo_x86 *c = &cpu_data(0); c 768 drivers/cpufreq/longhaul.c switch (c->x86_model) { c 778 drivers/cpufreq/longhaul.c switch (c->x86_stepping) { c 790 drivers/cpufreq/longhaul.c if (c->x86_stepping < 8) { c 817 drivers/cpufreq/longhaul.c switch (c->x86_stepping) { c 920 drivers/cpufreq/longhaul.c struct cpuinfo_x86 *c = &cpu_data(0); c 941 drivers/cpufreq/longhaul.c switch (c->x86_model) { c 166 drivers/cpufreq/longrun.c struct cpuinfo_x86 *c = &cpu_data(0); c 171 drivers/cpufreq/longrun.c if (cpu_has(c, X86_FEATURE_LRTI)) { c 118 drivers/cpufreq/p4-clockmod.c static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) c 120 drivers/cpufreq/p4-clockmod.c if (c->x86 == 0x06) { c 121 drivers/cpufreq/p4-clockmod.c if (cpu_has(c, X86_FEATURE_EST)) c 123 drivers/cpufreq/p4-clockmod.c switch (c->x86_model) { c 138 drivers/cpufreq/p4-clockmod.c if (c->x86 != 0xF) c 157 drivers/cpufreq/p4-clockmod.c struct cpuinfo_x86 *c = &cpu_data(policy->cpu); c 166 drivers/cpufreq/p4-clockmod.c cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_stepping; c 177 drivers/cpufreq/p4-clockmod.c c->x86_model < 2) { c 183 drivers/cpufreq/p4-clockmod.c stock_freq = cpufreq_p4_get_frequency(c); c 119 drivers/cpufreq/powernow-k7.c struct cpuinfo_x86 *c = &cpu_data(0); c 134 drivers/cpufreq/powernow-k7.c if ((c->x86_model == 6) && (c->x86_stepping == 0)) { c 92 drivers/cpufreq/s3c24xx-cpufreq.c static inline int closer(unsigned int target, unsigned int n, unsigned int c) c 94 drivers/cpufreq/s3c24xx-cpufreq.c int diff_cur = abs(target - c); c 73 drivers/cpufreq/sc520_freq.c struct cpuinfo_x86 *c = &cpu_data(0); c 76 drivers/cpufreq/sc520_freq.c if (c->x86_vendor != X86_VENDOR_AMD || c 77 drivers/cpufreq/sc520_freq.c c->x86 != 4 || c->x86_model != 9) c 71 drivers/cpufreq/speedstep-centrino.c static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, c 276 drivers/cpufreq/speedstep-centrino.c static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, c 279 drivers/cpufreq/speedstep-centrino.c if ((c->x86 == x->x86) && c 280 drivers/cpufreq/speedstep-centrino.c (c->x86_model == x->x86_model) && c 281 drivers/cpufreq/speedstep-centrino.c (c->x86_stepping == x->x86_stepping)) c 175 drivers/cpufreq/speedstep-lib.c struct cpuinfo_x86 *c = &boot_cpu_data; c 186 drivers/cpufreq/speedstep-lib.c if (c->x86_model < 2) c 256 drivers/cpufreq/speedstep-lib.c struct cpuinfo_x86 *c = &cpu_data(0); c 259 drivers/cpufreq/speedstep-lib.c pr_debug("x86: %x, model: %x\n", c->x86, c->x86_model); c 261 drivers/cpufreq/speedstep-lib.c if ((c->x86_vendor != X86_VENDOR_INTEL) || c 262 drivers/cpufreq/speedstep-lib.c ((c->x86 != 6) && (c->x86 != 0xF))) c 265 drivers/cpufreq/speedstep-lib.c if (c->x86 == 0xF) { c 268 drivers/cpufreq/speedstep-lib.c if (c->x86_model != 2) c 274 drivers/cpufreq/speedstep-lib.c pr_debug("ebx value is %x, 
x86_stepping is %x\n", ebx, c->x86_stepping); c 276 drivers/cpufreq/speedstep-lib.c switch (c->x86_stepping) { c 314 drivers/cpufreq/speedstep-lib.c (strstr(c->x86_model_id, c 324 drivers/cpufreq/speedstep-lib.c switch (c->x86_model) { c 363 drivers/cpufreq/speedstep-lib.c if (c->x86_stepping == 0x01) { c 32 drivers/crypto/amcc/crypto4xx_alg.c u32 hdr_proc, u32 h, u32 c, u32 pad_type, c 42 drivers/crypto/amcc/crypto4xx_alg.c sa->sa_command_0.bf.cipher_alg = c; c 156 drivers/crypto/bcm/cipher.c sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak, c 170 drivers/crypto/bcm/cipher.c sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak, SPU_SUPDT_LEN); c 401 drivers/crypto/bcm/cipher.c cipher_parms.key_buf = rctx->msg_buf.c.supdt_tweak; c 531 drivers/crypto/bcm/cipher.c packet_dump(" supdt ", rctx->msg_buf.c.supdt_tweak, c 159 drivers/crypto/bcm/cipher.h } c; c 462 drivers/crypto/caam/pdb.h u8 *c; c 475 drivers/crypto/caam/pdb.h u8 *c; c 98 drivers/crypto/cavium/cpt/cpt_common.h #define CPTX_PF_VFX_MBOXX(a, b, c) \ c 99 drivers/crypto/cavium/cpt/cpt_common.h (0x8001000ll + ((u64)(a) << 36) + ((b) << 20) + ((c) << 8)) c 117 drivers/crypto/cavium/cpt/cpt_common.h #define CPTX_VFX_PF_MBOXX(a, b, c) \ c 118 drivers/crypto/cavium/cpt/cpt_common.h (0x1000ll + ((u64)(a) << 36) + ((b) << 20) + ((c) << 3)) c 993 drivers/crypto/chelsio/chcr_algo.c u32 c, prev; c 998 drivers/crypto/chelsio/chcr_algo.c c = prev + add; c 999 drivers/crypto/chelsio/chcr_algo.c *b = cpu_to_be32(c); c 1000 drivers/crypto/chelsio/chcr_algo.c if (prev < c) c 1010 drivers/crypto/chelsio/chcr_algo.c u64 c; c 1014 drivers/crypto/chelsio/chcr_algo.c c = (u64)temp + 1; // No of block can processed withou overflow c 1015 drivers/crypto/chelsio/chcr_algo.c if ((bytes / AES_BLOCK_SIZE) > c) c 1016 drivers/crypto/chelsio/chcr_algo.c bytes = c * AES_BLOCK_SIZE; c 964 drivers/crypto/n2_core.c struct n2_crypto_chunk *c, *tmp; c 970 drivers/crypto/n2_core.c list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) { c 971 drivers/crypto/n2_core.c list_del(&c->entry); c 972 drivers/crypto/n2_core.c if (unlikely(c != &rctx->chunk)) c 973 drivers/crypto/n2_core.c kfree(c); c 983 drivers/crypto/n2_core.c struct n2_crypto_chunk *c, *tmp; c 997 drivers/crypto/n2_core.c list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) { c 998 drivers/crypto/n2_core.c err = __n2_crypt_chunk(tfm, c, qp, encrypt); c 1001 drivers/crypto/n2_core.c list_del(&c->entry); c 1002 drivers/crypto/n2_core.c if (unlikely(c != &rctx->chunk)) c 1003 drivers/crypto/n2_core.c kfree(c); c 1036 drivers/crypto/n2_core.c struct n2_crypto_chunk *c, *tmp; c 1054 drivers/crypto/n2_core.c list_for_each_entry_safe(c, tmp, &rctx->chunk_list, c 1056 drivers/crypto/n2_core.c c->iv_paddr = iv_paddr; c 1057 drivers/crypto/n2_core.c err = __n2_crypt_chunk(tfm, c, qp, true); c 1060 drivers/crypto/n2_core.c iv_paddr = c->dest_final - rctx->walk.blocksize; c 1061 drivers/crypto/n2_core.c list_del(&c->entry); c 1062 drivers/crypto/n2_core.c if (unlikely(c != &rctx->chunk)) c 1063 drivers/crypto/n2_core.c kfree(c); c 1067 drivers/crypto/n2_core.c list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list, c 1069 drivers/crypto/n2_core.c if (c == &rctx->chunk) { c 1079 drivers/crypto/n2_core.c pa = (c->arr[c->arr_len-1].src_paddr + c 1080 drivers/crypto/n2_core.c c->arr[c->arr_len-1].src_len - c 1086 drivers/crypto/n2_core.c c->iv_paddr = iv_paddr; c 1087 drivers/crypto/n2_core.c err = __n2_crypt_chunk(tfm, c, qp, false); c 1090 drivers/crypto/n2_core.c list_del(&c->entry); c 1091 drivers/crypto/n2_core.c 
if (unlikely(c != &rctx->chunk)) c 1092 drivers/crypto/n2_core.c kfree(c); c 134 drivers/crypto/nx/nx-842.c static void check_constraints(struct nx842_constraints *c) c 137 drivers/crypto/nx/nx-842.c if (c->maximum > BOUNCE_BUFFER_SIZE) c 138 drivers/crypto/nx/nx-842.c c->maximum = BOUNCE_BUFFER_SIZE; c 161 drivers/crypto/nx/nx-842.c struct nx842_constraints *c, c 174 drivers/crypto/nx/nx-842.c if (p->oremain == 0 || hdrsize + c->minimum > dlen) c 177 drivers/crypto/nx/nx-842.c if (slen % c->multiple) c 178 drivers/crypto/nx/nx-842.c adj_slen = round_up(slen, c->multiple); c 179 drivers/crypto/nx/nx-842.c if (slen < c->minimum) c 180 drivers/crypto/nx/nx-842.c adj_slen = c->minimum; c 181 drivers/crypto/nx/nx-842.c if (slen > c->maximum) c 182 drivers/crypto/nx/nx-842.c adj_slen = slen = c->maximum; c 183 drivers/crypto/nx/nx-842.c if (adj_slen > slen || (u64)src % c->alignment) { c 197 drivers/crypto/nx/nx-842.c if ((u64)dst % c->alignment) { c 198 drivers/crypto/nx/nx-842.c dskip = (int)(PTR_ALIGN(dst, c->alignment) - dst); c 202 drivers/crypto/nx/nx-842.c if (dlen % c->multiple) c 203 drivers/crypto/nx/nx-842.c dlen = round_down(dlen, c->multiple); c 204 drivers/crypto/nx/nx-842.c if (dlen < c->minimum) { c 208 drivers/crypto/nx/nx-842.c dlen = round_down(dlen, c->multiple); c 212 drivers/crypto/nx/nx-842.c if (dlen > c->maximum) c 213 drivers/crypto/nx/nx-842.c dlen = c->maximum; c 256 drivers/crypto/nx/nx-842.c struct nx842_constraints c = *ctx->driver->constraints; c 262 drivers/crypto/nx/nx-842.c check_constraints(&c); c 273 drivers/crypto/nx/nx-842.c DIV_ROUND_UP(p.iremain, c.maximum)); c 279 drivers/crypto/nx/nx-842.c add_header = (p.iremain % c.multiple || c 280 drivers/crypto/nx/nx-842.c p.iremain < c.minimum || c 281 drivers/crypto/nx/nx-842.c p.iremain > c.maximum || c 282 drivers/crypto/nx/nx-842.c (u64)p.in % c.alignment || c 283 drivers/crypto/nx/nx-842.c p.oremain % c.multiple || c 284 drivers/crypto/nx/nx-842.c p.oremain < c.minimum || c 285 drivers/crypto/nx/nx-842.c p.oremain > c.maximum || c 286 drivers/crypto/nx/nx-842.c (u64)p.out % c.alignment); c 304 drivers/crypto/nx/nx-842.c ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h); c 338 drivers/crypto/nx/nx-842.c struct nx842_constraints *c, c 361 drivers/crypto/nx/nx-842.c if (slen % c->multiple) c 362 drivers/crypto/nx/nx-842.c adj_slen = round_up(slen, c->multiple); c 363 drivers/crypto/nx/nx-842.c if (slen < c->minimum) c 364 drivers/crypto/nx/nx-842.c adj_slen = c->minimum; c 365 drivers/crypto/nx/nx-842.c if (slen > c->maximum) c 367 drivers/crypto/nx/nx-842.c if (slen < adj_slen || (u64)src % c->alignment) { c 381 drivers/crypto/nx/nx-842.c if (dlen % c->multiple) c 382 drivers/crypto/nx/nx-842.c dlen = round_down(dlen, c->multiple); c 383 drivers/crypto/nx/nx-842.c if (dlen < required_len || (u64)dst % c->alignment) { c 388 drivers/crypto/nx/nx-842.c if (dlen < c->minimum) c 390 drivers/crypto/nx/nx-842.c if (dlen > c->maximum) c 391 drivers/crypto/nx/nx-842.c dlen = c->maximum; c 439 drivers/crypto/nx/nx-842.c struct nx842_constraints c = *ctx->driver->constraints; c 443 drivers/crypto/nx/nx-842.c check_constraints(&c); c 467 drivers/crypto/nx/nx-842.c ret = decompress(ctx, &p, &g, &c, 0); c 500 drivers/crypto/nx/nx-842.c ret = decompress(ctx, &p, &hdr->group[n], &c, ignore); c 104 drivers/crypto/nx/nx_csbcpb.h #define NX_CPB_FDM(c) ((c)->cpb.hdr.fdm) c 105 drivers/crypto/nx/nx_csbcpb.h #define NX_CPB_KS_DS(c) ((c)->cpb.hdr.ks_ds) c 107 drivers/crypto/nx/nx_csbcpb.h #define NX_CPB_KEY_SIZE(c) 
(NX_CPB_KS_DS(c) >> 4) c 108 drivers/crypto/nx/nx_csbcpb.h #define NX_CPB_SET_KEY_SIZE(c, x) NX_CPB_KS_DS(c) |= ((x) << 4) c 109 drivers/crypto/nx/nx_csbcpb.h #define NX_CPB_SET_DIGEST_SIZE(c, x) NX_CPB_KS_DS(c) |= (x) c 499 drivers/crypto/padlock-aes.c struct cpuinfo_x86 *c = &cpu_data(0); c 520 drivers/crypto/padlock-aes.c if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) { c 507 drivers/crypto/padlock-sha.c struct cpuinfo_x86 *c = &cpu_data(0); c 516 drivers/crypto/padlock-sha.c if (c->x86_model < 0x0f) { c 546 drivers/crypto/padlock-sha.c struct cpuinfo_x86 *c = &cpu_data(0); c 548 drivers/crypto/padlock-sha.c if (c->x86_model >= 0x0f) { c 75 drivers/crypto/qat/qat_common/qat_asym_algs.c dma_addr_t c; c 80 drivers/crypto/qat/qat_common/qat_asym_algs.c dma_addr_t c; c 94 drivers/crypto/qat/qat_common/qat_asym_algs.c dma_addr_t c; c 576 drivers/crypto/qat/qat_common/qat_asym_algs.c req->out.rsa.enc.c); c 578 drivers/crypto/qat/qat_common/qat_asym_algs.c dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz, c 751 drivers/crypto/qat/qat_common/qat_asym_algs.c qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst), c 755 drivers/crypto/qat/qat_common/qat_asym_algs.c if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c))) c 760 drivers/crypto/qat/qat_common/qat_asym_algs.c &qat_req->out.rsa.enc.c, c 774 drivers/crypto/qat/qat_common/qat_asym_algs.c qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c, c 804 drivers/crypto/qat/qat_common/qat_asym_algs.c qat_req->out.rsa.enc.c); c 806 drivers/crypto/qat/qat_common/qat_asym_algs.c if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c)) c 807 drivers/crypto/qat/qat_common/qat_asym_algs.c dma_unmap_single(dev, qat_req->out.rsa.enc.c, c 876 drivers/crypto/qat/qat_common/qat_asym_algs.c qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src), c 878 drivers/crypto/qat/qat_common/qat_asym_algs.c if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c))) c 885 drivers/crypto/qat/qat_common/qat_asym_algs.c &qat_req->in.rsa.dec.c, c 916 drivers/crypto/qat/qat_common/qat_asym_algs.c qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c, c 964 drivers/crypto/qat/qat_common/qat_asym_algs.c qat_req->in.rsa.dec.c); c 966 drivers/crypto/qat/qat_common/qat_asym_algs.c if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c)) c 967 drivers/crypto/qat/qat_common/qat_asym_algs.c dma_unmap_single(dev, qat_req->in.rsa.dec.c, c 413 drivers/crypto/stm32/stm32-cryp.c static void stm32_cryp_hw_write_key(struct stm32_cryp *c) c 418 drivers/crypto/stm32/stm32-cryp.c if (is_des(c)) { c 419 drivers/crypto/stm32/stm32-cryp.c stm32_cryp_write(c, CRYP_K1LR, cpu_to_be32(c->ctx->key[0])); c 420 drivers/crypto/stm32/stm32-cryp.c stm32_cryp_write(c, CRYP_K1RR, cpu_to_be32(c->ctx->key[1])); c 423 drivers/crypto/stm32/stm32-cryp.c for (i = c->ctx->keylen / sizeof(u32); i > 0; i--, r_id -= 4) c 424 drivers/crypto/stm32/stm32-cryp.c stm32_cryp_write(c, r_id, c 425 drivers/crypto/stm32/stm32-cryp.c cpu_to_be32(c->ctx->key[i - 1])); c 88 drivers/dma/bcm2835-dma.c struct bcm2835_chan *c; c 177 drivers/dma/bcm2835-dma.c static inline size_t bcm2835_dma_max_frame_length(struct bcm2835_chan *c) c 180 drivers/dma/bcm2835-dma.c return c->is_lite_channel ? 
MAX_LITE_DMA_LEN : MAX_DMA_LEN; c 195 drivers/dma/bcm2835-dma.c static inline struct bcm2835_chan *to_bcm2835_dma_chan(struct dma_chan *c) c 197 drivers/dma/bcm2835-dma.c return container_of(c, struct bcm2835_chan, vc.chan); c 211 drivers/dma/bcm2835-dma.c dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb, c 266 drivers/dma/bcm2835-dma.c struct bcm2835_chan *c, c 273 drivers/dma/bcm2835-dma.c size_t plength = bcm2835_dma_max_frame_length(c); c 308 drivers/dma/bcm2835-dma.c struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); c 323 drivers/dma/bcm2835-dma.c d->c = c; c 333 drivers/dma/bcm2835-dma.c cb_entry->cb = dma_pool_alloc(c->cb_pool, gfp, c 349 drivers/dma/bcm2835-dma.c c, control_block, c 392 drivers/dma/bcm2835-dma.c struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); c 398 drivers/dma/bcm2835-dma.c max_len = bcm2835_dma_max_frame_length(c); c 412 drivers/dma/bcm2835-dma.c static void bcm2835_dma_abort(struct bcm2835_chan *c) c 414 drivers/dma/bcm2835-dma.c void __iomem *chan_base = c->chan_base; c 434 drivers/dma/bcm2835-dma.c dev_err(c->vc.chan.device->dev, c 440 drivers/dma/bcm2835-dma.c static void bcm2835_dma_start_desc(struct bcm2835_chan *c) c 442 drivers/dma/bcm2835-dma.c struct virt_dma_desc *vd = vchan_next_desc(&c->vc); c 446 drivers/dma/bcm2835-dma.c c->desc = NULL; c 452 drivers/dma/bcm2835-dma.c c->desc = d = to_bcm2835_dma_desc(&vd->tx); c 454 drivers/dma/bcm2835-dma.c writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR); c 455 drivers/dma/bcm2835-dma.c writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS); c 460 drivers/dma/bcm2835-dma.c struct bcm2835_chan *c = data; c 465 drivers/dma/bcm2835-dma.c if (c->irq_flags & IRQF_SHARED) { c 467 drivers/dma/bcm2835-dma.c flags = readl(c->chan_base + BCM2835_DMA_CS); c 473 drivers/dma/bcm2835-dma.c spin_lock_irqsave(&c->vc.lock, flags); c 483 drivers/dma/bcm2835-dma.c c->chan_base + BCM2835_DMA_CS); c 485 drivers/dma/bcm2835-dma.c d = c->desc; c 491 drivers/dma/bcm2835-dma.c } else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) { c 492 drivers/dma/bcm2835-dma.c vchan_cookie_complete(&c->desc->vd); c 493 drivers/dma/bcm2835-dma.c bcm2835_dma_start_desc(c); c 497 drivers/dma/bcm2835-dma.c spin_unlock_irqrestore(&c->vc.lock, flags); c 504 drivers/dma/bcm2835-dma.c struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); c 505 drivers/dma/bcm2835-dma.c struct device *dev = c->vc.chan.device->dev; c 507 drivers/dma/bcm2835-dma.c dev_dbg(dev, "Allocating DMA channel %d\n", c->ch); c 513 drivers/dma/bcm2835-dma.c c->cb_pool = dma_pool_create(dev_name(dev), dev, c 515 drivers/dma/bcm2835-dma.c if (!c->cb_pool) { c 520 drivers/dma/bcm2835-dma.c return request_irq(c->irq_number, bcm2835_dma_callback, c 521 drivers/dma/bcm2835-dma.c c->irq_flags, "DMA IRQ", c); c 526 drivers/dma/bcm2835-dma.c struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); c 528 drivers/dma/bcm2835-dma.c vchan_free_chan_resources(&c->vc); c 529 drivers/dma/bcm2835-dma.c free_irq(c->irq_number, c); c 530 drivers/dma/bcm2835-dma.c dma_pool_destroy(c->cb_pool); c 532 drivers/dma/bcm2835-dma.c dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch); c 567 drivers/dma/bcm2835-dma.c struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); c 576 drivers/dma/bcm2835-dma.c spin_lock_irqsave(&c->vc.lock, flags); c 577 drivers/dma/bcm2835-dma.c vd = vchan_find_desc(&c->vc, cookie); c 581 drivers/dma/bcm2835-dma.c } else if (c->desc && c->desc->vd.tx.cookie == cookie) { c 582 drivers/dma/bcm2835-dma.c struct bcm2835_desc *d = c->desc; c 586 
drivers/dma/bcm2835-dma.c pos = readl(c->chan_base + BCM2835_DMA_SOURCE_AD); c 588 drivers/dma/bcm2835-dma.c pos = readl(c->chan_base + BCM2835_DMA_DEST_AD); c 597 drivers/dma/bcm2835-dma.c spin_unlock_irqrestore(&c->vc.lock, flags); c 604 drivers/dma/bcm2835-dma.c struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); c 607 drivers/dma/bcm2835-dma.c spin_lock_irqsave(&c->vc.lock, flags); c 608 drivers/dma/bcm2835-dma.c if (vchan_issue_pending(&c->vc) && !c->desc) c 609 drivers/dma/bcm2835-dma.c bcm2835_dma_start_desc(c); c 611 drivers/dma/bcm2835-dma.c spin_unlock_irqrestore(&c->vc.lock, flags); c 618 drivers/dma/bcm2835-dma.c struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); c 622 drivers/dma/bcm2835-dma.c size_t max_len = bcm2835_dma_max_frame_length(c); c 639 drivers/dma/bcm2835-dma.c return vchan_tx_prep(&c->vc, &d->vd, flags); c 648 drivers/dma/bcm2835-dma.c struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); c 661 drivers/dma/bcm2835-dma.c if (c->dreq != 0) c 662 drivers/dma/bcm2835-dma.c info |= BCM2835_DMA_PER_MAP(c->dreq); c 665 drivers/dma/bcm2835-dma.c if (c->cfg.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) c 667 drivers/dma/bcm2835-dma.c src = c->cfg.src_addr; c 670 drivers/dma/bcm2835-dma.c if (c->cfg.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) c 672 drivers/dma/bcm2835-dma.c dst = c->cfg.dst_addr; c 677 drivers/dma/bcm2835-dma.c frames = bcm2835_dma_count_frames_for_sg(c, sgl, sg_len); c 691 drivers/dma/bcm2835-dma.c return vchan_tx_prep(&c->vc, &d->vd, flags); c 700 drivers/dma/bcm2835-dma.c struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); c 705 drivers/dma/bcm2835-dma.c size_t max_len = bcm2835_dma_max_frame_length(c); c 735 drivers/dma/bcm2835-dma.c if (c->dreq != 0) c 736 drivers/dma/bcm2835-dma.c info |= BCM2835_DMA_PER_MAP(c->dreq); c 739 drivers/dma/bcm2835-dma.c if (c->cfg.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) c 741 drivers/dma/bcm2835-dma.c src = c->cfg.src_addr; c 745 drivers/dma/bcm2835-dma.c if (c->cfg.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) c 747 drivers/dma/bcm2835-dma.c dst = c->cfg.dst_addr; c 752 drivers/dma/bcm2835-dma.c if (buf_addr == od->zero_page && !c->is_lite_channel) c 777 drivers/dma/bcm2835-dma.c return vchan_tx_prep(&c->vc, &d->vd, flags); c 783 drivers/dma/bcm2835-dma.c struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); c 785 drivers/dma/bcm2835-dma.c c->cfg = *cfg; c 792 drivers/dma/bcm2835-dma.c struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); c 796 drivers/dma/bcm2835-dma.c spin_lock_irqsave(&c->vc.lock, flags); c 799 drivers/dma/bcm2835-dma.c if (c->desc) { c 800 drivers/dma/bcm2835-dma.c if (c->desc->vd.tx.flags & DMA_PREP_INTERRUPT) c 801 drivers/dma/bcm2835-dma.c vchan_terminate_vdesc(&c->desc->vd); c 803 drivers/dma/bcm2835-dma.c vchan_vdesc_fini(&c->desc->vd); c 804 drivers/dma/bcm2835-dma.c c->desc = NULL; c 805 drivers/dma/bcm2835-dma.c bcm2835_dma_abort(c); c 808 drivers/dma/bcm2835-dma.c vchan_get_all_descriptors(&c->vc, &head); c 809 drivers/dma/bcm2835-dma.c spin_unlock_irqrestore(&c->vc.lock, flags); c 810 drivers/dma/bcm2835-dma.c vchan_dma_desc_free_list(&c->vc, &head); c 817 drivers/dma/bcm2835-dma.c struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); c 819 drivers/dma/bcm2835-dma.c vchan_synchronize(&c->vc); c 825 drivers/dma/bcm2835-dma.c struct bcm2835_chan *c; c 827 drivers/dma/bcm2835-dma.c c = devm_kzalloc(d->ddev.dev, sizeof(*c), GFP_KERNEL); c 828 drivers/dma/bcm2835-dma.c if (!c) c 831 drivers/dma/bcm2835-dma.c c->vc.desc_free = bcm2835_dma_desc_free; c 832 drivers/dma/bcm2835-dma.c 
vchan_init(&c->vc, &d->ddev); c 834 drivers/dma/bcm2835-dma.c c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id); c 835 drivers/dma/bcm2835-dma.c c->ch = chan_id; c 836 drivers/dma/bcm2835-dma.c c->irq_number = irq; c 837 drivers/dma/bcm2835-dma.c c->irq_flags = irq_flags; c 840 drivers/dma/bcm2835-dma.c if (readl(c->chan_base + BCM2835_DMA_DEBUG) & c 842 drivers/dma/bcm2835-dma.c c->is_lite_channel = true; c 849 drivers/dma/bcm2835-dma.c struct bcm2835_chan *c, *next; c 851 drivers/dma/bcm2835-dma.c list_for_each_entry_safe(c, next, &od->ddev.channels, c 853 drivers/dma/bcm2835-dma.c list_del(&c->vc.chan.device_node); c 854 drivers/dma/bcm2835-dma.c tasklet_kill(&c->vc.task); c 147 drivers/dma/dma-axi-dmac.c static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c) c 149 drivers/dma/dma-axi-dmac.c return container_of(c, struct axi_dmac_chan, vchan.chan); c 426 drivers/dma/dma-axi-dmac.c static int axi_dmac_terminate_all(struct dma_chan *c) c 428 drivers/dma/dma-axi-dmac.c struct axi_dmac_chan *chan = to_axi_dmac_chan(c); c 445 drivers/dma/dma-axi-dmac.c static void axi_dmac_synchronize(struct dma_chan *c) c 447 drivers/dma/dma-axi-dmac.c struct axi_dmac_chan *chan = to_axi_dmac_chan(c); c 452 drivers/dma/dma-axi-dmac.c static void axi_dmac_issue_pending(struct dma_chan *c) c 454 drivers/dma/dma-axi-dmac.c struct axi_dmac_chan *chan = to_axi_dmac_chan(c); c 527 drivers/dma/dma-axi-dmac.c struct dma_chan *c, struct scatterlist *sgl, c 531 drivers/dma/dma-axi-dmac.c struct axi_dmac_chan *chan = to_axi_dmac_chan(c); c 568 drivers/dma/dma-axi-dmac.c struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len, c 572 drivers/dma/dma-axi-dmac.c struct axi_dmac_chan *chan = to_axi_dmac_chan(c); c 602 drivers/dma/dma-axi-dmac.c struct dma_chan *c, struct dma_interleaved_template *xt, c 605 drivers/dma/dma-axi-dmac.c struct axi_dmac_chan *chan = to_axi_dmac_chan(c); c 672 drivers/dma/dma-axi-dmac.c static void axi_dmac_free_chan_resources(struct dma_chan *c) c 674 drivers/dma/dma-axi-dmac.c vchan_free_chan_resources(to_virt_chan(c)); c 880 drivers/dma/dw/core.c static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c) c 885 drivers/dma/dw/core.c if (desc->txd.cookie == c) c 260 drivers/dma/fsldma.h #define CPU_TO_DMA(fsl_chan, c, width) \ c 262 drivers/dma/fsldma.h (__force v##width)cpu_to_be##width(c) : \ c 263 drivers/dma/fsldma.h (__force v##width)cpu_to_le##width(c)) c 136 drivers/dma/idma64.c static void idma64_chan_irq(struct idma64 *idma64, unsigned short c, c 139 drivers/dma/idma64.c struct idma64_chan *idma64c = &idma64->chan[c]; c 145 drivers/dma/idma64.c if (status_err & (1 << c)) { c 148 drivers/dma/idma64.c } else if (status_xfer & (1 << c)) { c 169 drivers/dma/img-mdc-dma.c static inline struct mdc_chan *to_mdc_chan(struct dma_chan *c) c 171 drivers/dma/img-mdc-dma.c return container_of(to_virt_chan(c), struct mdc_chan, vc); c 171 drivers/dma/ioat/dma.c void ioat_issue_pending(struct dma_chan *c) c 173 drivers/dma/ioat/dma.c struct ioatdma_chan *ioat_chan = to_ioat_chan(c); c 300 drivers/dma/ioat/dma.c struct dma_chan *c = tx->chan; c 301 drivers/dma/ioat/dma.c struct ioatdma_chan *ioat_chan = to_ioat_chan(c); c 360 drivers/dma/ioat/dma.c ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags) c 362 drivers/dma/ioat/dma.c struct ioatdma_chan *ioat_chan = to_ioat_chan(c); c 398 drivers/dma/ioat/dma.c ring[i] = ioat_alloc_ring_ent(c, i, flags); c 403 drivers/dma/ioat/dma.c ioat_free_ring_ent(ring[i], c); c 963 drivers/dma/ioat/dma.c ioat_tx_status(struct 
dma_chan *c, dma_cookie_t cookie, c 966 drivers/dma/ioat/dma.c struct ioatdma_chan *ioat_chan = to_ioat_chan(c); c 969 drivers/dma/ioat/dma.c ret = dma_cookie_status(c, cookie, txstate); c 975 drivers/dma/ioat/dma.c return dma_cookie_status(c, cookie, txstate); c 202 drivers/dma/ioat/dma.h static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c) c 204 drivers/dma/ioat/dma.h return container_of(c, struct ioatdma_chan, dma_chan); c 229 drivers/dma/ioat/dma.h #define dump_desc_dbg(c, d) \ c 230 drivers/dma/ioat/dma.h ({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; }) c 358 drivers/dma/ioat/dma.h ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, c 361 drivers/dma/ioat/dma.h ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags); c 389 drivers/dma/ioat/dma.h ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags); c 394 drivers/dma/ioat/dma.h ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie, c 268 drivers/dma/ioat/hw.h uint64_t c[8]; c 611 drivers/dma/ioat/init.c static void ioat_free_chan_resources(struct dma_chan *c) c 613 drivers/dma/ioat/init.c struct ioatdma_chan *ioat_chan = to_ioat_chan(c); c 640 drivers/dma/ioat/init.c ioat_free_ring_ent(desc, c); c 650 drivers/dma/ioat/init.c ioat_free_ring_ent(desc, c); c 678 drivers/dma/ioat/init.c static int ioat_alloc_chan_resources(struct dma_chan *c) c 680 drivers/dma/ioat/init.c struct ioatdma_chan *ioat_chan = to_ioat_chan(c); c 708 drivers/dma/ioat/init.c ring = ioat_alloc_ring(c, order, GFP_NOWAIT); c 760 drivers/dma/ioat/init.c ioat_free_chan_resources(c); c 770 drivers/dma/ioat/init.c struct dma_chan *c = &ioat_chan->dma_chan; c 771 drivers/dma/ioat/init.c unsigned long data = (unsigned long) c; c 1066 drivers/dma/ioat/init.c struct dma_chan *c; c 1077 drivers/dma/ioat/init.c list_for_each_entry(c, &dma->channels, device_node) { c 1078 drivers/dma/ioat/init.c ioat_chan = to_ioat_chan(c); c 1094 drivers/dma/ioat/init.c struct dma_chan *c; c 1180 drivers/dma/ioat/init.c list_for_each_entry(c, &dma->channels, device_node) { c 1181 drivers/dma/ioat/init.c ioat_chan = to_ioat_chan(c); c 101 drivers/dma/ioat/prep.c ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, c 104 drivers/dma/ioat/prep.c struct ioatdma_chan *ioat_chan = to_ioat_chan(c); c 152 drivers/dma/ioat/prep.c __ioat_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result, c 156 drivers/dma/ioat/prep.c struct ioatdma_chan *ioat_chan = to_ioat_chan(c); c 339 drivers/dma/ioat/prep.c __ioat_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, c 344 drivers/dma/ioat/prep.c struct ioatdma_chan *ioat_chan = to_ioat_chan(c); c 463 drivers/dma/ioat/prep.c __ioat_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, c 468 drivers/dma/ioat/prep.c struct ioatdma_chan *ioat_chan = to_ioat_chan(c); c 705 drivers/dma/ioat/prep.c ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags) c 707 drivers/dma/ioat/prep.c struct ioatdma_chan *ioat_chan = to_ioat_chan(c); c 17 drivers/dma/ioat/sysfs.c static ssize_t cap_show(struct dma_chan *c, char *page) c 19 drivers/dma/ioat/sysfs.c struct dma_device *dma = c->device; c 31 drivers/dma/ioat/sysfs.c static ssize_t version_show(struct dma_chan *c, char *page) c 33 drivers/dma/ioat/sysfs.c struct dma_device *dma = c->device; c 78 drivers/dma/ioat/sysfs.c struct dma_chan *c; c 80 drivers/dma/ioat/sysfs.c list_for_each_entry(c, &dma->channels, device_node) { c 81 drivers/dma/ioat/sysfs.c struct ioatdma_chan *ioat_chan = to_ioat_chan(c); c 82 
drivers/dma/ioat/sysfs.c struct kobject *parent = &c->dev->device.kobj; c 99 drivers/dma/ioat/sysfs.c struct dma_chan *c; c 101 drivers/dma/ioat/sysfs.c list_for_each_entry(c, &dma->channels, device_node) { c 102 drivers/dma/ioat/sysfs.c struct ioatdma_chan *ioat_chan = to_ioat_chan(c); c 111 drivers/dma/ioat/sysfs.c static ssize_t ring_size_show(struct dma_chan *c, char *page) c 113 drivers/dma/ioat/sysfs.c struct ioatdma_chan *ioat_chan = to_ioat_chan(c); c 119 drivers/dma/ioat/sysfs.c static ssize_t ring_active_show(struct dma_chan *c, char *page) c 121 drivers/dma/ioat/sysfs.c struct ioatdma_chan *ioat_chan = to_ioat_chan(c); c 128 drivers/dma/ioat/sysfs.c static ssize_t intr_coalesce_show(struct dma_chan *c, char *page) c 130 drivers/dma/ioat/sysfs.c struct ioatdma_chan *ioat_chan = to_ioat_chan(c); c 135 drivers/dma/ioat/sysfs.c static ssize_t intr_coalesce_store(struct dma_chan *c, const char *page, c 139 drivers/dma/ioat/sysfs.c struct ioatdma_chan *ioat_chan = to_ioat_chan(c); c 214 drivers/dma/k3dma.c struct k3_dma_chan *c; c 229 drivers/dma/k3dma.c c = p->vchan; c 230 drivers/dma/k3dma.c if (c && (tc1 & BIT(i))) { c 231 drivers/dma/k3dma.c spin_lock_irqsave(&c->vc.lock, flags); c 237 drivers/dma/k3dma.c spin_unlock_irqrestore(&c->vc.lock, flags); c 239 drivers/dma/k3dma.c if (c && (tc2 & BIT(i))) { c 240 drivers/dma/k3dma.c spin_lock_irqsave(&c->vc.lock, flags); c 243 drivers/dma/k3dma.c spin_unlock_irqrestore(&c->vc.lock, flags); c 265 drivers/dma/k3dma.c static int k3_dma_start_txd(struct k3_dma_chan *c) c 267 drivers/dma/k3dma.c struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device); c 268 drivers/dma/k3dma.c struct virt_dma_desc *vd = vchan_next_desc(&c->vc); c 270 drivers/dma/k3dma.c if (!c->phy) c 273 drivers/dma/k3dma.c if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d)) c 277 drivers/dma/k3dma.c if (c->phy->ds_run) c 289 drivers/dma/k3dma.c c->phy->ds_run = ds; c 290 drivers/dma/k3dma.c c->phy->ds_done = NULL; c 292 drivers/dma/k3dma.c k3_dma_set_desc(c->phy, &ds->desc_hw[0]); c 295 drivers/dma/k3dma.c c->phy->ds_run = NULL; c 296 drivers/dma/k3dma.c c->phy->ds_done = NULL; c 304 drivers/dma/k3dma.c struct k3_dma_chan *c, *cn; c 308 drivers/dma/k3dma.c list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) { c 309 drivers/dma/k3dma.c spin_lock_irq(&c->vc.lock); c 310 drivers/dma/k3dma.c p = c->phy; c 312 drivers/dma/k3dma.c if (k3_dma_start_txd(c)) { c 316 drivers/dma/k3dma.c c->phy = NULL; c 320 drivers/dma/k3dma.c spin_unlock_irq(&c->vc.lock); c 332 drivers/dma/k3dma.c c = list_first_entry(&d->chan_pending, c 335 drivers/dma/k3dma.c list_del_init(&c->node); c 338 drivers/dma/k3dma.c p->vchan = c; c 339 drivers/dma/k3dma.c c->phy = p; c 340 drivers/dma/k3dma.c dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc); c 351 drivers/dma/k3dma.c c = p->vchan; c 352 drivers/dma/k3dma.c if (c) { c 353 drivers/dma/k3dma.c spin_lock_irq(&c->vc.lock); c 354 drivers/dma/k3dma.c k3_dma_start_txd(c); c 355 drivers/dma/k3dma.c spin_unlock_irq(&c->vc.lock); c 363 drivers/dma/k3dma.c struct k3_dma_chan *c = to_k3_chan(chan); c 368 drivers/dma/k3dma.c list_del_init(&c->node); c 371 drivers/dma/k3dma.c vchan_free_chan_resources(&c->vc); c 372 drivers/dma/k3dma.c c->ccfg = 0; c 378 drivers/dma/k3dma.c struct k3_dma_chan *c = to_k3_chan(chan); c 386 drivers/dma/k3dma.c ret = dma_cookie_status(&c->vc.chan, cookie, state); c 390 drivers/dma/k3dma.c spin_lock_irqsave(&c->vc.lock, flags); c 391 drivers/dma/k3dma.c p = c->phy; c 392 drivers/dma/k3dma.c ret = c->status; c 
398 drivers/dma/k3dma.c vd = vchan_find_desc(&c->vc, cookie); c 399 drivers/dma/k3dma.c if (vd && !c->cyclic) { c 418 drivers/dma/k3dma.c spin_unlock_irqrestore(&c->vc.lock, flags); c 425 drivers/dma/k3dma.c struct k3_dma_chan *c = to_k3_chan(chan); c 429 drivers/dma/k3dma.c spin_lock_irqsave(&c->vc.lock, flags); c 431 drivers/dma/k3dma.c if (vchan_issue_pending(&c->vc)) { c 433 drivers/dma/k3dma.c if (!c->phy) { c 434 drivers/dma/k3dma.c if (list_empty(&c->node)) { c 436 drivers/dma/k3dma.c list_add_tail(&c->node, &d->chan_pending); c 439 drivers/dma/k3dma.c dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc); c 444 drivers/dma/k3dma.c dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc); c 445 drivers/dma/k3dma.c spin_unlock_irqrestore(&c->vc.lock, flags); c 465 drivers/dma/k3dma.c struct k3_dma_chan *c = to_k3_chan(chan); c 472 drivers/dma/k3dma.c &c->vc, num, lli_limit); c 482 drivers/dma/k3dma.c dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc); c 494 drivers/dma/k3dma.c struct k3_dma_chan *c = to_k3_chan(chan); c 508 drivers/dma/k3dma.c c->cyclic = 0; c 512 drivers/dma/k3dma.c if (!c->ccfg) { c 514 drivers/dma/k3dma.c c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN; c 515 drivers/dma/k3dma.c c->ccfg |= (0xf << 20) | (0xf << 24); /* burst = 16 */ c 516 drivers/dma/k3dma.c c->ccfg |= (0x3 << 12) | (0x3 << 16); /* width = 64 bit */ c 521 drivers/dma/k3dma.c k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg); c 529 drivers/dma/k3dma.c return vchan_tx_prep(&c->vc, &ds->vd, flags); c 536 drivers/dma/k3dma.c struct k3_dma_chan *c = to_k3_chan(chan); c 546 drivers/dma/k3dma.c c->cyclic = 0; c 558 drivers/dma/k3dma.c k3_dma_config_write(chan, dir, &c->slave_config); c 570 drivers/dma/k3dma.c dst = c->dev_addr; c 572 drivers/dma/k3dma.c src = c->dev_addr; c 576 drivers/dma/k3dma.c k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg); c 585 drivers/dma/k3dma.c return vchan_tx_prep(&c->vc, &ds->vd, flags); c 594 drivers/dma/k3dma.c struct k3_dma_chan *c = to_k3_chan(chan); c 614 drivers/dma/k3dma.c c->cyclic = 1; c 619 drivers/dma/k3dma.c k3_dma_config_write(chan, dir, &c->slave_config); c 629 drivers/dma/k3dma.c dst = c->dev_addr; c 631 drivers/dma/k3dma.c src = c->dev_addr; c 642 drivers/dma/k3dma.c k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg | en_tc2); c 653 drivers/dma/k3dma.c return vchan_tx_prep(&c->vc, &ds->vd, flags); c 659 drivers/dma/k3dma.c struct k3_dma_chan *c = to_k3_chan(chan); c 661 drivers/dma/k3dma.c memcpy(&c->slave_config, cfg, sizeof(*cfg)); c 670 drivers/dma/k3dma.c struct k3_dma_chan *c = to_k3_chan(chan); c 675 drivers/dma/k3dma.c c->ccfg = CX_CFG_DSTINCR; c 676 drivers/dma/k3dma.c c->dev_addr = cfg->src_addr; c 680 drivers/dma/k3dma.c c->ccfg = CX_CFG_SRCINCR; c 681 drivers/dma/k3dma.c c->dev_addr = cfg->dst_addr; c 696 drivers/dma/k3dma.c c->ccfg |= (val << 12) | (val << 16); c 702 drivers/dma/k3dma.c c->ccfg |= (val << 20) | (val << 24); c 703 drivers/dma/k3dma.c c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN; c 706 drivers/dma/k3dma.c c->ccfg |= c->vc.chan.chan_id << 4; c 723 drivers/dma/k3dma.c struct k3_dma_chan *c = to_k3_chan(chan); c 725 drivers/dma/k3dma.c struct k3_dma_phy *p = c->phy; c 729 drivers/dma/k3dma.c dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); c 733 drivers/dma/k3dma.c list_del_init(&c->node); c 737 drivers/dma/k3dma.c spin_lock_irqsave(&c->vc.lock, flags); c 738 drivers/dma/k3dma.c vchan_get_all_descriptors(&c->vc, &head); c 742 drivers/dma/k3dma.c c->phy = NULL; c 750 drivers/dma/k3dma.c 
spin_unlock_irqrestore(&c->vc.lock, flags); c 751 drivers/dma/k3dma.c vchan_dma_desc_free_list(&c->vc, &head); c 758 drivers/dma/k3dma.c struct k3_dma_chan *c = to_k3_chan(chan); c 760 drivers/dma/k3dma.c vchan_synchronize(&c->vc); c 765 drivers/dma/k3dma.c struct k3_dma_chan *c = to_k3_chan(chan); c 767 drivers/dma/k3dma.c struct k3_dma_phy *p = c->phy; c 769 drivers/dma/k3dma.c dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); c 770 drivers/dma/k3dma.c if (c->status == DMA_IN_PROGRESS) { c 771 drivers/dma/k3dma.c c->status = DMA_PAUSED; c 776 drivers/dma/k3dma.c list_del_init(&c->node); c 786 drivers/dma/k3dma.c struct k3_dma_chan *c = to_k3_chan(chan); c 788 drivers/dma/k3dma.c struct k3_dma_phy *p = c->phy; c 791 drivers/dma/k3dma.c dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); c 792 drivers/dma/k3dma.c spin_lock_irqsave(&c->vc.lock, flags); c 793 drivers/dma/k3dma.c if (c->status == DMA_PAUSED) { c 794 drivers/dma/k3dma.c c->status = DMA_IN_PROGRESS; c 797 drivers/dma/k3dma.c } else if (!list_empty(&c->vc.desc_issued)) { c 799 drivers/dma/k3dma.c list_add_tail(&c->node, &d->chan_pending); c 803 drivers/dma/k3dma.c spin_unlock_irqrestore(&c->vc.lock, flags); c 942 drivers/dma/k3dma.c struct k3_dma_chan *c = &d->chans[i]; c 944 drivers/dma/k3dma.c c->status = DMA_IN_PROGRESS; c 945 drivers/dma/k3dma.c INIT_LIST_HEAD(&c->node); c 946 drivers/dma/k3dma.c c->vc.desc_free = k3_dma_free_desc; c 947 drivers/dma/k3dma.c vchan_init(&c->vc, &d->slave); c 985 drivers/dma/k3dma.c struct k3_dma_chan *c, *cn; c 993 drivers/dma/k3dma.c list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) { c 994 drivers/dma/k3dma.c list_del(&c->vc.chan.device_node); c 995 drivers/dma/k3dma.c tasklet_kill(&c->vc.task); c 419 drivers/dma/mediatek/mtk-cqdma.c static struct virt_dma_desc *mtk_cqdma_find_active_desc(struct dma_chan *c, c 422 drivers/dma/mediatek/mtk-cqdma.c struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c); c 441 drivers/dma/mediatek/mtk-cqdma.c static enum dma_status mtk_cqdma_tx_status(struct dma_chan *c, c 445 drivers/dma/mediatek/mtk-cqdma.c struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c); c 452 drivers/dma/mediatek/mtk-cqdma.c ret = dma_cookie_status(c, cookie, txstate); c 457 drivers/dma/mediatek/mtk-cqdma.c vd = mtk_cqdma_find_active_desc(c, cookie); c 470 drivers/dma/mediatek/mtk-cqdma.c static void mtk_cqdma_issue_pending(struct dma_chan *c) c 472 drivers/dma/mediatek/mtk-cqdma.c struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c); c 488 drivers/dma/mediatek/mtk-cqdma.c mtk_cqdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, c 520 drivers/dma/mediatek/mtk-cqdma.c cvd[i]->ch = c; c 529 drivers/dma/mediatek/mtk-cqdma.c tx = vchan_tx_prep(to_virt_chan(c), &cvd[i]->vd, flags); c 551 drivers/dma/mediatek/mtk-cqdma.c static void mtk_cqdma_free_inactive_desc(struct dma_chan *c) c 553 drivers/dma/mediatek/mtk-cqdma.c struct virt_dma_chan *vc = to_virt_chan(c); c 571 drivers/dma/mediatek/mtk-cqdma.c static void mtk_cqdma_free_active_desc(struct dma_chan *c) c 573 drivers/dma/mediatek/mtk-cqdma.c struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c); c 602 drivers/dma/mediatek/mtk-cqdma.c static int mtk_cqdma_terminate_all(struct dma_chan *c) c 605 drivers/dma/mediatek/mtk-cqdma.c mtk_cqdma_free_inactive_desc(c); c 608 drivers/dma/mediatek/mtk-cqdma.c mtk_cqdma_free_active_desc(c); c 613 drivers/dma/mediatek/mtk-cqdma.c static int mtk_cqdma_alloc_chan_resources(struct dma_chan *c) c 615 drivers/dma/mediatek/mtk-cqdma.c struct mtk_cqdma_device *cqdma = to_cqdma_dev(c); c 616 
drivers/dma/mediatek/mtk-cqdma.c struct mtk_cqdma_vchan *vc = to_cqdma_vchan(c); c 658 drivers/dma/mediatek/mtk-cqdma.c static void mtk_cqdma_free_chan_resources(struct dma_chan *c) c 660 drivers/dma/mediatek/mtk-cqdma.c struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c); c 664 drivers/dma/mediatek/mtk-cqdma.c mtk_cqdma_terminate_all(c); c 675 drivers/dma/mediatek/mtk-cqdma.c dev_err(cqdma2dev(to_cqdma_dev(c)), "cqdma flush timeout\n"); c 660 drivers/dma/mediatek/mtk-hsdma.c static struct virt_dma_desc *mtk_hsdma_find_active_desc(struct dma_chan *c, c 663 drivers/dma/mediatek/mtk-hsdma.c struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c); c 677 drivers/dma/mediatek/mtk-hsdma.c static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c, c 681 drivers/dma/mediatek/mtk-hsdma.c struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c); c 688 drivers/dma/mediatek/mtk-hsdma.c ret = dma_cookie_status(c, cookie, txstate); c 693 drivers/dma/mediatek/mtk-hsdma.c vd = mtk_hsdma_find_active_desc(c, cookie); c 706 drivers/dma/mediatek/mtk-hsdma.c static void mtk_hsdma_issue_pending(struct dma_chan *c) c 708 drivers/dma/mediatek/mtk-hsdma.c struct mtk_hsdma_device *hsdma = to_hsdma_dev(c); c 709 drivers/dma/mediatek/mtk-hsdma.c struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c); c 721 drivers/dma/mediatek/mtk-hsdma.c mtk_hsdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, c 735 drivers/dma/mediatek/mtk-hsdma.c return vchan_tx_prep(to_virt_chan(c), &hvd->vd, flags); c 738 drivers/dma/mediatek/mtk-hsdma.c static int mtk_hsdma_free_inactive_desc(struct dma_chan *c) c 740 drivers/dma/mediatek/mtk-hsdma.c struct virt_dma_chan *vc = to_virt_chan(c); c 756 drivers/dma/mediatek/mtk-hsdma.c static void mtk_hsdma_free_active_desc(struct dma_chan *c) c 758 drivers/dma/mediatek/mtk-hsdma.c struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c); c 789 drivers/dma/mediatek/mtk-hsdma.c static int mtk_hsdma_terminate_all(struct dma_chan *c) c 795 drivers/dma/mediatek/mtk-hsdma.c mtk_hsdma_free_inactive_desc(c); c 803 drivers/dma/mediatek/mtk-hsdma.c mtk_hsdma_free_active_desc(c); c 808 drivers/dma/mediatek/mtk-hsdma.c static int mtk_hsdma_alloc_chan_resources(struct dma_chan *c) c 810 drivers/dma/mediatek/mtk-hsdma.c struct mtk_hsdma_device *hsdma = to_hsdma_dev(c); c 834 drivers/dma/mediatek/mtk-hsdma.c static void mtk_hsdma_free_chan_resources(struct dma_chan *c) c 836 drivers/dma/mediatek/mtk-hsdma.c struct mtk_hsdma_device *hsdma = to_hsdma_dev(c); c 839 drivers/dma/mediatek/mtk-hsdma.c mtk_hsdma_terminate_all(c); c 110 drivers/dma/mediatek/mtk-uart-apdma.c static inline struct mtk_chan *to_mtk_uart_apdma_chan(struct dma_chan *c) c 112 drivers/dma/mediatek/mtk-uart-apdma.c return container_of(c, struct mtk_chan, vc.chan); c 121 drivers/dma/mediatek/mtk-uart-apdma.c static void mtk_uart_apdma_write(struct mtk_chan *c, c 124 drivers/dma/mediatek/mtk-uart-apdma.c writel(val, c->base + reg); c 127 drivers/dma/mediatek/mtk-uart-apdma.c static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg) c 129 drivers/dma/mediatek/mtk-uart-apdma.c return readl(c->base + reg); c 135 drivers/dma/mediatek/mtk-uart-apdma.c struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); c 137 drivers/dma/mediatek/mtk-uart-apdma.c kfree(c->desc); c 140 drivers/dma/mediatek/mtk-uart-apdma.c static void mtk_uart_apdma_start_tx(struct mtk_chan *c) c 143 drivers/dma/mediatek/mtk-uart-apdma.c to_mtk_uart_apdma_dev(c->vc.chan.device); c 144 drivers/dma/mediatek/mtk-uart-apdma.c struct mtk_uart_apdma_desc *d = c->desc; c 147 
drivers/dma/mediatek/mtk-uart-apdma.c vff_sz = c->cfg.dst_port_window_size; c 148 drivers/dma/mediatek/mtk-uart-apdma.c if (!mtk_uart_apdma_read(c, VFF_LEN)) { c 149 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_ADDR, d->addr); c 150 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_LEN, vff_sz); c 151 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_THRE, VFF_TX_THRE(vff_sz)); c 152 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_WPT, 0); c 153 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B); c 156 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B); c 159 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B); c 160 drivers/dma/mediatek/mtk-uart-apdma.c if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B) c 161 drivers/dma/mediatek/mtk-uart-apdma.c dev_err(c->vc.chan.device->dev, "Enable TX fail\n"); c 163 drivers/dma/mediatek/mtk-uart-apdma.c if (!mtk_uart_apdma_read(c, VFF_LEFT_SIZE)) { c 164 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B); c 168 drivers/dma/mediatek/mtk-uart-apdma.c wpt = mtk_uart_apdma_read(c, VFF_WPT); c 170 drivers/dma/mediatek/mtk-uart-apdma.c wpt += c->desc->avail_len; c 175 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_WPT, wpt); c 178 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B); c 179 drivers/dma/mediatek/mtk-uart-apdma.c if (!mtk_uart_apdma_read(c, VFF_FLUSH)) c 180 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B); c 183 drivers/dma/mediatek/mtk-uart-apdma.c static void mtk_uart_apdma_start_rx(struct mtk_chan *c) c 186 drivers/dma/mediatek/mtk-uart-apdma.c to_mtk_uart_apdma_dev(c->vc.chan.device); c 187 drivers/dma/mediatek/mtk-uart-apdma.c struct mtk_uart_apdma_desc *d = c->desc; c 190 drivers/dma/mediatek/mtk-uart-apdma.c vff_sz = c->cfg.src_port_window_size; c 191 drivers/dma/mediatek/mtk-uart-apdma.c if (!mtk_uart_apdma_read(c, VFF_LEN)) { c 192 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_ADDR, d->addr); c 193 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_LEN, vff_sz); c 194 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_THRE, VFF_RX_THRE(vff_sz)); c 195 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_RPT, 0); c 196 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B); c 199 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B); c 202 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_INT_EN, VFF_RX_INT_EN_B); c 203 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B); c 204 drivers/dma/mediatek/mtk-uart-apdma.c if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B) c 205 drivers/dma/mediatek/mtk-uart-apdma.c dev_err(c->vc.chan.device->dev, "Enable RX fail\n"); c 208 drivers/dma/mediatek/mtk-uart-apdma.c static void mtk_uart_apdma_tx_handler(struct mtk_chan *c) c 210 drivers/dma/mediatek/mtk-uart-apdma.c struct mtk_uart_apdma_desc *d = c->desc; c 212 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B); c 213 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B); c 214 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B); c 220 
drivers/dma/mediatek/mtk-uart-apdma.c static void mtk_uart_apdma_rx_handler(struct mtk_chan *c) c 222 drivers/dma/mediatek/mtk-uart-apdma.c struct mtk_uart_apdma_desc *d = c->desc; c 226 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B); c 228 drivers/dma/mediatek/mtk-uart-apdma.c if (!mtk_uart_apdma_read(c, VFF_VALID_SIZE)) c 231 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B); c 232 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B); c 234 drivers/dma/mediatek/mtk-uart-apdma.c len = c->cfg.src_port_window_size; c 235 drivers/dma/mediatek/mtk-uart-apdma.c rg = mtk_uart_apdma_read(c, VFF_RPT); c 236 drivers/dma/mediatek/mtk-uart-apdma.c wg = mtk_uart_apdma_read(c, VFF_WPT); c 246 drivers/dma/mediatek/mtk-uart-apdma.c c->rx_status = d->avail_len - cnt; c 247 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_RPT, wg); c 256 drivers/dma/mediatek/mtk-uart-apdma.c struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); c 259 drivers/dma/mediatek/mtk-uart-apdma.c spin_lock_irqsave(&c->vc.lock, flags); c 260 drivers/dma/mediatek/mtk-uart-apdma.c if (c->dir == DMA_DEV_TO_MEM) c 261 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_rx_handler(c); c 262 drivers/dma/mediatek/mtk-uart-apdma.c else if (c->dir == DMA_MEM_TO_DEV) c 263 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_tx_handler(c); c 264 drivers/dma/mediatek/mtk-uart-apdma.c spin_unlock_irqrestore(&c->vc.lock, flags); c 272 drivers/dma/mediatek/mtk-uart-apdma.c struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); c 282 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_ADDR, 0); c 283 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_THRE, 0); c 284 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_LEN, 0); c 285 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_RST, VFF_WARM_RST_B); c 287 drivers/dma/mediatek/mtk-uart-apdma.c ret = readx_poll_timeout(readl, c->base + VFF_EN, c 292 drivers/dma/mediatek/mtk-uart-apdma.c ret = request_irq(c->irq, mtk_uart_apdma_irq_handler, c 300 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B); c 308 drivers/dma/mediatek/mtk-uart-apdma.c struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); c 310 drivers/dma/mediatek/mtk-uart-apdma.c free_irq(c->irq, chan); c 312 drivers/dma/mediatek/mtk-uart-apdma.c tasklet_kill(&c->vc.task); c 314 drivers/dma/mediatek/mtk-uart-apdma.c vchan_free_chan_resources(&c->vc); c 323 drivers/dma/mediatek/mtk-uart-apdma.c struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); c 330 drivers/dma/mediatek/mtk-uart-apdma.c dma_set_residue(txstate, c->rx_status); c 344 drivers/dma/mediatek/mtk-uart-apdma.c struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); c 357 drivers/dma/mediatek/mtk-uart-apdma.c c->dir = dir; c 359 drivers/dma/mediatek/mtk-uart-apdma.c return vchan_tx_prep(&c->vc, &d->vd, tx_flags); c 364 drivers/dma/mediatek/mtk-uart-apdma.c struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); c 368 drivers/dma/mediatek/mtk-uart-apdma.c spin_lock_irqsave(&c->vc.lock, flags); c 369 drivers/dma/mediatek/mtk-uart-apdma.c if (vchan_issue_pending(&c->vc)) { c 370 drivers/dma/mediatek/mtk-uart-apdma.c vd = vchan_next_desc(&c->vc); c 371 drivers/dma/mediatek/mtk-uart-apdma.c c->desc = to_mtk_uart_apdma_desc(&vd->tx); c 373 drivers/dma/mediatek/mtk-uart-apdma.c if (c->dir == DMA_DEV_TO_MEM) c 374 drivers/dma/mediatek/mtk-uart-apdma.c 
mtk_uart_apdma_start_rx(c); c 375 drivers/dma/mediatek/mtk-uart-apdma.c else if (c->dir == DMA_MEM_TO_DEV) c 376 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_start_tx(c); c 379 drivers/dma/mediatek/mtk-uart-apdma.c spin_unlock_irqrestore(&c->vc.lock, flags); c 385 drivers/dma/mediatek/mtk-uart-apdma.c struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); c 387 drivers/dma/mediatek/mtk-uart-apdma.c memcpy(&c->cfg, config, sizeof(*config)); c 394 drivers/dma/mediatek/mtk-uart-apdma.c struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); c 400 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B); c 402 drivers/dma/mediatek/mtk-uart-apdma.c ret = readx_poll_timeout(readl, c->base + VFF_FLUSH, c 405 drivers/dma/mediatek/mtk-uart-apdma.c dev_err(c->vc.chan.device->dev, "flush: fail, status=0x%x\n", c 406 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_read(c, VFF_DEBUG_STATUS)); c 414 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_B); c 415 drivers/dma/mediatek/mtk-uart-apdma.c ret = readx_poll_timeout(readl, c->base + VFF_EN, c 418 drivers/dma/mediatek/mtk-uart-apdma.c dev_err(c->vc.chan.device->dev, "stop: fail, status=0x%x\n", c 419 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_read(c, VFF_DEBUG_STATUS)); c 421 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_CLR_B); c 422 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B); c 424 drivers/dma/mediatek/mtk-uart-apdma.c if (c->dir == DMA_DEV_TO_MEM) c 425 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B); c 426 drivers/dma/mediatek/mtk-uart-apdma.c else if (c->dir == DMA_MEM_TO_DEV) c 427 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B); c 429 drivers/dma/mediatek/mtk-uart-apdma.c synchronize_irq(c->irq); c 431 drivers/dma/mediatek/mtk-uart-apdma.c spin_lock_irqsave(&c->vc.lock, flags); c 432 drivers/dma/mediatek/mtk-uart-apdma.c vchan_get_all_descriptors(&c->vc, &head); c 433 drivers/dma/mediatek/mtk-uart-apdma.c vchan_dma_desc_free_list(&c->vc, &head); c 434 drivers/dma/mediatek/mtk-uart-apdma.c spin_unlock_irqrestore(&c->vc.lock, flags); c 441 drivers/dma/mediatek/mtk-uart-apdma.c struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); c 444 drivers/dma/mediatek/mtk-uart-apdma.c spin_lock_irqsave(&c->vc.lock, flags); c 446 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B); c 447 drivers/dma/mediatek/mtk-uart-apdma.c mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B); c 449 drivers/dma/mediatek/mtk-uart-apdma.c synchronize_irq(c->irq); c 451 drivers/dma/mediatek/mtk-uart-apdma.c spin_unlock_irqrestore(&c->vc.lock, flags); c 459 drivers/dma/mediatek/mtk-uart-apdma.c struct mtk_chan *c = list_first_entry(&mtkd->ddev.channels, c 462 drivers/dma/mediatek/mtk-uart-apdma.c list_del(&c->vc.chan.device_node); c 463 drivers/dma/mediatek/mtk-uart-apdma.c tasklet_kill(&c->vc.task); c 479 drivers/dma/mediatek/mtk-uart-apdma.c struct mtk_chan *c; c 529 drivers/dma/mediatek/mtk-uart-apdma.c c = devm_kzalloc(mtkd->ddev.dev, sizeof(*c), GFP_KERNEL); c 530 drivers/dma/mediatek/mtk-uart-apdma.c if (!c) { c 541 drivers/dma/mediatek/mtk-uart-apdma.c c->base = devm_ioremap_resource(&pdev->dev, res); c 542 drivers/dma/mediatek/mtk-uart-apdma.c if (IS_ERR(c->base)) { c 543 drivers/dma/mediatek/mtk-uart-apdma.c rc = PTR_ERR(c->base); c 546 drivers/dma/mediatek/mtk-uart-apdma.c c->vc.desc_free = 
mtk_uart_apdma_desc_free; c 547 drivers/dma/mediatek/mtk-uart-apdma.c vchan_init(&c->vc, &mtkd->ddev); c 552 drivers/dma/mediatek/mtk-uart-apdma.c c->irq = rc; c 1151 drivers/dma/mmp_pdma.c struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan); c 1156 drivers/dma/mmp_pdma.c c->drcmr = *(unsigned int *)param; c 170 drivers/dma/moxart-dma.c static inline struct moxart_chan *to_moxart_dma_chan(struct dma_chan *c) c 172 drivers/dma/moxart-dma.c return container_of(c, struct moxart_chan, vc.chan); c 233 drivers/dma/mpc512x_dma.c static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c) c 235 drivers/dma/mpc512x_dma.c return container_of(c, struct mpc_dma_chan, chan); c 239 drivers/dma/mpc512x_dma.c static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c) c 241 drivers/dma/mpc512x_dma.c struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c); c 243 drivers/dma/mpc512x_dma.c return container_of(mchan, struct mpc_dma, channels[c->chan_id]); c 240 drivers/dma/pl330.c #define BURST_TO_BYTE(c, ccr) ((c) * BRST_SIZE(ccr) * BRST_LEN(ccr)) c 1363 drivers/dma/pl330.c unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr); c 1369 drivers/dma/pl330.c c = bursts; c 1370 drivers/dma/pl330.c off += _loop(pl330, dry_run, &buf[off], &c, pxs); c 1371 drivers/dma/pl330.c bursts -= c; c 1224 drivers/dma/pxa_dma.c struct pxad_chan *c, *cn; c 1226 drivers/dma/pxa_dma.c list_for_each_entry_safe(c, cn, &dmadev->channels, c 1228 drivers/dma/pxa_dma.c list_del(&c->vc.chan.device_node); c 1229 drivers/dma/pxa_dma.c tasklet_kill(&c->vc.task); c 1315 drivers/dma/pxa_dma.c struct pxad_chan *c; c 1338 drivers/dma/pxa_dma.c c = devm_kzalloc(&op->dev, sizeof(*c), GFP_KERNEL); c 1339 drivers/dma/pxa_dma.c if (!c) c 1342 drivers/dma/pxa_dma.c c->drcmr = U32_MAX; c 1343 drivers/dma/pxa_dma.c c->prio = PXAD_PRIO_LOWEST; c 1344 drivers/dma/pxa_dma.c c->vc.desc_free = pxad_free_desc; c 1345 drivers/dma/pxa_dma.c vchan_init(&c->vc, &pdev->slave); c 1346 drivers/dma/pxa_dma.c init_waitqueue_head(&c->wq_state); c 1456 drivers/dma/pxa_dma.c struct pxad_chan *c = to_pxad_chan(chan); c 1462 drivers/dma/pxa_dma.c c->drcmr = p->drcmr; c 1463 drivers/dma/pxa_dma.c c->prio = p->prio; c 136 drivers/dma/sa11x0-dma.c static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c) c 138 drivers/dma/sa11x0-dma.c struct virt_dma_desc *vd = vchan_next_desc(&c->vc); c 159 drivers/dma/sa11x0-dma.c struct sa11x0_dma_chan *c) c 178 drivers/dma/sa11x0-dma.c struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c); c 223 drivers/dma/sa11x0-dma.c struct sa11x0_dma_chan *c) c 245 drivers/dma/sa11x0-dma.c sa11x0_dma_start_sg(p, c); c 252 drivers/dma/sa11x0-dma.c struct sa11x0_dma_chan *c; c 275 drivers/dma/sa11x0-dma.c c = p->vchan; c 276 drivers/dma/sa11x0-dma.c if (c) { c 279 drivers/dma/sa11x0-dma.c spin_lock_irqsave(&c->vc.lock, flags); c 287 drivers/dma/sa11x0-dma.c if (c->phy == p) { c 289 drivers/dma/sa11x0-dma.c sa11x0_dma_complete(p, c); c 291 drivers/dma/sa11x0-dma.c sa11x0_dma_complete(p, c); c 293 drivers/dma/sa11x0-dma.c spin_unlock_irqrestore(&c->vc.lock, flags); c 299 drivers/dma/sa11x0-dma.c static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c) c 301 drivers/dma/sa11x0-dma.c struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c); c 305 drivers/dma/sa11x0-dma.c struct sa11x0_dma_phy *p = c->phy; c 321 drivers/dma/sa11x0-dma.c sa11x0_dma_start_sg(p, c); c 322 drivers/dma/sa11x0-dma.c sa11x0_dma_start_sg(p, c); c 330 drivers/dma/sa11x0-dma.c struct sa11x0_dma_chan *c; c 335 drivers/dma/sa11x0-dma.c 
list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) { c 336 drivers/dma/sa11x0-dma.c spin_lock_irq(&c->vc.lock); c 337 drivers/dma/sa11x0-dma.c p = c->phy; c 339 drivers/dma/sa11x0-dma.c sa11x0_dma_start_txd(c); c 345 drivers/dma/sa11x0-dma.c c->phy = NULL; c 349 drivers/dma/sa11x0-dma.c spin_unlock_irq(&c->vc.lock); c 357 drivers/dma/sa11x0-dma.c c = list_first_entry(&d->chan_pending, c 359 drivers/dma/sa11x0-dma.c list_del_init(&c->node); c 364 drivers/dma/sa11x0-dma.c p->vchan = c; c 366 drivers/dma/sa11x0-dma.c dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc); c 374 drivers/dma/sa11x0-dma.c c = p->vchan; c 376 drivers/dma/sa11x0-dma.c spin_lock_irq(&c->vc.lock); c 377 drivers/dma/sa11x0-dma.c c->phy = p; c 379 drivers/dma/sa11x0-dma.c sa11x0_dma_start_txd(c); c 380 drivers/dma/sa11x0-dma.c spin_unlock_irq(&c->vc.lock); c 390 drivers/dma/sa11x0-dma.c struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); c 395 drivers/dma/sa11x0-dma.c list_del_init(&c->node); c 398 drivers/dma/sa11x0-dma.c vchan_free_chan_resources(&c->vc); c 420 drivers/dma/sa11x0-dma.c struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); c 427 drivers/dma/sa11x0-dma.c ret = dma_cookie_status(&c->vc.chan, cookie, state); c 432 drivers/dma/sa11x0-dma.c return c->status; c 434 drivers/dma/sa11x0-dma.c spin_lock_irqsave(&c->vc.lock, flags); c 435 drivers/dma/sa11x0-dma.c p = c->phy; c 441 drivers/dma/sa11x0-dma.c vd = vchan_find_desc(&c->vc, cookie); c 457 drivers/dma/sa11x0-dma.c ret = c->status; c 488 drivers/dma/sa11x0-dma.c spin_unlock_irqrestore(&c->vc.lock, flags); c 502 drivers/dma/sa11x0-dma.c struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); c 506 drivers/dma/sa11x0-dma.c spin_lock_irqsave(&c->vc.lock, flags); c 507 drivers/dma/sa11x0-dma.c if (vchan_issue_pending(&c->vc)) { c 508 drivers/dma/sa11x0-dma.c if (!c->phy) { c 510 drivers/dma/sa11x0-dma.c if (list_empty(&c->node)) { c 511 drivers/dma/sa11x0-dma.c list_add_tail(&c->node, &d->chan_pending); c 513 drivers/dma/sa11x0-dma.c dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc); c 518 drivers/dma/sa11x0-dma.c dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc); c 519 drivers/dma/sa11x0-dma.c spin_unlock_irqrestore(&c->vc.lock, flags); c 526 drivers/dma/sa11x0-dma.c struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); c 533 drivers/dma/sa11x0-dma.c if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) { c 535 drivers/dma/sa11x0-dma.c &c->vc, c->ddar, dir); c 551 drivers/dma/sa11x0-dma.c &c->vc, &addr); c 558 drivers/dma/sa11x0-dma.c dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc); c 594 drivers/dma/sa11x0-dma.c txd->ddar = c->ddar; c 599 drivers/dma/sa11x0-dma.c &c->vc, &txd->vd, txd->size, txd->sglen); c 601 drivers/dma/sa11x0-dma.c return vchan_tx_prep(&c->vc, &txd->vd, flags); c 608 drivers/dma/sa11x0-dma.c struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); c 613 drivers/dma/sa11x0-dma.c if (dir != (c->ddar & DDAR_RW ? 
DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) { c 615 drivers/dma/sa11x0-dma.c &c->vc, c->ddar, dir); c 628 drivers/dma/sa11x0-dma.c dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc); c 654 drivers/dma/sa11x0-dma.c txd->ddar = c->ddar; c 660 drivers/dma/sa11x0-dma.c return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); c 666 drivers/dma/sa11x0-dma.c struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); c 667 drivers/dma/sa11x0-dma.c u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW); c 692 drivers/dma/sa11x0-dma.c dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %pad width %u burst %u\n", c 693 drivers/dma/sa11x0-dma.c &c->vc, &addr, width, maxburst); c 695 drivers/dma/sa11x0-dma.c c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6; c 702 drivers/dma/sa11x0-dma.c struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); c 707 drivers/dma/sa11x0-dma.c dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); c 708 drivers/dma/sa11x0-dma.c spin_lock_irqsave(&c->vc.lock, flags); c 709 drivers/dma/sa11x0-dma.c if (c->status == DMA_IN_PROGRESS) { c 710 drivers/dma/sa11x0-dma.c c->status = DMA_PAUSED; c 712 drivers/dma/sa11x0-dma.c p = c->phy; c 717 drivers/dma/sa11x0-dma.c list_del_init(&c->node); c 721 drivers/dma/sa11x0-dma.c spin_unlock_irqrestore(&c->vc.lock, flags); c 728 drivers/dma/sa11x0-dma.c struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); c 733 drivers/dma/sa11x0-dma.c dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); c 734 drivers/dma/sa11x0-dma.c spin_lock_irqsave(&c->vc.lock, flags); c 735 drivers/dma/sa11x0-dma.c if (c->status == DMA_PAUSED) { c 736 drivers/dma/sa11x0-dma.c c->status = DMA_IN_PROGRESS; c 738 drivers/dma/sa11x0-dma.c p = c->phy; c 741 drivers/dma/sa11x0-dma.c } else if (!list_empty(&c->vc.desc_issued)) { c 743 drivers/dma/sa11x0-dma.c list_add_tail(&c->node, &d->chan_pending); c 747 drivers/dma/sa11x0-dma.c spin_unlock_irqrestore(&c->vc.lock, flags); c 754 drivers/dma/sa11x0-dma.c struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); c 760 drivers/dma/sa11x0-dma.c dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); c 762 drivers/dma/sa11x0-dma.c spin_lock_irqsave(&c->vc.lock, flags); c 763 drivers/dma/sa11x0-dma.c vchan_get_all_descriptors(&c->vc, &head); c 765 drivers/dma/sa11x0-dma.c p = c->phy; c 783 drivers/dma/sa11x0-dma.c c->phy = NULL; c 789 drivers/dma/sa11x0-dma.c spin_unlock_irqrestore(&c->vc.lock, flags); c 790 drivers/dma/sa11x0-dma.c vchan_dma_desc_free_list(&c->vc, &head); c 829 drivers/dma/sa11x0-dma.c struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); c 832 drivers/dma/sa11x0-dma.c return !strcmp(c->name, p); c 851 drivers/dma/sa11x0-dma.c struct sa11x0_dma_chan *c; c 853 drivers/dma/sa11x0-dma.c c = kzalloc(sizeof(*c), GFP_KERNEL); c 854 drivers/dma/sa11x0-dma.c if (!c) { c 859 drivers/dma/sa11x0-dma.c c->status = DMA_IN_PROGRESS; c 860 drivers/dma/sa11x0-dma.c c->ddar = chan_desc[i].ddar; c 861 drivers/dma/sa11x0-dma.c c->name = chan_desc[i].name; c 862 drivers/dma/sa11x0-dma.c INIT_LIST_HEAD(&c->node); c 864 drivers/dma/sa11x0-dma.c c->vc.desc_free = sa11x0_dma_free_desc; c 865 drivers/dma/sa11x0-dma.c vchan_init(&c->vc, dmadev); c 892 drivers/dma/sa11x0-dma.c struct sa11x0_dma_chan *c, *cn; c 894 drivers/dma/sa11x0-dma.c list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) { c 895 drivers/dma/sa11x0-dma.c list_del(&c->vc.chan.device_node); c 896 drivers/dma/sa11x0-dma.c tasklet_kill(&c->vc.task); c 897 drivers/dma/sa11x0-dma.c kfree(c); c 186 drivers/dma/sh/rcar-dmac.c 
#define to_rcar_dmac_chan(c) container_of(c, struct rcar_dmac_chan, chan) c 36 drivers/dma/sh/shdma-base.c #define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan) c 72 drivers/dma/sh/shdma-base.c struct shdma_desc *chunk, *c, *desc = c 86 drivers/dma/sh/shdma-base.c list_for_each_entry_safe(chunk, c, desc->node.prev, node) { c 17 drivers/dma/sh/shdma-of.c #define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan) c 87 drivers/dma/sh/usb-dmac.c #define to_usb_dmac_chan(c) container_of(c, struct usb_dmac_chan, vc.chan) c 150 drivers/dma/sirf-dma.c struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c) c 152 drivers/dma/sirf-dma.c return container_of(c, struct sirfsoc_dma_chan, chan); c 156 drivers/dma/sirf-dma.c static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c) c 158 drivers/dma/sirf-dma.c struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c); c 159 drivers/dma/sirf-dma.c return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]); c 221 drivers/dma/sprd-dma.c static inline struct sprd_dma_chn *to_sprd_dma_chan(struct dma_chan *c) c 223 drivers/dma/sprd-dma.c return container_of(c, struct sprd_dma_chn, vc.chan); c 226 drivers/dma/sprd-dma.c static inline struct sprd_dma_dev *to_sprd_dma_dev(struct dma_chan *c) c 228 drivers/dma/sprd-dma.c struct sprd_dma_chn *schan = to_sprd_dma_chan(c); c 230 drivers/dma/sprd-dma.c return container_of(schan, struct sprd_dma_dev, channels[c->chan_id]); c 1203 drivers/dma/sprd-dma.c struct sprd_dma_chn *c, *cn; c 1214 drivers/dma/sprd-dma.c list_for_each_entry_safe(c, cn, &sdev->dma_dev.channels, c 1216 drivers/dma/sprd-dma.c list_del(&c->vc.chan.device_node); c 1217 drivers/dma/sprd-dma.c tasklet_kill(&c->vc.task); c 21 drivers/dma/st_fdma.c static inline struct st_fdma_chan *to_st_fdma_chan(struct dma_chan *c) c 23 drivers/dma/st_fdma.c return container_of(c, struct st_fdma_chan, vchan.chan); c 221 drivers/dma/stm32-dma.c static struct stm32_dma_chan *to_stm32_dma_chan(struct dma_chan *c) c 223 drivers/dma/stm32-dma.c return container_of(c, struct stm32_dma_chan, vchan.chan); c 366 drivers/dma/stm32-dma.c static int stm32_dma_slave_config(struct dma_chan *c, c 369 drivers/dma/stm32-dma.c struct stm32_dma_chan *chan = to_stm32_dma_chan(c); c 483 drivers/dma/stm32-dma.c static int stm32_dma_terminate_all(struct dma_chan *c) c 485 drivers/dma/stm32-dma.c struct stm32_dma_chan *chan = to_stm32_dma_chan(c); c 503 drivers/dma/stm32-dma.c static void stm32_dma_synchronize(struct dma_chan *c) c 505 drivers/dma/stm32-dma.c struct stm32_dma_chan *chan = to_stm32_dma_chan(c); c 678 drivers/dma/stm32-dma.c static void stm32_dma_issue_pending(struct dma_chan *c) c 680 drivers/dma/stm32-dma.c struct stm32_dma_chan *chan = to_stm32_dma_chan(c); c 829 drivers/dma/stm32-dma.c struct dma_chan *c, struct scatterlist *sgl, c 833 drivers/dma/stm32-dma.c struct stm32_dma_chan *chan = to_stm32_dma_chan(c); c 894 drivers/dma/stm32-dma.c struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len, c 898 drivers/dma/stm32-dma.c struct stm32_dma_chan *chan = to_stm32_dma_chan(c); c 975 drivers/dma/stm32-dma.c struct dma_chan *c, dma_addr_t dest, c 978 drivers/dma/stm32-dma.c struct stm32_dma_chan *chan = to_stm32_dma_chan(c); c 1140 drivers/dma/stm32-dma.c static enum dma_status stm32_dma_tx_status(struct dma_chan *c, c 1144 drivers/dma/stm32-dma.c struct stm32_dma_chan *chan = to_stm32_dma_chan(c); c 1150 drivers/dma/stm32-dma.c status = dma_cookie_status(c, cookie, state); c 1169 
drivers/dma/stm32-dma.c static int stm32_dma_alloc_chan_resources(struct dma_chan *c) c 1171 drivers/dma/stm32-dma.c struct stm32_dma_chan *chan = to_stm32_dma_chan(c); c 1188 drivers/dma/stm32-dma.c static void stm32_dma_free_chan_resources(struct dma_chan *c) c 1190 drivers/dma/stm32-dma.c struct stm32_dma_chan *chan = to_stm32_dma_chan(c); c 1205 drivers/dma/stm32-dma.c vchan_free_chan_resources(to_virt_chan(c)); c 1234 drivers/dma/stm32-dma.c struct dma_chan *c; c 1254 drivers/dma/stm32-dma.c c = dma_get_slave_channel(&chan->vchan.chan); c 1255 drivers/dma/stm32-dma.c if (!c) { c 1262 drivers/dma/stm32-dma.c return c; c 291 drivers/dma/stm32-mdma.c static struct stm32_mdma_chan *to_stm32_mdma_chan(struct dma_chan *c) c 293 drivers/dma/stm32-mdma.c return container_of(c, struct stm32_mdma_chan, vchan.chan); c 781 drivers/dma/stm32-mdma.c stm32_mdma_prep_slave_sg(struct dma_chan *c, struct scatterlist *sgl, c 785 drivers/dma/stm32-mdma.c struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); c 821 drivers/dma/stm32-mdma.c stm32_mdma_prep_dma_cyclic(struct dma_chan *c, dma_addr_t buf_addr, c 826 drivers/dma/stm32-mdma.c struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); c 912 drivers/dma/stm32-mdma.c stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src, c 915 drivers/dma/stm32-mdma.c struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); c 1166 drivers/dma/stm32-mdma.c static void stm32_mdma_issue_pending(struct dma_chan *c) c 1168 drivers/dma/stm32-mdma.c struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); c 1185 drivers/dma/stm32-mdma.c static int stm32_mdma_pause(struct dma_chan *c) c 1187 drivers/dma/stm32-mdma.c struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); c 1201 drivers/dma/stm32-mdma.c static int stm32_mdma_resume(struct dma_chan *c) c 1203 drivers/dma/stm32-mdma.c struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); c 1238 drivers/dma/stm32-mdma.c static int stm32_mdma_terminate_all(struct dma_chan *c) c 1240 drivers/dma/stm32-mdma.c struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); c 1257 drivers/dma/stm32-mdma.c static void stm32_mdma_synchronize(struct dma_chan *c) c 1259 drivers/dma/stm32-mdma.c struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); c 1264 drivers/dma/stm32-mdma.c static int stm32_mdma_slave_config(struct dma_chan *c, c 1267 drivers/dma/stm32-mdma.c struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); c 1302 drivers/dma/stm32-mdma.c static enum dma_status stm32_mdma_tx_status(struct dma_chan *c, c 1306 drivers/dma/stm32-mdma.c struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); c 1312 drivers/dma/stm32-mdma.c status = dma_cookie_status(c, cookie, state); c 1433 drivers/dma/stm32-mdma.c static int stm32_mdma_alloc_chan_resources(struct dma_chan *c) c 1435 drivers/dma/stm32-mdma.c struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); c 1439 drivers/dma/stm32-mdma.c chan->desc_pool = dmam_pool_create(dev_name(&c->dev->device), c 1440 drivers/dma/stm32-mdma.c c->device->dev, c 1460 drivers/dma/stm32-mdma.c static void stm32_mdma_free_chan_resources(struct dma_chan *c) c 1462 drivers/dma/stm32-mdma.c struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); c 1476 drivers/dma/stm32-mdma.c vchan_free_chan_resources(to_virt_chan(c)); c 1486 drivers/dma/stm32-mdma.c struct dma_chan *c; c 1510 drivers/dma/stm32-mdma.c c = dma_get_any_slave_channel(&dmadev->ddev); c 1511 drivers/dma/stm32-mdma.c if (!c) { c 1516 drivers/dma/stm32-mdma.c chan = to_stm32_mdma_chan(c); c 1519 drivers/dma/stm32-mdma.c return c; c 249 
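Nearly every driver indexed here converts the generic struct dma_chan * handed in by the dmaengine core back into its private channel type with a one-line container_of() wrapper (to_stm32_dma_chan(), to_stm32_mdma_chan(), to_sprd_dma_chan(), to_omap_dma_chan(), and so on). The sketch below shows that pattern for a made-up "foo" driver built on the drivers/dma virt-dma helpers; foo_chan and its ccr field are illustrative, not taken from any of the drivers listed.

#include <linux/dmaengine.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include "virt-dma.h"		/* drivers/dma/virt-dma.h: struct virt_dma_chan */

/* Hypothetical private channel: the virt-dma channel (which embeds the
 * generic struct dma_chan) plus whatever per-channel state is needed. */
struct foo_chan {
	struct virt_dma_chan vc;
	u32 ccr;			/* example of driver-private state */
};

/*
 * The core only ever sees &foo_chan.vc.chan, so container_of() walks
 * back out to the enclosing foo_chan - exactly what the to_*_chan()
 * helpers in the listing do.
 */
static inline struct foo_chan *to_foo_chan(struct dma_chan *c)
{
	return container_of(c, struct foo_chan, vc.chan);
}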
drivers/dma/ti/cppi41.c static struct cppi41_channel *to_cpp41_chan(struct dma_chan *c) c 251 drivers/dma/ti/cppi41.c return container_of(c, struct cppi41_channel, chan); c 256 drivers/dma/ti/cppi41.c struct cppi41_channel *c; c 269 drivers/dma/ti/cppi41.c c = cdd->chan_busy[desc_num]; c 275 drivers/dma/ti/cppi41.c return c; c 307 drivers/dma/ti/cppi41.c struct cppi41_channel *c; c 342 drivers/dma/ti/cppi41.c c = desc_to_chan(cdd, desc); c 343 drivers/dma/ti/cppi41.c if (WARN_ON(!c)) { c 349 drivers/dma/ti/cppi41.c if (c->desc->pd2 & PD2_ZERO_LENGTH) c 352 drivers/dma/ti/cppi41.c len = pd_trans_len(c->desc->pd0); c 354 drivers/dma/ti/cppi41.c c->residue = pd_trans_len(c->desc->pd6) - len; c 355 drivers/dma/ti/cppi41.c dma_cookie_complete(&c->txd); c 356 drivers/dma/ti/cppi41.c dmaengine_desc_get_callback_invoke(&c->txd, NULL); c 373 drivers/dma/ti/cppi41.c struct cppi41_channel *c = to_cpp41_chan(chan); c 374 drivers/dma/ti/cppi41.c struct cppi41_dd *cdd = c->cdd; c 387 drivers/dma/ti/cppi41.c dma_async_tx_descriptor_init(&c->txd, chan); c 388 drivers/dma/ti/cppi41.c c->txd.tx_submit = cppi41_tx_submit; c 390 drivers/dma/ti/cppi41.c if (!c->is_tx) c 391 drivers/dma/ti/cppi41.c cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0); c 401 drivers/dma/ti/cppi41.c struct cppi41_channel *c = to_cpp41_chan(chan); c 402 drivers/dma/ti/cppi41.c struct cppi41_dd *cdd = c->cdd; c 421 drivers/dma/ti/cppi41.c struct cppi41_channel *c = to_cpp41_chan(chan); c 426 drivers/dma/ti/cppi41.c dma_set_residue(txstate, c->residue); c 431 drivers/dma/ti/cppi41.c static void push_desc_queue(struct cppi41_channel *c) c 433 drivers/dma/ti/cppi41.c struct cppi41_dd *cdd = c->cdd; c 438 drivers/dma/ti/cppi41.c c->residue = 0; c 441 drivers/dma/ti/cppi41.c if (!c->is_tx) { c 444 drivers/dma/ti/cppi41.c reg |= c->q_comp_num; c 447 drivers/dma/ti/cppi41.c cppi_writel(reg, c->gcr_reg); c 465 drivers/dma/ti/cppi41.c desc_phys = lower_32_bits(c->desc_phys); c 468 drivers/dma/ti/cppi41.c cdd->chan_busy[desc_num] = c; c 472 drivers/dma/ti/cppi41.c cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num)); c 482 drivers/dma/ti/cppi41.c struct cppi41_channel *c, *_c; c 484 drivers/dma/ti/cppi41.c list_for_each_entry_safe(c, _c, &cdd->pending, node) { c 485 drivers/dma/ti/cppi41.c push_desc_queue(c); c 486 drivers/dma/ti/cppi41.c list_del(&c->node); c 492 drivers/dma/ti/cppi41.c struct cppi41_channel *c = to_cpp41_chan(chan); c 493 drivers/dma/ti/cppi41.c struct cppi41_dd *cdd = c->cdd; c 507 drivers/dma/ti/cppi41.c list_add_tail(&c->node, &cdd->pending); c 526 drivers/dma/ti/cppi41.c static u32 get_host_pd1(struct cppi41_channel *c) c 535 drivers/dma/ti/cppi41.c static u32 get_host_pd2(struct cppi41_channel *c) c 540 drivers/dma/ti/cppi41.c reg |= c->q_comp_num; c 588 drivers/dma/ti/cppi41.c struct cppi41_channel *c = to_cpp41_chan(chan); c 590 drivers/dma/ti/cppi41.c struct cppi41_dd *cdd = c->cdd; c 606 drivers/dma/ti/cppi41.c d = c->desc; c 616 drivers/dma/ti/cppi41.c d->pd1 = get_host_pd1(c); c 617 drivers/dma/ti/cppi41.c d->pd2 = get_host_pd2(c); c 627 drivers/dma/ti/cppi41.c txd = &c->txd; c 641 drivers/dma/ti/cppi41.c static int cppi41_tear_down_chan(struct cppi41_channel *c) c 644 drivers/dma/ti/cppi41.c struct cppi41_dd *cdd = c->cdd; c 656 drivers/dma/ti/cppi41.c if (!c->td_queued) { c 666 drivers/dma/ti/cppi41.c if (!c->is_tx) { c 672 drivers/dma/ti/cppi41.c cppi_writel(reg, c->gcr_reg); c 673 drivers/dma/ti/cppi41.c c->td_queued = 1; c 674 drivers/dma/ti/cppi41.c c->td_retry = 500; c 677 drivers/dma/ti/cppi41.c if 
(!c->td_seen || !c->td_desc_seen) { c 680 drivers/dma/ti/cppi41.c if (!desc_phys && c->is_tx) c 681 drivers/dma/ti/cppi41.c desc_phys = cppi41_pop_desc(cdd, c->q_comp_num); c 683 drivers/dma/ti/cppi41.c if (desc_phys == c->desc_phys) { c 684 drivers/dma/ti/cppi41.c c->td_desc_seen = 1; c 692 drivers/dma/ti/cppi41.c WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX)); c 693 drivers/dma/ti/cppi41.c WARN_ON((pd0 & 0x1f) != c->port_num); c 694 drivers/dma/ti/cppi41.c c->td_seen = 1; c 699 drivers/dma/ti/cppi41.c c->td_retry--; c 710 drivers/dma/ti/cppi41.c if (!c->td_seen && c->td_retry) { c 714 drivers/dma/ti/cppi41.c WARN_ON(!c->td_retry); c 716 drivers/dma/ti/cppi41.c if (!c->td_desc_seen) { c 717 drivers/dma/ti/cppi41.c desc_phys = cppi41_pop_desc(cdd, c->q_num); c 719 drivers/dma/ti/cppi41.c desc_phys = cppi41_pop_desc(cdd, c->q_comp_num); c 723 drivers/dma/ti/cppi41.c c->td_queued = 0; c 724 drivers/dma/ti/cppi41.c c->td_seen = 0; c 725 drivers/dma/ti/cppi41.c c->td_desc_seen = 0; c 726 drivers/dma/ti/cppi41.c cppi_writel(0, c->gcr_reg); c 730 drivers/dma/ti/cppi41.c dma_cookie_complete(&c->txd); c 731 drivers/dma/ti/cppi41.c dmaengine_desc_get_callback_invoke(&c->txd, &abort_result); c 738 drivers/dma/ti/cppi41.c struct cppi41_channel *c = to_cpp41_chan(chan); c 739 drivers/dma/ti/cppi41.c struct cppi41_dd *cdd = c->cdd; c 744 drivers/dma/ti/cppi41.c desc_phys = lower_32_bits(c->desc_phys); c 755 drivers/dma/ti/cppi41.c if (cc != c) c 763 drivers/dma/ti/cppi41.c ret = cppi41_tear_down_chan(c); c 1192 drivers/dma/ti/cppi41.c struct cppi41_channel *c; c 1198 drivers/dma/ti/cppi41.c list_for_each_entry(c, &cdd->ddev.channels, chan.device_node) c 1199 drivers/dma/ti/cppi41.c if (!c->is_tx) c 1200 drivers/dma/ti/cppi41.c cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0); c 746 drivers/dma/ti/edma.c static inline struct edma_chan *to_edma_chan(struct dma_chan *c) c 748 drivers/dma/ti/edma.c return container_of(c, struct edma_chan, vchan.chan); c 216 drivers/dma/ti/omap-dma.c static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c) c 218 drivers/dma/ti/omap-dma.c return container_of(c, struct omap_chan, vc.chan); c 347 drivers/dma/ti/omap-dma.c static void omap_dma_chan_write(struct omap_chan *c, unsigned reg, unsigned val) c 349 drivers/dma/ti/omap-dma.c const struct omap_dma_reg *r = c->reg_map + reg; c 351 drivers/dma/ti/omap-dma.c omap_dma_write(val, r->type, c->channel_base + r->offset); c 354 drivers/dma/ti/omap-dma.c static unsigned omap_dma_chan_read(struct omap_chan *c, unsigned reg) c 356 drivers/dma/ti/omap-dma.c const struct omap_dma_reg *r = c->reg_map + reg; c 358 drivers/dma/ti/omap-dma.c return omap_dma_read(r->type, c->channel_base + r->offset); c 361 drivers/dma/ti/omap-dma.c static void omap_dma_clear_csr(struct omap_chan *c) c 364 drivers/dma/ti/omap-dma.c omap_dma_chan_read(c, CSR); c 366 drivers/dma/ti/omap-dma.c omap_dma_chan_write(c, CSR, ~0); c 369 drivers/dma/ti/omap-dma.c static unsigned omap_dma_get_csr(struct omap_chan *c) c 371 drivers/dma/ti/omap-dma.c unsigned val = omap_dma_chan_read(c, CSR); c 374 drivers/dma/ti/omap-dma.c omap_dma_chan_write(c, CSR, val); c 379 drivers/dma/ti/omap-dma.c static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c, c 382 drivers/dma/ti/omap-dma.c c->channel_base = od->base + od->plat->channel_stride * lch; c 384 drivers/dma/ti/omap-dma.c od->lch_map[lch] = c; c 387 drivers/dma/ti/omap-dma.c static void omap_dma_start(struct omap_chan *c, struct omap_desc *d) c 389 drivers/dma/ti/omap-dma.c struct omap_dmadev *od = 
to_omap_dma_dev(c->vc.chan.device); c 393 drivers/dma/ti/omap-dma.c omap_dma_chan_write(c, CPC, 0); c 395 drivers/dma/ti/omap-dma.c omap_dma_chan_write(c, CDAC, 0); c 397 drivers/dma/ti/omap-dma.c omap_dma_clear_csr(c); c 406 drivers/dma/ti/omap-dma.c omap_dma_chan_write(c, CDP, cdp); c 408 drivers/dma/ti/omap-dma.c omap_dma_chan_write(c, CNDP, d->sg[0].t2_desc_paddr); c 409 drivers/dma/ti/omap-dma.c omap_dma_chan_write(c, CCDN, 0); c 410 drivers/dma/ti/omap-dma.c omap_dma_chan_write(c, CCFN, 0xffff); c 411 drivers/dma/ti/omap-dma.c omap_dma_chan_write(c, CCEN, 0xffffff); c 415 drivers/dma/ti/omap-dma.c omap_dma_chan_write(c, CDP, 0); c 419 drivers/dma/ti/omap-dma.c omap_dma_chan_write(c, CICR, cicr); c 422 drivers/dma/ti/omap-dma.c omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE); c 424 drivers/dma/ti/omap-dma.c c->running = true; c 427 drivers/dma/ti/omap-dma.c static void omap_dma_drain_chan(struct omap_chan *c) c 434 drivers/dma/ti/omap-dma.c val = omap_dma_chan_read(c, CCR); c 445 drivers/dma/ti/omap-dma.c dev_err(c->vc.chan.device->dev, c 447 drivers/dma/ti/omap-dma.c c->dma_ch); c 450 drivers/dma/ti/omap-dma.c static int omap_dma_stop(struct omap_chan *c) c 452 drivers/dma/ti/omap-dma.c struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device); c 456 drivers/dma/ti/omap-dma.c omap_dma_chan_write(c, CICR, 0); c 458 drivers/dma/ti/omap-dma.c omap_dma_clear_csr(c); c 460 drivers/dma/ti/omap-dma.c val = omap_dma_chan_read(c, CCR); c 469 drivers/dma/ti/omap-dma.c val = omap_dma_chan_read(c, CCR); c 471 drivers/dma/ti/omap-dma.c omap_dma_chan_write(c, CCR, val); c 473 drivers/dma/ti/omap-dma.c if (!(c->ccr & CCR_BUFFERING_DISABLE)) c 474 drivers/dma/ti/omap-dma.c omap_dma_drain_chan(c); c 482 drivers/dma/ti/omap-dma.c omap_dma_chan_write(c, CCR, val); c 484 drivers/dma/ti/omap-dma.c if (!(c->ccr & CCR_BUFFERING_DISABLE)) c 485 drivers/dma/ti/omap-dma.c omap_dma_drain_chan(c); c 490 drivers/dma/ti/omap-dma.c if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) { c 491 drivers/dma/ti/omap-dma.c val = omap_dma_chan_read(c, CLNK_CTRL); c 498 drivers/dma/ti/omap-dma.c omap_dma_chan_write(c, CLNK_CTRL, val); c 500 drivers/dma/ti/omap-dma.c c->running = false; c 504 drivers/dma/ti/omap-dma.c static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d) c 506 drivers/dma/ti/omap-dma.c struct omap_sg *sg = d->sg + c->sgidx; c 519 drivers/dma/ti/omap-dma.c omap_dma_chan_write(c, cxsa, sg->addr); c 520 drivers/dma/ti/omap-dma.c omap_dma_chan_write(c, cxei, sg->ei); c 521 drivers/dma/ti/omap-dma.c omap_dma_chan_write(c, cxfi, sg->fi); c 522 drivers/dma/ti/omap-dma.c omap_dma_chan_write(c, CEN, sg->en); c 523 drivers/dma/ti/omap-dma.c omap_dma_chan_write(c, CFN, sg->fn); c 525 drivers/dma/ti/omap-dma.c omap_dma_start(c, d); c 526 drivers/dma/ti/omap-dma.c c->sgidx++; c 529 drivers/dma/ti/omap-dma.c static void omap_dma_start_desc(struct omap_chan *c) c 531 drivers/dma/ti/omap-dma.c struct virt_dma_desc *vd = vchan_next_desc(&c->vc); c 536 drivers/dma/ti/omap-dma.c c->desc = NULL; c 542 drivers/dma/ti/omap-dma.c c->desc = d = to_omap_dma_desc(&vd->tx); c 543 drivers/dma/ti/omap-dma.c c->sgidx = 0; c 552 drivers/dma/ti/omap-dma.c omap_dma_chan_write(c, CCR, d->ccr); c 554 drivers/dma/ti/omap-dma.c omap_dma_chan_write(c, CCR2, d->ccr >> 16); c 566 drivers/dma/ti/omap-dma.c omap_dma_chan_write(c, cxsa, d->dev_addr); c 567 drivers/dma/ti/omap-dma.c omap_dma_chan_write(c, cxei, d->ei); c 568 drivers/dma/ti/omap-dma.c omap_dma_chan_write(c, cxfi, d->fi); c 569 drivers/dma/ti/omap-dma.c 
omap_dma_chan_write(c, CSDP, d->csdp); c 570 drivers/dma/ti/omap-dma.c omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl); c 572 drivers/dma/ti/omap-dma.c omap_dma_start_sg(c, d); c 577 drivers/dma/ti/omap-dma.c struct omap_chan *c = data; c 581 drivers/dma/ti/omap-dma.c spin_lock_irqsave(&c->vc.lock, flags); c 582 drivers/dma/ti/omap-dma.c d = c->desc; c 584 drivers/dma/ti/omap-dma.c if (c->cyclic) { c 586 drivers/dma/ti/omap-dma.c } else if (d->using_ll || c->sgidx == d->sglen) { c 587 drivers/dma/ti/omap-dma.c omap_dma_start_desc(c); c 590 drivers/dma/ti/omap-dma.c omap_dma_start_sg(c, d); c 593 drivers/dma/ti/omap-dma.c spin_unlock_irqrestore(&c->vc.lock, flags); c 612 drivers/dma/ti/omap-dma.c struct omap_chan *c; c 618 drivers/dma/ti/omap-dma.c c = od->lch_map[channel]; c 619 drivers/dma/ti/omap-dma.c if (c == NULL) { c 625 drivers/dma/ti/omap-dma.c csr = omap_dma_get_csr(c); c 628 drivers/dma/ti/omap-dma.c omap_dma_callback(channel, csr, c); c 639 drivers/dma/ti/omap-dma.c struct omap_chan *c = to_omap_dma_chan(chan); c 644 drivers/dma/ti/omap-dma.c ret = omap_request_dma(c->dma_sig, "DMA engine", c 645 drivers/dma/ti/omap-dma.c omap_dma_callback, c, &c->dma_ch); c 647 drivers/dma/ti/omap-dma.c ret = omap_request_dma(c->dma_sig, "DMA engine", NULL, NULL, c 648 drivers/dma/ti/omap-dma.c &c->dma_ch); c 651 drivers/dma/ti/omap-dma.c dev_dbg(dev, "allocating channel %u for %u\n", c->dma_ch, c->dma_sig); c 654 drivers/dma/ti/omap-dma.c omap_dma_assign(od, c, c->dma_ch); c 660 drivers/dma/ti/omap-dma.c val = BIT(c->dma_ch); c 666 drivers/dma/ti/omap-dma.c val &= ~BIT(c->dma_ch); c 674 drivers/dma/ti/omap-dma.c c->ccr = CCR_OMAP31_DISABLE; c 676 drivers/dma/ti/omap-dma.c c->ccr |= c->dma_ch + 1; c 678 drivers/dma/ti/omap-dma.c c->ccr = c->dma_sig & 0x1f; c 681 drivers/dma/ti/omap-dma.c c->ccr = c->dma_sig & 0x1f; c 682 drivers/dma/ti/omap-dma.c c->ccr |= (c->dma_sig & ~0x1f) << 14; c 685 drivers/dma/ti/omap-dma.c c->ccr |= CCR_BUFFERING_DISABLE; c 693 drivers/dma/ti/omap-dma.c struct omap_chan *c = to_omap_dma_chan(chan); c 697 drivers/dma/ti/omap-dma.c od->irq_enable_mask &= ~BIT(c->dma_ch); c 702 drivers/dma/ti/omap-dma.c c->channel_base = NULL; c 703 drivers/dma/ti/omap-dma.c od->lch_map[c->dma_ch] = NULL; c 704 drivers/dma/ti/omap-dma.c vchan_free_chan_resources(&c->vc); c 705 drivers/dma/ti/omap-dma.c omap_free_dma(c->dma_ch); c 707 drivers/dma/ti/omap-dma.c dev_dbg(od->ddev.dev, "freeing channel %u used for %u\n", c->dma_ch, c 708 drivers/dma/ti/omap-dma.c c->dma_sig); c 709 drivers/dma/ti/omap-dma.c c->dma_sig = 0; c 749 drivers/dma/ti/omap-dma.c static uint32_t omap_dma_chan_read_3_3(struct omap_chan *c, unsigned reg) c 751 drivers/dma/ti/omap-dma.c struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device); c 754 drivers/dma/ti/omap-dma.c val = omap_dma_chan_read(c, reg); c 756 drivers/dma/ti/omap-dma.c val = omap_dma_chan_read(c, reg); c 761 drivers/dma/ti/omap-dma.c static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c) c 763 drivers/dma/ti/omap-dma.c struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device); c 767 drivers/dma/ti/omap-dma.c addr = omap_dma_chan_read(c, CPC); c 769 drivers/dma/ti/omap-dma.c addr = omap_dma_chan_read_3_3(c, CSAC); c 770 drivers/dma/ti/omap-dma.c cdac = omap_dma_chan_read_3_3(c, CDAC); c 778 drivers/dma/ti/omap-dma.c addr = omap_dma_chan_read(c, CSSA); c 782 drivers/dma/ti/omap-dma.c addr |= omap_dma_chan_read(c, CSSA) & 0xffff0000; c 787 drivers/dma/ti/omap-dma.c static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c) c 789 
drivers/dma/ti/omap-dma.c struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device); c 793 drivers/dma/ti/omap-dma.c addr = omap_dma_chan_read(c, CPC); c 795 drivers/dma/ti/omap-dma.c addr = omap_dma_chan_read_3_3(c, CDAC); c 804 drivers/dma/ti/omap-dma.c addr = omap_dma_chan_read(c, CDSA); c 808 drivers/dma/ti/omap-dma.c addr |= omap_dma_chan_read(c, CDSA) & 0xffff0000; c 816 drivers/dma/ti/omap-dma.c struct omap_chan *c = to_omap_dma_chan(chan); c 825 drivers/dma/ti/omap-dma.c spin_lock_irqsave(&c->vc.lock, flags); c 826 drivers/dma/ti/omap-dma.c if (c->desc && c->desc->vd.tx.cookie == cookie) c 827 drivers/dma/ti/omap-dma.c d = c->desc; c 836 drivers/dma/ti/omap-dma.c pos = omap_dma_get_src_pos(c); c 838 drivers/dma/ti/omap-dma.c pos = omap_dma_get_dst_pos(c); c 844 drivers/dma/ti/omap-dma.c struct virt_dma_desc *vd = vchan_find_desc(&c->vc, cookie); c 854 drivers/dma/ti/omap-dma.c if (ret == DMA_IN_PROGRESS && c->paused) { c 856 drivers/dma/ti/omap-dma.c } else if (d && d->polled && c->running) { c 857 drivers/dma/ti/omap-dma.c uint32_t ccr = omap_dma_chan_read(c, CCR); c 864 drivers/dma/ti/omap-dma.c omap_dma_start_desc(c); c 869 drivers/dma/ti/omap-dma.c spin_unlock_irqrestore(&c->vc.lock, flags); c 876 drivers/dma/ti/omap-dma.c struct omap_chan *c = to_omap_dma_chan(chan); c 879 drivers/dma/ti/omap-dma.c spin_lock_irqsave(&c->vc.lock, flags); c 880 drivers/dma/ti/omap-dma.c if (vchan_issue_pending(&c->vc) && !c->desc) c 881 drivers/dma/ti/omap-dma.c omap_dma_start_desc(c); c 882 drivers/dma/ti/omap-dma.c spin_unlock_irqrestore(&c->vc.lock, flags); c 890 drivers/dma/ti/omap-dma.c struct omap_chan *c = to_omap_dma_chan(chan); c 901 drivers/dma/ti/omap-dma.c dev_addr = c->cfg.src_addr; c 902 drivers/dma/ti/omap-dma.c dev_width = c->cfg.src_addr_width; c 903 drivers/dma/ti/omap-dma.c burst = c->cfg.src_maxburst; c 904 drivers/dma/ti/omap-dma.c port_window = c->cfg.src_port_window_size; c 906 drivers/dma/ti/omap-dma.c dev_addr = c->cfg.dst_addr; c 907 drivers/dma/ti/omap-dma.c dev_width = c->cfg.dst_addr_width; c 908 drivers/dma/ti/omap-dma.c burst = c->cfg.dst_maxburst; c 909 drivers/dma/ti/omap-dma.c port_window = c->cfg.dst_port_window_size; c 954 drivers/dma/ti/omap-dma.c d->ccr = c->ccr | CCR_SYNC_FRAME; c 1010 drivers/dma/ti/omap-dma.c d->clnk_ctrl = c->dma_ch; c 1064 drivers/dma/ti/omap-dma.c return vchan_tx_prep(&c->vc, &d->vd, tx_flags); c 1072 drivers/dma/ti/omap-dma.c struct omap_chan *c = to_omap_dma_chan(chan); c 1080 drivers/dma/ti/omap-dma.c dev_addr = c->cfg.src_addr; c 1081 drivers/dma/ti/omap-dma.c dev_width = c->cfg.src_addr_width; c 1082 drivers/dma/ti/omap-dma.c burst = c->cfg.src_maxburst; c 1084 drivers/dma/ti/omap-dma.c dev_addr = c->cfg.dst_addr; c 1085 drivers/dma/ti/omap-dma.c dev_width = c->cfg.dst_addr_width; c 1086 drivers/dma/ti/omap-dma.c burst = c->cfg.dst_maxburst; c 1121 drivers/dma/ti/omap-dma.c d->ccr = c->ccr; c 1161 drivers/dma/ti/omap-dma.c d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK; c 1163 drivers/dma/ti/omap-dma.c c->cyclic = true; c 1165 drivers/dma/ti/omap-dma.c return vchan_tx_prep(&c->vc, &d->vd, flags); c 1172 drivers/dma/ti/omap-dma.c struct omap_chan *c = to_omap_dma_chan(chan); c 1192 drivers/dma/ti/omap-dma.c d->ccr = c->ccr; c 1211 drivers/dma/ti/omap-dma.c return vchan_tx_prep(&c->vc, &d->vd, tx_flags); c 1218 drivers/dma/ti/omap-dma.c struct omap_chan *c = to_omap_dma_chan(chan); c 1247 drivers/dma/ti/omap-dma.c d->ccr = c->ccr; c 1294 drivers/dma/ti/omap-dma.c return vchan_tx_prep(&c->vc, &d->vd, flags); c 1299 
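The issue_pending callbacks indexed above (mtk-uart-apdma 364-379, sa11x0-dma 502-519, omap-dma 876-882, zx_dma 381-400) share the same virt-dma flow: take vc.lock, fold submitted work onto the issued list with vchan_issue_pending(), then either start the next descriptor directly via vchan_next_desc() or queue the channel for a scheduler, as sa11x0 and zx_dma do. A simplified sketch of the direct-start variant follows, with a stub foo_hw_start() in place of the driver-specific register programming and without the "channel already busy" checks the real drivers make.

#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include "virt-dma.h"		/* drivers/dma/virt-dma.h */

/* Stand-in for the driver-specific start routine (omap_dma_start_desc(),
 * mtk_uart_apdma_start_tx(), ...): program the controller here. */
static void foo_hw_start(struct virt_dma_chan *vc, struct virt_dma_desc *vd)
{
}

static void foo_issue_pending(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);
	/* Move submitted descriptors to the issued list ... */
	if (vchan_issue_pending(vc)) {
		struct virt_dma_desc *vd = vchan_next_desc(vc);

		/* ... and start the first one if the channel was idle. */
		if (vd)
			foo_hw_start(vc, vd);
	}
	spin_unlock_irqrestore(&vc->lock, flags);
}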
drivers/dma/ti/omap-dma.c struct omap_chan *c = to_omap_dma_chan(chan); c 1309 drivers/dma/ti/omap-dma.c memcpy(&c->cfg, cfg, sizeof(c->cfg)); c 1316 drivers/dma/ti/omap-dma.c struct omap_chan *c = to_omap_dma_chan(chan); c 1320 drivers/dma/ti/omap-dma.c spin_lock_irqsave(&c->vc.lock, flags); c 1327 drivers/dma/ti/omap-dma.c if (c->desc) { c 1328 drivers/dma/ti/omap-dma.c vchan_terminate_vdesc(&c->desc->vd); c 1329 drivers/dma/ti/omap-dma.c c->desc = NULL; c 1331 drivers/dma/ti/omap-dma.c if (!c->paused) c 1332 drivers/dma/ti/omap-dma.c omap_dma_stop(c); c 1335 drivers/dma/ti/omap-dma.c c->cyclic = false; c 1336 drivers/dma/ti/omap-dma.c c->paused = false; c 1338 drivers/dma/ti/omap-dma.c vchan_get_all_descriptors(&c->vc, &head); c 1339 drivers/dma/ti/omap-dma.c spin_unlock_irqrestore(&c->vc.lock, flags); c 1340 drivers/dma/ti/omap-dma.c vchan_dma_desc_free_list(&c->vc, &head); c 1347 drivers/dma/ti/omap-dma.c struct omap_chan *c = to_omap_dma_chan(chan); c 1349 drivers/dma/ti/omap-dma.c vchan_synchronize(&c->vc); c 1354 drivers/dma/ti/omap-dma.c struct omap_chan *c = to_omap_dma_chan(chan); c 1362 drivers/dma/ti/omap-dma.c if (!c->desc) c 1365 drivers/dma/ti/omap-dma.c if (c->cyclic) c 1391 drivers/dma/ti/omap-dma.c else if (c->desc->dir == DMA_DEV_TO_MEM) c 1394 drivers/dma/ti/omap-dma.c if (can_pause && !c->paused) { c 1395 drivers/dma/ti/omap-dma.c ret = omap_dma_stop(c); c 1397 drivers/dma/ti/omap-dma.c c->paused = true; c 1407 drivers/dma/ti/omap-dma.c struct omap_chan *c = to_omap_dma_chan(chan); c 1414 drivers/dma/ti/omap-dma.c if (c->paused && c->desc) { c 1418 drivers/dma/ti/omap-dma.c omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl); c 1420 drivers/dma/ti/omap-dma.c omap_dma_start(c, c->desc); c 1421 drivers/dma/ti/omap-dma.c c->paused = false; c 1431 drivers/dma/ti/omap-dma.c struct omap_chan *c; c 1433 drivers/dma/ti/omap-dma.c c = kzalloc(sizeof(*c), GFP_KERNEL); c 1434 drivers/dma/ti/omap-dma.c if (!c) c 1437 drivers/dma/ti/omap-dma.c c->reg_map = od->reg_map; c 1438 drivers/dma/ti/omap-dma.c c->vc.desc_free = omap_dma_desc_free; c 1439 drivers/dma/ti/omap-dma.c vchan_init(&c->vc, &od->ddev); c 1447 drivers/dma/ti/omap-dma.c struct omap_chan *c = list_first_entry(&od->ddev.channels, c 1450 drivers/dma/ti/omap-dma.c list_del(&c->vc.chan.device_node); c 1451 drivers/dma/ti/omap-dma.c tasklet_kill(&c->vc.task); c 1452 drivers/dma/ti/omap-dma.c kfree(c); c 1663 drivers/dma/ti/omap-dma.c struct omap_chan *c = to_omap_dma_chan(chan); c 1667 drivers/dma/ti/omap-dma.c c->dma_sig = req; c 187 drivers/dma/zx_dma.c static int zx_dma_start_txd(struct zx_dma_chan *c) c 189 drivers/dma/zx_dma.c struct zx_dma_dev *d = to_zx_dma(c->vc.chan.device); c 190 drivers/dma/zx_dma.c struct virt_dma_desc *vd = vchan_next_desc(&c->vc); c 192 drivers/dma/zx_dma.c if (!c->phy) c 195 drivers/dma/zx_dma.c if (BIT(c->phy->idx) & zx_dma_get_chan_stat(d)) c 206 drivers/dma/zx_dma.c c->phy->ds_run = ds; c 207 drivers/dma/zx_dma.c c->phy->ds_done = NULL; c 209 drivers/dma/zx_dma.c zx_dma_set_desc(c->phy, ds->desc_hw); c 212 drivers/dma/zx_dma.c c->phy->ds_done = NULL; c 213 drivers/dma/zx_dma.c c->phy->ds_run = NULL; c 220 drivers/dma/zx_dma.c struct zx_dma_chan *c, *cn; c 225 drivers/dma/zx_dma.c list_for_each_entry_safe(c, cn, &d->slave.channels, c 227 drivers/dma/zx_dma.c spin_lock_irqsave(&c->vc.lock, flags); c 228 drivers/dma/zx_dma.c p = c->phy; c 229 drivers/dma/zx_dma.c if (p && p->ds_done && zx_dma_start_txd(c)) { c 233 drivers/dma/zx_dma.c c->phy = NULL; c 236 drivers/dma/zx_dma.c 
spin_unlock_irqrestore(&c->vc.lock, flags); c 242 drivers/dma/zx_dma.c c = list_first_entry(&d->chan_pending, c 244 drivers/dma/zx_dma.c p = &d->phy[c->id]; c 247 drivers/dma/zx_dma.c list_del_init(&c->node); c 248 drivers/dma/zx_dma.c pch_alloc |= 1 << c->id; c 250 drivers/dma/zx_dma.c p->vchan = c; c 251 drivers/dma/zx_dma.c c->phy = p; c 253 drivers/dma/zx_dma.c dev_dbg(d->slave.dev, "pchan %u: busy!\n", c->id); c 261 drivers/dma/zx_dma.c c = p->vchan; c 262 drivers/dma/zx_dma.c if (c) { c 263 drivers/dma/zx_dma.c spin_lock_irqsave(&c->vc.lock, flags); c 264 drivers/dma/zx_dma.c zx_dma_start_txd(c); c 265 drivers/dma/zx_dma.c spin_unlock_irqrestore(&c->vc.lock, flags); c 275 drivers/dma/zx_dma.c struct zx_dma_chan *c; c 286 drivers/dma/zx_dma.c c = p->vchan; c 287 drivers/dma/zx_dma.c if (c) { c 290 drivers/dma/zx_dma.c spin_lock_irqsave(&c->vc.lock, flags); c 291 drivers/dma/zx_dma.c if (c->cyclic) { c 298 drivers/dma/zx_dma.c spin_unlock_irqrestore(&c->vc.lock, flags); c 319 drivers/dma/zx_dma.c struct zx_dma_chan *c = to_zx_chan(chan); c 324 drivers/dma/zx_dma.c list_del_init(&c->node); c 327 drivers/dma/zx_dma.c vchan_free_chan_resources(&c->vc); c 328 drivers/dma/zx_dma.c c->ccfg = 0; c 335 drivers/dma/zx_dma.c struct zx_dma_chan *c = to_zx_chan(chan); c 342 drivers/dma/zx_dma.c ret = dma_cookie_status(&c->vc.chan, cookie, state); c 346 drivers/dma/zx_dma.c spin_lock_irqsave(&c->vc.lock, flags); c 347 drivers/dma/zx_dma.c p = c->phy; c 348 drivers/dma/zx_dma.c ret = c->status; c 354 drivers/dma/zx_dma.c vd = vchan_find_desc(&c->vc, cookie); c 374 drivers/dma/zx_dma.c spin_unlock_irqrestore(&c->vc.lock, flags); c 381 drivers/dma/zx_dma.c struct zx_dma_chan *c = to_zx_chan(chan); c 386 drivers/dma/zx_dma.c spin_lock_irqsave(&c->vc.lock, flags); c 388 drivers/dma/zx_dma.c if (vchan_issue_pending(&c->vc)) { c 390 drivers/dma/zx_dma.c if (!c->phy && list_empty(&c->node)) { c 392 drivers/dma/zx_dma.c list_add_tail(&c->node, &d->chan_pending); c 394 drivers/dma/zx_dma.c dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc); c 398 drivers/dma/zx_dma.c dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc); c 400 drivers/dma/zx_dma.c spin_unlock_irqrestore(&c->vc.lock, flags); c 421 drivers/dma/zx_dma.c struct zx_dma_chan *c = to_zx_chan(chan); c 428 drivers/dma/zx_dma.c &c->vc, num, lli_limit); c 438 drivers/dma/zx_dma.c dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc); c 459 drivers/dma/zx_dma.c static int zx_pre_config(struct zx_dma_chan *c, enum dma_transfer_direction dir) c 461 drivers/dma/zx_dma.c struct dma_slave_config *cfg = &c->slave_cfg; c 468 drivers/dma/zx_dma.c c->ccfg = ZX_CH_ENABLE | ZX_SOFT_REQ c 474 drivers/dma/zx_dma.c c->dev_addr = cfg->dst_addr; c 484 drivers/dma/zx_dma.c c->ccfg = ZX_DST_FIFO_MODE | ZX_CH_ENABLE c 490 drivers/dma/zx_dma.c c->dev_addr = cfg->src_addr; c 495 drivers/dma/zx_dma.c c->ccfg = ZX_SRC_FIFO_MODE | ZX_CH_ENABLE c 510 drivers/dma/zx_dma.c struct zx_dma_chan *c = to_zx_chan(chan); c 518 drivers/dma/zx_dma.c if (zx_pre_config(c, DMA_MEM_TO_MEM)) c 532 drivers/dma/zx_dma.c zx_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg); c 539 drivers/dma/zx_dma.c c->cyclic = 0; c 542 drivers/dma/zx_dma.c return vchan_tx_prep(&c->vc, &ds->vd, flags); c 549 drivers/dma/zx_dma.c struct zx_dma_chan *c = to_zx_chan(chan); c 559 drivers/dma/zx_dma.c if (zx_pre_config(c, dir)) c 572 drivers/dma/zx_dma.c c->cyclic = 0; c 584 drivers/dma/zx_dma.c dst = c->dev_addr; c 586 drivers/dma/zx_dma.c src = c->dev_addr; c 590 drivers/dma/zx_dma.c 
zx_dma_fill_desc(ds, dst, src, len, num++, c->ccfg); c 600 drivers/dma/zx_dma.c return vchan_tx_prep(&c->vc, &ds->vd, flags); c 608 drivers/dma/zx_dma.c struct zx_dma_chan *c = to_zx_chan(chan); c 619 drivers/dma/zx_dma.c if (zx_pre_config(c, dir)) c 625 drivers/dma/zx_dma.c c->cyclic = 1; c 630 drivers/dma/zx_dma.c dst = c->dev_addr; c 632 drivers/dma/zx_dma.c src = c->dev_addr; c 636 drivers/dma/zx_dma.c c->ccfg | ZX_IRQ_ENABLE_ALL); c 643 drivers/dma/zx_dma.c return vchan_tx_prep(&c->vc, &ds->vd, flags); c 649 drivers/dma/zx_dma.c struct zx_dma_chan *c = to_zx_chan(chan); c 654 drivers/dma/zx_dma.c memcpy(&c->slave_cfg, cfg, sizeof(*cfg)); c 661 drivers/dma/zx_dma.c struct zx_dma_chan *c = to_zx_chan(chan); c 663 drivers/dma/zx_dma.c struct zx_dma_phy *p = c->phy; c 667 drivers/dma/zx_dma.c dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); c 671 drivers/dma/zx_dma.c list_del_init(&c->node); c 675 drivers/dma/zx_dma.c spin_lock_irqsave(&c->vc.lock, flags); c 676 drivers/dma/zx_dma.c vchan_get_all_descriptors(&c->vc, &head); c 680 drivers/dma/zx_dma.c c->phy = NULL; c 685 drivers/dma/zx_dma.c spin_unlock_irqrestore(&c->vc.lock, flags); c 686 drivers/dma/zx_dma.c vchan_dma_desc_free_list(&c->vc, &head); c 693 drivers/dma/zx_dma.c struct zx_dma_chan *c = to_zx_chan(chan); c 696 drivers/dma/zx_dma.c val = readl_relaxed(c->phy->base + REG_ZX_CTRL); c 698 drivers/dma/zx_dma.c writel_relaxed(val, c->phy->base + REG_ZX_CTRL); c 705 drivers/dma/zx_dma.c struct zx_dma_chan *c = to_zx_chan(chan); c 708 drivers/dma/zx_dma.c val = readl_relaxed(c->phy->base + REG_ZX_CTRL); c 710 drivers/dma/zx_dma.c writel_relaxed(val, c->phy->base + REG_ZX_CTRL); c 737 drivers/dma/zx_dma.c struct zx_dma_chan *c; c 747 drivers/dma/zx_dma.c c = to_zx_chan(chan); c 748 drivers/dma/zx_dma.c c->id = request; c 750 drivers/dma/zx_dma.c c->id, &c->vc); c 840 drivers/dma/zx_dma.c struct zx_dma_chan *c = &d->chans[i]; c 842 drivers/dma/zx_dma.c c->status = DMA_IN_PROGRESS; c 843 drivers/dma/zx_dma.c INIT_LIST_HEAD(&c->node); c 844 drivers/dma/zx_dma.c c->vc.desc_free = zx_dma_free_desc; c 845 drivers/dma/zx_dma.c vchan_init(&c->vc, &d->slave); c 883 drivers/dma/zx_dma.c struct zx_dma_chan *c, *cn; c 892 drivers/dma/zx_dma.c list_for_each_entry_safe(c, cn, &d->slave.channels, c 894 drivers/dma/zx_dma.c list_del(&c->vc.chan.device_node); c 229 drivers/edac/amd64_edac.h #define online_spare_swap_done(pvt, c) (((pvt)->online_spare >> (1 + 2 * (c))) & 0x1) c 230 drivers/edac/amd64_edac.h #define online_spare_bad_dramcs(pvt, c) (((pvt)->online_spare >> (4 + 4 * (c))) & 0x7) c 439 drivers/edac/cpc925_edac.c u32 c; c 465 drivers/edac/cpc925_edac.c c = col & 0x1; c 467 drivers/edac/cpc925_edac.c pa |= c << (14 - i); c 475 drivers/edac/cpc925_edac.c c = row & 0x1; c 477 drivers/edac/cpc925_edac.c pa |= c << (26 - i); c 481 drivers/edac/cpc925_edac.c c = row & 0x1; c 483 drivers/edac/cpc925_edac.c pa |= c << (21 + i); c 487 drivers/edac/cpc925_edac.c c = row & 0x1; c 489 drivers/edac/cpc925_edac.c pa |= c << (18 - i); c 493 drivers/edac/cpc925_edac.c c = row & 0x1; c 495 drivers/edac/cpc925_edac.c pa |= c << (29 - i); c 1154 drivers/edac/mce_amd.c struct cpuinfo_x86 *c = &boot_cpu_data; c 1156 drivers/edac/mce_amd.c if (c->x86_vendor != X86_VENDOR_AMD && c 1157 drivers/edac/mce_amd.c c->x86_vendor != X86_VENDOR_HYGON) c 1164 drivers/edac/mce_amd.c switch (c->x86) { c 1196 drivers/edac/mce_amd.c xec_mask = c->x86_model == 0x60 ? 0x3f : 0x1f; c 1220 drivers/edac/mce_amd.c printk(KERN_WARNING "Huh? 
What family is it: 0x%x?!\n", c->x86); c 316 drivers/eisa/eisa-bus.c int i, c; c 357 drivers/eisa/eisa-bus.c for (c = 0, i = 1; i <= root->slots; i++) { c 391 drivers/eisa/eisa-bus.c c++; c 401 drivers/eisa/eisa-bus.c dev_info(root->dev, "EISA: Detected %d card%s\n", c, c == 1 ? "" : "s"); c 360 drivers/firewire/core-cdev.c struct client *c; c 363 drivers/firewire/core-cdev.c list_for_each_entry(c, &device->client_list, link) c 364 drivers/firewire/core-cdev.c callback(c); c 70 drivers/firewire/core-device.c char c; c 86 drivers/firewire/core-device.c c = block[i / 4] >> (24 - 8 * (i % 4)); c 87 drivers/firewire/core-device.c if (c == '\0') c 89 drivers/firewire/core-device.c buf[i] = c; c 351 drivers/firewire/core-iso.c int irm_id, ret, c = -EINVAL; c 358 drivers/firewire/core-iso.c c = manage_channel(card, irm_id, generation, channels_hi, c 361 drivers/firewire/core-iso.c if (channels_lo && c < 0) { c 362 drivers/firewire/core-iso.c c = manage_channel(card, irm_id, generation, channels_lo, c 365 drivers/firewire/core-iso.c if (c >= 0) c 366 drivers/firewire/core-iso.c c += 32; c 368 drivers/firewire/core-iso.c *channel = c; c 370 drivers/firewire/core-iso.c if (allocate && channels_mask != 0 && c < 0) c 381 drivers/firewire/core-iso.c if (c >= 0) c 382 drivers/firewire/core-iso.c deallocate_channel(card, irm_id, generation, c); c 141 drivers/firmware/arm_scmi/driver.c #define client_to_scmi_chan_info(c) container_of(c, struct scmi_chan_info, cl) c 409 drivers/firmware/arm_scpi.c static void scpi_handle_remote_msg(struct mbox_client *c, void *msg) c 411 drivers/firmware/arm_scpi.c struct scpi_chan *ch = container_of(c, struct scpi_chan, cl); c 421 drivers/firmware/arm_scpi.c static void scpi_tx_prepare(struct mbox_client *c, void *msg) c 425 drivers/firmware/arm_scpi.c struct scpi_chan *ch = container_of(c, struct scpi_chan, cl); c 101 drivers/firmware/dmi-id.c const char *c; c 104 drivers/firmware/dmi-id.c c = dmi_get_system_info(f->field); c 105 drivers/firmware/dmi-id.c if (!c) c 108 drivers/firmware/dmi-id.c t = kmalloc(strlen(c) + 1, GFP_KERNEL); c 111 drivers/firmware/dmi-id.c ascii_filter(t, c); c 484 drivers/firmware/dmi_scan.c int c = 0; c 488 drivers/firmware/dmi_scan.c return c; c 492 drivers/firmware/dmi_scan.c c += scnprintf(buf + c, len - c, "%c", *p); c 494 drivers/firmware/dmi_scan.c c += scnprintf(buf + c, len - c, "\\x%02x", *p & 0xff); c 495 drivers/firmware/dmi_scan.c return c; c 500 drivers/firmware/dmi_scan.c int c = 0; c 503 drivers/firmware/dmi_scan.c c += print_filtered(buf + c, len - c, c 505 drivers/firmware/dmi_scan.c c += scnprintf(buf + c, len - c, " "); c 506 drivers/firmware/dmi_scan.c c += print_filtered(buf + c, len - c, c 511 drivers/firmware/dmi_scan.c c += scnprintf(buf + c, len - c, "/"); c 512 drivers/firmware/dmi_scan.c c += print_filtered(buf + c, len - c, board); c 514 drivers/firmware/dmi_scan.c c += scnprintf(buf + c, len - c, ", BIOS "); c 515 drivers/firmware/dmi_scan.c c += print_filtered(buf + c, len - c, c 517 drivers/firmware/dmi_scan.c c += scnprintf(buf + c, len - c, " "); c 518 drivers/firmware/dmi_scan.c c += print_filtered(buf + c, len - c, c 112 drivers/firmware/efi/earlycon.c static void efi_earlycon_write_char(u32 *dst, unsigned char c, unsigned int h) c 120 drivers/firmware/efi/earlycon.c src = font->data + c * font->height; c 754 drivers/firmware/efi/libstub/efi-stub-helper.c static int efi_utf8_bytes(u16 c) c 756 drivers/firmware/efi/libstub/efi-stub-helper.c return 1 + (c >= 0x80) + (c >= 0x800); c 764 
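The terminate_all entries indexed above (mtk-uart-apdma 431-434, omap-dma 1316-1340, zx_dma 661-686) end with the same cleanup: once the hardware is stopped, every outstanding descriptor is collected under vc.lock with vchan_get_all_descriptors() and handed to vchan_dma_desc_free_list(). A minimal sketch of that tail, again for a hypothetical foo driver and with the hardware-stop step left as a comment.

#include <linux/dmaengine.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include "virt-dma.h"		/* drivers/dma/virt-dma.h */

static int foo_terminate_all(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	/* Driver-specific: disable the channel and clear its interrupts
	 * (VFF_EN/VFF_INT_EN in mtk-uart-apdma, CCR/CICR in omap-dma). */

	spin_lock_irqsave(&vc->lock, flags);
	/* Detach every submitted/issued descriptor from the channel ... */
	vchan_get_all_descriptors(vc, &head);
	spin_unlock_irqrestore(&vc->lock, flags);

	/* ... and free them once the lock is dropped, as omap-dma and
	 * zx_dma do. */
	vchan_dma_desc_free_list(vc, &head);
	return 0;
}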
drivers/firmware/efi/libstub/efi-stub-helper.c unsigned int c; c 767 drivers/firmware/efi/libstub/efi-stub-helper.c c = *src++; c 768 drivers/firmware/efi/libstub/efi-stub-helper.c if (n && c >= 0xd800 && c <= 0xdbff && c 770 drivers/firmware/efi/libstub/efi-stub-helper.c c = 0x10000 + ((c & 0x3ff) << 10) + (*src & 0x3ff); c 774 drivers/firmware/efi/libstub/efi-stub-helper.c if (c >= 0xd800 && c <= 0xdfff) c 775 drivers/firmware/efi/libstub/efi-stub-helper.c c = 0xfffd; /* Unmatched surrogate */ c 776 drivers/firmware/efi/libstub/efi-stub-helper.c if (c < 0x80) { c 777 drivers/firmware/efi/libstub/efi-stub-helper.c *dst++ = c; c 780 drivers/firmware/efi/libstub/efi-stub-helper.c if (c < 0x800) { c 781 drivers/firmware/efi/libstub/efi-stub-helper.c *dst++ = 0xc0 + (c >> 6); c 784 drivers/firmware/efi/libstub/efi-stub-helper.c if (c < 0x10000) { c 785 drivers/firmware/efi/libstub/efi-stub-helper.c *dst++ = 0xe0 + (c >> 12); c 788 drivers/firmware/efi/libstub/efi-stub-helper.c *dst++ = 0xf0 + (c >> 18); c 789 drivers/firmware/efi/libstub/efi-stub-helper.c *dst++ = 0x80 + ((c >> 12) & 0x3f); c 791 drivers/firmware/efi/libstub/efi-stub-helper.c *dst++ = 0x80 + ((c >> 6) & 0x3f); c 793 drivers/firmware/efi/libstub/efi-stub-helper.c *dst++ = 0x80 + (c & 0x3f); c 35 drivers/firmware/efi/test/efi_test.c efi_char16_t *s = str, c; c 44 drivers/firmware/efi/test/efi_test.c if (get_user(c, s++)) { c 49 drivers/firmware/efi/test/efi_test.c while (c != 0) { c 50 drivers/firmware/efi/test/efi_test.c if (get_user(c, s++)) { c 631 drivers/firmware/efi/test/efi_test.c efi_capsule_header_t *c; c 637 drivers/firmware/efi/test/efi_test.c if (get_user(c, qcaps.capsule_header_array + i)) { c 641 drivers/firmware/efi/test/efi_test.c if (copy_from_user(&capsules[i], c, c 216 drivers/firmware/efi/vars.c char c = match_name[*match]; c 218 drivers/firmware/efi/vars.c switch (c) { c 233 drivers/firmware/efi/vars.c if (*match < len && c == var_name[*match]) c 368 drivers/firmware/efi/vars.c efi_char16_t c; c 375 drivers/firmware/efi/vars.c for (len = 2; len <= variable_name_size; len += sizeof(c)) { c 376 drivers/firmware/efi/vars.c c = variable_name[(len / sizeof(c)) - 1]; c 377 drivers/firmware/efi/vars.c if (!c) c 83 drivers/firmware/google/vpd.c int c; c 86 drivers/firmware/google/vpd.c c = *key++; c 88 drivers/firmware/google/vpd.c if (!isalnum(c) && c != '_') c 51 drivers/firmware/imx/imx-dsp.c static void imx_dsp_handle_rx(struct mbox_client *c, void *msg) c 53 drivers/firmware/imx/imx-dsp.c struct imx_dsp_chan *chan = container_of(c, struct imx_dsp_chan, cl); c 125 drivers/firmware/imx/imx-scu-irq.c static void imx_scu_irq_callback(struct mbox_client *c, void *msg) c 113 drivers/firmware/imx/imx-scu.c static void imx_scu_rx_callback(struct mbox_client *c, void *msg) c 115 drivers/firmware/imx/imx-scu.c struct imx_sc_chan *sc_chan = container_of(c, struct imx_sc_chan, cl); c 17 drivers/firmware/qcom_scm-64.c #define QCOM_SCM_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF)) c 29 drivers/firmware/qcom_scm-64.c #define QCOM_SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) 
(\ c 32 drivers/firmware/qcom_scm-64.c (((c) & 0x3) << 8) | \ c 28 drivers/firmware/raspberrypi.c struct completion c; c 37 drivers/firmware/raspberrypi.c complete(&fw->c); c 53 drivers/firmware/raspberrypi.c reinit_completion(&fw->c); c 56 drivers/firmware/raspberrypi.c if (wait_for_completion_timeout(&fw->c, HZ)) { c 238 drivers/firmware/raspberrypi.c init_completion(&fw->c); c 138 drivers/firmware/ti_sci.c #define cl_to_ti_sci_info(c) container_of(c, struct ti_sci_info, cl) c 149 drivers/gpio/gpio-bt8xx.c struct gpio_chip *c = &bg->gpio; c 151 drivers/gpio/gpio-bt8xx.c c->label = dev_name(&bg->pdev->dev); c 152 drivers/gpio/gpio-bt8xx.c c->owner = THIS_MODULE; c 153 drivers/gpio/gpio-bt8xx.c c->direction_input = bt8xxgpio_gpio_direction_input; c 154 drivers/gpio/gpio-bt8xx.c c->get = bt8xxgpio_gpio_get; c 155 drivers/gpio/gpio-bt8xx.c c->direction_output = bt8xxgpio_gpio_direction_output; c 156 drivers/gpio/gpio-bt8xx.c c->set = bt8xxgpio_gpio_set; c 157 drivers/gpio/gpio-bt8xx.c c->dbg_show = NULL; c 158 drivers/gpio/gpio-bt8xx.c c->base = modparam_gpiobase; c 159 drivers/gpio/gpio-bt8xx.c c->ngpio = BT8XXGPIO_NR_GPIOS; c 160 drivers/gpio/gpio-bt8xx.c c->can_sleep = false; c 203 drivers/gpio/gpio-cs5535.c static int chip_gpio_request(struct gpio_chip *c, unsigned offset) c 205 drivers/gpio/gpio-cs5535.c struct cs5535_gpio_chip *chip = gpiochip_get_data(c); c 243 drivers/gpio/gpio-cs5535.c static int chip_direction_input(struct gpio_chip *c, unsigned offset) c 245 drivers/gpio/gpio-cs5535.c struct cs5535_gpio_chip *chip = gpiochip_get_data(c); c 256 drivers/gpio/gpio-cs5535.c static int chip_direction_output(struct gpio_chip *c, unsigned offset, int val) c 258 drivers/gpio/gpio-cs5535.c struct cs5535_gpio_chip *chip = gpiochip_get_data(c); c 622 drivers/gpio/gpio-max732x.c struct i2c_client *c; c 652 drivers/gpio/gpio-max732x.c c = devm_i2c_new_dummy_device(&client->dev, c 654 drivers/gpio/gpio-max732x.c if (IS_ERR(c)) { c 657 drivers/gpio/gpio-max732x.c return PTR_ERR(c); c 659 drivers/gpio/gpio-max732x.c chip->client_group_b = chip->client_dummy = c; c 665 drivers/gpio/gpio-max732x.c c = devm_i2c_new_dummy_device(&client->dev, c 667 drivers/gpio/gpio-max732x.c if (IS_ERR(c)) { c 670 drivers/gpio/gpio-max732x.c return PTR_ERR(c); c 672 drivers/gpio/gpio-max732x.c chip->client_group_a = chip->client_dummy = c; c 1146 drivers/gpio/gpio-omap.c int c; c 1167 drivers/gpio/gpio-omap.c c = bank->get_context_loss_count(dev); c 1168 drivers/gpio/gpio-omap.c if (c != bank->context_loss_count) { c 153 drivers/gpio/gpio-pxa.c static inline struct pxa_gpio_chip *chip_to_pxachip(struct gpio_chip *c) c 155 drivers/gpio/gpio-pxa.c struct pxa_gpio_chip *pxa_chip = gpiochip_get_data(c); c 160 drivers/gpio/gpio-pxa.c static inline void __iomem *gpio_bank_base(struct gpio_chip *c, int gpio) c 162 drivers/gpio/gpio-pxa.c struct pxa_gpio_chip *p = gpiochip_get_data(c); c 168 drivers/gpio/gpio-pxa.c static inline struct pxa_gpio_bank *gpio_to_pxabank(struct gpio_chip *c, c 171 drivers/gpio/gpio-pxa.c return chip_to_pxachip(c)->banks + gpio / 32; c 387 drivers/gpio/gpio-pxa.c static inline void update_edge_detect(struct pxa_gpio_bank *c) c 391 drivers/gpio/gpio-pxa.c grer = readl_relaxed(c->regbase + GRER_OFFSET) & ~c->irq_mask; c 392 drivers/gpio/gpio-pxa.c gfer = readl_relaxed(c->regbase + GFER_OFFSET) & ~c->irq_mask; c 393 drivers/gpio/gpio-pxa.c grer |= c->irq_edge_rise & c->irq_mask; c 394 drivers/gpio/gpio-pxa.c gfer |= c->irq_edge_fall & c->irq_mask; c 395 drivers/gpio/gpio-pxa.c writel_relaxed(grer, 
c->regbase + GRER_OFFSET); c 396 drivers/gpio/gpio-pxa.c writel_relaxed(gfer, c->regbase + GFER_OFFSET); c 403 drivers/gpio/gpio-pxa.c struct pxa_gpio_bank *c = gpio_to_pxabank(&pchip->chip, gpio); c 410 drivers/gpio/gpio-pxa.c if ((c->irq_edge_rise | c->irq_edge_fall) & GPIO_bit(gpio)) c 419 drivers/gpio/gpio-pxa.c gpdr = readl_relaxed(c->regbase + GPDR_OFFSET); c 422 drivers/gpio/gpio-pxa.c writel_relaxed(gpdr | mask, c->regbase + GPDR_OFFSET); c 424 drivers/gpio/gpio-pxa.c writel_relaxed(gpdr & ~mask, c->regbase + GPDR_OFFSET); c 427 drivers/gpio/gpio-pxa.c c->irq_edge_rise |= mask; c 429 drivers/gpio/gpio-pxa.c c->irq_edge_rise &= ~mask; c 432 drivers/gpio/gpio-pxa.c c->irq_edge_fall |= mask; c 434 drivers/gpio/gpio-pxa.c c->irq_edge_fall &= ~mask; c 436 drivers/gpio/gpio-pxa.c update_edge_detect(c); c 449 drivers/gpio/gpio-pxa.c struct pxa_gpio_bank *c; c 453 drivers/gpio/gpio-pxa.c for_each_gpio_bank(gpio, c, pchip) { c 454 drivers/gpio/gpio-pxa.c gedr = readl_relaxed(c->regbase + GEDR_OFFSET); c 455 drivers/gpio/gpio-pxa.c gedr = gedr & c->irq_mask; c 456 drivers/gpio/gpio-pxa.c writel_relaxed(gedr, c->regbase + GEDR_OFFSET); c 527 drivers/gpio/gpio-pxa.c struct pxa_gpio_bank *c = gpio_to_pxabank(&pchip->chip, gpio); c 529 drivers/gpio/gpio-pxa.c c->irq_mask |= GPIO_bit(gpio); c 530 drivers/gpio/gpio-pxa.c update_edge_detect(c); c 621 drivers/gpio/gpio-pxa.c struct pxa_gpio_bank *c; c 690 drivers/gpio/gpio-pxa.c for_each_gpio_bank(gpio, c, pchip) { c 691 drivers/gpio/gpio-pxa.c writel_relaxed(0, c->regbase + GFER_OFFSET); c 692 drivers/gpio/gpio-pxa.c writel_relaxed(0, c->regbase + GRER_OFFSET); c 693 drivers/gpio/gpio-pxa.c writel_relaxed(~0, c->regbase + GEDR_OFFSET); c 696 drivers/gpio/gpio-pxa.c writel_relaxed(~0, c->regbase + ED_MASK_OFFSET); c 770 drivers/gpio/gpio-pxa.c struct pxa_gpio_bank *c; c 776 drivers/gpio/gpio-pxa.c for_each_gpio_bank(gpio, c, pchip) { c 777 drivers/gpio/gpio-pxa.c c->saved_gplr = readl_relaxed(c->regbase + GPLR_OFFSET); c 778 drivers/gpio/gpio-pxa.c c->saved_gpdr = readl_relaxed(c->regbase + GPDR_OFFSET); c 779 drivers/gpio/gpio-pxa.c c->saved_grer = readl_relaxed(c->regbase + GRER_OFFSET); c 780 drivers/gpio/gpio-pxa.c c->saved_gfer = readl_relaxed(c->regbase + GFER_OFFSET); c 783 drivers/gpio/gpio-pxa.c writel_relaxed(0xffffffff, c->regbase + GEDR_OFFSET); c 791 drivers/gpio/gpio-pxa.c struct pxa_gpio_bank *c; c 797 drivers/gpio/gpio-pxa.c for_each_gpio_bank(gpio, c, pchip) { c 799 drivers/gpio/gpio-pxa.c writel_relaxed(c->saved_gplr, c->regbase + GPSR_OFFSET); c 800 drivers/gpio/gpio-pxa.c writel_relaxed(~c->saved_gplr, c->regbase + GPCR_OFFSET); c 802 drivers/gpio/gpio-pxa.c writel_relaxed(c->saved_grer, c->regbase + GRER_OFFSET); c 803 drivers/gpio/gpio-pxa.c writel_relaxed(c->saved_gfer, c->regbase + GFER_OFFSET); c 804 drivers/gpio/gpio-pxa.c writel_relaxed(c->saved_gpdr, c->regbase + GPDR_OFFSET); c 493 drivers/gpio/gpio-thunderx.c u64 c = readq(txgpio->register_base + GPIO_CONST); c 495 drivers/gpio/gpio-thunderx.c ngpio = c & GPIO_CONST_GPIOS_MASK; c 496 drivers/gpio/gpio-thunderx.c txgpio->base_msi = (c >> 8) & 0xff; c 211 drivers/gpio/gpio-vx855.c struct gpio_chip *c = &vg->gpio; c 213 drivers/gpio/gpio-vx855.c c->label = "VX855 South Bridge"; c 214 drivers/gpio/gpio-vx855.c c->owner = THIS_MODULE; c 215 drivers/gpio/gpio-vx855.c c->direction_input = vx855gpio_direction_input; c 216 drivers/gpio/gpio-vx855.c c->direction_output = vx855gpio_direction_output; c 217 drivers/gpio/gpio-vx855.c c->get = vx855gpio_get; c 218 
drivers/gpio/gpio-vx855.c c->set = vx855gpio_set; c 219 drivers/gpio/gpio-vx855.c c->set_config = vx855gpio_set_config, c 220 drivers/gpio/gpio-vx855.c c->dbg_show = NULL; c 221 drivers/gpio/gpio-vx855.c c->base = 0; c 222 drivers/gpio/gpio-vx855.c c->ngpio = NR_VX855_GP; c 223 drivers/gpio/gpio-vx855.c c->can_sleep = false; c 224 drivers/gpio/gpio-vx855.c c->names = vx855gpio_names; c 60 drivers/gpio/gpio-zevio.c static inline u32 zevio_gpio_port_get(struct zevio_gpio *c, unsigned pin, c 64 drivers/gpio/gpio-zevio.c return readl(IOMEM(c->chip.regs + section_offset + port_offset)); c 67 drivers/gpio/gpio-zevio.c static inline void zevio_gpio_port_set(struct zevio_gpio *c, unsigned pin, c 71 drivers/gpio/gpio-zevio.c writel(val, IOMEM(c->chip.regs + section_offset + port_offset)); c 35 drivers/gpu/drm/amd/amdgpu/amdgpu_display.h #define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c)) c 131 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c u32 c = 0; c 136 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c places[c].fpfn = 0; c 137 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c places[c].lpfn = 0; c 138 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | c 142 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c places[c].lpfn = visible_pfn; c 144 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c places[c].flags |= TTM_PL_FLAG_TOPDOWN; c 147 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c places[c].flags |= TTM_PL_FLAG_CONTIGUOUS; c 148 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c c++; c 152 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c places[c].fpfn = 0; c 153 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c places[c].lpfn = 0; c 154 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c places[c].flags = TTM_PL_FLAG_TT; c 156 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c places[c].flags |= TTM_PL_FLAG_WC | c 159 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c places[c].flags |= TTM_PL_FLAG_CACHED; c 160 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c c++; c 164 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c places[c].fpfn = 0; c 165 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c places[c].lpfn = 0; c 166 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c places[c].flags = TTM_PL_FLAG_SYSTEM; c 168 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c places[c].flags |= TTM_PL_FLAG_WC | c 171 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c places[c].flags |= TTM_PL_FLAG_CACHED; c 172 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c c++; c 176 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c places[c].fpfn = 0; c 177 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c places[c].lpfn = 0; c 178 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS; c 179 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c c++; c 183 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c places[c].fpfn = 0; c 184 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c places[c].lpfn = 0; c 185 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS; c 186 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c c++; c 190 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c places[c].fpfn = 0; c 191 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c places[c].lpfn = 0; c 192 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA; c 193 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c c++; c 196 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c if (!c) { c 197 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c places[c].fpfn = 0; c 198 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 
places[c].lpfn = 0; c 199 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; c 200 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c c++; c 203 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS); c 205 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c placement->num_placement = c; c 208 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c placement->num_busy_placement = c; c 27 drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h #define AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX(c,f) (((c & 0xFFFF) << 16) | (f & 0xFFFF)) c 28 drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h #define AMDGIM_ERROR_CODE(t,c) (((t&0xF)<<12)|(c&0xFFF)) c 897 drivers/gpu/drm/amd/amdgpu/dce_v10_0.c fixed20_12 a, b, c; c 922 drivers/gpu/drm/amd/amdgpu/dce_v10_0.c c.full = dfixed_const(lb_fill_bw); c 923 drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b.full = dfixed_div(c, b); c 923 drivers/gpu/drm/amd/amdgpu/dce_v11_0.c fixed20_12 a, b, c; c 948 drivers/gpu/drm/amd/amdgpu/dce_v11_0.c c.full = dfixed_const(lb_fill_bw); c 949 drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b.full = dfixed_div(c, b); c 696 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c fixed20_12 a, b, c; c 721 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c c.full = dfixed_const(lb_fill_bw); c 722 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b.full = dfixed_div(c, b); c 833 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c fixed20_12 a, b, c; c 927 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c c.full = dfixed_const(latency_watermark_a); c 928 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c c.full = dfixed_mul(c, b); c 929 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c c.full = dfixed_mul(c, amdgpu_crtc->hsc); c 930 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c c.full = dfixed_div(c, a); c 932 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c c.full = dfixed_div(c, a); c 933 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c priority_a_mark = dfixed_trunc(c); c 939 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c c.full = dfixed_const(latency_watermark_b); c 940 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c c.full = dfixed_mul(c, b); c 941 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c c.full = dfixed_mul(c, amdgpu_crtc->hsc); c 942 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c c.full = dfixed_div(c, a); c 944 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c c.full = dfixed_div(c, a); c 945 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c priority_b_mark = dfixed_trunc(c); c 832 drivers/gpu/drm/amd/amdgpu/dce_v8_0.c fixed20_12 a, b, c; c 857 drivers/gpu/drm/amd/amdgpu/dce_v8_0.c c.full = dfixed_const(lb_fill_bw); c 858 drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b.full = dfixed_div(c, b); c 343 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c #define AMDGPU_RAS_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h) \ c 347 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c ((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)), \ c 884 drivers/gpu/drm/amd/amdkfd/kfd_crat.c struct cpuinfo_x86 *c = &cpu_data(0); c 887 drivers/gpu/drm/amd/amdkfd/kfd_crat.c if (c->x86_vendor == X86_VENDOR_AMD) c 567 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = c 569 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = c 571 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = c 573 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000; c 574 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = v->urgent_watermark * 
1000; c 618 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a; c 1126 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a; c 360 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) { c 361 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns; c 362 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns, c 369 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c watermarks->c.urgent_ns, prog_wm_value); c 372 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub1->watermarks.c.pte_meta_urgent_ns) { c 373 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c hubbub1->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns; c 374 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c prog_wm_value = convert_and_clamp(watermarks->c.pte_meta_urgent_ns, c 379 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c watermarks->c.pte_meta_urgent_ns, prog_wm_value); c 474 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns c 475 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c > hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) { c 476 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = c 477 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns; c 479 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, c 485 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); c 488 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns c 489 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c > hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) { c 490 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns = c 491 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c watermarks->c.cstate_pstate.cstate_exit_ns; c 493 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c watermarks->c.cstate_pstate.cstate_exit_ns, c 499 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value); c 573 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns c 574 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c > hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) { c 575 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c hubbub1->watermarks.c.cstate_pstate.pstate_change_ns = c 576 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c watermarks->c.cstate_pstate.pstate_change_ns; c 578 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c watermarks->c.cstate_pstate.pstate_change_ns, c 584 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value); c 2707 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; c 2708 
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; c 2709 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; c 2710 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; c 2711 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; c 206 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) { c 207 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns; c 208 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns, c 216 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c watermarks->c.urgent_ns, prog_wm_value); c 340 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns c 341 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c > hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) { c 342 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = c 343 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns; c 345 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, c 352 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); c 355 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns c 356 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c > hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) { c 357 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns = c 358 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c watermarks->c.cstate_pstate.cstate_exit_ns; c 360 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c watermarks->c.cstate_pstate.cstate_exit_ns, c 367 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value); c 444 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns c 445 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c > hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) { c 446 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c hubbub1->watermarks.c.cstate_pstate.pstate_change_ns = c 447 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c watermarks->c.cstate_pstate.pstate_change_ns; c 449 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c watermarks->c.cstate_pstate.pstate_change_ns, c 456 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value); c 1046 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c, c 38 drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h 
static inline double dml_min3(double a, double b, double c) c 40 drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h return dml_min(dml_min(a, b), c); c 43 drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h static inline double dml_min4(double a, double b, double c, double d) c 45 drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h return dml_min(dml_min(a, b), dml_min(c, d)); c 53 drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h static inline double dml_max3(double a, double b, double c) c 55 drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h return dml_max(dml_max(a, b), c); c 58 drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h static inline double dml_max4(double a, double b, double c, double d) c 60 drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h return dml_max(dml_max(a, b), dml_max(c, d)); c 63 drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h static inline double dml_max5(double a, double b, double c, double d, double e) c 65 drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h return dml_max(dml_max4(a, b, c, d), e); c 164 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c int median3(int a, int b, int c) c 168 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c if (b > c) c 169 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c swap(b, c); c 171 drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c swap(b, c); c 53 drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h struct dcn_watermarks c; c 184 drivers/gpu/drm/amd/display/modules/color/color_gamma.c struct fixed31_32 c; c 192 drivers/gpu/drm/amd/display/modules/color/color_gamma.c c = dc_fixpt_from_fraction(55991073, 100000000); c 199 drivers/gpu/drm/amd/display/modules/color/color_gamma.c x = dc_fixpt_sub(in_x, c); c 215 drivers/gpu/drm/amd/display/modules/color/color_gamma.c struct fixed31_32 c; c 223 drivers/gpu/drm/amd/display/modules/color/color_gamma.c c = dc_fixpt_from_fraction(55991073, 100000000); c 236 drivers/gpu/drm/amd/display/modules/color/color_gamma.c *out_y = dc_fixpt_add(x, c); c 1529 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c, c 1550 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.c, c 1571 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.c, c 1592 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c, c 1613 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c, c 1634 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c, c 1719 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c pptable->dBtcGbGfxPll.c); c 1723 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c pptable->dBtcGbGfxAfll.c); c 1727 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c pptable->dBtcGbSoc.c); c 1739 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c); c 1743 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c); c 1780 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c pptable->ReservedEquation0.c); c 1784 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c pptable->ReservedEquation1.c); c 1788 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c pptable->ReservedEquation2.c); c 1792 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c pptable->ReservedEquation3.c); c 139 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c); c 156 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c 
pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.c); c 173 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.c); c 190 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c pptable->DpmDescriptor[PPCLK_ECLK].SsCurve.c); c 207 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c); c 224 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c); c 241 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c pptable->DpmDescriptor[PPCLK_DCEFCLK].SsCurve.c); c 258 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c pptable->DpmDescriptor[PPCLK_DISPCLK].SsCurve.c); c 275 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c pptable->DpmDescriptor[PPCLK_PIXCLK].SsCurve.c); c 292 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c pptable->DpmDescriptor[PPCLK_PHYCLK].SsCurve.c); c 309 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c); c 384 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c pptable->CksVoltageOffset.c); c 447 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c pptable->qAvfsGb[AVFS_VOLTAGE_GFX].c); c 451 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c pptable->qAvfsGb[AVFS_VOLTAGE_SOC].c); c 455 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c pptable->dBtcGbGfxCksOn.c); c 459 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c pptable->dBtcGbGfxCksOff.c); c 463 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c pptable->dBtcGbGfxAfll.c); c 467 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c pptable->dBtcGbSoc.c); c 478 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c); c 482 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c); c 520 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c pptable->ReservedEquation0.c); c 524 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c pptable->ReservedEquation1.c); c 528 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c pptable->ReservedEquation2.c); c 532 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c pptable->ReservedEquation3.c); c 284 drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h enum phm_platform_caps c) c 286 drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h caps[c / PHM_MAX_NUM_CAPS_BITS_PER_FIELD] |= (1UL << c 287 drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h (c & (PHM_MAX_NUM_CAPS_BITS_PER_FIELD - 1))); c 291 drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h enum phm_platform_caps c) c 293 drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h caps[c / PHM_MAX_NUM_CAPS_BITS_PER_FIELD] &= ~(1UL << (c & (PHM_MAX_NUM_CAPS_BITS_PER_FIELD - 1))); c 296 drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h static inline bool phm_cap_enabled(const uint32_t *caps, enum phm_platform_caps c) c 298 drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h return (0 != (caps[c / PHM_MAX_NUM_CAPS_BITS_PER_FIELD] & c 299 drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h (1UL << (c & (PHM_MAX_NUM_CAPS_BITS_PER_FIELD - 1))))); c 302 drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h #define PP_CAP(c) phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, (c)) c 302 drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h uint32_t c; c 313 
drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h uint32_t c; c 341 drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h uint32_t c; // store in IEEE float format in this variable c 352 drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h uint32_t c; // store in IEEE float format in this variable c 348 drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h uint32_t c; // store in IEEE float format in this variable c 359 drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h uint32_t c; // store in IEEE float format in this variable c 52 drivers/gpu/drm/amd/powerplay/inc/smu72.h int32_t c; c 44 drivers/gpu/drm/amd/powerplay/inc/smu73.h int32_t c; c 77 drivers/gpu/drm/amd/powerplay/inc/smu74.h int32_t c; c 204 drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h uint32_t c; c 215 drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h uint32_t c; c 168 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c static void d71_layer_update_fb(struct komeda_component *c, c 174 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c u32 __iomem *reg = c->reg; c 192 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c static void d71_layer_disable(struct komeda_component *c) c 194 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c malidp_write32_mask(c->reg, BLK_CONTROL, L_EN, 0); c 197 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c static void d71_layer_update(struct komeda_component *c, c 204 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c u32 __iomem *reg = c->reg; c 208 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c d71_layer_update_fb(c, kfb, st->addr); c 269 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c static void d71_layer_dump(struct komeda_component *c, struct seq_file *sf) c 275 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, LAYER_INFO, 1, &v[14]); c 286 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c dump_block_header(sf, c->reg); c 290 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0xD0, 1, v); c 293 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0xD4, 1, v); c 296 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0xD8, 4, v); c 302 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0x100, 3, v); c 307 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0x110, 2, v); c 311 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0x118, 1, v); c 314 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0x120, 2, v); c 318 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0x130, 12, v); c 324 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, LAYER_RGB_RGB_COEFF0, 12, v); c 329 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0x160, 3, v); c 344 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c struct komeda_component *c; c 349 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*layer), c 355 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c if (IS_ERR(c)) { c 357 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c return PTR_ERR(c); c 360 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c layer = to_layer(c); c 378 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c static void 
d71_wb_layer_update(struct komeda_component *c, c 385 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c u32 __iomem *reg = c->reg; c 387 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c d71_layer_update_fb(c, kfb, st->addr); c 397 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c static void d71_wb_layer_dump(struct komeda_component *c, struct seq_file *sf) c 401 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c dump_block_header(sf, c->reg); c 403 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0x80, 1, v); c 406 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0xD0, 3, v); c 411 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0xE0, 1, v); c 415 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0x100 + i * 0x10, 3, v); c 421 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0x130, 12, v); c 426 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c static void d71_wb_layer_disable(struct komeda_component *c) c 428 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c malidp_write32(c->reg, BLK_INPUT_ID0, 0); c 429 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c malidp_write32_mask(c->reg, BLK_CONTROL, L_EN, 0); c 441 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c struct komeda_component *c; c 447 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*wb_layer), c 452 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c if (IS_ERR(c)) { c 454 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c return PTR_ERR(c); c 457 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c wb_layer = to_layer(c); c 466 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c static void d71_component_disable(struct komeda_component *c) c 468 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c u32 __iomem *reg = c->reg; c 473 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c for (i = 0; i < c->max_active_inputs; i++) { c 480 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c if (has_bit(c->id, KOMEDA_PIPELINE_COMPIZS)) c 511 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c static void d71_compiz_update(struct komeda_component *c, c 515 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c u32 __iomem *reg = c->reg; c 535 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c static void d71_compiz_dump(struct komeda_component *c, struct seq_file *sf) c 539 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c dump_block_header(sf, c->reg); c 541 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0x80, 5, v); c 545 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0xA0, 5, v); c 552 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0xD0, 2, v); c 556 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0xDC, 1, v); c 560 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, v[4], 3, v); c 566 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0x130, 2, v); c 580 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c struct komeda_component *c; c 586 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*compiz), c 593 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c if 
(IS_ERR(c)) c 594 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c return PTR_ERR(c); c 596 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c compiz = to_compiz(c); c 635 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c static void d71_scaler_update(struct komeda_component *c, c 639 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c u32 __iomem *reg = c->reg; c 704 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c static void d71_scaler_dump(struct komeda_component *c, struct seq_file *sf) c 708 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c dump_block_header(sf, c->reg); c 710 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0x80, 1, v); c 713 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0xD0, 1, v); c 716 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0xDC, 9, v); c 737 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c struct komeda_component *c; c 743 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*scaler), c 750 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c if (IS_ERR(c)) { c 752 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c return PTR_ERR(c); c 755 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c scaler = to_scaler(c); c 763 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c malidp_write32(c->reg, BLK_CONTROL, 0); c 809 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c static void d71_splitter_update(struct komeda_component *c, c 813 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c u32 __iomem *reg = c->reg; c 821 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c static void d71_splitter_dump(struct komeda_component *c, struct seq_file *sf) c 825 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c dump_block_header(sf, c->reg); c 827 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, BLK_INPUT_ID0, 1, v); c 830 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, BLK_CONTROL, 3, v); c 845 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c struct komeda_component *c; c 851 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*splitter), c 858 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c if (IS_ERR(c)) { c 863 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c splitter = to_splitter(c); c 871 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c static void d71_merger_update(struct komeda_component *c, c 875 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c u32 __iomem *reg = c->reg; c 887 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c static void d71_merger_dump(struct komeda_component *c, struct seq_file *sf) c 891 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c dump_block_header(sf, c->reg); c 893 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, MG_INPUT_ID0, 1, &v); c 896 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, MG_INPUT_ID1, 1, &v); c 899 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, BLK_CONTROL, 1, &v); c 902 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, MG_SIZE, 1, &v); c 915 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c struct komeda_component *c; c 921 
drivers/gpu/drm/arm/display/komeda/d71/d71_component.c c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*merger), c 929 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c if (IS_ERR(c)) { c 931 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c return PTR_ERR(c); c 934 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c merger = to_merger(c); c 942 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c static void d71_improc_update(struct komeda_component *c, c 946 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c u32 __iomem *reg = c->reg; c 956 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c static void d71_improc_dump(struct komeda_component *c, struct seq_file *sf) c 960 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c dump_block_header(sf, c->reg); c 962 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0x80, 2, v); c 966 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0xC0, 1, v); c 969 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0xD0, 3, v); c 974 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0x130, 12, v); c 978 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0x170, 12, v); c 992 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c struct komeda_component *c; c 998 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*improc), c 1004 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c if (IS_ERR(c)) { c 1006 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c return PTR_ERR(c); c 1009 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c improc = to_improc(c); c 1024 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c static void d71_timing_ctrlr_disable(struct komeda_component *c) c 1026 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c malidp_write32_mask(c->reg, BLK_CONTROL, BS_CTRL_EN, 0); c 1029 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c static void d71_timing_ctrlr_update(struct komeda_component *c, c 1034 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c u32 __iomem *reg = c->reg; c 1065 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c if (c->pipeline->dual_link) { c 1073 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c static void d71_timing_ctrlr_dump(struct komeda_component *c, c 1078 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c dump_block_header(sf, c->reg); c 1080 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0xC0, 1, v); c 1083 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0xD0, 8, v); c 1093 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0x100, 3, v); c 1098 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0x110, 3, v); c 1102 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c get_values_from_reg(c->reg, 0x120, 5, v); c 1119 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c struct komeda_component *c; c 1125 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*ctrlr), c 1131 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c if (IS_ERR(c)) { c 1133 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c return PTR_ERR(c); c 1136 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c ctrlr = to_ctrlr(c); c 47 
drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c struct komeda_component *c; c 51 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c c = komeda_pipeline_get_component(pipe, i); c 52 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c komeda_component_destroy(mdev, c); c 126 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c struct komeda_component *c = NULL; c 130 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c c = *pos; c 132 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c return c; c 139 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c struct komeda_component *c = NULL; c 144 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c c = komeda_pipeline_get_component(pipe, id); c 146 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c return c; c 150 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c komeda_component_pickup_input(struct komeda_component *c, u32 avail_comps) c 152 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c u32 avail_inputs = c->supported_inputs & (avail_comps); c 154 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c return komeda_pipeline_get_first_component(c->pipeline, avail_inputs); c 167 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c struct komeda_component *c; c 196 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c c = devm_kzalloc(pipe->mdev->dev, comp_sz, GFP_KERNEL); c 197 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c if (!c) c 200 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c c->id = id; c 201 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c c->hw_id = hw_id; c 202 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c c->reg = reg; c 203 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c c->pipeline = pipe; c 204 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c c->max_active_inputs = max_active_inputs; c 205 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c c->max_active_outputs = max_active_outputs; c 206 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c c->supported_inputs = supported_inputs; c 207 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c c->funcs = funcs; c 213 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c vsnprintf(c->name, sizeof(c->name), name_fmt, args); c 220 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c pipe->avail_comps |= BIT(c->id); c 221 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c *pos = c; c 223 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c return c; c 227 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c struct komeda_component *c) c 229 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c devm_kfree(mdev->dev, c); c 232 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c static void komeda_component_dump(struct komeda_component *c) c 234 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c if (!c) c 238 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c c->name, c->id, BIT(c->id)); c 240 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c c->max_active_inputs, c->supported_inputs); c 242 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c c->max_active_outputs, c->supported_outputs); c 247 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c struct komeda_component *c; c 261 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c c = komeda_pipeline_get_component(pipe, id); c 263 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c komeda_component_dump(c); c 267 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c static void komeda_component_verify_inputs(struct komeda_component *c) c 269 
drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c struct komeda_pipeline *pipe = c->pipeline; c 273 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c dp_for_each_set_bit(id, c->supported_inputs) { c 276 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c c->supported_inputs &= ~(BIT(id)); c 278 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c id, c->name); c 282 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c input->supported_outputs |= BIT(c->id); c 301 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c struct komeda_component *c; c 306 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c c = komeda_pipeline_get_component(pipe, id); c 307 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c komeda_component_verify_inputs(c); c 355 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c struct komeda_component *c; c 364 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c c = komeda_pipeline_get_component(pipe, id); c 366 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c seq_printf(sf, "\n------%s------\n", c->name); c 367 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c if (c->funcs->dump_register) c 368 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c c->funcs->dump_register(c, sf); c 60 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h int (*validate)(struct komeda_component *c, c 63 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h void (*update)(struct komeda_component *c, c 66 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h void (*disable)(struct komeda_component *c); c 68 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h void (*dump_register)(struct komeda_component *c, struct seq_file *seq); c 447 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h #define to_layer(c) container_of(c, struct komeda_layer, base) c 448 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h #define to_compiz(c) container_of(c, struct komeda_compiz, base) c 449 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h #define to_scaler(c) container_of(c, struct komeda_scaler, base) c 450 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h #define to_splitter(c) container_of(c, struct komeda_splitter, base) c 451 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h #define to_merger(c) container_of(c, struct komeda_merger, base) c 452 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h #define to_improc(c) container_of(c, struct komeda_improc, base) c 453 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h #define to_ctrlr(c) container_of(c, struct komeda_timing_ctrlr, base) c 455 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h #define to_layer_st(c) container_of(c, struct komeda_layer_state, base) c 456 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h #define to_compiz_st(c) container_of(c, struct komeda_compiz_state, base) c 457 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h #define to_scaler_st(c) container_of(c, struct komeda_scaler_state, base) c 458 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h #define to_splitter_st(c) container_of(c, struct komeda_splitter_state, base) c 459 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h #define to_merger_st(c) container_of(c, struct komeda_merger_state, base) c 460 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h #define to_improc_st(c) container_of(c, struct komeda_improc_state, base) c 461 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h #define to_ctrlr_st(c) container_of(c, struct komeda_timing_ctrlr_state, base) c 495 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h struct 
komeda_component *c); c 498 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h komeda_component_pickup_output(struct komeda_component *c, u32 avail_comps) c 500 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h u32 avail_inputs = c->supported_outputs & (avail_comps); c 502 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h return komeda_pipeline_get_first_component(c->pipeline, avail_inputs); c 99 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c komeda_component_get_state(struct komeda_component *c, c 104 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c WARN_ON(!drm_modeset_is_locked(&c->pipeline->obj.lock)); c 106 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c priv_st = drm_atomic_get_private_obj_state(state, &c->obj); c 114 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c komeda_component_get_old_state(struct komeda_component *c, c 119 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c priv_st = drm_atomic_get_old_private_obj_state(state, &c->obj); c 151 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c komeda_component_get_state_and_set_user(struct komeda_component *c, c 160 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c pipe_st = komeda_pipeline_get_state_and_set_crtc(c->pipeline, c 165 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c st = komeda_component_get_state(c, state); c 171 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c DRM_DEBUG_ATOMIC("required %s is busy.\n", c->name); c 178 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c pipe_st->active_comps |= BIT(c->id); c 188 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c struct komeda_component *c = state->component; c 190 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c WARN_ON((idx < 0 || idx >= c->max_active_inputs)); c 211 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c struct komeda_component *c = state->component; c 213 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c if ((idx < 0) || (idx >= c->max_active_inputs)) { c 215 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c input->component->name, c->name, idx); c 221 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c input->component->name, c->name, idx); c 238 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c komeda_component_validate_private(struct komeda_component *c, c 243 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c if (!c->funcs->validate) c 246 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c err = c->funcs->validate(c, st); c 248 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c DRM_DEBUG_ATOMIC("%s validate private failed.\n", c->name); c 255 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c komeda_component_get_avail_scaler(struct komeda_component *c, c 261 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c pipe_st = komeda_pipeline_get_state(c->pipeline, state); c 268 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c c = komeda_component_pickup_output(c, avail_scalers); c 270 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c return to_scaler(c); c 1181 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c struct komeda_component *c; c 1190 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c c = komeda_pipeline_get_component(pipe, id); c 1191 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c c_st = komeda_component_get_state_and_set_user(c, c 1225 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c 
struct komeda_component *c; c 1236 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c c = komeda_pipeline_get_component(pipe, id); c 1237 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c c_st = priv_to_comp_st(c->obj.state); c 1249 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c c->funcs->disable(c); c 1258 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c struct komeda_component *c; c 1269 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c c = komeda_pipeline_get_component(pipe, id); c 1271 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c if (new->active_comps & BIT(c->id)) c 1272 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c c->funcs->update(c, priv_to_comp_st(c->obj.state)); c 1274 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c c->funcs->disable(c); c 241 drivers/gpu/drm/arm/display/komeda/komeda_plane.c struct komeda_component *c) c 243 drivers/gpu/drm/arm/display/komeda/komeda_plane.c bool is_primary = (c->id == KOMEDA_COMPONENT_LAYER0); c 252 drivers/gpu/drm/arm/display/komeda/komeda_plane.c struct komeda_component *c = &layer->base; c 269 drivers/gpu/drm/arm/display/komeda/komeda_plane.c get_possible_crtcs(kms, c->pipeline), c 272 drivers/gpu/drm/arm/display/komeda/komeda_plane.c get_plane_type(kms, c), c 273 drivers/gpu/drm/arm/display/komeda/komeda_plane.c "%s", c->name); c 313 drivers/gpu/drm/arm/display/komeda/komeda_plane.c komeda_set_crtc_plane_mask(kms, c->pipeline, plane); c 71 drivers/gpu/drm/armada/armada_crtc.h #define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc) c 417 drivers/gpu/drm/armada/armada_overlay.c #define C2K(c,s) (((c) >> (s)) & 0xff) c 65 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c) c 67 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c); c 69 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c struct drm_display_mode *adj = &c->state->adjusted_mode; c 138 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c state = drm_crtc_state_to_atmel_hlcdc_crtc_state(c->state); c 159 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c atmel_hlcdc_crtc_mode_valid(struct drm_crtc *c, c 162 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c); c 167 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c static void atmel_hlcdc_crtc_atomic_disable(struct drm_crtc *c, c 170 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c struct drm_device *dev = c->dev; c 171 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c); c 175 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c drm_crtc_vblank_off(c); c 202 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c static void atmel_hlcdc_crtc_atomic_enable(struct drm_crtc *c, c 205 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c struct drm_device *dev = c->dev; c 206 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c); c 235 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c drm_crtc_vblank_on(c); c 327 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c static int atmel_hlcdc_crtc_atomic_check(struct drm_crtc *c, c 343 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c, c 346 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c); c 348 
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c if (c->state->event) { c 349 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c c->state->event->pipe = drm_crtc_index(c); c 351 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c WARN_ON(drm_crtc_vblank_get(c) != 0); c 353 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c crtc->event = c->state->event; c 354 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c c->state->event = NULL; c 374 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c static void atmel_hlcdc_crtc_destroy(struct drm_crtc *c) c 376 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c); c 378 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c drm_crtc_cleanup(c); c 396 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c void atmel_hlcdc_crtc_irq(struct drm_crtc *c) c 398 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c drm_crtc_handle_vblank(c); c 399 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c)); c 449 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c static int atmel_hlcdc_crtc_enable_vblank(struct drm_crtc *c) c 451 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c); c 460 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c static void atmel_hlcdc_crtc_disable_vblank(struct drm_crtc *c) c 462 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c); c 391 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h unsigned int c, u32 val) c 394 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h layer->desc->clut_offset + c * sizeof(u32), c 416 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h void atmel_hlcdc_crtc_irq(struct drm_crtc *c); c 82 drivers/gpu/drm/bridge/analogix-anx78xx.c static inline struct anx78xx *connector_to_anx78xx(struct drm_connector *c) c 84 drivers/gpu/drm/bridge/analogix-anx78xx.c return container_of(c, struct anx78xx, connector); c 174 drivers/gpu/drm/bridge/cdns-dsi.c #define IF_VCHAN_ID(x, c) ((c) << ((x) * 2)) c 224 drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c unsigned c = cs[i]; c 226 drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c for (j = 0; j < 8; j++, c >>= 1) c 227 drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c dw->cs[i * 8 + j][ch] = (c & 1) << 2; c 100 drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c #define VID_NUM_CHUNKS(c) ((c) & 0x1fff) c 278 drivers/gpu/drm/bridge/tc358767.c static inline struct tc_data *connector_to_tc(struct drm_connector *c) c 280 drivers/gpu/drm/bridge/tc358767.c return container_of(c, struct tc_data, connector); c 956 drivers/gpu/drm/drm_atomic.c struct __drm_connnectors_state *c; c 959 drivers/gpu/drm/drm_atomic.c c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL); c 960 drivers/gpu/drm/drm_atomic.c if (!c) c 963 drivers/gpu/drm/drm_atomic.c state->connectors = c; c 1114 drivers/gpu/drm/drm_atomic_uapi.c int i, c = 0, ret; c 1176 drivers/gpu/drm/drm_atomic_uapi.c c++; c 1220 drivers/gpu/drm/drm_atomic_uapi.c if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) c 1862 drivers/gpu/drm/drm_edid.c #define MODE_REFRESH_DIFF(c,t) (abs((c) - (t))) c 2572 drivers/gpu/drm/drm_edid.c do_inferred_modes(struct detailed_timing *timing, void *c) c 2574 drivers/gpu/drm/drm_edid.c struct detailed_mode_closure *closure = c; c 2654 drivers/gpu/drm/drm_edid.c do_established_modes(struct detailed_timing *timing, void *c) c 2656 drivers/gpu/drm/drm_edid.c struct detailed_mode_closure *closure = c; c 2703 
drivers/gpu/drm/drm_edid.c do_standard_modes(struct detailed_timing *timing, void *c) c 2705 drivers/gpu/drm/drm_edid.c struct detailed_mode_closure *closure = c; c 2813 drivers/gpu/drm/drm_edid.c do_cvt_mode(struct detailed_timing *timing, void *c) c 2815 drivers/gpu/drm/drm_edid.c struct detailed_mode_closure *closure = c; c 2841 drivers/gpu/drm/drm_edid.c do_detailed_mode(struct detailed_timing *timing, void *c) c 2843 drivers/gpu/drm/drm_edid.c struct detailed_mode_closure *closure = c; c 33 drivers/gpu/drm/drm_fourcc.c static char printable_char(int c) c 35 drivers/gpu/drm/drm_fourcc.c return isascii(c) && isprint(c) ? c : '?'; c 49 drivers/gpu/drm/drm_gem_vram_helper.c unsigned int c = 0; c 55 drivers/gpu/drm/drm_gem_vram_helper.c gbo->placements[c++].flags = TTM_PL_FLAG_WC | c 60 drivers/gpu/drm/drm_gem_vram_helper.c gbo->placements[c++].flags = TTM_PL_MASK_CACHING | c 63 drivers/gpu/drm/drm_gem_vram_helper.c if (!c) c 64 drivers/gpu/drm/drm_gem_vram_helper.c gbo->placements[c++].flags = TTM_PL_MASK_CACHING | c 67 drivers/gpu/drm/drm_gem_vram_helper.c gbo->placement.num_placement = c; c 68 drivers/gpu/drm/drm_gem_vram_helper.c gbo->placement.num_busy_placement = c; c 70 drivers/gpu/drm/drm_gem_vram_helper.c for (i = 0; i < c; ++i) { c 34 drivers/gpu/drm/exynos/exynos_drm_dpi.c #define connector_to_dpi(c) container_of(c, struct exynos_dpi, connector) c 286 drivers/gpu/drm/exynos/exynos_drm_dsi.c #define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector) c 32 drivers/gpu/drm/exynos/exynos_drm_vidi.c #define ctx_from_connector(c) container_of(c, struct vidi_context, \ c 151 drivers/gpu/drm/exynos/exynos_hdmi.c static inline struct hdmi_context *connector_to_hdmi(struct drm_connector *c) c 153 drivers/gpu/drm/exynos/exynos_hdmi.c return container_of(c, struct hdmi_context, connector); c 770 drivers/gpu/drm/exynos/exynos_hdmi.c struct clk **c = &hdata->clk_muxes[i]; c 772 drivers/gpu/drm/exynos/exynos_hdmi.c ret = clk_set_parent(c[2], c[to_phy]); c 79 drivers/gpu/drm/exynos/regs-scaler.h #define _SCALER_HCOEF_DELTA(r, c) ((r) * 0x10 + (c) * 0x4) c 80 drivers/gpu/drm/exynos/regs-scaler.h #define _SCALER_VCOEF_DELTA(r, c) ((r) * 0x8 + (c) * 0x4) c 82 drivers/gpu/drm/exynos/regs-scaler.h #define SCALER_YHCOEF(r, c) (0x60 + _SCALER_HCOEF_DELTA((r), (c))) c 83 drivers/gpu/drm/exynos/regs-scaler.h #define SCALER_YVCOEF(r, c) (0xf0 + _SCALER_VCOEF_DELTA((r), (c))) c 84 drivers/gpu/drm/exynos/regs-scaler.h #define SCALER_CHCOEF(r, c) (0x140 + _SCALER_HCOEF_DELTA((r), (c))) c 85 drivers/gpu/drm/exynos/regs-scaler.h #define SCALER_CVCOEF(r, c) (0x1d0 + _SCALER_VCOEF_DELTA((r), (c))) c 51 drivers/gpu/drm/gma500/psb_intel_sdvo.c #define IS_TV(c) (c->output_flag & SDVO_TV_MASK) c 52 drivers/gpu/drm/gma500/psb_intel_sdvo.c #define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK) c 53 drivers/gpu/drm/gma500/psb_intel_sdvo.c #define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) c 54 drivers/gpu/drm/gma500/psb_intel_sdvo.c #define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK)) c 29 drivers/gpu/drm/i915/display/intel_connector.h int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); c 3176 drivers/gpu/drm/i915/display/intel_display.c struct drm_crtc *c; c 3198 drivers/gpu/drm/i915/display/intel_display.c for_each_crtc(dev, c) { c 3201 drivers/gpu/drm/i915/display/intel_display.c if (c == &intel_crtc->base) c 3204 drivers/gpu/drm/i915/display/intel_display.c if (!to_intel_crtc(c)->active) c 3207 drivers/gpu/drm/i915/display/intel_display.c state = 
to_intel_plane_state(c->primary->state); c 692 drivers/gpu/drm/i915/display/intel_dpll_mgr.c u64 a, b, c, d, diff, diff_best; c 721 drivers/gpu/drm/i915/display/intel_dpll_mgr.c c = 1000000 * diff; c 724 drivers/gpu/drm/i915/display/intel_dpll_mgr.c if (a < c && b < d) { c 731 drivers/gpu/drm/i915/display/intel_dpll_mgr.c } else if (a >= c && b < d) { c 736 drivers/gpu/drm/i915/display/intel_dpll_mgr.c } else if (a >= c && b >= d) { c 105 drivers/gpu/drm/i915/display/intel_overlay.c #define RGB16_TO_COLORKEY(c) \ c 106 drivers/gpu/drm/i915/display/intel_overlay.c (((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3)) c 107 drivers/gpu/drm/i915/display/intel_overlay.c #define RGB15_TO_COLORKEY(c) \ c 108 drivers/gpu/drm/i915/display/intel_overlay.c (((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3)) c 59 drivers/gpu/drm/i915/display/intel_sdvo.c #define IS_TV(c) (c->output_flag & SDVO_TV_MASK) c 60 drivers/gpu/drm/i915/display/intel_sdvo.c #define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK) c 61 drivers/gpu/drm/i915/display/intel_sdvo.c #define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) c 62 drivers/gpu/drm/i915/display/intel_sdvo.c #define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK)) c 63 drivers/gpu/drm/i915/display/intel_sdvo.c #define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK)) c 1557 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c char __maybe_unused c; c 1573 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c int err = __get_user(c, addr); c 1577 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c return __get_user(c, end - 1); c 372 drivers/gpu/drm/i915/gem/i915_gem_shmem.c char c; c 379 drivers/gpu/drm/i915/gem/i915_gem_shmem.c err = __get_user(c, user_data); c 383 drivers/gpu/drm/i915/gem/i915_gem_shmem.c err = __get_user(c, user_data + len - 1); c 727 drivers/gpu/drm/i915/gt/selftest_lrc.c struct preempt_client *c) c 729 drivers/gpu/drm/i915/gt/selftest_lrc.c c->ctx = kernel_context(i915); c 730 drivers/gpu/drm/i915/gt/selftest_lrc.c if (!c->ctx) c 733 drivers/gpu/drm/i915/gt/selftest_lrc.c if (igt_spinner_init(&c->spin, &i915->gt)) c 739 drivers/gpu/drm/i915/gt/selftest_lrc.c kernel_context_close(c->ctx); c 743 drivers/gpu/drm/i915/gt/selftest_lrc.c static void preempt_client_fini(struct preempt_client *c) c 745 drivers/gpu/drm/i915/gt/selftest_lrc.c igt_spinner_fini(&c->spin); c 746 drivers/gpu/drm/i915/gt/selftest_lrc.c kernel_context_close(c->ctx); c 997 drivers/gpu/drm/i915/gt/selftest_workarounds.c struct i915_gem_context *c; c 999 drivers/gpu/drm/i915/gt/selftest_workarounds.c c = kernel_context(i915); c 1000 drivers/gpu/drm/i915/gt/selftest_workarounds.c if (IS_ERR(c)) { c 1001 drivers/gpu/drm/i915/gt/selftest_workarounds.c err = PTR_ERR(c); c 1005 drivers/gpu/drm/i915/gt/selftest_workarounds.c client[i].scratch[0] = create_scratch(c->vm, 1024); c 1008 drivers/gpu/drm/i915/gt/selftest_workarounds.c kernel_context_close(c); c 1012 drivers/gpu/drm/i915/gt/selftest_workarounds.c client[i].scratch[1] = create_scratch(c->vm, 1024); c 1016 drivers/gpu/drm/i915/gt/selftest_workarounds.c kernel_context_close(c); c 1020 drivers/gpu/drm/i915/gt/selftest_workarounds.c client[i].ctx = c; c 198 drivers/gpu/drm/i915/gvt/dmabuf.c static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c) c 200 drivers/gpu/drm/i915/gvt/dmabuf.c if (c && c->x_hot <= c->width && c->y_hot <= c->height) c 212 drivers/gpu/drm/i915/gvt/dmabuf.c struct intel_vgpu_cursor_plane_format c; c 248 drivers/gpu/drm/i915/gvt/dmabuf.c ret = 
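Many of the connector helpers listed above (connector_to_anx78xx, connector_to_tc, con_to_imx_ldb_ch, connector_to_mcde_dsi, to_mtk_crtc) are one-line container_of() wrappers that recover the driver object in which a struct drm_connector or struct drm_crtc is embedded. The sketch below shows the same offsetof-based trick in plain C; struct my_connector, struct my_encoder and conn_to_my_encoder are invented for the example.

/* Standalone illustration of the container_of() pattern behind the
 * connector_to_xxx() helpers above; every name here is hypothetical. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct my_connector {
	int id;
};

struct my_encoder {
	const char *name;
	struct my_connector connector;	/* embedded member, as drivers embed drm_connector */
};

static struct my_encoder *conn_to_my_encoder(struct my_connector *c)
{
	return container_of(c, struct my_encoder, connector);
}

int main(void)
{
	struct my_encoder enc = { .name = "demo", .connector = { .id = 7 } };
	struct my_connector *conn = &enc.connector;

	/* Given only the embedded connector, recover the outer object. */
	printf("%s (connector id %d)\n", conn_to_my_encoder(conn)->name, conn->id);
	return 0;
}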
intel_vgpu_decode_cursor_plane(vgpu, &c); c 251 drivers/gpu/drm/i915/gvt/dmabuf.c info->start = c.base; c 252 drivers/gpu/drm/i915/gvt/dmabuf.c info->start_gpa = c.base_gpa; c 253 drivers/gpu/drm/i915/gvt/dmabuf.c info->width = c.width; c 254 drivers/gpu/drm/i915/gvt/dmabuf.c info->height = c.height; c 255 drivers/gpu/drm/i915/gvt/dmabuf.c info->stride = c.width * (c.bpp / 8); c 256 drivers/gpu/drm/i915/gvt/dmabuf.c info->drm_format = c.drm_format; c 258 drivers/gpu/drm/i915/gvt/dmabuf.c info->x_pos = c.x_pos; c 259 drivers/gpu/drm/i915/gvt/dmabuf.c info->y_pos = c.y_pos; c 261 drivers/gpu/drm/i915/gvt/dmabuf.c if (validate_hotspot(&c)) { c 262 drivers/gpu/drm/i915/gvt/dmabuf.c info->x_hot = c.x_hot; c 263 drivers/gpu/drm/i915/gvt/dmabuf.c info->y_hot = c.y_hot; c 240 drivers/gpu/drm/i915/i915_gpu_error.c static bool compress_init(struct compress *c) c 242 drivers/gpu/drm/i915/i915_gpu_error.c struct z_stream_s *zstream = &c->zstream; c 244 drivers/gpu/drm/i915/i915_gpu_error.c if (pool_init(&c->pool, ALLOW_FAIL)) c 251 drivers/gpu/drm/i915/i915_gpu_error.c pool_fini(&c->pool); c 255 drivers/gpu/drm/i915/i915_gpu_error.c c->tmp = NULL; c 257 drivers/gpu/drm/i915/i915_gpu_error.c c->tmp = pool_alloc(&c->pool, ALLOW_FAIL); c 262 drivers/gpu/drm/i915/i915_gpu_error.c static bool compress_start(struct compress *c) c 264 drivers/gpu/drm/i915/i915_gpu_error.c struct z_stream_s *zstream = &c->zstream; c 273 drivers/gpu/drm/i915/i915_gpu_error.c static void *compress_next_page(struct compress *c, c 281 drivers/gpu/drm/i915/i915_gpu_error.c page = pool_alloc(&c->pool, ALLOW_FAIL); c 288 drivers/gpu/drm/i915/i915_gpu_error.c static int compress_page(struct compress *c, c 292 drivers/gpu/drm/i915/i915_gpu_error.c struct z_stream_s *zstream = &c->zstream; c 295 drivers/gpu/drm/i915/i915_gpu_error.c if (c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE)) c 296 drivers/gpu/drm/i915/i915_gpu_error.c zstream->next_in = c->tmp; c 301 drivers/gpu/drm/i915/i915_gpu_error.c zstream->next_out = compress_next_page(c, dst); c 319 drivers/gpu/drm/i915/i915_gpu_error.c static int compress_flush(struct compress *c, c 322 drivers/gpu/drm/i915/i915_gpu_error.c struct z_stream_s *zstream = &c->zstream; c 327 drivers/gpu/drm/i915/i915_gpu_error.c zstream->next_out = compress_next_page(c, dst); c 348 drivers/gpu/drm/i915/i915_gpu_error.c static void compress_finish(struct compress *c) c 350 drivers/gpu/drm/i915/i915_gpu_error.c zlib_deflateEnd(&c->zstream); c 353 drivers/gpu/drm/i915/i915_gpu_error.c static void compress_fini(struct compress *c) c 355 drivers/gpu/drm/i915/i915_gpu_error.c kfree(c->zstream.workspace); c 356 drivers/gpu/drm/i915/i915_gpu_error.c if (c->tmp) c 357 drivers/gpu/drm/i915/i915_gpu_error.c pool_free(&c->pool, c->tmp); c 358 drivers/gpu/drm/i915/i915_gpu_error.c pool_fini(&c->pool); c 372 drivers/gpu/drm/i915/i915_gpu_error.c static bool compress_init(struct compress *c) c 374 drivers/gpu/drm/i915/i915_gpu_error.c return pool_init(&c->pool, ALLOW_FAIL) == 0; c 377 drivers/gpu/drm/i915/i915_gpu_error.c static bool compress_start(struct compress *c) c 382 drivers/gpu/drm/i915/i915_gpu_error.c static int compress_page(struct compress *c, c 388 drivers/gpu/drm/i915/i915_gpu_error.c ptr = pool_alloc(&c->pool, ALLOW_FAIL); c 399 drivers/gpu/drm/i915/i915_gpu_error.c static int compress_flush(struct compress *c, c 405 drivers/gpu/drm/i915/i915_gpu_error.c static void compress_finish(struct compress *c) c 409 drivers/gpu/drm/i915/i915_gpu_error.c static void compress_fini(struct compress *c) c 411 
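The i915_gpu_error.c compress_init()/compress_page()/compress_flush()/compress_finish() entries above stage error-capture pages through a zlib deflate stream, with a pool-backed no-op fallback when zlib is unavailable. The sketch below shows the same init, feed, flush, end staging against the ordinary userspace zlib API (deflateInit/deflate/deflateEnd); it is not the kernel's zlib_* interface, and compress_buffer is an invented helper. Build with -lz.

/* Userspace sketch of the init -> feed -> flush -> end staging used by the
 * compress_* helpers above. Uses plain zlib, not the kernel zlib_* API. */
#include <stdio.h>
#include <string.h>
#include <zlib.h>

/* Hypothetical helper: deflate src[0..len) into dst, return the compressed
 * size or -1 on error. */
static long compress_buffer(const unsigned char *src, size_t len,
			    unsigned char *dst, size_t dst_len)
{
	z_stream strm;
	int ret;

	memset(&strm, 0, sizeof(strm));
	if (deflateInit(&strm, Z_BEST_SPEED) != Z_OK)	/* compress_init/start stage */
		return -1;

	strm.next_in = (Bytef *)src;			/* compress_page stage: feed input */
	strm.avail_in = (uInt)len;
	strm.next_out = dst;
	strm.avail_out = (uInt)dst_len;

	ret = deflate(&strm, Z_FINISH);			/* compress_flush stage */
	deflateEnd(&strm);				/* compress_finish/fini stage */

	return ret == Z_STREAM_END ? (long)strm.total_out : -1;
}

int main(void)
{
	unsigned char src[4096], dst[8192];
	long out;

	memset(src, 'A', sizeof(src));			/* highly compressible "page" */
	out = compress_buffer(src, sizeof(src), dst, sizeof(dst));
	printf("compressed %zu bytes to %ld\n", sizeof(src), out);
	return out < 0;
}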
drivers/gpu/drm/i915/i915_gpu_error.c pool_fini(&c->pool); c 1284 drivers/gpu/drm/i915/i915_gpu_error.c struct capture_vma *c; c 1290 drivers/gpu/drm/i915/i915_gpu_error.c c = kmalloc(sizeof(*c), ATOMIC_MAYFAIL); c 1291 drivers/gpu/drm/i915/i915_gpu_error.c if (!c) c 1295 drivers/gpu/drm/i915/i915_gpu_error.c kfree(c); c 1299 drivers/gpu/drm/i915/i915_gpu_error.c c->slot = (void **)out; c 1300 drivers/gpu/drm/i915/i915_gpu_error.c *c->slot = i915_vma_get(vma); c 1302 drivers/gpu/drm/i915/i915_gpu_error.c c->next = next; c 1303 drivers/gpu/drm/i915/i915_gpu_error.c return c; c 1311 drivers/gpu/drm/i915/i915_gpu_error.c struct i915_capture_list *c; c 1316 drivers/gpu/drm/i915/i915_gpu_error.c for (c = request->capture_list; c; c = c->next) c 1331 drivers/gpu/drm/i915/i915_gpu_error.c for (c = request->capture_list; c; c = c->next) { c 1332 drivers/gpu/drm/i915/i915_gpu_error.c capture = capture_vma(capture, c->vma, &bo[count]); c 242 drivers/gpu/drm/i915/i915_reg.h #define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) c 243 drivers/gpu/drm/i915/i915_reg.h #define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) c 244 drivers/gpu/drm/i915/i915_reg.h #define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c)) c 245 drivers/gpu/drm/i915/i915_reg.h #define _MMIO_PLL3(pll, a, b, c) _MMIO(_PICK(pll, a, b, c)) c 5566 drivers/gpu/drm/i915/i915_reg.h #define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(c) (((c) - 1) << 5) c 5567 drivers/gpu/drm/i915/i915_reg.h #define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) ((c) - 1) c 10320 drivers/gpu/drm/i915/i915_reg.h #define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c) /* ports A and C only */ c 10321 drivers/gpu/drm/i915/i915_reg.h #define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c)) c 8095 drivers/gpu/drm/i915/intel_pm.c u16 c; c 8108 drivers/gpu/drm/i915/intel_pm.c u32 count1, count2, count3, m = 0, c = 0; c 8142 drivers/gpu/drm/i915/intel_pm.c c = cparams[i].c; c 8148 drivers/gpu/drm/i915/intel_pm.c ret = ((m * diff) + c); c 70 drivers/gpu/drm/imx/imx-ldb.c static inline struct imx_ldb_channel *con_to_imx_ldb_ch(struct drm_connector *c) c 72 drivers/gpu/drm/imx/imx-ldb.c return container_of(c, struct imx_ldb_channel, connector); c 121 drivers/gpu/drm/imx/imx-tve.c static inline struct imx_tve *con_to_tve(struct drm_connector *c) c 123 drivers/gpu/drm/imx/imx-tve.c return container_of(c, struct imx_tve, connector); c 36 drivers/gpu/drm/imx/parallel-display.c static inline struct imx_parallel_display *con_to_imxpd(struct drm_connector *c) c 38 drivers/gpu/drm/imx/parallel-display.c return container_of(c, struct imx_parallel_display, connector); c 67 drivers/gpu/drm/mcde/mcde_dsi.c static inline struct mcde_dsi *connector_to_mcde_dsi(struct drm_connector *c) c 69 drivers/gpu/drm/mcde/mcde_dsi.c return container_of(c, struct mcde_dsi, connector); c 61 drivers/gpu/drm/mediatek/mtk_drm_crtc.c static inline struct mtk_drm_crtc *to_mtk_crtc(struct drm_crtc *c) c 63 drivers/gpu/drm/mediatek/mtk_drm_crtc.c return container_of(c, struct mtk_drm_crtc, base); c 143 drivers/gpu/drm/mediatek/mtk_dsi.c #define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 
1 : 0)) c 186 drivers/gpu/drm/mediatek/mtk_dsi.c static inline struct mtk_dsi *connector_to_dsi(struct drm_connector *c) c 188 drivers/gpu/drm/mediatek/mtk_dsi.c return container_of(c, struct mtk_dsi, conn); c 178 drivers/gpu/drm/mediatek/mtk_hdmi.c static inline struct mtk_hdmi *hdmi_ctx_from_conn(struct drm_connector *c) c 180 drivers/gpu/drm/mediatek/mtk_hdmi.c return container_of(c, struct mtk_hdmi, conn); c 1231 drivers/gpu/drm/msm/adreno/a5xx_gpu.c u32 c = a5xx_hlsq_aperture_regs[i].count; c 1240 drivers/gpu/drm/msm/adreno/a5xx_gpu.c | c; c 1242 drivers/gpu/drm/msm/adreno/a5xx_gpu.c offset += c * sizeof(u32); c 1330 drivers/gpu/drm/msm/adreno/a5xx_gpu.c u32 c = a5xx_hlsq_aperture_regs[i].count; c 1332 drivers/gpu/drm/msm/adreno/a5xx_gpu.c for (j = 0; j < c; j++, pos++, o++) { c 69 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c struct dpu_hw_blk_reg_map *c = &ctx->hw; c 71 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c return DPU_REG_READ(c, CTL_FLUSH); c 227 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c struct dpu_hw_blk_reg_map *c = &ctx->hw; c 238 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c status = DPU_REG_READ(c, CTL_SW_RESET); c 249 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c struct dpu_hw_blk_reg_map *c = &ctx->hw; c 252 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c DPU_REG_WRITE(c, CTL_SW_RESET, 0x1); c 261 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c struct dpu_hw_blk_reg_map *c = &ctx->hw; c 264 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c status = DPU_REG_READ(c, CTL_SW_RESET); c 280 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c struct dpu_hw_blk_reg_map *c = &ctx->hw; c 284 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0); c 285 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0); c 286 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0); c 287 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0); c 294 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c struct dpu_hw_blk_reg_map *c = &ctx->hw; c 419 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg); c 420 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext); c 421 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2); c 422 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3); c 428 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c struct dpu_hw_blk_reg_map *c = &ctx->hw; c 452 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c DPU_REG_WRITE(c, CTL_TOP, intf_cfg); c 481 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c struct dpu_hw_ctl *c; c 484 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c c = kzalloc(sizeof(*c), GFP_KERNEL); c 485 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c if (!c) c 488 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c cfg = _ctl_offset(idx, m, addr, &c->hw); c 490 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c kfree(c); c 495 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c c->caps = cfg; c 496 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c _setup_ctl_ops(&c->ops, c->caps->features); c 497 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c c->idx = idx; c 498 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c c->mixer_count = m->mixer_count; c 499 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c c->mixer_hw_caps = m->mixer; c 501 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops); c 503 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c return c; c 114 
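The dpu_hw_ctl.c constructor above follows a shape repeated by the intf, lm, pingpong and vbif blocks further down: kzalloc the wrapper, look the block's register offset/capabilities up by index, free and bail out if the lookup fails, then wire an ops table from the capability bits and return the object. The sketch below reproduces only that shape with invented names (my_blk, my_blk_cfg, blk_lookup, my_blk_init); it is not the dpu code itself.

/* Standalone sketch of the "alloc, look up cfg by index, wire ops, return"
 * constructor shape used by the dpu_hw_* blocks above. All names invented. */
#include <stdio.h>
#include <stdlib.h>

struct my_blk_cfg {
	int id;
	unsigned long features;	/* capability bits that select the ops */
};

struct my_blk_ops {
	void (*reset)(void);
};

struct my_blk {
	const struct my_blk_cfg *cfg;
	struct my_blk_ops ops;
};

static void generic_reset(void) { puts("reset"); }

static const struct my_blk_cfg cfg_table[] = {
	{ .id = 0, .features = 0x1 },
	{ .id = 1, .features = 0x3 },
};

/* Stand-in for the _ctl_offset()/_intf_offset() lookups: find cfg by index. */
static const struct my_blk_cfg *blk_lookup(int idx)
{
	if (idx < 0 || idx >= (int)(sizeof(cfg_table) / sizeof(cfg_table[0])))
		return NULL;
	return &cfg_table[idx];
}

static struct my_blk *my_blk_init(int idx)
{
	struct my_blk *c = calloc(1, sizeof(*c));	/* kzalloc analogue */

	if (!c)
		return NULL;

	c->cfg = blk_lookup(idx);
	if (!c->cfg) {			/* lookup failed: undo the allocation */
		free(c);
		return NULL;
	}

	if (c->cfg->features & 0x1)	/* _setup_*_ops analogue */
		c->ops.reset = generic_reset;

	return c;
}

int main(void)
{
	struct my_blk *blk = my_blk_init(1);

	if (blk && blk->ops.reset)
		blk->ops.reset();
	free(blk);
	return 0;
}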
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h int (*reset)(struct dpu_hw_ctl *c); c 85 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c struct dpu_hw_blk_reg_map *c = &ctx->hw; c 97 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c intf_cfg = DPU_REG_READ(c, INTF_CONFIG); c 169 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c DPU_REG_WRITE(c, INTF_HSYNC_CTL, hsync_ctl); c 170 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c DPU_REG_WRITE(c, INTF_VSYNC_PERIOD_F0, vsync_period * hsync_period); c 171 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c DPU_REG_WRITE(c, INTF_VSYNC_PULSE_WIDTH_F0, c 173 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c DPU_REG_WRITE(c, INTF_DISPLAY_HCTL, display_hctl); c 174 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c DPU_REG_WRITE(c, INTF_DISPLAY_V_START_F0, display_v_start); c 175 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c DPU_REG_WRITE(c, INTF_DISPLAY_V_END_F0, display_v_end); c 176 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c DPU_REG_WRITE(c, INTF_ACTIVE_HCTL, active_hctl); c 177 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c DPU_REG_WRITE(c, INTF_ACTIVE_V_START_F0, active_v_start); c 178 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c DPU_REG_WRITE(c, INTF_ACTIVE_V_END_F0, active_v_end); c 179 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c DPU_REG_WRITE(c, INTF_BORDER_COLOR, p->border_clr); c 180 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c DPU_REG_WRITE(c, INTF_UNDERFLOW_COLOR, p->underflow_clr); c 181 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c DPU_REG_WRITE(c, INTF_HSYNC_SKEW, p->hsync_skew); c 182 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c DPU_REG_WRITE(c, INTF_POLARITY_CTL, polarity_ctl); c 183 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c DPU_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3); c 184 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg); c 185 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format); c 192 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c struct dpu_hw_blk_reg_map *c = &intf->hw; c 194 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c DPU_REG_WRITE(c, INTF_TIMING_ENGINE_EN, enable != 0); c 201 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c struct dpu_hw_blk_reg_map *c = &intf->hw; c 209 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c fetch_enable = DPU_REG_READ(c, INTF_CONFIG); c 212 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c DPU_REG_WRITE(c, INTF_PROG_FETCH_START, c 218 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c DPU_REG_WRITE(c, INTF_CONFIG, fetch_enable); c 225 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c struct dpu_hw_blk_reg_map *c = &intf->hw; c 227 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c s->is_en = DPU_REG_READ(c, INTF_TIMING_ENGINE_EN); c 229 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c s->frame_count = DPU_REG_READ(c, INTF_FRAME_COUNT); c 230 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c s->line_count = DPU_REG_READ(c, INTF_LINE_COUNT); c 239 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c struct dpu_hw_blk_reg_map *c; c 244 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c c = &intf->hw; c 246 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c return DPU_REG_READ(c, INTF_LINE_COUNT); c 265 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c struct dpu_hw_intf *c; c 268 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c c = kzalloc(sizeof(*c), GFP_KERNEL); c 269 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c if (!c) c 272 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c cfg = _intf_offset(idx, m, addr, &c->hw); c 274 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c kfree(c); c 282 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c c->idx = idx; c 283 
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c c->cap = cfg; c 284 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c c->mdss = m; c 285 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c _setup_intf_ops(&c->ops, c->cap->features); c 287 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c dpu_hw_blk_init(&c->base, DPU_HW_BLK_INTF, idx, &dpu_hw_ops); c 289 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c return c; c 66 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c struct dpu_hw_blk_reg_map *c = &ctx->hw; c 70 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c op_mode = DPU_REG_READ(c, LM_OP_MODE); c 73 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c DPU_REG_WRITE(c, LM_OUT_SIZE, outsize); c 80 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c DPU_REG_WRITE(c, LM_OP_MODE, op_mode); c 87 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c struct dpu_hw_blk_reg_map *c = &ctx->hw; c 90 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c DPU_REG_WRITE(c, LM_BORDER_COLOR_0, c 93 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c DPU_REG_WRITE(c, LM_BORDER_COLOR_1, c 102 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c struct dpu_hw_blk_reg_map *c = &ctx->hw; c 114 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c DPU_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, const_alpha); c 115 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op); c 121 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c struct dpu_hw_blk_reg_map *c = &ctx->hw; c 131 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c DPU_REG_WRITE(c, LM_BLEND0_FG_ALPHA + stage_off, fg_alpha); c 132 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c DPU_REG_WRITE(c, LM_BLEND0_BG_ALPHA + stage_off, bg_alpha); c 133 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op); c 139 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c struct dpu_hw_blk_reg_map *c = &ctx->hw; c 143 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c op_mode = DPU_REG_READ(c, LM_OP_MODE); c 147 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c DPU_REG_WRITE(c, LM_OP_MODE, op_mode); c 169 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c struct dpu_hw_mixer *c; c 172 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c c = kzalloc(sizeof(*c), GFP_KERNEL); c 173 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c if (!c) c 176 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c cfg = _lm_offset(idx, m, addr, &c->hw); c 178 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c kfree(c); c 183 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c c->idx = idx; c 184 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c c->cap = cfg; c 185 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c _setup_mixer_ops(m, &c->ops, c->cap->features); c 187 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c dpu_hw_blk_init(&c->base, DPU_HW_BLK_LM, idx, &dpu_hw_ops); c 189 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c return c; c 55 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c struct dpu_hw_blk_reg_map *c; c 60 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c c = &pp->hw; c 68 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg); c 69 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c DPU_REG_WRITE(c, PP_SYNC_CONFIG_HEIGHT, te->sync_cfg_height); c 70 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c DPU_REG_WRITE(c, PP_VSYNC_INIT_VAL, te->vsync_init_val); c 71 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c DPU_REG_WRITE(c, PP_RD_PTR_IRQ, te->rd_ptr_irq); c 72 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c DPU_REG_WRITE(c, PP_START_POS, te->start_pos); c 73 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c DPU_REG_WRITE(c, PP_SYNC_THRESH, c 76 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c DPU_REG_WRITE(c, PP_SYNC_WRCOUNT, 
c 85 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c struct dpu_hw_blk_reg_map *c; c 92 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c c = &pp->hw; c 93 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c rc = readl_poll_timeout(c->base_off + c->blk_off + PP_LINE_COUNT, c 101 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c struct dpu_hw_blk_reg_map *c; c 105 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c c = &pp->hw; c 107 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c DPU_REG_WRITE(c, PP_TEAR_CHECK_EN, enable); c 114 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c struct dpu_hw_blk_reg_map *c = &pp->hw; c 121 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c c = &pp->hw; c 122 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c cfg = DPU_REG_READ(c, PP_SYNC_CONFIG_VSYNC); c 128 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg); c 137 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c struct dpu_hw_blk_reg_map *c; c 142 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c c = &pp->hw; c 144 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c val = DPU_REG_READ(c, PP_VSYNC_INIT_VAL); c 147 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c val = DPU_REG_READ(c, PP_INT_COUNT_VAL); c 151 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c val = DPU_REG_READ(c, PP_LINE_COUNT); c 159 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c struct dpu_hw_blk_reg_map *c = &pp->hw; c 165 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c c = &pp->hw; c 167 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c init = DPU_REG_READ(c, PP_VSYNC_INIT_VAL) & 0xFFFF; c 168 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c height = DPU_REG_READ(c, PP_SYNC_CONFIG_HEIGHT) & 0xFFFF; c 173 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c line = DPU_REG_READ(c, PP_INT_COUNT_VAL) & 0xFFFF; c 200 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c struct dpu_hw_pingpong *c; c 203 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c c = kzalloc(sizeof(*c), GFP_KERNEL); c 204 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c if (!c) c 207 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c cfg = _pingpong_offset(idx, m, addr, &c->hw); c 209 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c kfree(c); c 213 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c c->idx = idx; c 214 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c c->caps = cfg; c 215 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c _setup_pingpong_ops(&c->ops, c->caps); c 217 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c dpu_hw_blk_init(&c->base, DPU_HW_BLK_PINGPONG, idx, &dpu_hw_ops); c 219 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c return c; c 240 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c struct dpu_hw_blk_reg_map *c; c 260 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c c = &ctx->hw; c 261 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c opmode = DPU_REG_READ(c, op_mode_off + idx); c 302 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c DPU_REG_WRITE(c, SSPP_FETCH_CONFIG, c 307 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL, c 331 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c DPU_REG_WRITE(c, format_off + idx, src_format); c 332 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c DPU_REG_WRITE(c, unpack_pat_off + idx, unpack); c 333 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c DPU_REG_WRITE(c, op_mode_off + idx, opmode); c 336 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c DPU_REG_WRITE(c, SSPP_UBWC_ERROR_STATUS + idx, BIT(31)); c 342 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c struct dpu_hw_blk_reg_map *c; c 352 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c c = 
&ctx->hw; c 379 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_LR + idx, lr_pe[0]); c 380 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_TB + idx, tb_pe[0]); c 381 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_REQ_PIXELS + idx, c 385 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_LR + idx, lr_pe[1]); c 386 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_TB + idx, tb_pe[1]); c 387 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS + idx, c 391 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_LR + idx, lr_pe[3]); c 392 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_TB + idx, lr_pe[3]); c 393 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_REQ_PIXELS + idx, c 432 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c struct dpu_hw_blk_reg_map *c; c 440 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c c = &ctx->hw; c 469 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c ystride0 = DPU_REG_READ(c, SSPP_SRC_YSTRIDE0 + idx); c 470 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c ystride1 = DPU_REG_READ(c, SSPP_SRC_YSTRIDE1 + idx); c 488 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c DPU_REG_WRITE(c, src_size_off + idx, src_size); c 489 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c DPU_REG_WRITE(c, src_xy_off + idx, src_xy); c 490 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c DPU_REG_WRITE(c, out_size_off + idx, dst_size); c 491 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c DPU_REG_WRITE(c, out_xy_off + idx, dst_xy); c 493 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c DPU_REG_WRITE(c, SSPP_SRC_YSTRIDE0 + idx, ystride0); c 494 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c DPU_REG_WRITE(c, SSPP_SRC_YSTRIDE1 + idx, ystride1); c 634 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c static void _setup_layer_ops(struct dpu_hw_pipe *c, c 638 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c c->ops.setup_format = dpu_hw_sspp_setup_format; c 639 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c c->ops.setup_rects = dpu_hw_sspp_setup_rects; c 640 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c c->ops.setup_sourceaddress = dpu_hw_sspp_setup_sourceaddress; c 641 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c c->ops.setup_solidfill = dpu_hw_sspp_setup_solidfill; c 642 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c c->ops.setup_pe = dpu_hw_sspp_setup_pe_config; c 646 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c c->ops.setup_danger_safe_lut = c 648 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c c->ops.setup_creq_lut = dpu_hw_sspp_setup_creq_lut; c 649 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c c->ops.setup_qos_ctrl = dpu_hw_sspp_setup_qos_ctrl; c 654 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c c->ops.setup_csc = dpu_hw_sspp_setup_csc; c 656 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c if (test_bit(DPU_SSPP_SMART_DMA_V1, &c->cap->features) || c 657 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c test_bit(DPU_SSPP_SMART_DMA_V2, &c->cap->features)) c 658 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c c->ops.setup_multirect = dpu_hw_sspp_setup_multirect; c 661 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3; c 662 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver; c 666 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c c->ops.setup_cdp = dpu_hw_sspp_setup_cdp; c 57 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c 
struct dpu_hw_blk_reg_map *c; c 64 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c c = &mdp->hw; c 86 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c DPU_REG_WRITE(c, SSPP_SPARE, cfg->split_flush_en ? 0x1 : 0x0); c 87 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c DPU_REG_WRITE(c, SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower_pipe); c 88 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c DPU_REG_WRITE(c, SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper_pipe); c 89 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c DPU_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1); c 95 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c struct dpu_hw_blk_reg_map *c; c 103 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c c = &mdp->hw; c 111 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c reg_val = DPU_REG_READ(c, reg_off); c 118 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c DPU_REG_WRITE(c, reg_off, new_val); c 129 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c struct dpu_hw_blk_reg_map *c; c 135 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c c = &mdp->hw; c 137 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c value = DPU_REG_READ(c, DANGER_STATUS); c 158 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c struct dpu_hw_blk_reg_map *c; c 165 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c c = &mdp->hw; c 166 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c reg = DPU_REG_READ(c, MDP_VSYNC_SEL); c 176 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c DPU_REG_WRITE(c, MDP_VSYNC_SEL, reg); c 209 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c DPU_REG_WRITE(c, wd_load_value, c 212 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c DPU_REG_WRITE(c, wd_ctl, BIT(0)); /* clear timer */ c 213 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c reg = DPU_REG_READ(c, wd_ctl2); c 216 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c DPU_REG_WRITE(c, wd_ctl2, reg); c 226 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c struct dpu_hw_blk_reg_map *c; c 232 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c c = &mdp->hw; c 234 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c value = DPU_REG_READ(c, SAFE_STATUS); c 254 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c struct dpu_hw_blk_reg_map c; c 263 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c c = mdp->hw; c 264 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c c.blk_off = 0x0; c 265 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c DPU_REG_WRITE(&c, UBWC_STATIC, m->mdp[0].ubwc_static); c 270 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c struct dpu_hw_blk_reg_map *c; c 275 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c c = &mdp->hw; c 277 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c DPU_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1); c 62 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c void dpu_reg_write(struct dpu_hw_blk_reg_map *c, c 68 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c if (c->log_mask & dpu_hw_util_log_mask) c 70 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c name, c->blk_off + reg_off, val); c 71 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c writel_relaxed(val, c->base_off + c->blk_off + reg_off); c 74 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off) c 76 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c return readl_relaxed(c->base_off + c->blk_off + reg_off); c 84 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c static void _dpu_hw_setup_scaler3_lut(struct dpu_hw_blk_reg_map *c, c 145 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, c 155 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, QSEED3_COEF_LUT_CTRL + offset, BIT(0)); c 159 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c static void _dpu_hw_setup_scaler3_de(struct dpu_hw_blk_reg_map *c, c 193 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c 
DPU_REG_WRITE(c, QSEED3_DE_SHARPEN + offset, sharp_lvl); c 194 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, QSEED3_DE_SHARPEN_CTL + offset, sharp_ctl); c 195 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, QSEED3_DE_SHAPE_CTL + offset, shape_ctl); c 196 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, QSEED3_DE_THRESHOLD + offset, de_thr); c 197 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_0 + offset, adjust_a); c 198 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_1 + offset, adjust_b); c 199 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_2 + offset, adjust_c); c 203 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c, c 241 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c _dpu_hw_setup_scaler3_de(c, &scaler3_cfg->de, scaler_offset); c 246 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c _dpu_hw_setup_scaler3_lut(c, scaler3_cfg, c 255 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, QSEED3_PHASE_INIT + scaler_offset, phase_init); c 257 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, QSEED3_PHASE_INIT_Y_H + scaler_offset, c 259 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, QSEED3_PHASE_INIT_Y_V + scaler_offset, c 261 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, QSEED3_PHASE_INIT_UV_H + scaler_offset, c 263 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, QSEED3_PHASE_INIT_UV_V + scaler_offset, c 267 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, QSEED3_PHASE_STEP_Y_H + scaler_offset, c 270 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, QSEED3_PHASE_STEP_Y_V + scaler_offset, c 273 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, QSEED3_PHASE_STEP_UV_H + scaler_offset, c 276 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, QSEED3_PHASE_STEP_UV_V + scaler_offset, c 279 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, QSEED3_PRELOAD + scaler_offset, preload); c 281 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, QSEED3_SRC_SIZE_Y_RGB_A + scaler_offset, src_y_rgb); c 283 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, QSEED3_SRC_SIZE_UV + scaler_offset, src_uv); c 285 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, QSEED3_DST_SIZE + scaler_offset, dst); c 299 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, QSEED3_OP_MODE + scaler_offset, op_mode); c 302 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c, c 305 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c return DPU_REG_READ(c, QSEED3_HW_VERSION + scaler_offset); c 308 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c, c 319 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, csc_reg_off, val); c 322 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, csc_reg_off + 0x4, val); c 325 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, csc_reg_off + 0x8, val); c 328 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, csc_reg_off + 0xc, val); c 330 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, csc_reg_off + 0x10, val); c 334 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, csc_reg_off + 0x14, val); c 336 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, csc_reg_off + 0x18, val); c 338 
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, csc_reg_off + 0x1c, val); c 342 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, csc_reg_off + 0x20, val); c 344 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, csc_reg_off + 0x24, val); c 346 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, csc_reg_off + 0x28, val); c 349 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, csc_reg_off + 0x2c, data->csc_pre_bv[0]); c 350 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, csc_reg_off + 0x30, data->csc_pre_bv[1]); c 351 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, csc_reg_off + 0x34, data->csc_pre_bv[2]); c 354 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, csc_reg_off + 0x38, data->csc_post_bv[0]); c 355 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, csc_reg_off + 0x3c, data->csc_post_bv[1]); c 356 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c DPU_REG_WRITE(c, csc_reg_off + 0x40, data->csc_post_bv[2]); c 300 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h void dpu_reg_write(struct dpu_hw_blk_reg_map *c, c 304 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off); c 306 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h #define DPU_REG_WRITE(c, off, val) dpu_reg_write(c, off, val, #off) c 307 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h #define DPU_REG_READ(c, off) dpu_reg_read(c, off) c 311 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c, c 316 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c, c 319 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c, c 38 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c struct dpu_hw_blk_reg_map *c; c 43 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c c = &vbif->hw; c 44 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c pnd = DPU_REG_READ(c, VBIF_XIN_PND_ERR); c 45 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c src = DPU_REG_READ(c, VBIF_XIN_SRC_ERR); c 52 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c DPU_REG_WRITE(c, VBIF_XIN_CLR_ERR, pnd | src); c 58 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c struct dpu_hw_blk_reg_map *c; c 70 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c c = &vbif->hw; c 79 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c reg_val = DPU_REG_READ(c, reg_off); c 82 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c DPU_REG_WRITE(c, reg_off, reg_val); c 88 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c struct dpu_hw_blk_reg_map *c = &vbif->hw; c 100 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c reg_val = DPU_REG_READ(c, reg_off); c 103 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c DPU_REG_WRITE(c, reg_off, reg_val); c 109 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c struct dpu_hw_blk_reg_map *c = &vbif->hw; c 122 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c reg_val = DPU_REG_READ(c, reg_off); c 131 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c struct dpu_hw_blk_reg_map *c = &vbif->hw; c 134 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL0); c 141 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c DPU_REG_WRITE(c, VBIF_XIN_HALT_CTRL0, reg_val); c 147 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c struct dpu_hw_blk_reg_map *c = &vbif->hw; c 150 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL1); c 158 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c struct dpu_hw_blk_reg_map *c; c 164 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c c = 
&vbif->hw; c 169 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c reg_val = DPU_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high); c 170 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c reg_val_lvl = DPU_REG_READ(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high); c 180 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c DPU_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val); c 181 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c DPU_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl); c 186 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c struct dpu_hw_blk_reg_map *c; c 192 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c c = &vbif->hw; c 194 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c reg_val = DPU_REG_READ(c, VBIF_WRITE_GATHER_EN); c 196 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c DPU_REG_WRITE(c, VBIF_WRITE_GATHER_EN, reg_val); c 238 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c struct dpu_hw_vbif *c; c 241 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c c = kzalloc(sizeof(*c), GFP_KERNEL); c 242 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c if (!c) c 245 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c cfg = _top_offset(idx, m, addr, &c->hw); c 247 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c kfree(c); c 254 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c c->idx = idx; c 255 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c c->cap = cfg; c 256 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c _setup_vbif_ops(&c->ops, c->cap->features); c 260 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c return c; c 630 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c int c; c 635 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c for (c = 0; c < ctl_mgr->nctl; c++) c 636 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c if ((ctl_mgr->ctls[c].status & checkm) == match) c 643 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c for (c = 0; c < ctl_mgr->nctl; c++) c 644 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c if ((ctl_mgr->ctls[c].status & checkm) == match) c 651 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c ctl = &ctl_mgr->ctls[c]; c 664 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c int c; c 666 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c for (c = 0; c < ctl_mgr->nctl; c++) { c 667 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c struct mdp5_ctl *ctl = &ctl_mgr->ctls[c]; c 689 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c int c, ret; c 714 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c for (c = 0; c < ctl_mgr->nctl; c++) { c 715 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c struct mdp5_ctl *ctl = &ctl_mgr->ctls[c]; c 717 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c if (WARN_ON(!ctl_cfg->base[c])) { c 718 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c DRM_DEV_ERROR(dev->dev, "CTL_%d: base is null!\n", c); c 724 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c ctl->id = c; c 725 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c ctl->reg_offset = ctl_cfg->base[c]; c 736 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c for (c = 0; c < ARRAY_SIZE(hw_cfg->intf.connect); c++) c 737 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c if (hw_cfg->intf.connect[c] == INTF_DSI) c 63 drivers/gpu/drm/msm/disp/mdp_format.c #define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs, yuv) { \ c 72 drivers/gpu/drm/msm/disp/mdp_format.c .cpp = c, \ c 338 drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c int64_t c = (p->k1 + p->ki*i + p->ki2*i*i + c 347 drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c (*filters[k])[j][i] = (c + id5/2) >> 39 c 11 drivers/gpu/drm/nouveau/dispnv50/disp.h #define NV50_DISP_SYNC(c, o) ((c) * 0x040 + (o)) c 13 drivers/gpu/drm/nouveau/dispnv50/disp.h #define NV50_DISP_WNDW_SEM0(c) NV50_DISP_SYNC(1 + (c), 0x00) c 14 
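dpu_reg_write() and dpu_reg_read() above resolve every access as base_off + blk_off + reg_off through a small struct dpu_hw_blk_reg_map, and the DPU_REG_WRITE/DPU_REG_READ macros simply forward to them. The sketch below models the same base-plus-block-offset register map over an ordinary memory buffer instead of real MMIO (no readl_relaxed/writel_relaxed here); struct reg_map, reg_write and reg_read are invented names.

/* Userspace model of the dpu_hw_blk_reg_map idea above: one base pointer,
 * a per-block offset, and tiny read/write helpers. Backed by a plain
 * buffer here instead of real MMIO; all names are hypothetical. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct reg_map {
	uint8_t *base;		/* base_off analogue */
	uint32_t blk_off;	/* per-block offset analogue */
};

static void reg_write(const struct reg_map *c, uint32_t reg_off, uint32_t val)
{
	memcpy(c->base + c->blk_off + reg_off, &val, sizeof(val));
}

static uint32_t reg_read(const struct reg_map *c, uint32_t reg_off)
{
	uint32_t val;

	memcpy(&val, c->base + c->blk_off + reg_off, sizeof(val));
	return val;
}

int main(void)
{
	static uint8_t fake_mmio[0x3000];
	struct reg_map ctl = { .base = fake_mmio, .blk_off = 0x2000 };

	reg_write(&ctl, 0x18, 0xdeadbeef);	/* e.g. a CTL_FLUSH-style register */
	printf("0x%08x\n", reg_read(&ctl, 0x18));
	return 0;
}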
drivers/gpu/drm/nouveau/dispnv50/disp.h #define NV50_DISP_WNDW_SEM1(c) NV50_DISP_SYNC(1 + (c), 0x10) c 15 drivers/gpu/drm/nouveau/dispnv50/disp.h #define NV50_DISP_WNDW_NTFY(c) NV50_DISP_SYNC(1 + (c), 0x20) c 16 drivers/gpu/drm/nouveau/dispnv50/disp.h #define NV50_DISP_BASE_SEM0(c) NV50_DISP_WNDW_SEM0(0 + (c)) c 17 drivers/gpu/drm/nouveau/dispnv50/disp.h #define NV50_DISP_BASE_SEM1(c) NV50_DISP_WNDW_SEM1(0 + (c)) c 18 drivers/gpu/drm/nouveau/dispnv50/disp.h #define NV50_DISP_BASE_NTFY(c) NV50_DISP_WNDW_NTFY(0 + (c)) c 19 drivers/gpu/drm/nouveau/dispnv50/disp.h #define NV50_DISP_OVLY_SEM0(c) NV50_DISP_WNDW_SEM0(4 + (c)) c 20 drivers/gpu/drm/nouveau/dispnv50/disp.h #define NV50_DISP_OVLY_SEM1(c) NV50_DISP_WNDW_SEM1(4 + (c)) c 21 drivers/gpu/drm/nouveau/dispnv50/disp.h #define NV50_DISP_OVLY_NTFY(c) NV50_DISP_WNDW_NTFY(4 + (c)) c 3 drivers/gpu/drm/nouveau/dispnv50/head.h #define nv50_head(c) container_of((c), struct nv50_head, base.base) c 42 drivers/gpu/drm/nouveau/include/nvif/object.h #define nvif_rd(a,f,b,c) ({ \ c 46 drivers/gpu/drm/nouveau/include/nvif/object.h _data = f((u8 __iomem *)_object->map.ptr + (c)); \ c 48 drivers/gpu/drm/nouveau/include/nvif/object.h _data = nvif_object_rd(_object, (b), (c)); \ c 51 drivers/gpu/drm/nouveau/include/nvif/object.h #define nvif_wr(a,f,b,c,d) ({ \ c 54 drivers/gpu/drm/nouveau/include/nvif/object.h f((d), (u8 __iomem *)_object->map.ptr + (c)); \ c 56 drivers/gpu/drm/nouveau/include/nvif/object.h nvif_object_wr(_object, (b), (c), (d)); \ c 61 drivers/gpu/drm/nouveau/include/nvif/object.h #define nvif_wr08(a,b,c) nvif_wr((a), iowrite8, 1, (b), (u8)(c)) c 62 drivers/gpu/drm/nouveau/include/nvif/object.h #define nvif_wr16(a,b,c) nvif_wr((a), iowrite16_native, 2, (b), (u16)(c)) c 63 drivers/gpu/drm/nouveau/include/nvif/object.h #define nvif_wr32(a,b,c) nvif_wr((a), iowrite32_native, 4, (b), (u32)(c)) c 64 drivers/gpu/drm/nouveau/include/nvif/object.h #define nvif_mask(a,b,c,d) ({ \ c 67 drivers/gpu/drm/nouveau/include/nvif/object.h nvif_wr32(__object, _addr, (_data & ~(c)) | (d)); \ c 71 drivers/gpu/drm/nouveau/include/nvif/object.h #define nvif_mthd(a,b,c,d) nvif_object_mthd((a), (b), (c), (d)) c 87 drivers/gpu/drm/nouveau/include/nvkm/core/memory.h #define nvkm_fill(t,s,o,a,d,c) do { \ c 88 drivers/gpu/drm/nouveau/include/nvkm/core/memory.h u64 _a = (a), _c = (c), _d = (d), _o = _a >> s, _s = _c << s; \ c 103 drivers/gpu/drm/nouveau/include/nvkm/core/memory.h #define nvkm_fo32(o,a,d,c) nvkm_fill(32, 2, (o), (a), (d), (c)) c 104 drivers/gpu/drm/nouveau/include/nvkm/core/memory.h #define nvkm_fo64(o,a,d,c) nvkm_fill(64, 3, (o), (a), (d), (c)) c 330 drivers/gpu/drm/nouveau/nouveau_display.c int c = 0; \ c 333 drivers/gpu/drm/nouveau/nouveau_display.c c++; \ c 336 drivers/gpu/drm/nouveau/nouveau_display.c if (c) { \ c 337 drivers/gpu/drm/nouveau/nouveau_display.c p = drm_property_create(dev, DRM_MODE_PROP_ENUM, n, c); \ c 282 drivers/gpu/drm/nouveau/nouveau_dmem.c unsigned long c; c 288 drivers/gpu/drm/nouveau/nouveau_dmem.c for (c = 0; c < npages;) { c 296 drivers/gpu/drm/nouveau/nouveau_dmem.c if (c) c 306 drivers/gpu/drm/nouveau/nouveau_dmem.c while (i < DMEM_CHUNK_NPAGES && c < npages) { c 307 drivers/gpu/drm/nouveau/nouveau_dmem.c pages[c] = chunk->pfn_first + i; c 310 drivers/gpu/drm/nouveau/nouveau_dmem.c c++; c 635 drivers/gpu/drm/nouveau/nouveau_dmem.c unsigned long c, i; c 649 drivers/gpu/drm/nouveau/nouveau_dmem.c for (i = 0; i < npages; i += c) { c 650 drivers/gpu/drm/nouveau/nouveau_dmem.c c = min(SG_MAX_SINGLE_ALLOC, npages); c 651 
drivers/gpu/drm/nouveau/nouveau_dmem.c args.end = start + (c << PAGE_SHIFT); c 242 drivers/gpu/drm/nouveau/nouveau_drv.h #define NV_PRINTK(l,c,f,a...) do { \ c 243 drivers/gpu/drm/nouveau/nouveau_drv.h struct nouveau_cli *_cli = (c); \ c 261 drivers/gpu/drm/nouveau/nouveau_drv.h #define NV_PRINTK_ONCE(l,c,f,a...) NV_PRINTK(l##_once,c,f, ##a) c 461 drivers/gpu/drm/nouveau/nouveau_reg.h #define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4) c 197 drivers/gpu/drm/nouveau/nvkm/core/mm.c u32 c = 0, a; c 210 drivers/gpu/drm/nouveau/nvkm/core/mm.c c = next->offset - e; c 220 drivers/gpu/drm/nouveau/nvkm/core/mm.c c += (e - s) - a; c 222 drivers/gpu/drm/nouveau/nvkm/core/mm.c if (c && !region_tail(mm, this, c)) c 38 drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c nv50_disp_mthd_list(struct nv50_disp *disp, int debug, u32 base, int c, c 48 drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c u32 prev = nvkm_rd32(device, list->data[i].addr + base + c); c 26 drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h #define CONN_MSG(c,l,f,a...) do { \ c 27 drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h struct nvkm_conn *_conn = (c); \ c 31 drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h #define CONN_ERR(c,f,a...) CONN_MSG((c), error, f, ##a) c 32 drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h #define CONN_DBG(c,f,a...) CONN_MSG((c), debug, f, ##a) c 33 drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h #define CONN_TRACE(c,f,a...) CONN_MSG((c), trace, f, ##a) c 33 drivers/gpu/drm/nouveau/nvkm/engine/falcon.c int c = 0; c 35 drivers/gpu/drm/nouveau/nvkm/engine/falcon.c while (falcon->func->sclass[c].oclass) { c 36 drivers/gpu/drm/nouveau/nvkm/engine/falcon.c if (c++ == index) { c 42 drivers/gpu/drm/nouveau/nvkm/engine/falcon.c return c; c 253 drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c int c = 0; c 262 drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c while ((sclass = fifo->func->chan[c])) { c 263 drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c if (c++ == index) { c 271 drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c return c; c 209 drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c int ret, i, c; c 211 drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c for (; c = 0, i = __ffs64(mask), mask; mask &= ~(1ULL << i)) { c 230 drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c while (engine->func->sclass[c].oclass) { c 231 drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c if (c++ == index) { c 239 drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c index -= c; c 80 drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c const struct nv04_fifo_ramfc *c; c 97 drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c c = fifo->ramfc; c 100 drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c u32 rm = ((1ULL << c->bits) - 1) << c->regs; c 101 drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c u32 cm = ((1ULL << c->bits) - 1) << c->ctxs; c 102 drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c u32 rv = (nvkm_rd32(device, c->regp) & rm) >> c->regs; c 103 drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm); c 104 drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs)); c 105 drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c } while ((++c)->bits); c 108 drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c c = fifo->ramfc; c 110 drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c nvkm_wr32(device, c->regp, 0x00000000); c 111 drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c } while ((++c)->bits); c 145 drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c const struct 
nv04_fifo_ramfc *c = fifo->ramfc; c 149 drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c nvkm_wo32(imem->ramfc, chan->ramfc + c->ctxp, 0x00000000); c 150 drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c } while ((++c)->bits); c 120 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c int c = 0; c 122 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c if (fifo->func->user.ctor && c++ == index) { c 128 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c if (fifo->func->chan.ctor && c++ == index) { c 134 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c return c; c 25 drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h #define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4) c 92 drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c int c = 0; c 101 drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c while (gr->func->sclass[c].oclass) { c 102 drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c if (c++ == index) { c 108 drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c return c; c 18 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h #define mmio_vram(a,b,c,d) gf100_grctx_mmio_data((a), (b), (c), (d)) c 19 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h #define mmio_refn(a,b,c,d,e) gf100_grctx_mmio_item((a), (b), (c), (d), (e)) c 20 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h #define mmio_skip(a,b,c) mmio_refn((a), (b), (c), -1, -1) c 21 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h #define mmio_wr32(a,b,c) mmio_refn((a), (b), (c), 0, -1) c 92 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h #define cp_bra(c, f, s, n) _cp_bra((c), 0, CP_FLAG_##f, CP_FLAG_##f##_##s, n) c 93 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h #define cp_cal(c, f, s, n) _cp_bra((c), 1, CP_FLAG_##f, CP_FLAG_##f##_##s, n) c 94 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h #define cp_ret(c, f, s) _cp_bra((c), 2, CP_FLAG_##f, CP_FLAG_##f##_##s, 0) c 101 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h #define cp_wait(c, f, s) _cp_wait((c), CP_FLAG_##f, CP_FLAG_##f##_##s) c 108 drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h #define cp_set(c, f, s) _cp_set((c), CP_FLAG_##f, CP_FLAG_##f##_##s) c 302 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c int c = 0; c 304 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c while (gr->func->sclass[c].oclass) { c 305 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c if (c++ == index) { c 312 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c return c; c 958 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c int index, c = ltc->zbc_min, d = ltc->zbc_min, s = ltc->zbc_min; c 961 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c gf100_gr_zbc_color_get(gr, 1, & zero[0], &zero[4]); c++; c 962 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c gf100_gr_zbc_color_get(gr, 2, & one[0], &one[4]); c++; c 963 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c gf100_gr_zbc_color_get(gr, 4, &f32_0[0], &f32_0[4]); c++; c 964 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c gf100_gr_zbc_color_get(gr, 4, &f32_1[0], &f32_1[4]); c++; c 974 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c for (index = c; index <= ltc->zbc_max; index++) c 378 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c int c, s, m; c 388 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c for (c = 0; c < ARRAY_SIZE(args->v0.ctr); c++) { c 392 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c for (s = 0; s < ARRAY_SIZE(args->v0.ctr[c].signal); s++) { c 394 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c args->v0.ctr[c].signal[s], c 396 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c if (args->v0.ctr[c].signal[s] && !sig[s]) c 400 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c src[s][m] = 
args->v0.ctr[c].source[s][m]; c 407 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c ret = nvkm_perfctr_new(sdom, c, args->v0.domain, sig, src, c 408 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c args->v0.ctr[c].logic_op, &ctr[c]); c 425 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c for (c = 0; c < ARRAY_SIZE(ctr); c++) c 426 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c dom->ctr[c] = ctr[c]; c 62 drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c int c = 0; c 64 drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c while (sw->func->sclass[c].ctor) { c 65 drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c if (c++ == index) { c 73 drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c return c; c 31 drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c int c = 0; c 33 drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c while (xtensa->func->sclass[c].oclass) { c 34 drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c if (c++ == index) { c 40 drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c return c; c 33 drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c } c; c 39 drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c memcpy(&hwsq->c.data[hwsq->c.size], data, size * sizeof(data[0])); c 40 drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c hwsq->c.size += size; c 53 drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c memset(hwsq->c.data, 0x7f, sizeof(hwsq->c.data)); c 54 drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c hwsq->c.size = 0; c 68 drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c hwsq->c.size = (hwsq->c.size + 4) / 4; c 69 drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c if (hwsq->c.size <= bus->func->hwsq_size) { c 72 drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c (u32 *)hwsq->c.data, c 73 drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c hwsq->c.size); c 81 drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c for (i = 0; ret && i < hwsq->c.size; i++) c 82 drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c nvkm_error(subdev, "\t%08x\n", ((u32 *)hwsq->c.data)[i]); c 248 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c #define ram_nuts(s,r,m,d,c) \ c 249 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c gk104_ram_nuts((s), &(s)->fuc.r_##r, (m), (d), (c)) c 43 drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c int c, s; c 44 drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c for (c = 0; c < ltc->ltc_nr; c++) { c 46 drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c const u32 addr = 0x1410c8 + (c * 0x2000) + (s * 0x400); c 93 drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c gf100_ltc_lts_intr(struct nvkm_ltc *ltc, int c, int s) c 97 drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c u32 base = 0x141000 + (c * 0x2000) + (s * 0x400); c 104 drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c nvkm_error(subdev, "LTC%d_LTS%d: %08x [%s]\n", c, s, stat, msg); c 118 drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c u32 s, c = __ffs(mask); c 120 drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c gf100_ltc_lts_intr(ltc, c, s); c 121 drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c mask &= ~(1 << c); c 42 drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c int c, s; c 43 drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c for (c = 0; c < ltc->ltc_nr; c++) { c 45 drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c const u32 addr = 0x14046c + (c * 0x2000) + (s * 0x200); c 72 drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c gm107_ltc_intr_lts(struct nvkm_ltc *ltc, int c, int s) c 76 drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c u32 base = 0x140400 + (c * 0x2000) + (s * 0x200); c 83 drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c nvkm_error(subdev, "LTC%d_LTS%d: %08x [%s]\n", c, s, intr, msg); c 97 
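Several nouveau entries above (nvkm/engine/falcon.c, fifo/base.c, fifo/gk104.c, gr/base.c, gr/gf100.c, sw/base.c, xtensa.c) share one enumeration idiom: walk a zero-terminated class table, return the index-th entry if it exists, otherwise fall through and return the count seen so far. The C sketch below isolates that idiom with invented names (struct sclass, get_sclass); it is not nouveau code.

/* Isolated sketch of the zero-terminated-table enumeration idiom used by
 * the nouveau class-lookup entries above. Names are invented. */
#include <stdio.h>

struct sclass {
	unsigned int oclass;	/* 0 terminates the table */
};

static const struct sclass table[] = {
	{ 0x902d },
	{ 0xa140 },
	{ 0 },			/* terminator */
};

/* Return 'index' and copy out the entry if it exists, otherwise return the
 * number of entries, mirroring the "if (c++ == index) ... return c;" shape. */
static int get_sclass(int index, unsigned int *oclass)
{
	int c = 0;

	while (table[c].oclass) {
		if (c++ == index) {
			*oclass = table[c - 1].oclass;
			return index;
		}
	}
	return c;
}

int main(void)
{
	unsigned int oclass = 0;
	int ret = get_sclass(1, &oclass);

	if (ret == 1)
		printf("class[1] = 0x%04x\n", oclass);
	printf("total classes: %d\n", get_sclass(100, &oclass));
	return 0;
}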
drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c u32 s, c = __ffs(mask); c 99 drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c gm107_ltc_intr_lts(ltc, c, s); c 100 drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c mask &= ~(1 << c); c 34 drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c u32 s, c = __ffs(mask); c 36 drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c gm107_ltc_intr_lts(ltc, c, s); c 37 drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c mask &= ~(1 << c); c 324 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h #define VMM_FO(m,o,d,c,b) nvkm_fo##b((m)->memory, (o), (d), (c)) c 325 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h #define VMM_WO(m,o,d,c,b) nvkm_wo##b((m)->memory, (o), (d)) c 326 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h #define VMM_XO(m,v,o,d,c,b,fn,f,a...) do { \ c 329 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h VMM_##fn((m), (m)->base + _pteo, _data, (c), b); \ c 333 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h #define VMM_FO032(m,v,o,d,c) \ c 334 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h VMM_XO((m),(v),(o),(d),(c), 32, FO, "%08x %08x", (c)) c 337 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h #define VMM_FO064(m,v,o,d,c) \ c 338 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h VMM_XO((m),(v),(o),(d),(c), 64, FO, "%016llx %08x", (c)) c 340 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h #define VMM_XO128(m,v,o,lo,hi,c,f,a...) do { \ c 341 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h u32 _pteo = (o), _ptes = (c); \ c 352 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h #define VMM_FO128(m,v,o,lo,hi,c) do { \ c 354 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h VMM_XO128((m),(v),(o),(lo),(hi),(c), " %08x", (c)); \ c 14 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c } c; c 23 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c if (memx->c.mthd) { c 24 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c nvkm_wr32(device, 0x10a1c4, (memx->c.size << 16) | memx->c.mthd); c 25 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c for (i = 0; i < memx->c.size; i++) c 26 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c nvkm_wr32(device, 0x10a1c4, memx->c.data[i]); c 27 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c memx->c.mthd = 0; c 28 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c memx->c.size = 0; c 35 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c if ((memx->c.size + size >= ARRAY_SIZE(memx->c.data)) || c 36 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c (memx->c.mthd && memx->c.mthd != mthd)) c 38 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c memcpy(&memx->c.data[memx->c.size], data, size * sizeof(data[0])); c 39 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c memx->c.size += size; c 40 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c memx->c.mthd = mthd; c 423 drivers/gpu/drm/omapdrm/dss/hdmi5_core.c unsigned int c, m, r; c 440 drivers/gpu/drm/omapdrm/dss/hdmi5_core.c c = (ptr[1] >> 6) & 0x3; c 459 drivers/gpu/drm/omapdrm/dss/hdmi5_core.c (c << 6) | (m << 4) | (r << 0)); c 1011 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c char c, bool ovw) c 1017 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c map[y][x] = c; c 1021 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c char c) c 1023 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c map[p->y / ydiv][p->x / xdiv] = c; c 56 drivers/gpu/drm/qxl/qxl_object.c u32 c = 0; c 63 drivers/gpu/drm/qxl/qxl_object.c qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag; c 65 drivers/gpu/drm/qxl/qxl_object.c qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV | pflag; c 66 drivers/gpu/drm/qxl/qxl_object.c qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | 
TTM_PL_FLAG_VRAM | pflag; c 69 drivers/gpu/drm/qxl/qxl_object.c qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag; c 70 drivers/gpu/drm/qxl/qxl_object.c if (!c) c 71 drivers/gpu/drm/qxl/qxl_object.c qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; c 72 drivers/gpu/drm/qxl/qxl_object.c qbo->placement.num_placement = c; c 73 drivers/gpu/drm/qxl/qxl_object.c qbo->placement.num_busy_placement = c; c 74 drivers/gpu/drm/qxl/qxl_object.c for (i = 0; i < c; ++i) { c 9127 drivers/gpu/drm/radeon/cik.c fixed20_12 a, b, c; c 9152 drivers/gpu/drm/radeon/cik.c c.full = dfixed_const(lb_fill_bw); c 9153 drivers/gpu/drm/radeon/cik.c b.full = dfixed_div(c, b); c 2073 drivers/gpu/drm/radeon/evergreen.c fixed20_12 a, b, c; c 2096 drivers/gpu/drm/radeon/evergreen.c c.full = dfixed_const(lb_fill_bw); c 2097 drivers/gpu/drm/radeon/evergreen.c b.full = dfixed_div(c, b); c 2167 drivers/gpu/drm/radeon/evergreen.c fixed20_12 a, b, c; c 2258 drivers/gpu/drm/radeon/evergreen.c c.full = dfixed_const(latency_watermark_a); c 2259 drivers/gpu/drm/radeon/evergreen.c c.full = dfixed_mul(c, b); c 2260 drivers/gpu/drm/radeon/evergreen.c c.full = dfixed_mul(c, radeon_crtc->hsc); c 2261 drivers/gpu/drm/radeon/evergreen.c c.full = dfixed_div(c, a); c 2263 drivers/gpu/drm/radeon/evergreen.c c.full = dfixed_div(c, a); c 2264 drivers/gpu/drm/radeon/evergreen.c priority_a_mark = dfixed_trunc(c); c 2270 drivers/gpu/drm/radeon/evergreen.c c.full = dfixed_const(latency_watermark_b); c 2271 drivers/gpu/drm/radeon/evergreen.c c.full = dfixed_mul(c, b); c 2272 drivers/gpu/drm/radeon/evergreen.c c.full = dfixed_mul(c, radeon_crtc->hsc); c 2273 drivers/gpu/drm/radeon/evergreen.c c.full = dfixed_div(c, a); c 2275 drivers/gpu/drm/radeon/evergreen.c c.full = dfixed_div(c, a); c 2276 drivers/gpu/drm/radeon/evergreen.c priority_b_mark = dfixed_trunc(c); c 146 drivers/gpu/drm/radeon/mkregtable.c unsigned nlloop, i, j, n, c, id; c 149 drivers/gpu/drm/radeon/mkregtable.c c = t->nentry; c 154 drivers/gpu/drm/radeon/mkregtable.c if (n > c) c 155 drivers/gpu/drm/radeon/mkregtable.c n = c; c 156 drivers/gpu/drm/radeon/mkregtable.c c -= n; c 1305 drivers/gpu/drm/radeon/r100.c unsigned c, i; c 1314 drivers/gpu/drm/radeon/r100.c c = radeon_get_ib_value(p, idx++) & 0x1F; c 1315 drivers/gpu/drm/radeon/r100.c if (c > 16) { c 1321 drivers/gpu/drm/radeon/r100.c track->num_arrays = c; c 1322 drivers/gpu/drm/radeon/r100.c for (i = 0; i < (c - 1); i+=2, idx+=3) { c 1348 drivers/gpu/drm/radeon/r100.c if (c & 1) { c 3209 drivers/gpu/drm/radeon/r100.c int c; c 3413 drivers/gpu/drm/radeon/r100.c c = 3; c 3416 drivers/gpu/drm/radeon/r100.c c = 1; c 3420 drivers/gpu/drm/radeon/r100.c c = 3; c 3425 drivers/gpu/drm/radeon/r100.c temp_ff.full = dfixed_const(c); c 380 drivers/gpu/drm/radeon/r600_dpm.c void r600_set_tpc(struct radeon_device *rdev, u32 c) c 382 drivers/gpu/drm/radeon/r600_dpm.c WREG32_P(CG_TPC, TPCC(c), ~TPCC_MASK); c 161 drivers/gpu/drm/radeon/r600_dpm.h void r600_set_tpc(struct radeon_device *rdev, u32 c); c 103 drivers/gpu/drm/radeon/radeon_object.c u32 c = 0, i; c 113 drivers/gpu/drm/radeon/radeon_object.c rbo->placements[c].fpfn = c 115 drivers/gpu/drm/radeon/radeon_object.c rbo->placements[c++].flags = TTM_PL_FLAG_WC | c 120 drivers/gpu/drm/radeon/radeon_object.c rbo->placements[c].fpfn = 0; c 121 drivers/gpu/drm/radeon/radeon_object.c rbo->placements[c++].flags = TTM_PL_FLAG_WC | c 128 drivers/gpu/drm/radeon/radeon_object.c rbo->placements[c].fpfn = 0; c 129 drivers/gpu/drm/radeon/radeon_object.c 
rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | c 134 drivers/gpu/drm/radeon/radeon_object.c rbo->placements[c].fpfn = 0; c 135 drivers/gpu/drm/radeon/radeon_object.c rbo->placements[c++].flags = TTM_PL_FLAG_WC | c 139 drivers/gpu/drm/radeon/radeon_object.c rbo->placements[c].fpfn = 0; c 140 drivers/gpu/drm/radeon/radeon_object.c rbo->placements[c++].flags = TTM_PL_FLAG_CACHED | c 147 drivers/gpu/drm/radeon/radeon_object.c rbo->placements[c].fpfn = 0; c 148 drivers/gpu/drm/radeon/radeon_object.c rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | c 153 drivers/gpu/drm/radeon/radeon_object.c rbo->placements[c].fpfn = 0; c 154 drivers/gpu/drm/radeon/radeon_object.c rbo->placements[c++].flags = TTM_PL_FLAG_WC | c 158 drivers/gpu/drm/radeon/radeon_object.c rbo->placements[c].fpfn = 0; c 159 drivers/gpu/drm/radeon/radeon_object.c rbo->placements[c++].flags = TTM_PL_FLAG_CACHED | c 163 drivers/gpu/drm/radeon/radeon_object.c if (!c) { c 164 drivers/gpu/drm/radeon/radeon_object.c rbo->placements[c].fpfn = 0; c 165 drivers/gpu/drm/radeon/radeon_object.c rbo->placements[c++].flags = TTM_PL_MASK_CACHING | c 169 drivers/gpu/drm/radeon/radeon_object.c rbo->placement.num_placement = c; c 170 drivers/gpu/drm/radeon/radeon_object.c rbo->placement.num_busy_placement = c; c 172 drivers/gpu/drm/radeon/radeon_object.c for (i = 0; i < c; ++i) { c 61 drivers/gpu/drm/radeon/radeon_vce.c const char *fw_name, *c; c 98 drivers/gpu/drm/radeon/radeon_vce.c c = rdev->vce_fw->data; c 99 drivers/gpu/drm/radeon/radeon_vce.c for (;size > 0; --size, ++c) c 100 drivers/gpu/drm/radeon/radeon_vce.c if (strncmp(c, fw_version, strlen(fw_version)) == 0) c 106 drivers/gpu/drm/radeon/radeon_vce.c c += strlen(fw_version); c 107 drivers/gpu/drm/radeon/radeon_vce.c if (sscanf(c, "%2hhd.%2hhd.%2hhd]", &start, &mid, &end) != 3) c 113 drivers/gpu/drm/radeon/radeon_vce.c c = rdev->vce_fw->data; c 114 drivers/gpu/drm/radeon/radeon_vce.c for (;size > 0; --size, ++c) c 115 drivers/gpu/drm/radeon/radeon_vce.c if (strncmp(c, fb_version, strlen(fb_version)) == 0) c 121 drivers/gpu/drm/radeon/radeon_vce.c c += strlen(fb_version); c 122 drivers/gpu/drm/radeon/radeon_vce.c if (sscanf(c, "%2u]", &rdev->vce.fb_version) != 1) c 278 drivers/gpu/drm/radeon/rs690.c fixed20_12 a, b, c; c 311 drivers/gpu/drm/radeon/rs690.c c.full = dfixed_const(256); c 312 drivers/gpu/drm/radeon/rs690.c a.full = dfixed_div(b, c); c 336 drivers/gpu/drm/radeon/rs690.c c.full = dfixed_const(2); c 337 drivers/gpu/drm/radeon/rs690.c b.full = dfixed_div(b, c); c 961 drivers/gpu/drm/radeon/rv515.c fixed20_12 a, b, c; c 991 drivers/gpu/drm/radeon/rv515.c c.full = dfixed_const(256); c 992 drivers/gpu/drm/radeon/rv515.c a.full = dfixed_div(b, c); c 1016 drivers/gpu/drm/radeon/rv515.c c.full = dfixed_const(2); c 1017 drivers/gpu/drm/radeon/rv515.c b.full = dfixed_div(b, c); c 2217 drivers/gpu/drm/radeon/si.c fixed20_12 a, b, c; c 2242 drivers/gpu/drm/radeon/si.c c.full = dfixed_const(lb_fill_bw); c 2243 drivers/gpu/drm/radeon/si.c b.full = dfixed_div(c, b); c 2312 drivers/gpu/drm/radeon/si.c fixed20_12 a, b, c; c 2409 drivers/gpu/drm/radeon/si.c c.full = dfixed_const(latency_watermark_a); c 2410 drivers/gpu/drm/radeon/si.c c.full = dfixed_mul(c, b); c 2411 drivers/gpu/drm/radeon/si.c c.full = dfixed_mul(c, radeon_crtc->hsc); c 2412 drivers/gpu/drm/radeon/si.c c.full = dfixed_div(c, a); c 2414 drivers/gpu/drm/radeon/si.c c.full = dfixed_div(c, a); c 2415 drivers/gpu/drm/radeon/si.c priority_a_mark = dfixed_trunc(c); c 2421 drivers/gpu/drm/radeon/si.c c.full = 
dfixed_const(latency_watermark_b); c 2422 drivers/gpu/drm/radeon/si.c c.full = dfixed_mul(c, b); c 2423 drivers/gpu/drm/radeon/si.c c.full = dfixed_mul(c, radeon_crtc->hsc); c 2424 drivers/gpu/drm/radeon/si.c c.full = dfixed_div(c, a); c 2426 drivers/gpu/drm/radeon/si.c c.full = dfixed_div(c, a); c 2427 drivers/gpu/drm/radeon/si.c priority_b_mark = dfixed_trunc(c); c 76 drivers/gpu/drm/rcar-du/rcar_du_crtc.h #define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc) c 77 drivers/gpu/drm/rcar-du/rcar_du_crtc.h #define wb_to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, writeback) c 78 drivers/gpu/drm/rcar-du/rcar_lvds.c #define connector_to_rcar_lvds(c) \ c 79 drivers/gpu/drm/rcar-du/rcar_lvds.c container_of(c, struct rcar_lvds, connector) c 86 drivers/gpu/drm/rcar-du/rcar_lvds_regs.h #define LVDCHCR_CHSEL_CH(n, c) ((((c) - (n)) & 3) << ((n) * 4)) c 28 drivers/gpu/drm/rockchip/cdn-dp-core.c #define connector_to_dp(c) \ c 29 drivers/gpu/drm/rockchip/cdn-dp-core.c container_of(c, struct cdn_dp_device, connector) c 31 drivers/gpu/drm/rockchip/cdn-dp-core.c #define encoder_to_dp(c) \ c 32 drivers/gpu/drm/rockchip/cdn-dp-core.c container_of(c, struct cdn_dp_device, encoder) c 33 drivers/gpu/drm/rockchip/rockchip_lvds.c #define connector_to_lvds(c) \ c 34 drivers/gpu/drm/rockchip/rockchip_lvds.c container_of(c, struct rockchip_lvds, connector) c 36 drivers/gpu/drm/rockchip/rockchip_lvds.c #define encoder_to_lvds(c) \ c 37 drivers/gpu/drm/rockchip/rockchip_lvds.c container_of(c, struct rockchip_lvds, encoder) c 20 drivers/gpu/drm/rockchip/rockchip_rgb.c #define encoder_to_rgb(c) container_of(c, struct rockchip_rgb, encoder) c 321 drivers/gpu/drm/shmobile/shmob_drm_crtc.c #define to_shmob_crtc(c) container_of(c, struct shmob_drm_crtc, crtc) c 593 drivers/gpu/drm/shmobile/shmob_drm_crtc.c #define to_shmob_connector(c) \ c 594 drivers/gpu/drm/shmobile/shmob_drm_crtc.c container_of(c, struct shmob_drm_connector, connector) c 478 drivers/gpu/drm/sti/sti_hqvdp.c static void hqvdp_dbg_dump_cmd(struct seq_file *s, struct sti_hqvdp_cmd *c) c 483 drivers/gpu/drm/sti/sti_hqvdp.c seq_printf(s, "\n\t %-20s 0x%08X", "Config", c->top.config); c 484 drivers/gpu/drm/sti/sti_hqvdp.c switch (c->top.config) { c 499 drivers/gpu/drm/sti/sti_hqvdp.c seq_printf(s, "\n\t %-20s 0x%08X", "MemFormat", c->top.mem_format); c 500 drivers/gpu/drm/sti/sti_hqvdp.c seq_printf(s, "\n\t %-20s 0x%08X", "CurrentY", c->top.current_luma); c 501 drivers/gpu/drm/sti/sti_hqvdp.c seq_printf(s, "\n\t %-20s 0x%08X", "CurrentC", c->top.current_chroma); c 502 drivers/gpu/drm/sti/sti_hqvdp.c seq_printf(s, "\n\t %-20s 0x%08X", "YSrcPitch", c->top.luma_src_pitch); c 504 drivers/gpu/drm/sti/sti_hqvdp.c c->top.chroma_src_pitch); c 506 drivers/gpu/drm/sti/sti_hqvdp.c c->top.input_frame_size); c 508 drivers/gpu/drm/sti/sti_hqvdp.c c->top.input_frame_size & 0x0000FFFF, c 509 drivers/gpu/drm/sti/sti_hqvdp.c c->top.input_frame_size >> 16); c 511 drivers/gpu/drm/sti/sti_hqvdp.c c->top.input_viewport_size); c 512 drivers/gpu/drm/sti/sti_hqvdp.c src_w = c->top.input_viewport_size & 0x0000FFFF; c 513 drivers/gpu/drm/sti/sti_hqvdp.c src_h = c->top.input_viewport_size >> 16; c 518 drivers/gpu/drm/sti/sti_hqvdp.c c->hvsrc.output_picture_size); c 519 drivers/gpu/drm/sti/sti_hqvdp.c dst_w = c->hvsrc.output_picture_size & 0x0000FFFF; c 520 drivers/gpu/drm/sti/sti_hqvdp.c dst_h = c->hvsrc.output_picture_size >> 16; c 522 drivers/gpu/drm/sti/sti_hqvdp.c seq_printf(s, "\n\t %-20s 0x%08X", "ParamCtrl", c->hvsrc.param_ctrl); c 525 
drivers/gpu/drm/sti/sti_hqvdp.c hqvdp_dbg_get_lut(c->hvsrc.yh_coef)); c 527 drivers/gpu/drm/sti/sti_hqvdp.c hqvdp_dbg_get_lut(c->hvsrc.ch_coef)); c 529 drivers/gpu/drm/sti/sti_hqvdp.c hqvdp_dbg_get_lut(c->hvsrc.yv_coef)); c 531 drivers/gpu/drm/sti/sti_hqvdp.c hqvdp_dbg_get_lut(c->hvsrc.cv_coef)); c 546 drivers/gpu/drm/sti/sti_hqvdp.c seq_printf(s, "\n\t %-20s 0x%08X\t", "Config", c->csdi.config); c 547 drivers/gpu/drm/sti/sti_hqvdp.c switch (c->csdi.config) { c 559 drivers/gpu/drm/sti/sti_hqvdp.c seq_printf(s, "\n\t %-20s 0x%08X", "Config2", c->csdi.config2); c 560 drivers/gpu/drm/sti/sti_hqvdp.c seq_printf(s, "\n\t %-20s 0x%08X", "DcdiConfig", c->csdi.dcdi_config); c 130 drivers/gpu/drm/sun4i/sun4i_backend.h #define SUN4I_BACKEND_IYUVADD_REG(c) (0x930 + (0x4 * (c))) c 132 drivers/gpu/drm/sun4i/sun4i_backend.h #define SUN4I_BACKEND_IYUVLINEWIDTH_REG(c) (0x940 + (0x4 * (c))) c 134 drivers/gpu/drm/sun4i/sun4i_backend.h #define SUN4I_BACKEND_YGCOEF_REG(c) (0x950 + (0x4 * (c))) c 136 drivers/gpu/drm/sun4i/sun4i_backend.h #define SUN4I_BACKEND_URCOEF_REG(c) (0x960 + (0x4 * (c))) c 138 drivers/gpu/drm/sun4i/sun4i_backend.h #define SUN4I_BACKEND_VBCOEF_REG(c) (0x970 + (0x4 * (c))) c 75 drivers/gpu/drm/sun4i/sun4i_frontend.h #define SUN4I_FRONTEND_CSC_COEF_REG(c) (0x070 + (0x4 * (c))) c 74 drivers/gpu/drm/sun4i/sun8i_mixer.h #define SUN50I_MIXER_BLEND_CSC_CONST_VAL(d, c) (((d) << 16) | ((c) & 0xffff)) c 137 drivers/gpu/drm/tegra/drm.h static inline struct tegra_output *connector_to_output(struct drm_connector *c) c 139 drivers/gpu/drm/tegra/drm.h return container_of(c, struct tegra_output, connector); c 35 drivers/gpu/drm/tegra/dsi.h #define DSI_CONTROL_CHANNEL(c) (((c) & 0x3) << 16) c 87 drivers/gpu/drm/virtio/virtgpu_object.c u32 c = 1; c 96 drivers/gpu/drm/virtio/virtgpu_object.c vgbo->placement.num_placement = c; c 97 drivers/gpu/drm/virtio/virtgpu_object.c vgbo->placement.num_busy_placement = c; c 568 drivers/gpu/drm/vmwgfx/vmwgfx_binding.c SVGA3dCmdSetTextureState c; c 579 drivers/gpu/drm/vmwgfx/vmwgfx_binding.c cmd->body.c.cid = bi->ctx->id; c 742 drivers/gpu/host1x/bus.c struct host1x_client *c; c 759 drivers/gpu/host1x/bus.c list_for_each_entry(c, &clients, list) { c 760 drivers/gpu/host1x/bus.c if (c == client) { c 761 drivers/gpu/host1x/bus.c list_del_init(&c->list); c 163 drivers/gpu/ipu-v3/ipu-di.c struct di_sync_config *c = &config[i]; c 166 drivers/gpu/ipu-v3/ipu-di.c if ((c->run_count >= 0x1000) || (c->offset_count >= 0x1000) || c 167 drivers/gpu/ipu-v3/ipu-di.c (c->repeat_count >= 0x1000) || c 168 drivers/gpu/ipu-v3/ipu-di.c (c->cnt_up >= 0x400) || c 169 drivers/gpu/ipu-v3/ipu-di.c (c->cnt_down >= 0x400)) { c 175 drivers/gpu/ipu-v3/ipu-di.c reg = DI_SW_GEN0_RUN_COUNT(c->run_count) | c 176 drivers/gpu/ipu-v3/ipu-di.c DI_SW_GEN0_RUN_SRC(c->run_src) | c 177 drivers/gpu/ipu-v3/ipu-di.c DI_SW_GEN0_OFFSET_COUNT(c->offset_count) | c 178 drivers/gpu/ipu-v3/ipu-di.c DI_SW_GEN0_OFFSET_SRC(c->offset_src); c 181 drivers/gpu/ipu-v3/ipu-di.c reg = DI_SW_GEN1_CNT_POL_GEN_EN(c->cnt_polarity_gen_en) | c 182 drivers/gpu/ipu-v3/ipu-di.c DI_SW_GEN1_CNT_CLR_SRC(c->cnt_clr_src) | c 184 drivers/gpu/ipu-v3/ipu-di.c c->cnt_polarity_trigger_src) | c 185 drivers/gpu/ipu-v3/ipu-di.c DI_SW_GEN1_CNT_POL_CLR_SRC(c->cnt_polarity_clr_src) | c 186 drivers/gpu/ipu-v3/ipu-di.c DI_SW_GEN1_CNT_DOWN(c->cnt_down) | c 187 drivers/gpu/ipu-v3/ipu-di.c DI_SW_GEN1_CNT_UP(c->cnt_up); c 190 drivers/gpu/ipu-v3/ipu-di.c if (c->repeat_count == 0) c 197 drivers/gpu/ipu-v3/ipu-di.c reg |= c->repeat_count << (16 * ((wave_gen - 1) & 
0x1)); c 180 drivers/gpu/ipu-v3/ipu-ic.c const u16 (*c)[3]; c 188 drivers/gpu/ipu-v3/ipu-ic.c c = (const u16 (*)[3])csc->params.coeff; c 191 drivers/gpu/ipu-v3/ipu-ic.c param = ((a[0] & 0x1f) << 27) | ((c[0][0] & 0x1ff) << 18) | c 192 drivers/gpu/ipu-v3/ipu-ic.c ((c[1][1] & 0x1ff) << 9) | (c[2][2] & 0x1ff); c 199 drivers/gpu/ipu-v3/ipu-ic.c param = ((a[1] & 0x1f) << 27) | ((c[0][1] & 0x1ff) << 18) | c 200 drivers/gpu/ipu-v3/ipu-ic.c ((c[1][0] & 0x1ff) << 9) | (c[2][0] & 0x1ff); c 206 drivers/gpu/ipu-v3/ipu-ic.c param = ((a[2] & 0x1f) << 27) | ((c[0][2] & 0x1ff) << 18) | c 207 drivers/gpu/ipu-v3/ipu-ic.c ((c[1][2] & 0x1ff) << 9) | (c[2][1] & 0x1ff); c 166 drivers/gpu/vga/vga_switcheroo.c #define client_is_audio(c) ((c)->id & ID_BIT_AUDIO) c 167 drivers/gpu/vga/vga_switcheroo.c #define client_is_vga(c) (!client_is_audio(c)) c 168 drivers/gpu/vga/vga_switcheroo.c #define client_id(c) ((c)->id & ~ID_BIT_AUDIO) c 659 drivers/hid/hid-asus.c #define asus_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, \ c 660 drivers/hid/hid-asus.c max, EV_KEY, (c)) c 24 drivers/hid/hid-belkin.c #define belkin_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ c 25 drivers/hid/hid-belkin.c EV_KEY, (c)) c 36 drivers/hid/hid-cherry.c #define ch_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ c 37 drivers/hid/hid-cherry.c EV_KEY, (c)) c 24 drivers/hid/hid-chicony.c #define ch_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ c 25 drivers/hid/hid-chicony.c EV_KEY, (c)) c 126 drivers/hid/hid-creative-sb0540.c u64 c; c 128 drivers/hid/hid-creative-sb0540.c c = 0; c 130 drivers/hid/hid-creative-sb0540.c c |= (u64) (((data & (((u64) 1) << i)) ? 1 : 0)) c 133 drivers/hid/hid-creative-sb0540.c return (c); c 245 drivers/hid/hid-dr.c #define map_abs(c) hid_map_usage(hi, usage, bit, max, EV_ABS, (c)) c 246 drivers/hid/hid-dr.c #define map_rel(c) hid_map_usage(hi, usage, bit, max, EV_REL, (c)) c 22 drivers/hid/hid-ezkey.c #define ez_map_rel(c) hid_map_usage(hi, usage, bit, max, EV_REL, (c)) c 23 drivers/hid/hid-ezkey.c #define ez_map_key(c) hid_map_usage(hi, usage, bit, max, EV_KEY, (c)) c 22 drivers/hid/hid-gyration.c #define gy_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ c 23 drivers/hid/hid-gyration.c EV_KEY, (c)) c 51 drivers/hid/hid-input.c #define map_abs(c) hid_map_usage(hidinput, usage, &bit, &max, EV_ABS, (c)) c 52 drivers/hid/hid-input.c #define map_rel(c) hid_map_usage(hidinput, usage, &bit, &max, EV_REL, (c)) c 53 drivers/hid/hid-input.c #define map_key(c) hid_map_usage(hidinput, usage, &bit, &max, EV_KEY, (c)) c 54 drivers/hid/hid-input.c #define map_led(c) hid_map_usage(hidinput, usage, &bit, &max, EV_LED, (c)) c 56 drivers/hid/hid-input.c #define map_abs_clear(c) hid_map_usage_clear(hidinput, usage, &bit, \ c 57 drivers/hid/hid-input.c &max, EV_ABS, (c)) c 58 drivers/hid/hid-input.c #define map_key_clear(c) hid_map_usage_clear(hidinput, usage, &bit, \ c 59 drivers/hid/hid-input.c &max, EV_KEY, (c)) c 18 drivers/hid/hid-kensington.c #define ks_map_key(c) hid_map_usage(hi, usage, bit, max, EV_KEY, (c)) c 18 drivers/hid/hid-lcpower.c #define ts_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ c 19 drivers/hid/hid-lcpower.c EV_KEY, (c)) c 53 drivers/hid/hid-lenovo.c #define map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c)) c 535 drivers/hid/hid-lg.c #define lg_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ c 536 drivers/hid/hid-lg.c EV_KEY, (c)) c 78 drivers/hid/hid-microsoft.c #define ms_map_key_clear(c) 
hid_map_usage_clear(hi, usage, bit, max, \ c 79 drivers/hid/hid-microsoft.c EV_KEY, (c)) c 31 drivers/hid/hid-monterey.c #define mr_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ c 32 drivers/hid/hid-monterey.c EV_KEY, (c)) c 1288 drivers/hid/hid-multitouch.c #define mt_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, \ c 1289 drivers/hid/hid-multitouch.c max, EV_KEY, (c)) c 97 drivers/hid/hid-ntrig.c __u8 c = ((raw[0] & 0x03) << 3) | ((raw[3] & 0xe0) >> 5); c 106 drivers/hid/hid-ntrig.c return sprintf(buf, "%u.%u.%u.%u.%u", a, b, c, d, e); c 35 drivers/hid/hid-petalynx.c #define pl_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ c 36 drivers/hid/hid-petalynx.c EV_KEY, (c)) c 135 drivers/hid/hid-picolcd.h #define hid_hw_request(a, b, c) \ c 138 drivers/hid/hid-picolcd.h hid_hw_request(a, b, c); \ c 92 drivers/hid/hid-samsung.c #define samsung_kbd_mouse_map_key_clear(c) \ c 93 drivers/hid/hid-samsung.c hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c)) c 33 drivers/hid/hid-sunplus.c #define sp_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ c 34 drivers/hid/hid-sunplus.c EV_KEY, (c)) c 19 drivers/hid/hid-tivo.c #define tivo_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ c 20 drivers/hid/hid-tivo.c EV_KEY, (c)) c 24 drivers/hid/hid-topseed.c #define ts_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ c 25 drivers/hid/hid-topseed.c EV_KEY, (c)) c 59 drivers/hid/hid-twinhan.c #define th_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ c 60 drivers/hid/hid-twinhan.c EV_KEY, (c)) c 42 drivers/hid/hid-zydacron.c #define zc_map_key_clear(c) \ c 43 drivers/hid/hid-zydacron.c hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c)) c 104 drivers/hid/i2c-hid/i2c-hid-core.c } __packed c; c 227 drivers/hid/i2c-hid/i2c-hid-core.c cmd->c.reg = ihid->wHIDDescRegister; c 234 drivers/hid/i2c-hid/i2c-hid-core.c cmd->c.opcode = command->opcode; c 235 drivers/hid/i2c-hid/i2c-hid-core.c cmd->c.reportTypeID = reportID | reportType << 4; c 467 drivers/hsi/controllers/omap_ssi_core.c static int ssi_remove_ports(struct device *dev, void *c) c 226 drivers/hwmon/coretemp.c static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) c 252 drivers/hwmon/coretemp.c if (strstr(c->x86_model_id, tjmax_table[i].id)) c 258 drivers/hwmon/coretemp.c if (c->x86_model == tm->model && c 259 drivers/hwmon/coretemp.c (tm->mask == ANY || c->x86_stepping == tm->mask)) c 265 drivers/hwmon/coretemp.c if (c->x86_model == 0xf && c->x86_stepping < 4) c 268 drivers/hwmon/coretemp.c if (c->x86_model > 0xe && usemsr_ee) { c 282 drivers/hwmon/coretemp.c } else if (c->x86_model < 0x17 && !(eax & 0x10000000)) { c 297 drivers/hwmon/coretemp.c if (c->x86_model == 0x17 && c 329 drivers/hwmon/coretemp.c static bool cpu_has_tjmax(struct cpuinfo_x86 *c) c 331 drivers/hwmon/coretemp.c u8 model = c->x86_model; c 341 drivers/hwmon/coretemp.c static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) c 353 drivers/hwmon/coretemp.c if (cpu_has_tjmax(c)) c 377 drivers/hwmon/coretemp.c return adjust_tjmax(c, id, dev); c 409 drivers/hwmon/coretemp.c struct cpuinfo_x86 *c = &cpu_data(cpu); c 416 drivers/hwmon/coretemp.c if (c->x86_model == 0xe && c->x86_stepping < 0xc && c->microcode < 0x39) { c 455 drivers/hwmon/coretemp.c struct cpuinfo_x86 *c = &cpu_data(cpu); c 480 drivers/hwmon/coretemp.c tdata->tjmax = get_tjmax(c, cpu, &pdev->dev); c 487 drivers/hwmon/coretemp.c if (c->x86_model > 0xe && c->x86_model != 0x1c) { c 592 drivers/hwmon/coretemp.c struct 
cpuinfo_x86 *c = &cpu_data(cpu); c 607 drivers/hwmon/coretemp.c if (!cpu_has(c, X86_FEATURE_DTHERM)) c 629 drivers/hwmon/coretemp.c if (cpu_has(c, X86_FEATURE_PTS)) c 298 drivers/hwmon/fam15h_power.c struct cpuinfo_x86 *c = &boot_cpu_data; c 300 drivers/hwmon/fam15h_power.c if (c->x86 == 0x15 && c 301 drivers/hwmon/fam15h_power.c (c->x86_model <= 0xf || c 302 drivers/hwmon/fam15h_power.c (c->x86_model >= 0x60 && c->x86_model <= 0x7f))) c 318 drivers/hwmon/fam15h_power.c if (c->x86 == 0x15 && c 319 drivers/hwmon/fam15h_power.c (c->x86_model <= 0xf || c 320 drivers/hwmon/fam15h_power.c (c->x86_model >= 0x60 && c->x86_model <= 0x7f))) c 866 drivers/hwmon/fschmd.c char c; c 867 drivers/hwmon/fschmd.c if (get_user(c, buf + i)) c 869 drivers/hwmon/fschmd.c if (c == 'V') c 277 drivers/hwmon/hwmon-vid.c struct cpuinfo_x86 *c = &cpu_data(0); c 280 drivers/hwmon/hwmon-vid.c if (c->x86 < 6) /* Any CPU with family lower than 6 */ c 283 drivers/hwmon/hwmon-vid.c vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_stepping, c->x86_vendor); c 253 drivers/hwmon/i5k_amb.c u16 c; c 272 drivers/hwmon/i5k_amb.c c = data->amb_present[i]; c 273 drivers/hwmon/i5k_amb.c for (j = 0; j < REAL_MAX_AMBS_PER_CHANNEL; j++, c >>= 1) { c 277 drivers/hwmon/i5k_amb.c if (!(c & 0x1)) c 113 drivers/hwmon/via-cputemp.c struct cpuinfo_x86 *c = &cpu_data(pdev->id); c 125 drivers/hwmon/via-cputemp.c if (c->x86 == 7) { c 128 drivers/hwmon/via-cputemp.c switch (c->x86_model) { c 1371 drivers/hwmon/w83793.c char c; c 1372 drivers/hwmon/w83793.c if (get_user(c, buf + i)) c 1374 drivers/hwmon/w83793.c if (c == 'V') c 577 drivers/hwtracing/stm/core.c unsigned int c, bool ts_first, const void *buf, c 586 drivers/hwtracing/stm/core.c sz = data->packet(data, m, c, STP_PACKET_DATA, flags, sz, c 15 drivers/hwtracing/stm/p_basic.c unsigned int c = output->channel + chan; c 20 drivers/hwtracing/stm/p_basic.c sz = stm_data_write(data, m, c, true, buf, count); c 22 drivers/hwtracing/stm/p_basic.c data->packet(data, m, c, STP_PACKET_FLAG, 0, 0, &nil); c 264 drivers/hwtracing/stm/p_sys-t.c sys_t_clock_sync(struct stm_data *data, unsigned int m, unsigned int c) c 271 drivers/hwtracing/stm/p_sys-t.c sz = data->packet(data, m, c, STP_PACKET_DATA, STP_PACKET_TIMESTAMPED, c 278 drivers/hwtracing/stm/p_sys-t.c sz = stm_data_write(data, m, c, false, &payload, sizeof(payload)); c 282 drivers/hwtracing/stm/p_sys-t.c data->packet(data, m, c, STP_PACKET_FLAG, 0, 0, &nil); c 291 drivers/hwtracing/stm/p_sys-t.c unsigned int c = output->channel + chan; c 302 drivers/hwtracing/stm/p_sys-t.c sz = sys_t_clock_sync(data, m, c); c 319 drivers/hwtracing/stm/p_sys-t.c sz = data->packet(data, m, c, STP_PACKET_DATA, STP_PACKET_TIMESTAMPED, c 325 drivers/hwtracing/stm/p_sys-t.c sz = stm_data_write(data, m, c, false, op->node.uuid.b, UUID_SIZE); c 333 drivers/hwtracing/stm/p_sys-t.c sz = data->packet(data, m, c, STP_PACKET_DATA, 0, 2, c 343 drivers/hwtracing/stm/p_sys-t.c sz = stm_data_write(data, m, c, false, &ts, sizeof(ts)); c 349 drivers/hwtracing/stm/p_sys-t.c sz = stm_data_write(data, m, c, false, buf, count); c 351 drivers/hwtracing/stm/p_sys-t.c data->packet(data, m, c, STP_PACKET_FLAG, 0, 0, &nil); c 114 drivers/hwtracing/stm/stm.h unsigned int c, bool ts_first, const void *buf, c 152 drivers/i2c/algos/i2c-algo-bit.c static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c) c 161 drivers/i2c/algos/i2c-algo-bit.c sb = (c >> i) & 1; c 167 drivers/i2c/algos/i2c-algo-bit.c (int)c, i); c 181 drivers/i2c/algos/i2c-algo-bit.c "i2c_outb: 0x%02x, timeout at ack\n", 
(int)c); c 189 drivers/i2c/algos/i2c-algo-bit.c bit_dbg(2, &i2c_adap->dev, "i2c_outb: 0x%02x %s\n", (int)c, c 277 drivers/i2c/busses/i2c-axxia.c int c = readl(idev->base + MST_DATA); c 283 drivers/i2c/busses/i2c-axxia.c if (c <= 0 || c > I2C_SMBUS_BLOCK_MAX) { c 289 drivers/i2c/busses/i2c-axxia.c msg->len = 1 + c; c 292 drivers/i2c/busses/i2c-axxia.c msg->buf[idev->msg_xfrd_r++] = c; c 233 drivers/i2c/busses/i2c-bcm2835.c u32 c = BCM2835_I2C_C_ST | BCM2835_I2C_C_I2CEN; c 245 drivers/i2c/busses/i2c-bcm2835.c c |= BCM2835_I2C_C_READ | BCM2835_I2C_C_INTR; c 247 drivers/i2c/busses/i2c-bcm2835.c c |= BCM2835_I2C_C_INTT; c 250 drivers/i2c/busses/i2c-bcm2835.c c |= BCM2835_I2C_C_INTD; c 254 drivers/i2c/busses/i2c-bcm2835.c bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c); c 56 drivers/i2c/busses/i2c-designware-pcidrv.c int (*setup)(struct pci_dev *pdev, struct dw_pci_controller *c); c 89 drivers/i2c/busses/i2c-designware-pcidrv.c static int mfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c) c 93 drivers/i2c/busses/i2c-designware-pcidrv.c c->bus_cfg &= ~DW_IC_CON_SPEED_MASK; c 94 drivers/i2c/busses/i2c-designware-pcidrv.c c->bus_cfg |= DW_IC_CON_SPEED_STD; c 98 drivers/i2c/busses/i2c-designware-pcidrv.c c->bus_num = pdev->device - 0x817 + 3; c 103 drivers/i2c/busses/i2c-designware-pcidrv.c c->bus_num = pdev->device - 0x82C + 0; c 109 drivers/i2c/busses/i2c-designware-pcidrv.c static int mrfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c) c 119 drivers/i2c/busses/i2c-designware-pcidrv.c c->bus_num = PCI_FUNC(pdev->devfn) + 0 + 1; c 122 drivers/i2c/busses/i2c-designware-pcidrv.c c->bus_num = PCI_FUNC(pdev->devfn) + 4 + 1; c 414 drivers/i2c/busses/i2c-st.c struct st_i2c_client *c = &i2c_dev->client; c 425 drivers/i2c/busses/i2c-st.c if (c->count < (SSC_TXFIFO_SIZE - tx_fstat)) c 426 drivers/i2c/busses/i2c-st.c i = c->count; c 430 drivers/i2c/busses/i2c-st.c for (; i > 0; i--, c->count--, c->buf++) c 431 drivers/i2c/busses/i2c-st.c st_i2c_write_tx_fifo(i2c_dev, *c->buf); c 444 drivers/i2c/busses/i2c-st.c struct st_i2c_client *c = &i2c_dev->client; c 460 drivers/i2c/busses/i2c-st.c for (; i > 0; i--, c->xfered++) c 466 drivers/i2c/busses/i2c-st.c struct st_i2c_client *c = &i2c_dev->client; c 478 drivers/i2c/busses/i2c-st.c for (; (i > 0) && (c->count > 0); i--, c->count--) { c 480 drivers/i2c/busses/i2c-st.c *c->buf++ = (u8)rbuf & 0xff; c 495 drivers/i2c/busses/i2c-st.c struct st_i2c_client *c = &i2c_dev->client; c 500 drivers/i2c/busses/i2c-st.c if (c->stop) { c 515 drivers/i2c/busses/i2c-st.c struct st_i2c_client *c = &i2c_dev->client; c 519 drivers/i2c/busses/i2c-st.c if (!c->count) c 532 drivers/i2c/busses/i2c-st.c struct st_i2c_client *c = &i2c_dev->client; c 536 drivers/i2c/busses/i2c-st.c if (!c->xfered) { c 543 drivers/i2c/busses/i2c-st.c if (!c->count) { c 546 drivers/i2c/busses/i2c-st.c } else if (c->count == 1) { c 554 drivers/i2c/busses/i2c-st.c st_i2c_rd_fill_tx_fifo(i2c_dev, c->count); c 556 drivers/i2c/busses/i2c-st.c st_i2c_rd_fill_tx_fifo(i2c_dev, c->count - 1); c 568 drivers/i2c/busses/i2c-st.c struct st_i2c_client *c = &i2c_dev->client; c 585 drivers/i2c/busses/i2c-st.c if (c->addr & I2C_M_RD) c 601 drivers/i2c/busses/i2c-st.c if ((c->addr & I2C_M_RD) && (c->count == 1) && (c->xfered)) { c 610 drivers/i2c/busses/i2c-st.c c->result = -EIO; c 620 drivers/i2c/busses/i2c-st.c c->result = -EAGAIN; c 648 drivers/i2c/busses/i2c-st.c struct st_i2c_client *c = &i2c_dev->client; c 653 drivers/i2c/busses/i2c-st.c c->addr = i2c_8bit_addr_from_msg(msg); c 654 
drivers/i2c/busses/i2c-st.c c->buf = msg->buf; c 655 drivers/i2c/busses/i2c-st.c c->count = msg->len; c 656 drivers/i2c/busses/i2c-st.c c->xfered = 0; c 657 drivers/i2c/busses/i2c-st.c c->result = 0; c 658 drivers/i2c/busses/i2c-st.c c->stop = is_last; c 666 drivers/i2c/busses/i2c-st.c if (c->addr & I2C_M_RD) c 671 drivers/i2c/busses/i2c-st.c st_i2c_write_tx_fifo(i2c_dev, c->addr); c 674 drivers/i2c/busses/i2c-st.c if (!(c->addr & I2C_M_RD)) c 690 drivers/i2c/busses/i2c-st.c ret = c->result; c 694 drivers/i2c/busses/i2c-st.c c->addr); c 360 drivers/i2c/muxes/i2c-mux-pca954x.c int c, irq; c 373 drivers/i2c/muxes/i2c-mux-pca954x.c for (c = 0; c < data->chip->nchans; c++) { c 374 drivers/i2c/muxes/i2c-mux-pca954x.c irq = irq_create_mapping(data->irq, c); c 391 drivers/i2c/muxes/i2c-mux-pca954x.c int c, irq; c 396 drivers/i2c/muxes/i2c-mux-pca954x.c for (c = 0; c < data->chip->nchans; c++) { c 397 drivers/i2c/muxes/i2c-mux-pca954x.c irq = irq_find_mapping(data->irq, c); c 305 drivers/i3c/master/i3c-master-cdns.c #define SIR_MAP_DEV_CONF(d, c) ((c) << (((d) % 2) ? 16 : 0)) c 52 drivers/ide/dtc2278.c static void sub22 (char b, char c) c 60 drivers/ide/dtc2278.c outb_p(c,0xb4); c 62 drivers/ide/dtc2278.c if(inb(0xb4) == c) { c 107 drivers/ide/ide-atapi.c memcpy(scsi_req(rq)->cmd, pc->c, 12); c 123 drivers/ide/ide-atapi.c pc.c[0] = TEST_UNIT_READY; c 134 drivers/ide/ide-atapi.c pc.c[0] = START_STOP; c 135 drivers/ide/ide-atapi.c pc.c[4] = start; c 152 drivers/ide/ide-atapi.c pc.c[0] = ALLOW_MEDIUM_REMOVAL; c 153 drivers/ide/ide-atapi.c pc.c[4] = on; c 162 drivers/ide/ide-atapi.c pc->c[0] = REQUEST_SENSE; c 164 drivers/ide/ide-atapi.c pc->c[4] = 255; c 167 drivers/ide/ide-atapi.c pc->c[4] = 20; c 280 drivers/ide/ide-atapi.c memcpy(pc->c, scsi_req(sense_rq)->cmd, 12); c 604 drivers/ide/ide-atapi.c drive->hwif->tp_ops->output_data(drive, NULL, drive->pc->c, 12); c 566 drivers/ide/ide-cd.c u8 *c = scsi_req(rq)->cmd; c 569 drivers/ide/ide-cd.c if (c[0] == READ_6 || c[0] == WRITE_6) { c 570 drivers/ide/ide-cd.c c[8] = c[4]; c 571 drivers/ide/ide-cd.c c[5] = c[3]; c 572 drivers/ide/ide-cd.c c[4] = c[2]; c 573 drivers/ide/ide-cd.c c[3] = c[1] & 0x1f; c 574 drivers/ide/ide-cd.c c[2] = 0; c 575 drivers/ide/ide-cd.c c[1] &= 0xe0; c 576 drivers/ide/ide-cd.c c[0] += (READ_10 - READ_6); c 586 drivers/ide/ide-cd.c if (c[0] == MODE_SENSE || c[0] == MODE_SELECT) { c 75 drivers/ide/ide-floppy.c if (pc->c[0] == GPCMD_READ_10 || pc->c[0] == GPCMD_WRITE_10 || c 78 drivers/ide/ide-floppy.c else if (pc->c[0] == GPCMD_REQUEST_SENSE) { c 91 drivers/ide/ide-floppy.c drive->failed_pc->c[0]); c 118 drivers/ide/ide-floppy.c floppy->drive->name, pc->c[0], floppy->sense_key, c 130 drivers/ide/ide-floppy.c pc->c[0] != GPCMD_REQUEST_SENSE) c 161 drivers/ide/ide-floppy.c pc->c[0] = GPCMD_READ_FORMAT_CAPACITIES; c 162 drivers/ide/ide-floppy.c pc->c[7] = 255; c 163 drivers/ide/ide-floppy.c pc->c[8] = 255; c 173 drivers/ide/ide-floppy.c pc->c[0] = GPCMD_MODE_SENSE_10; c 174 drivers/ide/ide-floppy.c pc->c[1] = 0; c 175 drivers/ide/ide-floppy.c pc->c[2] = page_code; c 187 drivers/ide/ide-floppy.c put_unaligned(cpu_to_be16(length), (u16 *) &pc->c[7]); c 203 drivers/ide/ide-floppy.c pc->c[0] = cmd == READ ? 
GPCMD_READ_10 : GPCMD_WRITE_10; c 204 drivers/ide/ide-floppy.c put_unaligned(cpu_to_be16(blocks), (unsigned short *)&pc->c[7]); c 205 drivers/ide/ide-floppy.c put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[2]); c 207 drivers/ide/ide-floppy.c memcpy(scsi_req(rq)->cmd, pc->c, 12); c 220 drivers/ide/ide-floppy.c memcpy(pc->c, scsi_req(rq)->cmd, sizeof(pc->c)); c 103 drivers/ide/ide-floppy_ioctl.c pc->c[0] = GPCMD_FORMAT_UNIT; c 104 drivers/ide/ide-floppy_ioctl.c pc->c[1] = 0x17; c 292 drivers/ide/ide-tape.c if ((pc->c[0] == READ_6 || pc->c[0] == WRITE_6) c 294 drivers/ide/ide-tape.c && pc->c[4] == 0 && pc->c[3] == 0 && pc->c[2] == 0) { c 302 drivers/ide/ide-tape.c if (pc->c[0] == READ_6 && (sense[2] & 0x80)) { c 306 drivers/ide/ide-tape.c if (pc->c[0] == WRITE_6) { c 313 drivers/ide/ide-tape.c if (pc->c[0] == READ_6 || pc->c[0] == WRITE_6) { c 343 drivers/ide/ide-tape.c if (pc->c[0] == REQUEST_SENSE) { c 349 drivers/ide/ide-tape.c } else if (pc->c[0] == READ_6 || pc->c[0] == WRITE_6) { c 446 drivers/ide/ide-tape.c if (drive->failed_pc == NULL && pc->c[0] != REQUEST_SENSE) c 461 drivers/ide/ide-tape.c if (!(pc->c[0] == TEST_UNIT_READY && c 467 drivers/ide/ide-tape.c tape->name, pc->c[0], c 481 drivers/ide/ide-tape.c pc->c[0]); c 492 drivers/ide/ide-tape.c pc->c[0] = MODE_SENSE; c 495 drivers/ide/ide-tape.c pc->c[1] = 8; c 496 drivers/ide/ide-tape.c pc->c[2] = page_code; c 504 drivers/ide/ide-tape.c pc->c[3] = 0; c 506 drivers/ide/ide-tape.c pc->c[4] = 255; c 527 drivers/ide/ide-tape.c if (pc->c[0] != TEST_UNIT_READY) c 550 drivers/ide/ide-tape.c put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]); c 551 drivers/ide/ide-tape.c pc->c[1] = 1; c 557 drivers/ide/ide-tape.c pc->c[0] = READ_6; c 559 drivers/ide/ide-tape.c pc->c[0] = WRITE_6; c 563 drivers/ide/ide-tape.c memcpy(scsi_req(rq)->cmd, pc->c, 12); c 585 drivers/ide/ide-tape.c if (drive->failed_pc && drive->pc->c[0] == REQUEST_SENSE) { c 678 drivers/ide/ide-tape.c pc->c[0] = WRITE_FILEMARKS; c 679 drivers/ide/ide-tape.c pc->c[4] = write_filemark; c 735 drivers/ide/ide-tape.c pc.c[0] = READ_POSITION; c 772 drivers/ide/ide-tape.c pc->c[0] = POSITION_TO_ELEMENT; c 773 drivers/ide/ide-tape.c pc->c[1] = 2; c 774 drivers/ide/ide-tape.c put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[3]); c 775 drivers/ide/ide-tape.c pc->c[8] = partition; c 891 drivers/ide/ide-tape.c pc->c[0] = INQUIRY; c 892 drivers/ide/ide-tape.c pc->c[4] = 254; c 900 drivers/ide/ide-tape.c pc->c[0] = REZERO_UNIT; c 907 drivers/ide/ide-tape.c pc->c[0] = ERASE; c 908 drivers/ide/ide-tape.c pc->c[1] = 1; c 915 drivers/ide/ide-tape.c pc->c[0] = SPACE; c 916 drivers/ide/ide-tape.c put_unaligned(cpu_to_be32(count), (unsigned int *) &pc->c[1]); c 917 drivers/ide/ide-tape.c pc->c[1] = cmd; c 250 drivers/ide/ide.c unsigned int a, b, c = 0, h = 0, s = 0, i, j = 1; c 254 drivers/ide/ide.c if (sscanf(str, "%u.%u:%u,%u,%u", &a, &b, &c, &h, &s) != 5 && c 263 drivers/ide/ide.c if (c > INT_MAX || h > 255 || s > 255) c 271 drivers/ide/ide.c ide_disks_chs[i].cyl = c; c 12 drivers/ide/qd65xx.h #define IDE_IN(a,b,c) ( ((a)<(b)) ? (b) : ( (a)>(c) ? 
(c) : (a)) ) c 702 drivers/iio/adc/exynos_adc.c static int exynos_adc_remove_devices(struct device *dev, void *c) c 98 drivers/iio/adc/ina2xx-adc.c #define SAMPLING_PERIOD(c) ((c->int_time_vbus + c->int_time_vshunt) \ c 99 drivers/iio/adc/ina2xx-adc.c * c->avg) c 30 drivers/iio/common/ssp_sensors/ssp_dev.c u8 c; c 226 drivers/iio/common/ssp_sensors/ssp_dev.c to_send.c = data->batch_opt_buf[type]; c 288 drivers/iio/common/ssp_sensors/ssp_dev.c to_send.c = data->batch_opt_buf[type]; c 43 drivers/iio/dac/ad5761.c int c; c 83 drivers/iio/dac/ad5761.c .c = 40, c 87 drivers/iio/dac/ad5761.c .c = 0, c 91 drivers/iio/dac/ad5761.c .c = 20, c 95 drivers/iio/dac/ad5761.c .c = 0, c 99 drivers/iio/dac/ad5761.c .c = 10, c 103 drivers/iio/dac/ad5761.c .c = 12, c 107 drivers/iio/dac/ad5761.c .c = 0, c 111 drivers/iio/dac/ad5761.c .c = 0, c 223 drivers/iio/dac/ad5761.c *val *= ad5761_range_params[st->range].c; c 225 drivers/iio/industrialio-buffer.c return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index); c 233 drivers/iio/industrialio-buffer.c u8 type = this_attr->c->scan_type.endianness; c 242 drivers/iio/industrialio-buffer.c if (this_attr->c->scan_type.repeat > 1) c 245 drivers/iio/industrialio-buffer.c this_attr->c->scan_type.sign, c 246 drivers/iio/industrialio-buffer.c this_attr->c->scan_type.realbits, c 247 drivers/iio/industrialio-buffer.c this_attr->c->scan_type.storagebits, c 248 drivers/iio/industrialio-buffer.c this_attr->c->scan_type.repeat, c 249 drivers/iio/industrialio-buffer.c this_attr->c->scan_type.shift); c 253 drivers/iio/industrialio-buffer.c this_attr->c->scan_type.sign, c 254 drivers/iio/industrialio-buffer.c this_attr->c->scan_type.realbits, c 255 drivers/iio/industrialio-buffer.c this_attr->c->scan_type.storagebits, c 256 drivers/iio/industrialio-buffer.c this_attr->c->scan_type.shift); c 403 drivers/iio/industrialio-core.c ext_info = &this_attr->c->ext_info[this_attr->address]; c 405 drivers/iio/industrialio-core.c return ext_info->read(indio_dev, ext_info->private, this_attr->c, buf); c 417 drivers/iio/industrialio-core.c ext_info = &this_attr->c->ext_info[this_attr->address]; c 420 drivers/iio/industrialio-core.c this_attr->c, buf, len); c 641 drivers/iio/industrialio-core.c ret = indio_dev->info->read_raw_multi(indio_dev, this_attr->c, c 646 drivers/iio/industrialio-core.c ret = indio_dev->info->read_raw(indio_dev, this_attr->c, c 752 drivers/iio/industrialio-core.c ret = indio_dev->info->read_avail(indio_dev, this_attr->c, c 848 drivers/iio/industrialio-core.c this_attr->c, this_attr->address)) { c 866 drivers/iio/industrialio-core.c ret = indio_dev->info->write_raw(indio_dev, this_attr->c, c 1034 drivers/iio/industrialio-core.c iio_attr->c = chan; c 244 drivers/iio/industrialio-event.c return attr->c->event_spec[attr->address & 0xffff].dir; c 249 drivers/iio/industrialio-event.c return attr->c->event_spec[attr->address & 0xffff].type; c 272 drivers/iio/industrialio-event.c this_attr->c, iio_ev_attr_type(this_attr), c 287 drivers/iio/industrialio-event.c this_attr->c, iio_ev_attr_type(this_attr), c 305 drivers/iio/industrialio-event.c this_attr->c, iio_ev_attr_type(this_attr), c 332 drivers/iio/industrialio-event.c this_attr->c, iio_ev_attr_type(this_attr), c 280 drivers/iio/inkern.c struct iio_map_internal *c_i = NULL, *c = NULL; c 294 drivers/iio/inkern.c c = c_i; c 295 drivers/iio/inkern.c iio_device_get(c->indio_dev); c 299 drivers/iio/inkern.c if (c == NULL) c 308 drivers/iio/inkern.c channel->indio_dev = c->indio_dev; c 310 drivers/iio/inkern.c if 
(c->map->adc_channel_label) { c 313 drivers/iio/inkern.c c->map->adc_channel_label); c 326 drivers/iio/inkern.c iio_device_put(c->indio_dev); c 408 drivers/iio/inkern.c struct iio_map_internal *c = NULL; c 424 drivers/iio/inkern.c list_for_each_entry(c, &iio_map_list, l) c 425 drivers/iio/inkern.c if (name && strcmp(name, c->map->consumer_dev_name) != 0) c 443 drivers/iio/inkern.c list_for_each_entry(c, &iio_map_list, l) { c 444 drivers/iio/inkern.c if (name && strcmp(name, c->map->consumer_dev_name) != 0) c 446 drivers/iio/inkern.c chans[mapind].indio_dev = c->indio_dev; c 447 drivers/iio/inkern.c chans[mapind].data = c->map->consumer_data; c 450 drivers/iio/inkern.c c->map->adc_channel_label); c 42 drivers/iio/pressure/t5403.c __le16 c[10]; c 45 drivers/iio/pressure/t5403.c #define T5403_C_U16(i) le16_to_cpu(data->c[(i) - 1]) c 247 drivers/iio/pressure/t5403.c sizeof(data->c), (u8 *) data->c); c 186 drivers/infiniband/core/netlink.c struct netlink_dump_control c = { c 189 drivers/infiniband/core/netlink.c if (c.dump) c 190 drivers/infiniband/core/netlink.c err = netlink_dump_start(skb->sk, skb, nlh, &c); c 194 drivers/infiniband/hw/cxgb3/iwch_provider.h static inline struct iwch_ucontext *to_iwch_ucontext(struct ib_ucontext *c) c 196 drivers/infiniband/hw/cxgb3/iwch_provider.h return container_of(c, struct iwch_ucontext, ibucontext); c 536 drivers/infiniband/hw/cxgb4/iw_cxgb4.h static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c) c 538 drivers/infiniband/hw/cxgb4/iw_cxgb4.h return container_of(c, struct c4iw_ucontext, ibucontext); c 5265 drivers/infiniband/hw/hfi1/chip.c char c; c 5278 drivers/infiniband/hw/hfi1/chip.c while ((c = *s++) != 0) { c 5283 drivers/infiniband/hw/hfi1/chip.c *p++ = c; c 243 drivers/infiniband/hw/hfi1/eprom.c #define MAGIC4(a, b, c, d) ((d) << 24 | (c) << 16 | (b) << 8 | (a)) c 1424 drivers/infiniband/hw/hfi1/hfi.h #define dc8051_ver(a, b, c) ((a) << 16 | (b) << 8 | (c)) c 893 drivers/infiniband/hw/mlx5/devx.c static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in) c 898 drivers/infiniband/hw/mlx5/devx.c if (c->devx_uid) c 899 drivers/infiniband/hw/mlx5/devx.c return c->devx_uid; c 901 drivers/infiniband/hw/mlx5/devx.c dev = to_mdev(c->ibucontext.device); c 908 drivers/infiniband/hw/mlx5/devx.c if (!c->devx_uid) c 911 drivers/infiniband/hw/mlx5/devx.c return c->devx_uid; c 951 drivers/infiniband/hw/mlx5/devx.c struct mlx5_ib_ucontext *c; c 962 drivers/infiniband/hw/mlx5/devx.c c = devx_ufile2uctx(attrs); c 963 drivers/infiniband/hw/mlx5/devx.c if (IS_ERR(c)) c 964 drivers/infiniband/hw/mlx5/devx.c return PTR_ERR(c); c 965 drivers/infiniband/hw/mlx5/devx.c dev = to_mdev(c->ibucontext.device); c 1001 drivers/infiniband/hw/mlx5/devx.c struct mlx5_ib_ucontext *c; c 1006 drivers/infiniband/hw/mlx5/devx.c c = devx_ufile2uctx(attrs); c 1007 drivers/infiniband/hw/mlx5/devx.c if (IS_ERR(c)) c 1008 drivers/infiniband/hw/mlx5/devx.c return PTR_ERR(c); c 1009 drivers/infiniband/hw/mlx5/devx.c dev = to_mdev(c->ibucontext.device); c 1015 drivers/infiniband/hw/mlx5/devx.c dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true); c 1029 drivers/infiniband/hw/mlx5/devx.c struct mlx5_ib_ucontext *c; c 1039 drivers/infiniband/hw/mlx5/devx.c c = devx_ufile2uctx(attrs); c 1040 drivers/infiniband/hw/mlx5/devx.c if (IS_ERR(c)) c 1041 drivers/infiniband/hw/mlx5/devx.c return PTR_ERR(c); c 1042 drivers/infiniband/hw/mlx5/devx.c dev = to_mdev(c->ibucontext.device); c 1044 drivers/infiniband/hw/mlx5/devx.c uid = devx_get_uid(c, cmd_in); c 1408 
drivers/infiniband/hw/mlx5/devx.c struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context( c 1410 drivers/infiniband/hw/mlx5/devx.c struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); c 1422 drivers/infiniband/hw/mlx5/devx.c uid = devx_get_uid(c, cmd_in); c 1509 drivers/infiniband/hw/mlx5/devx.c struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context( c 1511 drivers/infiniband/hw/mlx5/devx.c struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device); c 1519 drivers/infiniband/hw/mlx5/devx.c uid = devx_get_uid(c, cmd_in); c 1554 drivers/infiniband/hw/mlx5/devx.c struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context( c 1559 drivers/infiniband/hw/mlx5/devx.c struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device); c 1564 drivers/infiniband/hw/mlx5/devx.c uid = devx_get_uid(c, cmd_in); c 1634 drivers/infiniband/hw/mlx5/devx.c struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context( c 1636 drivers/infiniband/hw/mlx5/devx.c struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); c 1692 drivers/infiniband/hw/mlx5/devx.c struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context( c 1697 drivers/infiniband/hw/mlx5/devx.c struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device); c 1704 drivers/infiniband/hw/mlx5/devx.c uid = devx_get_uid(c, cmd_in); c 1921 drivers/infiniband/hw/mlx5/devx.c struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context( c 1923 drivers/infiniband/hw/mlx5/devx.c struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); c 1941 drivers/infiniband/hw/mlx5/devx.c if (!c->devx_uid) c 2181 drivers/infiniband/hw/mlx5/devx.c struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context( c 2183 drivers/infiniband/hw/mlx5/devx.c struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); c 2186 drivers/infiniband/hw/mlx5/devx.c if (!c->devx_uid) c 2193 drivers/infiniband/hw/mlx5/devx.c err = devx_umem_get(dev, &c->ibucontext, attrs, obj); c 2203 drivers/infiniband/hw/mlx5/devx.c MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid); c 93 drivers/infiniband/hw/mlx5/mr.c int c = order2idx(dev, mr->order); c 94 drivers/infiniband/hw/mlx5/mr.c struct mlx5_cache_ent *ent = &cache->ent[c]; c 136 drivers/infiniband/hw/mlx5/mr.c static int add_keys(struct mlx5_ib_dev *dev, int c, int num) c 139 drivers/infiniband/hw/mlx5/mr.c struct mlx5_cache_ent *ent = &cache->ent[c]; c 198 drivers/infiniband/hw/mlx5/mr.c static void remove_keys(struct mlx5_ib_dev *dev, int c, int num) c 201 drivers/infiniband/hw/mlx5/mr.c struct mlx5_cache_ent *ent = &cache->ent[c]; c 238 drivers/infiniband/hw/mlx5/mr.c int c; c 244 drivers/infiniband/hw/mlx5/mr.c c = order2idx(dev, ent->order); c 254 drivers/infiniband/hw/mlx5/mr.c err = add_keys(dev, c, var - ent->size); c 261 drivers/infiniband/hw/mlx5/mr.c remove_keys(dev, c, ent->size - var); c 296 drivers/infiniband/hw/mlx5/mr.c int c; c 302 drivers/infiniband/hw/mlx5/mr.c c = order2idx(dev, ent->order); c 313 drivers/infiniband/hw/mlx5/mr.c err = add_keys(dev, c, 2 * ent->limit - ent->cur); c 464 drivers/infiniband/hw/mlx5/mr.c int c; c 467 drivers/infiniband/hw/mlx5/mr.c c = order2idx(dev, order); c 469 drivers/infiniband/hw/mlx5/mr.c if (c < 0 || c > last_umr_cache_entry) { c 470 drivers/infiniband/hw/mlx5/mr.c mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c); c 474 drivers/infiniband/hw/mlx5/mr.c for (i = c; i <= last_umr_cache_entry; i++) { c 496 drivers/infiniband/hw/mlx5/mr.c cache->ent[c].miss++; c 506 drivers/infiniband/hw/mlx5/mr.c int c; c 511 drivers/infiniband/hw/mlx5/mr.c c = order2idx(dev, mr->order); c 512 drivers/infiniband/hw/mlx5/mr.c WARN_ON(c < 0 || c >= 
MAX_MR_CACHE_ENTRIES); c 517 drivers/infiniband/hw/mlx5/mr.c ent = &cache->ent[c]; c 523 drivers/infiniband/hw/mlx5/mr.c ent = &cache->ent[c]; c 535 drivers/infiniband/hw/mlx5/mr.c static void clean_keys(struct mlx5_ib_dev *dev, int c) c 538 drivers/infiniband/hw/mlx5/mr.c struct mlx5_cache_ent *ent = &cache->ent[c]; c 889 drivers/infiniband/hw/mthca/mthca_cq.c int c; c 892 drivers/infiniband/hw/mthca/mthca_cq.c c = cq->refcount; c 895 drivers/infiniband/hw/mthca/mthca_cq.c return c; c 1433 drivers/infiniband/hw/mthca/mthca_qp.c int c; c 1436 drivers/infiniband/hw/mthca/mthca_qp.c c = qp->refcount; c 1439 drivers/infiniband/hw/mthca/mthca_qp.c return c; c 164 drivers/infiniband/hw/mthca/mthca_reset.c int c = 0; c 166 drivers/infiniband/hw/mthca/mthca_reset.c for (c = 0; c < 100; ++c) { c 329 drivers/infiniband/hw/mthca/mthca_srq.c int c; c 332 drivers/infiniband/hw/mthca/mthca_srq.c c = srq->refcount; c 335 drivers/infiniband/hw/mthca/mthca_srq.c return c; c 1041 drivers/infiniband/hw/qib/qib_user_sdma.c u32 c) c 1043 drivers/infiniband/hw/qib/qib_user_sdma.c pq->sent_counter = c; c 1255 drivers/infiniband/hw/qib/qib_user_sdma.c int i, j, c = 0; c 1288 drivers/infiniband/hw/qib/qib_user_sdma.c c += i + 1 - pkt->index; c 1295 drivers/infiniband/hw/qib/qib_user_sdma.c ppd->sdma_descq_added += c; c 1296 drivers/infiniband/hw/qib/qib_user_sdma.c nsent += c; c 1156 drivers/infiniband/ulp/srp/ib_srp.c int i, c = 0; c 1159 drivers/infiniband/ulp/srp/ib_srp.c c += target->ch[i].connected; c 1161 drivers/infiniband/ulp/srp/ib_srp.c return c; c 3272 drivers/infiniband/ulp/srp/ib_srp.c int c = 0; c 3275 drivers/infiniband/ulp/srp/ib_srp.c c++; c 3277 drivers/infiniband/ulp/srp/ib_srp.c return c; c 51 drivers/input/gameport/ns558.c unsigned char c, u, v; c 67 drivers/input/gameport/ns558.c c = inb(io); c 68 drivers/input/gameport/ns558.c outb(~c & ~3, io); c 70 drivers/input/gameport/ns558.c outb(c, io); c 81 drivers/input/gameport/ns558.c outb(c, io); c 93 drivers/input/gameport/ns558.c outb(c, io); c 317 drivers/input/input-mt.c int f, *p, s, c; c 332 drivers/input/input-mt.c c = (f + s + 1) / 2; c 333 drivers/input/input-mt.c if (c == 0 || (c > mu && (!eq || mu > 0))) c 337 drivers/input/input-mt.c c *= 2; c 340 drivers/input/input-mt.c *p -= c; c 342 drivers/input/input-mt.c return (c < s && s <= 0) || (f >= 0 && f < c); c 133 drivers/input/joystick/db9.c unsigned char c; c 137 drivers/input/joystick/db9.c c = 0x80 | 0x30 | (powered ? 0x08 : 0) | (pwr_sub ? 0x04 : 0) | data; c 138 drivers/input/joystick/db9.c parport_write_data(port, c); c 141 drivers/input/joystick/db9.c c = 0x40 | data << 4 | (powered ? 0x08 : 0) | (pwr_sub ? 0x04 : 0) | 0x03; c 142 drivers/input/joystick/db9.c parport_write_data(port, c); c 145 drivers/input/joystick/db9.c c = ((((data & 2) ? 2 : 0) | ((data & 1) ? 
4 : 0)) ^ 0x02) | !powered; c 146 drivers/input/joystick/db9.c parport_write_control(port, c); c 215 drivers/input/joystick/iforce/iforce-main.c u8 c[] = "CEOV"; c 306 drivers/input/joystick/iforce/iforce-main.c for (i = 0; c[i]; i++) c 307 drivers/input/joystick/iforce/iforce-main.c if (!iforce_get_id_packet(iforce, c[i], buf, &len)) c 31 drivers/input/joystick/iforce/iforce-packets.c int c; c 63 drivers/input/joystick/iforce/iforce-packets.c c = CIRC_SPACE_TO_END(head, tail, XMIT_SIZE); c 64 drivers/input/joystick/iforce/iforce-packets.c if (n < c) c=n; c 68 drivers/input/joystick/iforce/iforce-packets.c c); c 69 drivers/input/joystick/iforce/iforce-packets.c if (n != c) { c 71 drivers/input/joystick/iforce/iforce-packets.c data + c, c 72 drivers/input/joystick/iforce/iforce-packets.c n - c); c 27 drivers/input/joystick/iforce/iforce-usb.c int n, c; c 47 drivers/input/joystick/iforce/iforce-usb.c c = CIRC_CNT_TO_END(iforce->xmit.head, iforce->xmit.tail, XMIT_SIZE); c 48 drivers/input/joystick/iforce/iforce-usb.c if (n < c) c=n; c 52 drivers/input/joystick/iforce/iforce-usb.c c); c 53 drivers/input/joystick/iforce/iforce-usb.c if (n != c) { c 54 drivers/input/joystick/iforce/iforce-usb.c memcpy(iforce_usb->out->transfer_buffer + 1 + c, c 56 drivers/input/joystick/iforce/iforce-usb.c n-c); c 62 drivers/input/joystick/spaceorb.c unsigned char c = 0; c 67 drivers/input/joystick/spaceorb.c for (i = 0; i < spaceorb->idx; i++) c ^= data[i]; c 68 drivers/input/joystick/spaceorb.c if (c) return; c 99 drivers/input/keyboard/hilkbd.c unsigned char c; c 128 drivers/input/keyboard/hilkbd.c static inline void handle_status(unsigned char s, unsigned char c) c 130 drivers/input/keyboard/hilkbd.c if (c & 0x8) { c 132 drivers/input/keyboard/hilkbd.c if (c & 0x10) c 135 drivers/input/keyboard/hilkbd.c if (c & 0x10) { c 138 drivers/input/keyboard/hilkbd.c hil_dev.curdev = c & 7; c 145 drivers/input/keyboard/hilkbd.c static inline void handle_data(unsigned char s, unsigned char c) c 148 drivers/input/keyboard/hilkbd.c hil_dev.data[hil_dev.ptr++] = c; c 157 drivers/input/keyboard/hilkbd.c unsigned char s, c; c 160 drivers/input/keyboard/hilkbd.c c = hil_read_data(); c 164 drivers/input/keyboard/hilkbd.c handle_status(s, c); c 167 drivers/input/keyboard/hilkbd.c handle_data(s, c); c 171 drivers/input/keyboard/hilkbd.c hil_dev.c = c; c 201 drivers/input/keyboard/hilkbd.c unsigned char c; c 233 drivers/input/keyboard/hilkbd.c c = hil_dev.c; c 235 drivers/input/keyboard/hilkbd.c if (c == 0) { c 239 drivers/input/keyboard/hilkbd.c kbid = ffz(~c); c 244 drivers/input/keyboard/hilkbd.c c = 0; c 245 drivers/input/keyboard/hilkbd.c hil_do(HIL_WRITEKBDSADR, &c, 1); c 150 drivers/input/keyboard/lm8323.c #define client_to_lm8323(c) container_of(c, struct lm8323_chip, client) c 152 drivers/input/keyboard/lm8323.c #define cdev_to_pwm(c) container_of(c, struct lm8323_pwm, cdev) c 53 drivers/input/keyboard/locomokbd.c #define SCANCODE(c,r) ( ((c)<<4) + (r) + 1 ) c 29 drivers/input/keyboard/opencores-kbd.c unsigned char c; c 31 drivers/input/keyboard/opencores-kbd.c c = readb(opencores_kbd->addr); c 32 drivers/input/keyboard/opencores-kbd.c input_report_key(input, c & 0x7f, c & 0x80 ? 
0 : 1); c 50 drivers/input/misc/mma8450.c struct i2c_client *c = m->client; c 53 drivers/input/misc/mma8450.c ret = i2c_smbus_read_byte_data(c, off); c 55 drivers/input/misc/mma8450.c dev_err(&c->dev, c 64 drivers/input/misc/mma8450.c struct i2c_client *c = m->client; c 67 drivers/input/misc/mma8450.c error = i2c_smbus_write_byte_data(c, off, v); c 69 drivers/input/misc/mma8450.c dev_err(&c->dev, c 81 drivers/input/misc/mma8450.c struct i2c_client *c = m->client; c 84 drivers/input/misc/mma8450.c err = i2c_smbus_read_i2c_block_data(c, off, size, buf); c 86 drivers/input/misc/mma8450.c dev_err(&c->dev, c 157 drivers/input/misc/mma8450.c static int mma8450_probe(struct i2c_client *c, c 164 drivers/input/misc/mma8450.c m = devm_kzalloc(&c->dev, sizeof(*m), GFP_KERNEL); c 168 drivers/input/misc/mma8450.c idev = devm_input_allocate_polled_device(&c->dev); c 172 drivers/input/misc/mma8450.c m->client = c; c 191 drivers/input/misc/mma8450.c dev_err(&c->dev, "failed to register polled input device\n"); c 61 drivers/input/misc/yealink.c #define _SEG(t, a, am, b, bm, c, cm, d, dm, e, em, f, fm, g, gm) \ c 63 drivers/input/misc/yealink.c .u = { .s = { _LOC(a, am), _LOC(b, bm), _LOC(c, cm), \ c 294 drivers/input/mouse/bcm5974.c const struct bcm5974_config *c = &dev->cfg; c 295 drivers/input/mouse/bcm5974.c u8 *f_base = dev->tp_data + c->tp_header + c->tp_delta; c 297 drivers/input/mouse/bcm5974.c return (const struct tp_finger *)(f_base + i * c->tp_fsize); c 607 drivers/input/mouse/bcm5974.c const struct bcm5974_config *c = &dev->cfg; c 612 drivers/input/mouse/bcm5974.c if (size < c->tp_header || (size - c->tp_header) % c->tp_fsize != 0) c 615 drivers/input/mouse/bcm5974.c raw_n = (size - c->tp_header) / c->tp_fsize; c 622 drivers/input/mouse/bcm5974.c dev->pos[n].y = c->y.min + c->y.max - raw2int(f->abs_y); c 634 drivers/input/mouse/bcm5974.c report_synaptics_data(input, c, get_tp_finger(dev, 0), raw_n); c 637 drivers/input/mouse/bcm5974.c if (c->caps & HAS_INTEGRATED_BUTTON) { c 638 drivers/input/mouse/bcm5974.c int ibt = raw2int(dev->tp_data[c->tp_button]); c 649 drivers/input/mouse/bcm5974.c const struct bcm5974_config *c = &dev->cfg; c 654 drivers/input/mouse/bcm5974.c if (c->tp_type == TYPE3) c 657 drivers/input/mouse/bcm5974.c data = kmalloc(c->um_size, GFP_KERNEL); c 668 drivers/input/mouse/bcm5974.c c->um_req_val, c->um_req_idx, data, c->um_size, 5000); c 670 drivers/input/mouse/bcm5974.c if (size != c->um_size) { c 677 drivers/input/mouse/bcm5974.c data[c->um_switch_idx] = on ? 
c->um_switch_on : c->um_switch_off; c 683 drivers/input/mouse/bcm5974.c c->um_req_val, c->um_req_idx, data, c->um_size, 5000); c 685 drivers/input/mouse/bcm5974.c if (size != c->um_size) { c 35 drivers/input/mouse/elantech.c static int synaptics_send_cmd(struct psmouse *psmouse, unsigned char c, c 38 drivers/input/mouse/elantech.c if (ps2_sliced_command(&psmouse->ps2dev, c) || c 40 drivers/input/mouse/elantech.c psmouse_err(psmouse, "%s query 0x%02x failed.\n", __func__, c); c 50 drivers/input/mouse/elantech.c static int elantech_send_cmd(struct psmouse *psmouse, unsigned char c, c 56 drivers/input/mouse/elantech.c ps2_command(ps2dev, NULL, c) || c 58 drivers/input/mouse/elantech.c psmouse_err(psmouse, "%s query 0x%02x failed.\n", __func__, c); c 161 drivers/input/mouse/elantech.h int (*send_cmd)(struct psmouse *psmouse, unsigned char c, c 116 drivers/input/mouse/inport.c unsigned char a, b, c; c 126 drivers/input/mouse/inport.c c = inb(INPORT_SIGNATURE_PORT); c 127 drivers/input/mouse/inport.c if (a == b || a != c) { c 234 drivers/input/mouse/sermouse.c unsigned char c = serio->id.extra; c 250 drivers/input/mouse/sermouse.c input_dev->id.product = c; c 259 drivers/input/mouse/sermouse.c if (c & 0x01) set_bit(BTN_MIDDLE, input_dev->keybit); c 260 drivers/input/mouse/sermouse.c if (c & 0x02) set_bit(BTN_SIDE, input_dev->keybit); c 261 drivers/input/mouse/sermouse.c if (c & 0x04) set_bit(BTN_EXTRA, input_dev->keybit); c 262 drivers/input/mouse/sermouse.c if (c & 0x10) set_bit(REL_WHEEL, input_dev->relbit); c 263 drivers/input/mouse/sermouse.c if (c & 0x20) set_bit(REL_HWHEEL, input_dev->relbit); c 625 drivers/input/mouse/synaptics.c static int synaptics_pt_write(struct serio *serio, u8 c) c 631 drivers/input/mouse/synaptics.c error = ps2_sliced_command(&parent->ps2dev, c); c 43 drivers/input/mouse/synaptics.h #define SYN_CAP_EXTENDED(c) ((c) & BIT(23)) c 44 drivers/input/mouse/synaptics.h #define SYN_CAP_MIDDLE_BUTTON(c) ((c) & BIT(18)) c 45 drivers/input/mouse/synaptics.h #define SYN_CAP_PASS_THROUGH(c) ((c) & BIT(7)) c 46 drivers/input/mouse/synaptics.h #define SYN_CAP_SLEEP(c) ((c) & BIT(4)) c 47 drivers/input/mouse/synaptics.h #define SYN_CAP_FOUR_BUTTON(c) ((c) & BIT(3)) c 48 drivers/input/mouse/synaptics.h #define SYN_CAP_MULTIFINGER(c) ((c) & BIT(1)) c 49 drivers/input/mouse/synaptics.h #define SYN_CAP_PALMDETECT(c) ((c) & BIT(0)) c 50 drivers/input/mouse/synaptics.h #define SYN_CAP_SUBMODEL_ID(c) (((c) & GENMASK(15, 8)) >> 8) c 51 drivers/input/mouse/synaptics.h #define SYN_EXT_CAP_REQUESTS(c) (((c) & GENMASK(22, 20)) >> 20) c 34 drivers/input/mouse/touchkit_ps2.c #define TOUCHKIT_SEND_PARMS(s, r, c) ((s) << 12 | (r) << 8 | (c)) c 678 drivers/input/mousedev.c unsigned char c; c 683 drivers/input/mousedev.c if (get_user(c, buffer + i)) c 688 drivers/input/mousedev.c if (c == mousedev_imex_seq[client->imexseq]) { c 696 drivers/input/mousedev.c if (c == mousedev_imps_seq[client->impsseq]) { c 704 drivers/input/mousedev.c mousedev_generate_response(client, c); c 130 drivers/input/serio/ct82c710.c static int ct82c710_write(struct serio *port, unsigned char c) c 133 drivers/input/serio/ct82c710.c outb_p(c, CT82C710_DATA); c 792 drivers/input/serio/hil_mlc.c static int hil_mlc_serio_write(struct serio *serio, unsigned char c) c 806 drivers/input/serio/hil_mlc.c ((hil_packet)c) << (8 * (3 - mlc->serio_oidx[map->didx])); c 356 drivers/input/serio/i8042.c static int i8042_kbd_write(struct serio *port, unsigned char c) c 364 drivers/input/serio/i8042.c dbg("%02x -> i8042 (kbd-data)\n", c); c 
365 drivers/input/serio/i8042.c i8042_write_data(c); c 377 drivers/input/serio/i8042.c static int i8042_aux_write(struct serio *serio, unsigned char c) c 381 drivers/input/serio/i8042.c return i8042_command(&c, port->mux == -1 ? c 83 drivers/input/serio/parkbd.c static int parkbd_write(struct serio *port, unsigned char c) c 89 drivers/input/serio/parkbd.c p = c ^ (c >> 4); c 95 drivers/input/serio/parkbd.c parkbd_buffer = c | (((int) (~p & 1)) << 8) | 0x600; c 140 drivers/input/serio/serio_raw.c static bool serio_raw_fetch_byte(struct serio_raw *serio_raw, char *c) c 148 drivers/input/serio/serio_raw.c *c = serio_raw->queue[serio_raw->tail]; c 162 drivers/input/serio/serio_raw.c char uninitialized_var(c); c 177 drivers/input/serio/serio_raw.c while (read < count && serio_raw_fetch_byte(serio_raw, &c)) { c 178 drivers/input/serio/serio_raw.c if (put_user(c, buffer++)) c 204 drivers/input/serio/serio_raw.c unsigned char c; c 219 drivers/input/serio/serio_raw.c if (get_user(c, buffer++)) { c 224 drivers/input/serio/serio_raw.c if (serio_write(serio_raw->serio, c)) { c 108 drivers/input/serio/xilinx_ps2.c u8 c; c 126 drivers/input/serio/xilinx_ps2.c status = xps2_recv(drvdata, &c); c 133 drivers/input/serio/xilinx_ps2.c serio_interrupt(drvdata->serio, c, drvdata->flags); c 155 drivers/input/serio/xilinx_ps2.c static int sxps2_write(struct serio *pserio, unsigned char c) c 167 drivers/input/serio/xilinx_ps2.c out_be32(drvdata->base_address + XPS2_TX_DATA_OFFSET, c); c 186 drivers/input/serio/xilinx_ps2.c u8 c; c 199 drivers/input/serio/xilinx_ps2.c (void)xps2_recv(drvdata, &c); c 2750 drivers/input/touchscreen/atmel_mxt_ts.c char c; c 2753 drivers/input/touchscreen/atmel_mxt_ts.c c = *(fw->data + pos); c 2755 drivers/input/touchscreen/atmel_mxt_ts.c if (c < '0' || (c > '9' && c < 'A') || c > 'F') c 3827 drivers/iommu/amd_iommu.c int index, c, alignment = 1; c 3844 drivers/iommu/amd_iommu.c for (index = ALIGN(table->min_index, alignment), c = 0; c 3847 drivers/iommu/amd_iommu.c c += 1; c 3849 drivers/iommu/amd_iommu.c c = 0; c 3854 drivers/iommu/amd_iommu.c if (c == count) { c 3855 drivers/iommu/amd_iommu.c for (; c != 0; --c) c 3856 drivers/iommu/amd_iommu.c iommu->irte_ops->set_allocated(table, index - c + 1); c 279 drivers/iommu/intel-iommu.c static inline int context_domain_id(struct context_entry *c) c 281 drivers/iommu/intel-iommu.c return((c->hi >> 8) & 0xffff); c 25 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_CONTEXT_FIELD(b, c, r, F) \ c 26 drivers/iommu/msm_iommu_hw-8xxx.h GET_FIELD(((b) + (r) + ((c) << CTX_SHIFT)), F##_MASK, F##_SHIFT) c 30 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_CONTEXT_FIELD(b, c, r, F, v) \ c 31 drivers/iommu/msm_iommu_hw-8xxx.h SET_FIELD(((b) + (r) + ((c) << CTX_SHIFT)), F##_MASK, F##_SHIFT, (v)) c 121 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_SCTLR(b, c, v) SET_CTX_REG(SCTLR, (b), (c), (v)) c 122 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_ACTLR(b, c, v) SET_CTX_REG(ACTLR, (b), (c), (v)) c 123 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_CONTEXTIDR(b, c, v) SET_CTX_REG(CONTEXTIDR, (b), (c), (v)) c 124 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TTBR0(b, c, v) SET_CTX_REG(TTBR0, (b), (c), (v)) c 125 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TTBR1(b, c, v) SET_CTX_REG(TTBR1, (b), (c), (v)) c 126 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TTBCR(b, c, v) SET_CTX_REG(TTBCR, (b), (c), (v)) c 127 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_PAR(b, c, v) SET_CTX_REG(PAR, (b), (c), (v)) c 128 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_FSR(b, c, 
v) SET_CTX_REG(FSR, (b), (c), (v)) c 129 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_FSRRESTORE(b, c, v) SET_CTX_REG(FSRRESTORE, (b), (c), (v)) c 130 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_FAR(b, c, v) SET_CTX_REG(FAR, (b), (c), (v)) c 131 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_FSYNR0(b, c, v) SET_CTX_REG(FSYNR0, (b), (c), (v)) c 132 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_FSYNR1(b, c, v) SET_CTX_REG(FSYNR1, (b), (c), (v)) c 133 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_PRRR(b, c, v) SET_CTX_REG(PRRR, (b), (c), (v)) c 134 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_NMRR(b, c, v) SET_CTX_REG(NMRR, (b), (c), (v)) c 135 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TLBLKCR(b, c, v) SET_CTX_REG(TLBLCKR, (b), (c), (v)) c 136 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_V2PSR(b, c, v) SET_CTX_REG(V2PSR, (b), (c), (v)) c 137 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TLBFLPTER(b, c, v) SET_CTX_REG(TLBFLPTER, (b), (c), (v)) c 138 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TLBSLPTER(b, c, v) SET_CTX_REG(TLBSLPTER, (b), (c), (v)) c 139 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_BFBCR(b, c, v) SET_CTX_REG(BFBCR, (b), (c), (v)) c 140 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_CTX_TLBIALL(b, c, v) SET_CTX_REG(CTX_TLBIALL, (b), (c), (v)) c 141 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TLBIASID(b, c, v) SET_CTX_REG(TLBIASID, (b), (c), (v)) c 142 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TLBIVA(b, c, v) SET_CTX_REG(TLBIVA, (b), (c), (v)) c 143 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TLBIVAA(b, c, v) SET_CTX_REG(TLBIVAA, (b), (c), (v)) c 144 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_V2PPR(b, c, v) SET_CTX_REG(V2PPR, (b), (c), (v)) c 145 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_V2PPW(b, c, v) SET_CTX_REG(V2PPW, (b), (c), (v)) c 146 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_V2PUR(b, c, v) SET_CTX_REG(V2PUR, (b), (c), (v)) c 147 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_V2PUW(b, c, v) SET_CTX_REG(V2PUW, (b), (c), (v)) c 148 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_RESUME(b, c, v) SET_CTX_REG(RESUME, (b), (c), (v)) c 150 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_SCTLR(b, c) GET_CTX_REG(SCTLR, (b), (c)) c 151 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_ACTLR(b, c) GET_CTX_REG(ACTLR, (b), (c)) c 152 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_CONTEXTIDR(b, c) GET_CTX_REG(CONTEXTIDR, (b), (c)) c 153 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TTBR0(b, c) GET_CTX_REG(TTBR0, (b), (c)) c 154 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TTBR1(b, c) GET_CTX_REG(TTBR1, (b), (c)) c 155 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TTBCR(b, c) GET_CTX_REG(TTBCR, (b), (c)) c 156 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_PAR(b, c) GET_CTX_REG(PAR, (b), (c)) c 157 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_FSR(b, c) GET_CTX_REG(FSR, (b), (c)) c 158 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_FSRRESTORE(b, c) GET_CTX_REG(FSRRESTORE, (b), (c)) c 159 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_FAR(b, c) GET_CTX_REG(FAR, (b), (c)) c 160 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_FSYNR0(b, c) GET_CTX_REG(FSYNR0, (b), (c)) c 161 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_FSYNR1(b, c) GET_CTX_REG(FSYNR1, (b), (c)) c 162 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_PRRR(b, c) GET_CTX_REG(PRRR, (b), (c)) c 163 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_NMRR(b, c) GET_CTX_REG(NMRR, (b), (c)) c 164 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TLBLCKR(b, c) GET_CTX_REG(TLBLCKR, (b), (c)) c 165 drivers/iommu/msm_iommu_hw-8xxx.h 
#define GET_V2PSR(b, c) GET_CTX_REG(V2PSR, (b), (c)) c 166 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TLBFLPTER(b, c) GET_CTX_REG(TLBFLPTER, (b), (c)) c 167 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TLBSLPTER(b, c) GET_CTX_REG(TLBSLPTER, (b), (c)) c 168 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_BFBCR(b, c) GET_CTX_REG(BFBCR, (b), (c)) c 169 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_CTX_TLBIALL(b, c) GET_CTX_REG(CTX_TLBIALL, (b), (c)) c 170 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TLBIASID(b, c) GET_CTX_REG(TLBIASID, (b), (c)) c 171 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TLBIVA(b, c) GET_CTX_REG(TLBIVA, (b), (c)) c 172 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TLBIVAA(b, c) GET_CTX_REG(TLBIVAA, (b), (c)) c 173 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_V2PPR(b, c) GET_CTX_REG(V2PPR, (b), (c)) c 174 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_V2PPW(b, c) GET_CTX_REG(V2PPW, (b), (c)) c 175 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_V2PUR(b, c) GET_CTX_REG(V2PUR, (b), (c)) c 176 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_V2PUW(b, c) GET_CTX_REG(V2PUW, (b), (c)) c 177 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_RESUME(b, c) GET_CTX_REG(RESUME, (b), (c)) c 427 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_CFERE(b, c, v) SET_CONTEXT_FIELD(b, c, ACTLR, CFERE, v) c 428 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_CFEIE(b, c, v) SET_CONTEXT_FIELD(b, c, ACTLR, CFEIE, v) c 429 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_PTSHCFG(b, c, v) SET_CONTEXT_FIELD(b, c, ACTLR, PTSHCFG, v) c 430 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_RCOSH(b, c, v) SET_CONTEXT_FIELD(b, c, ACTLR, RCOSH, v) c 431 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_RCISH(b, c, v) SET_CONTEXT_FIELD(b, c, ACTLR, RCISH, v) c 432 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_RCNSH(b, c, v) SET_CONTEXT_FIELD(b, c, ACTLR, RCNSH, v) c 433 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_PRIVCFG(b, c, v) SET_CONTEXT_FIELD(b, c, ACTLR, PRIVCFG, v) c 434 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_DNA(b, c, v) SET_CONTEXT_FIELD(b, c, ACTLR, DNA, v) c 435 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_DNLV2PA(b, c, v) SET_CONTEXT_FIELD(b, c, ACTLR, DNLV2PA, v) c 436 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TLBMCFG(b, c, v) SET_CONTEXT_FIELD(b, c, ACTLR, TLBMCFG, v) c 437 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_CFCFG(b, c, v) SET_CONTEXT_FIELD(b, c, ACTLR, CFCFG, v) c 438 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TIPCF(b, c, v) SET_CONTEXT_FIELD(b, c, ACTLR, TIPCF, v) c 439 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_V2PCFG(b, c, v) SET_CONTEXT_FIELD(b, c, ACTLR, V2PCFG, v) c 440 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_HUME(b, c, v) SET_CONTEXT_FIELD(b, c, ACTLR, HUME, v) c 441 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_PTMTCFG(b, c, v) SET_CONTEXT_FIELD(b, c, ACTLR, PTMTCFG, v) c 442 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_PTMEMTYPE(b, c, v) SET_CONTEXT_FIELD(b, c, ACTLR, PTMEMTYPE, v) c 446 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_BFBDFE(b, c, v) SET_CONTEXT_FIELD(b, c, BFBCR, BFBDFE, v) c 447 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_BFBSFE(b, c, v) SET_CONTEXT_FIELD(b, c, BFBCR, BFBSFE, v) c 448 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_SFVS(b, c, v) SET_CONTEXT_FIELD(b, c, BFBCR, SFVS, v) c 449 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_FLVIC(b, c, v) SET_CONTEXT_FIELD(b, c, BFBCR, FLVIC, v) c 450 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_SLVIC(b, c, v) SET_CONTEXT_FIELD(b, c, BFBCR, SLVIC, v) c 454 drivers/iommu/msm_iommu_hw-8xxx.h #define 
SET_CONTEXTIDR_ASID(b, c, v) \ c 455 drivers/iommu/msm_iommu_hw-8xxx.h SET_CONTEXT_FIELD(b, c, CONTEXTIDR, CONTEXTIDR_ASID, v) c 456 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_CONTEXTIDR_PROCID(b, c, v) \ c 457 drivers/iommu/msm_iommu_hw-8xxx.h SET_CONTEXT_FIELD(b, c, CONTEXTIDR, PROCID, v) c 461 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TF(b, c, v) SET_CONTEXT_FIELD(b, c, FSR, TF, v) c 462 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_AFF(b, c, v) SET_CONTEXT_FIELD(b, c, FSR, AFF, v) c 463 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_APF(b, c, v) SET_CONTEXT_FIELD(b, c, FSR, APF, v) c 464 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TLBMF(b, c, v) SET_CONTEXT_FIELD(b, c, FSR, TLBMF, v) c 465 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_HTWDEEF(b, c, v) SET_CONTEXT_FIELD(b, c, FSR, HTWDEEF, v) c 466 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_HTWSEEF(b, c, v) SET_CONTEXT_FIELD(b, c, FSR, HTWSEEF, v) c 467 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_MHF(b, c, v) SET_CONTEXT_FIELD(b, c, FSR, MHF, v) c 468 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_SL(b, c, v) SET_CONTEXT_FIELD(b, c, FSR, SL, v) c 469 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_SS(b, c, v) SET_CONTEXT_FIELD(b, c, FSR, SS, v) c 470 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_MULTI(b, c, v) SET_CONTEXT_FIELD(b, c, FSR, MULTI, v) c 474 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_AMID(b, c, v) SET_CONTEXT_FIELD(b, c, FSYNR0, AMID, v) c 475 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_APID(b, c, v) SET_CONTEXT_FIELD(b, c, FSYNR0, APID, v) c 476 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_ABID(b, c, v) SET_CONTEXT_FIELD(b, c, FSYNR0, ABID, v) c 477 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_ATID(b, c, v) SET_CONTEXT_FIELD(b, c, FSYNR0, ATID, v) c 481 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_AMEMTYPE(b, c, v) SET_CONTEXT_FIELD(b, c, FSYNR1, AMEMTYPE, v) c 482 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_ASHARED(b, c, v) SET_CONTEXT_FIELD(b, c, FSYNR1, ASHARED, v) c 483 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_AINNERSHARED(b, c, v) \ c 484 drivers/iommu/msm_iommu_hw-8xxx.h SET_CONTEXT_FIELD(b, c, FSYNR1, AINNERSHARED, v) c 485 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_APRIV(b, c, v) SET_CONTEXT_FIELD(b, c, FSYNR1, APRIV, v) c 486 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_APROTNS(b, c, v) SET_CONTEXT_FIELD(b, c, FSYNR1, APROTNS, v) c 487 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_AINST(b, c, v) SET_CONTEXT_FIELD(b, c, FSYNR1, AINST, v) c 488 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_AWRITE(b, c, v) SET_CONTEXT_FIELD(b, c, FSYNR1, AWRITE, v) c 489 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_ABURST(b, c, v) SET_CONTEXT_FIELD(b, c, FSYNR1, ABURST, v) c 490 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_ALEN(b, c, v) SET_CONTEXT_FIELD(b, c, FSYNR1, ALEN, v) c 491 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_FSYNR1_ASIZE(b, c, v) \ c 492 drivers/iommu/msm_iommu_hw-8xxx.h SET_CONTEXT_FIELD(b, c, FSYNR1, FSYNR1_ASIZE, v) c 493 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_ALOCK(b, c, v) SET_CONTEXT_FIELD(b, c, FSYNR1, ALOCK, v) c 494 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_AFULL(b, c, v) SET_CONTEXT_FIELD(b, c, FSYNR1, AFULL, v) c 498 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_ICPC0(b, c, v) SET_CONTEXT_FIELD(b, c, NMRR, ICPC0, v) c 499 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_ICPC1(b, c, v) SET_CONTEXT_FIELD(b, c, NMRR, ICPC1, v) c 500 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_ICPC2(b, c, v) SET_CONTEXT_FIELD(b, c, NMRR, ICPC2, v) c 501 
drivers/iommu/msm_iommu_hw-8xxx.h #define SET_ICPC3(b, c, v) SET_CONTEXT_FIELD(b, c, NMRR, ICPC3, v) c 502 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_ICPC4(b, c, v) SET_CONTEXT_FIELD(b, c, NMRR, ICPC4, v) c 503 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_ICPC5(b, c, v) SET_CONTEXT_FIELD(b, c, NMRR, ICPC5, v) c 504 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_ICPC6(b, c, v) SET_CONTEXT_FIELD(b, c, NMRR, ICPC6, v) c 505 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_ICPC7(b, c, v) SET_CONTEXT_FIELD(b, c, NMRR, ICPC7, v) c 506 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_OCPC0(b, c, v) SET_CONTEXT_FIELD(b, c, NMRR, OCPC0, v) c 507 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_OCPC1(b, c, v) SET_CONTEXT_FIELD(b, c, NMRR, OCPC1, v) c 508 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_OCPC2(b, c, v) SET_CONTEXT_FIELD(b, c, NMRR, OCPC2, v) c 509 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_OCPC3(b, c, v) SET_CONTEXT_FIELD(b, c, NMRR, OCPC3, v) c 510 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_OCPC4(b, c, v) SET_CONTEXT_FIELD(b, c, NMRR, OCPC4, v) c 511 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_OCPC5(b, c, v) SET_CONTEXT_FIELD(b, c, NMRR, OCPC5, v) c 512 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_OCPC6(b, c, v) SET_CONTEXT_FIELD(b, c, NMRR, OCPC6, v) c 513 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_OCPC7(b, c, v) SET_CONTEXT_FIELD(b, c, NMRR, OCPC7, v) c 517 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_FAULT(b, c, v) SET_CONTEXT_FIELD(b, c, PAR, FAULT, v) c 519 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_FAULT_TF(b, c, v) SET_CONTEXT_FIELD(b, c, PAR, FAULT_TF, v) c 520 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_FAULT_AFF(b, c, v) SET_CONTEXT_FIELD(b, c, PAR, FAULT_AFF, v) c 521 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_FAULT_APF(b, c, v) SET_CONTEXT_FIELD(b, c, PAR, FAULT_APF, v) c 522 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_FAULT_TLBMF(b, c, v) SET_CONTEXT_FIELD(b, c, PAR, FAULT_TLBMF, v) c 523 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_FAULT_HTWDEEF(b, c, v) \ c 524 drivers/iommu/msm_iommu_hw-8xxx.h SET_CONTEXT_FIELD(b, c, PAR, FAULT_HTWDEEF, v) c 525 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_FAULT_HTWSEEF(b, c, v) \ c 526 drivers/iommu/msm_iommu_hw-8xxx.h SET_CONTEXT_FIELD(b, c, PAR, FAULT_HTWSEEF, v) c 527 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_FAULT_MHF(b, c, v) SET_CONTEXT_FIELD(b, c, PAR, FAULT_MHF, v) c 528 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_FAULT_SL(b, c, v) SET_CONTEXT_FIELD(b, c, PAR, FAULT_SL, v) c 529 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_FAULT_SS(b, c, v) SET_CONTEXT_FIELD(b, c, PAR, FAULT_SS, v) c 531 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_NOFAULT_SS(b, c, v) SET_CONTEXT_FIELD(b, c, PAR, NOFAULT_SS, v) c 532 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_NOFAULT_MT(b, c, v) SET_CONTEXT_FIELD(b, c, PAR, NOFAULT_MT, v) c 533 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_NOFAULT_SH(b, c, v) SET_CONTEXT_FIELD(b, c, PAR, NOFAULT_SH, v) c 534 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_NOFAULT_NS(b, c, v) SET_CONTEXT_FIELD(b, c, PAR, NOFAULT_NS, v) c 535 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_NOFAULT_NOS(b, c, v) SET_CONTEXT_FIELD(b, c, PAR, NOFAULT_NOS, v) c 536 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_NPFAULT_PA(b, c, v) SET_CONTEXT_FIELD(b, c, PAR, NPFAULT_PA, v) c 540 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_MTC0(b, c, v) SET_CONTEXT_FIELD(b, c, PRRR, MTC0, v) c 541 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_MTC1(b, c, v) SET_CONTEXT_FIELD(b, c, PRRR, MTC1, v) c 542 
drivers/iommu/msm_iommu_hw-8xxx.h #define SET_MTC2(b, c, v) SET_CONTEXT_FIELD(b, c, PRRR, MTC2, v) c 543 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_MTC3(b, c, v) SET_CONTEXT_FIELD(b, c, PRRR, MTC3, v) c 544 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_MTC4(b, c, v) SET_CONTEXT_FIELD(b, c, PRRR, MTC4, v) c 545 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_MTC5(b, c, v) SET_CONTEXT_FIELD(b, c, PRRR, MTC5, v) c 546 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_MTC6(b, c, v) SET_CONTEXT_FIELD(b, c, PRRR, MTC6, v) c 547 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_MTC7(b, c, v) SET_CONTEXT_FIELD(b, c, PRRR, MTC7, v) c 548 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_SHDSH0(b, c, v) SET_CONTEXT_FIELD(b, c, PRRR, SHDSH0, v) c 549 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_SHDSH1(b, c, v) SET_CONTEXT_FIELD(b, c, PRRR, SHDSH1, v) c 550 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_SHNMSH0(b, c, v) SET_CONTEXT_FIELD(b, c, PRRR, SHNMSH0, v) c 551 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_SHNMSH1(b, c, v) SET_CONTEXT_FIELD(b, c, PRRR, SHNMSH1, v) c 552 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_NOS0(b, c, v) SET_CONTEXT_FIELD(b, c, PRRR, NOS0, v) c 553 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_NOS1(b, c, v) SET_CONTEXT_FIELD(b, c, PRRR, NOS1, v) c 554 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_NOS2(b, c, v) SET_CONTEXT_FIELD(b, c, PRRR, NOS2, v) c 555 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_NOS3(b, c, v) SET_CONTEXT_FIELD(b, c, PRRR, NOS3, v) c 556 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_NOS4(b, c, v) SET_CONTEXT_FIELD(b, c, PRRR, NOS4, v) c 557 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_NOS5(b, c, v) SET_CONTEXT_FIELD(b, c, PRRR, NOS5, v) c 558 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_NOS6(b, c, v) SET_CONTEXT_FIELD(b, c, PRRR, NOS6, v) c 559 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_NOS7(b, c, v) SET_CONTEXT_FIELD(b, c, PRRR, NOS7, v) c 563 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TNR(b, c, v) SET_CONTEXT_FIELD(b, c, RESUME, TNR, v) c 567 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_M(b, c, v) SET_CONTEXT_FIELD(b, c, SCTLR, M, v) c 568 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TRE(b, c, v) SET_CONTEXT_FIELD(b, c, SCTLR, TRE, v) c 569 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_AFE(b, c, v) SET_CONTEXT_FIELD(b, c, SCTLR, AFE, v) c 570 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_HAF(b, c, v) SET_CONTEXT_FIELD(b, c, SCTLR, HAF, v) c 571 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_BE(b, c, v) SET_CONTEXT_FIELD(b, c, SCTLR, BE, v) c 572 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_AFFD(b, c, v) SET_CONTEXT_FIELD(b, c, SCTLR, AFFD, v) c 576 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_LKE(b, c, v) SET_CONTEXT_FIELD(b, c, TLBLKCR, LKE, v) c 577 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TLBLKCR_TLBIALLCFG(b, c, v) \ c 578 drivers/iommu/msm_iommu_hw-8xxx.h SET_CONTEXT_FIELD(b, c, TLBLKCR, TLBLCKR_TLBIALLCFG, v) c 579 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TLBIASIDCFG(b, c, v) \ c 580 drivers/iommu/msm_iommu_hw-8xxx.h SET_CONTEXT_FIELD(b, c, TLBLKCR, TLBIASIDCFG, v) c 581 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TLBIVAACFG(b, c, v) SET_CONTEXT_FIELD(b, c, TLBLKCR, TLBIVAACFG, v) c 582 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_FLOOR(b, c, v) SET_CONTEXT_FIELD(b, c, TLBLKCR, FLOOR, v) c 583 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_VICTIM(b, c, v) SET_CONTEXT_FIELD(b, c, TLBLKCR, VICTIM, v) c 587 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_N(b, c, v) SET_CONTEXT_FIELD(b, c, TTBCR, N, v) c 588 
drivers/iommu/msm_iommu_hw-8xxx.h #define SET_PD0(b, c, v) SET_CONTEXT_FIELD(b, c, TTBCR, PD0, v) c 589 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_PD1(b, c, v) SET_CONTEXT_FIELD(b, c, TTBCR, PD1, v) c 593 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TTBR0_IRGNH(b, c, v) SET_CONTEXT_FIELD(b, c, TTBR0, TTBR0_IRGNH, v) c 594 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TTBR0_SH(b, c, v) SET_CONTEXT_FIELD(b, c, TTBR0, TTBR0_SH, v) c 595 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TTBR0_ORGN(b, c, v) SET_CONTEXT_FIELD(b, c, TTBR0, TTBR0_ORGN, v) c 596 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TTBR0_NOS(b, c, v) SET_CONTEXT_FIELD(b, c, TTBR0, TTBR0_NOS, v) c 597 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TTBR0_IRGNL(b, c, v) SET_CONTEXT_FIELD(b, c, TTBR0, TTBR0_IRGNL, v) c 598 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TTBR0_PA(b, c, v) SET_CONTEXT_FIELD(b, c, TTBR0, TTBR0_PA, v) c 602 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TTBR1_IRGNH(b, c, v) SET_CONTEXT_FIELD(b, c, TTBR1, TTBR1_IRGNH, v) c 603 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TTBR1_SH(b, c, v) SET_CONTEXT_FIELD(b, c, TTBR1, TTBR1_SH, v) c 604 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TTBR1_ORGN(b, c, v) SET_CONTEXT_FIELD(b, c, TTBR1, TTBR1_ORGN, v) c 605 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TTBR1_NOS(b, c, v) SET_CONTEXT_FIELD(b, c, TTBR1, TTBR1_NOS, v) c 606 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TTBR1_IRGNL(b, c, v) SET_CONTEXT_FIELD(b, c, TTBR1, TTBR1_IRGNL, v) c 607 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_TTBR1_PA(b, c, v) SET_CONTEXT_FIELD(b, c, TTBR1, TTBR1_PA, v) c 611 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_HIT(b, c, v) SET_CONTEXT_FIELD(b, c, V2PSR, HIT, v) c 612 drivers/iommu/msm_iommu_hw-8xxx.h #define SET_INDEX(b, c, v) SET_CONTEXT_FIELD(b, c, V2PSR, INDEX, v) c 617 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_CFERE(b, c) GET_CONTEXT_FIELD(b, c, ACTLR, CFERE) c 618 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_CFEIE(b, c) GET_CONTEXT_FIELD(b, c, ACTLR, CFEIE) c 619 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_PTSHCFG(b, c) GET_CONTEXT_FIELD(b, c, ACTLR, PTSHCFG) c 620 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_RCOSH(b, c) GET_CONTEXT_FIELD(b, c, ACTLR, RCOSH) c 621 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_RCISH(b, c) GET_CONTEXT_FIELD(b, c, ACTLR, RCISH) c 622 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_RCNSH(b, c) GET_CONTEXT_FIELD(b, c, ACTLR, RCNSH) c 623 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_PRIVCFG(b, c) GET_CONTEXT_FIELD(b, c, ACTLR, PRIVCFG) c 624 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_DNA(b, c) GET_CONTEXT_FIELD(b, c, ACTLR, DNA) c 625 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_DNLV2PA(b, c) GET_CONTEXT_FIELD(b, c, ACTLR, DNLV2PA) c 626 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TLBMCFG(b, c) GET_CONTEXT_FIELD(b, c, ACTLR, TLBMCFG) c 627 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_CFCFG(b, c) GET_CONTEXT_FIELD(b, c, ACTLR, CFCFG) c 628 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TIPCF(b, c) GET_CONTEXT_FIELD(b, c, ACTLR, TIPCF) c 629 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_V2PCFG(b, c) GET_CONTEXT_FIELD(b, c, ACTLR, V2PCFG) c 630 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_HUME(b, c) GET_CONTEXT_FIELD(b, c, ACTLR, HUME) c 631 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_PTMTCFG(b, c) GET_CONTEXT_FIELD(b, c, ACTLR, PTMTCFG) c 632 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_PTMEMTYPE(b, c) GET_CONTEXT_FIELD(b, c, ACTLR, PTMEMTYPE) c 635 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_BFBDFE(b, c) 
GET_CONTEXT_FIELD(b, c, BFBCR, BFBDFE) c 636 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_BFBSFE(b, c) GET_CONTEXT_FIELD(b, c, BFBCR, BFBSFE) c 637 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_SFVS(b, c) GET_CONTEXT_FIELD(b, c, BFBCR, SFVS) c 638 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_FLVIC(b, c) GET_CONTEXT_FIELD(b, c, BFBCR, FLVIC) c 639 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_SLVIC(b, c) GET_CONTEXT_FIELD(b, c, BFBCR, SLVIC) c 643 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_CONTEXTIDR_ASID(b, c) \ c 644 drivers/iommu/msm_iommu_hw-8xxx.h GET_CONTEXT_FIELD(b, c, CONTEXTIDR, CONTEXTIDR_ASID) c 645 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_CONTEXTIDR_PROCID(b, c) GET_CONTEXT_FIELD(b, c, CONTEXTIDR, PROCID) c 649 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TF(b, c) GET_CONTEXT_FIELD(b, c, FSR, TF) c 650 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_AFF(b, c) GET_CONTEXT_FIELD(b, c, FSR, AFF) c 651 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_APF(b, c) GET_CONTEXT_FIELD(b, c, FSR, APF) c 652 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TLBMF(b, c) GET_CONTEXT_FIELD(b, c, FSR, TLBMF) c 653 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_HTWDEEF(b, c) GET_CONTEXT_FIELD(b, c, FSR, HTWDEEF) c 654 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_HTWSEEF(b, c) GET_CONTEXT_FIELD(b, c, FSR, HTWSEEF) c 655 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_MHF(b, c) GET_CONTEXT_FIELD(b, c, FSR, MHF) c 656 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_SL(b, c) GET_CONTEXT_FIELD(b, c, FSR, SL) c 657 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_SS(b, c) GET_CONTEXT_FIELD(b, c, FSR, SS) c 658 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_MULTI(b, c) GET_CONTEXT_FIELD(b, c, FSR, MULTI) c 662 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_AMID(b, c) GET_CONTEXT_FIELD(b, c, FSYNR0, AMID) c 663 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_APID(b, c) GET_CONTEXT_FIELD(b, c, FSYNR0, APID) c 664 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_ABID(b, c) GET_CONTEXT_FIELD(b, c, FSYNR0, ABID) c 665 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_ATID(b, c) GET_CONTEXT_FIELD(b, c, FSYNR0, ATID) c 669 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_AMEMTYPE(b, c) GET_CONTEXT_FIELD(b, c, FSYNR1, AMEMTYPE) c 670 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_ASHARED(b, c) GET_CONTEXT_FIELD(b, c, FSYNR1, ASHARED) c 671 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_AINNERSHARED(b, c) GET_CONTEXT_FIELD(b, c, FSYNR1, AINNERSHARED) c 672 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_APRIV(b, c) GET_CONTEXT_FIELD(b, c, FSYNR1, APRIV) c 673 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_APROTNS(b, c) GET_CONTEXT_FIELD(b, c, FSYNR1, APROTNS) c 674 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_AINST(b, c) GET_CONTEXT_FIELD(b, c, FSYNR1, AINST) c 675 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_AWRITE(b, c) GET_CONTEXT_FIELD(b, c, FSYNR1, AWRITE) c 676 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_ABURST(b, c) GET_CONTEXT_FIELD(b, c, FSYNR1, ABURST) c 677 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_ALEN(b, c) GET_CONTEXT_FIELD(b, c, FSYNR1, ALEN) c 678 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_FSYNR1_ASIZE(b, c) GET_CONTEXT_FIELD(b, c, FSYNR1, FSYNR1_ASIZE) c 679 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_ALOCK(b, c) GET_CONTEXT_FIELD(b, c, FSYNR1, ALOCK) c 680 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_AFULL(b, c) GET_CONTEXT_FIELD(b, c, FSYNR1, AFULL) c 684 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_ICPC0(b, c) GET_CONTEXT_FIELD(b, c, NMRR, ICPC0) c 685 drivers/iommu/msm_iommu_hw-8xxx.h #define 
GET_ICPC1(b, c) GET_CONTEXT_FIELD(b, c, NMRR, ICPC1) c 686 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_ICPC2(b, c) GET_CONTEXT_FIELD(b, c, NMRR, ICPC2) c 687 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_ICPC3(b, c) GET_CONTEXT_FIELD(b, c, NMRR, ICPC3) c 688 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_ICPC4(b, c) GET_CONTEXT_FIELD(b, c, NMRR, ICPC4) c 689 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_ICPC5(b, c) GET_CONTEXT_FIELD(b, c, NMRR, ICPC5) c 690 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_ICPC6(b, c) GET_CONTEXT_FIELD(b, c, NMRR, ICPC6) c 691 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_ICPC7(b, c) GET_CONTEXT_FIELD(b, c, NMRR, ICPC7) c 692 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_OCPC0(b, c) GET_CONTEXT_FIELD(b, c, NMRR, OCPC0) c 693 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_OCPC1(b, c) GET_CONTEXT_FIELD(b, c, NMRR, OCPC1) c 694 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_OCPC2(b, c) GET_CONTEXT_FIELD(b, c, NMRR, OCPC2) c 695 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_OCPC3(b, c) GET_CONTEXT_FIELD(b, c, NMRR, OCPC3) c 696 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_OCPC4(b, c) GET_CONTEXT_FIELD(b, c, NMRR, OCPC4) c 697 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_OCPC5(b, c) GET_CONTEXT_FIELD(b, c, NMRR, OCPC5) c 698 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_OCPC6(b, c) GET_CONTEXT_FIELD(b, c, NMRR, OCPC6) c 699 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_OCPC7(b, c) GET_CONTEXT_FIELD(b, c, NMRR, OCPC7) c 705 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_FAULT(b, c) GET_CONTEXT_FIELD(b, c, PAR, FAULT) c 707 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_FAULT_TF(b, c) GET_CONTEXT_FIELD(b, c, PAR, FAULT_TF) c 708 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_FAULT_AFF(b, c) GET_CONTEXT_FIELD(b, c, PAR, FAULT_AFF) c 709 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_FAULT_APF(b, c) GET_CONTEXT_FIELD(b, c, PAR, FAULT_APF) c 710 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_FAULT_TLBMF(b, c) GET_CONTEXT_FIELD(b, c, PAR, FAULT_TLBMF) c 711 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_FAULT_HTWDEEF(b, c) GET_CONTEXT_FIELD(b, c, PAR, FAULT_HTWDEEF) c 712 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_FAULT_HTWSEEF(b, c) GET_CONTEXT_FIELD(b, c, PAR, FAULT_HTWSEEF) c 713 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_FAULT_MHF(b, c) GET_CONTEXT_FIELD(b, c, PAR, FAULT_MHF) c 714 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_FAULT_SL(b, c) GET_CONTEXT_FIELD(b, c, PAR, FAULT_SL) c 715 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_FAULT_SS(b, c) GET_CONTEXT_FIELD(b, c, PAR, FAULT_SS) c 717 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_NOFAULT_SS(b, c) GET_CONTEXT_FIELD(b, c, PAR, PAR_NOFAULT_SS) c 718 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_NOFAULT_MT(b, c) GET_CONTEXT_FIELD(b, c, PAR, PAR_NOFAULT_MT) c 719 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_NOFAULT_SH(b, c) GET_CONTEXT_FIELD(b, c, PAR, PAR_NOFAULT_SH) c 720 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_NOFAULT_NS(b, c) GET_CONTEXT_FIELD(b, c, PAR, PAR_NOFAULT_NS) c 721 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_NOFAULT_NOS(b, c) GET_CONTEXT_FIELD(b, c, PAR, PAR_NOFAULT_NOS) c 722 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_NPFAULT_PA(b, c) GET_CONTEXT_FIELD(b, c, PAR, PAR_NPFAULT_PA) c 726 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_MTC0(b, c) GET_CONTEXT_FIELD(b, c, PRRR, MTC0) c 727 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_MTC1(b, c) GET_CONTEXT_FIELD(b, c, PRRR, MTC1) c 728 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_MTC2(b, c) GET_CONTEXT_FIELD(b, c, PRRR, MTC2) c 729 
drivers/iommu/msm_iommu_hw-8xxx.h #define GET_MTC3(b, c) GET_CONTEXT_FIELD(b, c, PRRR, MTC3) c 730 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_MTC4(b, c) GET_CONTEXT_FIELD(b, c, PRRR, MTC4) c 731 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_MTC5(b, c) GET_CONTEXT_FIELD(b, c, PRRR, MTC5) c 732 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_MTC6(b, c) GET_CONTEXT_FIELD(b, c, PRRR, MTC6) c 733 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_MTC7(b, c) GET_CONTEXT_FIELD(b, c, PRRR, MTC7) c 734 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_SHDSH0(b, c) GET_CONTEXT_FIELD(b, c, PRRR, SHDSH0) c 735 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_SHDSH1(b, c) GET_CONTEXT_FIELD(b, c, PRRR, SHDSH1) c 736 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_SHNMSH0(b, c) GET_CONTEXT_FIELD(b, c, PRRR, SHNMSH0) c 737 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_SHNMSH1(b, c) GET_CONTEXT_FIELD(b, c, PRRR, SHNMSH1) c 738 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_NOS0(b, c) GET_CONTEXT_FIELD(b, c, PRRR, NOS0) c 739 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_NOS1(b, c) GET_CONTEXT_FIELD(b, c, PRRR, NOS1) c 740 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_NOS2(b, c) GET_CONTEXT_FIELD(b, c, PRRR, NOS2) c 741 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_NOS3(b, c) GET_CONTEXT_FIELD(b, c, PRRR, NOS3) c 742 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_NOS4(b, c) GET_CONTEXT_FIELD(b, c, PRRR, NOS4) c 743 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_NOS5(b, c) GET_CONTEXT_FIELD(b, c, PRRR, NOS5) c 744 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_NOS6(b, c) GET_CONTEXT_FIELD(b, c, PRRR, NOS6) c 745 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_NOS7(b, c) GET_CONTEXT_FIELD(b, c, PRRR, NOS7) c 751 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TNR(b, c) GET_CONTEXT_FIELD(b, c, RESUME, TNR) c 755 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_M(b, c) GET_CONTEXT_FIELD(b, c, SCTLR, M) c 756 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TRE(b, c) GET_CONTEXT_FIELD(b, c, SCTLR, TRE) c 757 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_AFE(b, c) GET_CONTEXT_FIELD(b, c, SCTLR, AFE) c 758 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_HAF(b, c) GET_CONTEXT_FIELD(b, c, SCTLR, HAF) c 759 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_BE(b, c) GET_CONTEXT_FIELD(b, c, SCTLR, BE) c 760 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_AFFD(b, c) GET_CONTEXT_FIELD(b, c, SCTLR, AFFD) c 764 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_LKE(b, c) GET_CONTEXT_FIELD(b, c, TLBLKCR, LKE) c 765 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TLBLCKR_TLBIALLCFG(b, c) \ c 766 drivers/iommu/msm_iommu_hw-8xxx.h GET_CONTEXT_FIELD(b, c, TLBLKCR, TLBLCKR_TLBIALLCFG) c 767 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TLBIASIDCFG(b, c) GET_CONTEXT_FIELD(b, c, TLBLKCR, TLBIASIDCFG) c 768 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TLBIVAACFG(b, c) GET_CONTEXT_FIELD(b, c, TLBLKCR, TLBIVAACFG) c 769 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_FLOOR(b, c) GET_CONTEXT_FIELD(b, c, TLBLKCR, FLOOR) c 770 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_VICTIM(b, c) GET_CONTEXT_FIELD(b, c, TLBLKCR, VICTIM) c 774 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_N(b, c) GET_CONTEXT_FIELD(b, c, TTBCR, N) c 775 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_PD0(b, c) GET_CONTEXT_FIELD(b, c, TTBCR, PD0) c 776 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_PD1(b, c) GET_CONTEXT_FIELD(b, c, TTBCR, PD1) c 780 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TTBR0_IRGNH(b, c) GET_CONTEXT_FIELD(b, c, TTBR0, TTBR0_IRGNH) c 781 drivers/iommu/msm_iommu_hw-8xxx.h #define 
GET_TTBR0_SH(b, c) GET_CONTEXT_FIELD(b, c, TTBR0, TTBR0_SH) c 782 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TTBR0_ORGN(b, c) GET_CONTEXT_FIELD(b, c, TTBR0, TTBR0_ORGN) c 783 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TTBR0_NOS(b, c) GET_CONTEXT_FIELD(b, c, TTBR0, TTBR0_NOS) c 784 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TTBR0_IRGNL(b, c) GET_CONTEXT_FIELD(b, c, TTBR0, TTBR0_IRGNL) c 785 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TTBR0_PA(b, c) GET_CONTEXT_FIELD(b, c, TTBR0, TTBR0_PA) c 789 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TTBR1_IRGNH(b, c) GET_CONTEXT_FIELD(b, c, TTBR1, TTBR1_IRGNH) c 790 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TTBR1_SH(b, c) GET_CONTEXT_FIELD(b, c, TTBR1, TTBR1_SH) c 791 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TTBR1_ORGN(b, c) GET_CONTEXT_FIELD(b, c, TTBR1, TTBR1_ORGN) c 792 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TTBR1_NOS(b, c) GET_CONTEXT_FIELD(b, c, TTBR1, TTBR1_NOS) c 793 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TTBR1_IRGNL(b, c) GET_CONTEXT_FIELD(b, c, TTBR1, TTBR1_IRGNL) c 794 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_TTBR1_PA(b, c) GET_CONTEXT_FIELD(b, c, TTBR1, TTBR1_PA) c 798 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_HIT(b, c) GET_CONTEXT_FIELD(b, c, V2PSR, HIT) c 799 drivers/iommu/msm_iommu_hw-8xxx.h #define GET_INDEX(b, c) GET_CONTEXT_FIELD(b, c, V2PSR, INDEX) c 114 drivers/ipack/ipack.c unsigned int i, c, l, s; c 126 drivers/ipack/ipack.c c = 0; c 130 drivers/ipack/ipack.c buf[c++] = '\n'; c 132 drivers/ipack/ipack.c buf[c++] = ' '; c 134 drivers/ipack/ipack.c sprintf(&buf[c], "%02x", idev->id[i]); c 135 drivers/ipack/ipack.c c += 2; c 137 drivers/ipack/ipack.c buf[c++] = '\n'; c 138 drivers/ipack/ipack.c return c; c 268 drivers/ipack/ipack.c static u16 ipack_crc_byte(u16 crc, u8 c) c 272 drivers/ipack/ipack.c crc ^= c << 8; c 284 drivers/ipack/ipack.c u8 c; c 290 drivers/ipack/ipack.c c = (i != 11) ? dev->id[i] : 0; c 291 drivers/ipack/ipack.c crc = ipack_crc_byte(crc, c); c 299 drivers/ipack/ipack.c u8 c; c 305 drivers/ipack/ipack.c c = ((i != 0x18) && (i != 0x19)) ? 
dev->id[i] : 0; c 306 drivers/ipack/ipack.c crc = ipack_crc_byte(crc, c); c 212 drivers/irqchip/irq-tango.c struct device_node *c; c 222 drivers/irqchip/irq-tango.c for_each_child_of_node(node, c) c 223 drivers/irqchip/irq-tango.c tangox_irq_init(base, &res, c); c 204 drivers/isdn/capi/capiutil.c static unsigned command_2_index(u8 c, u8 sc) c 206 drivers/isdn/capi/capiutil.c if (c & 0x80) c 207 drivers/isdn/capi/capiutil.c c = 0x9 + (c & 0x0f); c 208 drivers/isdn/capi/capiutil.c else if (c == 0x41) c 209 drivers/isdn/capi/capiutil.c c = 0x9 + 0x1; c 210 drivers/isdn/capi/capiutil.c if (c > 0x18) c 211 drivers/isdn/capi/capiutil.c c = 0x00; c 212 drivers/isdn/capi/capiutil.c return (sc & 3) * (0x9 + 0x9) + c; c 213 drivers/isdn/hardware/mISDN/hfc_pci.h #define Write_hfc(a, b, c) (writeb(c, (a->hw.pci_io) + b)) c 554 drivers/isdn/hardware/mISDN/hfcmulti.c enablepcibridge(struct hfc_multi *c) c 556 drivers/isdn/hardware/mISDN/hfcmulti.c HFC_outb(c, R_BRG_PCM_CFG, (0x0 << 6) | 0x3); /* was _io before */ c 560 drivers/isdn/hardware/mISDN/hfcmulti.c disablepcibridge(struct hfc_multi *c) c 562 drivers/isdn/hardware/mISDN/hfcmulti.c HFC_outb(c, R_BRG_PCM_CFG, (0x0 << 6) | 0x2); /* was _io before */ c 667 drivers/isdn/hardware/mISDN/hfcmulti.c vpm_read_address(struct hfc_multi *c) c 672 drivers/isdn/hardware/mISDN/hfcmulti.c addr = cpld_read_reg(c, 0); c 673 drivers/isdn/hardware/mISDN/hfcmulti.c highbit = cpld_read_reg(c, 1); c 681 drivers/isdn/hardware/mISDN/hfcmulti.c vpm_in(struct hfc_multi *c, int which, unsigned short addr) c 685 drivers/isdn/hardware/mISDN/hfcmulti.c vpm_write_address(c, addr); c 688 drivers/isdn/hardware/mISDN/hfcmulti.c cpld_set_reg(c, 2); c 690 drivers/isdn/hardware/mISDN/hfcmulti.c cpld_set_reg(c, 3); c 692 drivers/isdn/hardware/mISDN/hfcmulti.c enablepcibridge(c); c 693 drivers/isdn/hardware/mISDN/hfcmulti.c res = readpcibridge(c, 1); c 694 drivers/isdn/hardware/mISDN/hfcmulti.c disablepcibridge(c); c 696 drivers/isdn/hardware/mISDN/hfcmulti.c cpld_set_reg(c, 0); c 702 drivers/isdn/hardware/mISDN/hfcmulti.c vpm_out(struct hfc_multi *c, int which, unsigned short addr, c 705 drivers/isdn/hardware/mISDN/hfcmulti.c vpm_write_address(c, addr); c 707 drivers/isdn/hardware/mISDN/hfcmulti.c enablepcibridge(c); c 710 drivers/isdn/hardware/mISDN/hfcmulti.c cpld_set_reg(c, 2); c 712 drivers/isdn/hardware/mISDN/hfcmulti.c cpld_set_reg(c, 3); c 714 drivers/isdn/hardware/mISDN/hfcmulti.c writepcibridge(c, 1, data); c 716 drivers/isdn/hardware/mISDN/hfcmulti.c cpld_set_reg(c, 0); c 718 drivers/isdn/hardware/mISDN/hfcmulti.c disablepcibridge(c); c 722 drivers/isdn/hardware/mISDN/hfcmulti.c regin = vpm_in(c, which, addr); c 107 drivers/isdn/hardware/mISDN/hfcsusb.h #define write_reg_atomic(a, b, c) \ c 108 drivers/isdn/hardware/mISDN/hfcsusb.h usb_control_msg((a)->dev, (a)->ctrl_out_pipe, 0, 0x40, (c), (b), \ c 110 drivers/isdn/hardware/mISDN/hfcsusb.h #define read_reg_atomic(a, b, c) \ c 111 drivers/isdn/hardware/mISDN/hfcsusb.h usb_control_msg((a)->dev, (a)->ctrl_in_pipe, 1, 0xC0, 0, (b), (c), \ c 1309 drivers/isdn/mISDN/dsp_cmx.c dsp_cmx_send_member(struct dsp *dsp, int len, s32 *c, int members) c 1522 drivers/isdn/mISDN/dsp_cmx.c sample = dsp_audio_law_to_s32[p[t]] + *c++ - c 1534 drivers/isdn/mISDN/dsp_cmx.c sample = *c++ - dsp_audio_law_to_s32[q[r]]; c 1550 drivers/isdn/mISDN/dsp_cmx.c sample = dsp_audio_law_to_s32[p[t]] + *c++; c 1561 drivers/isdn/mISDN/dsp_cmx.c sample = *c++; c 1635 drivers/isdn/mISDN/dsp_cmx.c s32 *c; c 1714 drivers/isdn/mISDN/dsp_cmx.c c = mixbuffer; c 1720 
drivers/isdn/mISDN/dsp_cmx.c *c++ += dsp_audio_law_to_s32[q[r]]; c 312 drivers/isdn/mISDN/l1oip_codec.c int i1, i2, c, sample; c 329 drivers/isdn/mISDN/l1oip_codec.c c = ulaw_to_4bit[i1]; c 331 drivers/isdn/mISDN/l1oip_codec.c c = alaw_to_4bit[i1]; c 334 drivers/isdn/mISDN/l1oip_codec.c table_com[(i1 << 8) | i2] |= (c << 4); c 335 drivers/isdn/mISDN/l1oip_codec.c table_com[(i2 << 8) | i1] |= c; c 304 drivers/isdn/mISDN/layer2.c long c = (long)arg; c 307 drivers/isdn/mISDN/layer2.c mISDNDevName4ch(&l2->ch), l2->id, prim, (char)c); c 310 drivers/isdn/mISDN/layer2.c switch (c) { c 1862 drivers/isdn/mISDN/layer2.c int c = 0; c 1900 drivers/isdn/mISDN/layer2.c c = iframe_error(l2, skb); c 1901 drivers/isdn/mISDN/layer2.c if (!c) c 1904 drivers/isdn/mISDN/layer2.c c = super_error(l2, skb); c 1905 drivers/isdn/mISDN/layer2.c if (!c) c 1908 drivers/isdn/mISDN/layer2.c c = UI_error(l2, skb); c 1909 drivers/isdn/mISDN/layer2.c if (!c) c 1912 drivers/isdn/mISDN/layer2.c c = unnum_error(l2, skb, CMD); c 1913 drivers/isdn/mISDN/layer2.c if (!c) c 1916 drivers/isdn/mISDN/layer2.c c = unnum_error(l2, skb, RSP); c 1917 drivers/isdn/mISDN/layer2.c if (!c) c 1920 drivers/isdn/mISDN/layer2.c c = unnum_error(l2, skb, CMD); c 1921 drivers/isdn/mISDN/layer2.c if (!c) c 1924 drivers/isdn/mISDN/layer2.c c = unnum_error(l2, skb, RSP); c 1925 drivers/isdn/mISDN/layer2.c if (!c) c 1928 drivers/isdn/mISDN/layer2.c c = FRMR_error(l2, skb); c 1929 drivers/isdn/mISDN/layer2.c if (!c) c 1932 drivers/isdn/mISDN/layer2.c c = 'L'; c 1933 drivers/isdn/mISDN/layer2.c if (c) { c 1935 drivers/isdn/mISDN/layer2.c mISDNDevName4ch(&l2->ch), c); c 1936 drivers/isdn/mISDN/layer2.c mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *)(long)c); c 62 drivers/leds/leds-apu.c #define cdev_to_priv(c) container_of(c, struct apu_led_priv, cdev) c 26 drivers/leds/leds-as3645a.c #define AS_TIMER_CODE_TO_US(c) ((50 * (c) + 100) * 1000) c 31 drivers/leds/leds-blinkm.c #define cdev_to_blmled(c) container_of(c, struct blinkm_led, led_cdev) c 60 drivers/leds/leds-lp3944.c #define ldev_to_led(c) container_of(c, struct lp3944_led_data, ldev) c 222 drivers/leds/leds-lp5521.c char c[3]; c 230 drivers/leds/leds-lp5521.c ret = sscanf(data + offset, "%2s%n ", c, &nrchars); c 234 drivers/leds/leds-lp5521.c ret = sscanf(c, "%2x", &cmd); c 330 drivers/leds/leds-lp5523.c char c[3]; c 338 drivers/leds/leds-lp5523.c ret = sscanf(data + offset, "%2s%n ", c, &nrchars); c 342 drivers/leds/leds-lp5523.c ret = sscanf(c, "%2x", &cmd); c 217 drivers/leds/leds-lp5562.c char c[3]; c 231 drivers/leds/leds-lp5562.c ret = sscanf(data + offset, "%2s%n ", c, &nrchars); c 235 drivers/leds/leds-lp5562.c ret = sscanf(c, "%2x", &cmd); c 209 drivers/leds/leds-lp8501.c char c[3]; c 223 drivers/leds/leds-lp8501.c ret = sscanf(data + offset, "%2s%n ", c, &nrchars); c 227 drivers/leds/leds-lp8501.c ret = sscanf(c, "%2x", &cmd); c 88 drivers/leds/leds-mlxcpld.c #define cdev_to_priv(c) container_of(c, struct mlxcpld_led_priv, cdev) c 44 drivers/leds/leds-mlxreg.c #define cdev_to_priv(c) container_of(c, struct mlxreg_led_data, led_cdev) c 31 drivers/leds/leds-pca9532.c #define ldev_to_led(c) container_of(c, struct pca9532_led, ldev) c 41 drivers/leds/leds-tlc591xx.c #define ldev_to_led(c) container_of(c, struct tlc591xx_led, ldev) c 354 drivers/lightnvm/pblk-init.c struct pblk_global_caches *c; c 356 drivers/lightnvm/pblk-init.c c = container_of(ref, struct pblk_global_caches, kref); c 358 drivers/lightnvm/pblk-init.c kmem_cache_destroy(c->ws); c 359 drivers/lightnvm/pblk-init.c 
kmem_cache_destroy(c->rec); c 360 drivers/lightnvm/pblk-init.c kmem_cache_destroy(c->g_rq); c 361 drivers/lightnvm/pblk-init.c kmem_cache_destroy(c->w_rq); c 812 drivers/lightnvm/pblk-rb.c struct pblk_c_ctx *c; c 817 drivers/lightnvm/pblk-rb.c list_for_each_entry(c, &pblk->compl_list, list) c 79 drivers/lightnvm/pblk-write.c struct pblk_c_ctx *c, *r; c 93 drivers/lightnvm/pblk-write.c list_for_each_entry_safe(c, r, &pblk->compl_list, list) { c 94 drivers/lightnvm/pblk-write.c rqd = nvm_rq_from_c_ctx(c); c 95 drivers/lightnvm/pblk-write.c if (c->sentry == pos) { c 96 drivers/lightnvm/pblk-write.c pos = pblk_end_queued_w_bio(pblk, rqd, c); c 1129 drivers/lightnvm/pblk.h return (ppa.ppa != ADDR_EMPTY && ppa.c.is_cached); c 1134 drivers/lightnvm/pblk.h return ppa.c.line; c 1141 drivers/lightnvm/pblk.h p.c.line = addr; c 1142 drivers/lightnvm/pblk.h p.c.is_cached = 1; c 1197 drivers/lightnvm/pblk.h if (p->c.is_cached) { c 1199 drivers/lightnvm/pblk.h msg, error, (u64)p->c.line); c 1241 drivers/lightnvm/pblk.h if (!ppa->c.is_cached && c 1250 drivers/lightnvm/pblk.h if (!ppa->c.is_cached && c 34 drivers/macintosh/ans-lcd.c anslcd_write_byte_ctrl ( unsigned char c ) c 37 drivers/macintosh/ans-lcd.c printk(KERN_DEBUG "LCD: CTRL byte: %02x\n",c); c 39 drivers/macintosh/ans-lcd.c out_8(anslcd_ptr + ANSLCD_CTRL_IX, c); c 40 drivers/macintosh/ans-lcd.c switch(c) { c 50 drivers/macintosh/ans-lcd.c anslcd_write_byte_data ( unsigned char c ) c 52 drivers/macintosh/ans-lcd.c out_8(anslcd_ptr + ANSLCD_DATA_IX, c); c 73 drivers/macintosh/ans-lcd.c char c; c 74 drivers/macintosh/ans-lcd.c __get_user(c, p); c 75 drivers/macintosh/ans-lcd.c anslcd_write_byte_data( c ); c 2544 drivers/macintosh/via-pmu.c int i, l, c; c 2547 drivers/macintosh/via-pmu.c c = req->data[0]; c 2548 drivers/macintosh/via-pmu.c l = pmu_data_len[c][0]; c 2558 drivers/macintosh/via-pmu.c polled_send_byte(c); c 2566 drivers/macintosh/via-pmu.c l = pmu_data_len[c][1]; c 42 drivers/macintosh/windfarm_lm75_sensor.c #define wf_to_lm75(c) container_of(c, struct wf_lm75_sensor, sens) c 38 drivers/macintosh/windfarm_lm87_sensor.c #define wf_to_lm87(c) container_of(c, struct wf_lm87_sensor, sens) c 51 drivers/macintosh/windfarm_smu_controls.c #define to_smu_fan(c) container_of(c, struct smu_fan_control, ctrl) c 49 drivers/macintosh/windfarm_smu_sat.c #define wf_to_sat(c) container_of(c, struct wf_sat_sensor, sens) c 56 drivers/macintosh/windfarm_smu_sensors.c #define to_smu_ads(c) container_of(c, struct smu_ad_sensor, sens) c 280 drivers/macintosh/windfarm_smu_sensors.c #define to_smu_cpu_power(c) container_of(c, struct smu_cpu_power_sensor, sens) c 772 drivers/mailbox/bcm-flexrm-mailbox.c struct brcm_sba_command *c; c 777 drivers/mailbox/bcm-flexrm-mailbox.c c = &msg->sba.cmds[i]; c 779 drivers/mailbox/bcm-flexrm-mailbox.c if ((c->flags & BRCM_SBA_CMD_HAS_RESP) && c 780 drivers/mailbox/bcm-flexrm-mailbox.c (c->flags & BRCM_SBA_CMD_HAS_OUTPUT)) { c 782 drivers/mailbox/bcm-flexrm-mailbox.c d = flexrm_dst_desc(c->resp, c->resp_len); c 787 drivers/mailbox/bcm-flexrm-mailbox.c } else if (c->flags & BRCM_SBA_CMD_HAS_RESP) { c 789 drivers/mailbox/bcm-flexrm-mailbox.c d = flexrm_dstt_desc(c->resp, c->resp_len); c 796 drivers/mailbox/bcm-flexrm-mailbox.c if (c->flags & BRCM_SBA_CMD_HAS_OUTPUT) { c 798 drivers/mailbox/bcm-flexrm-mailbox.c d = flexrm_dstt_desc(c->data, c->data_len); c 805 drivers/mailbox/bcm-flexrm-mailbox.c if (c->flags & BRCM_SBA_CMD_TYPE_B) { c 807 drivers/mailbox/bcm-flexrm-mailbox.c d = flexrm_imm_desc(c->cmd); c 814 
drivers/mailbox/bcm-flexrm-mailbox.c d = flexrm_immt_desc(c->cmd); c 821 drivers/mailbox/bcm-flexrm-mailbox.c if ((c->flags & BRCM_SBA_CMD_TYPE_B) || c 822 drivers/mailbox/bcm-flexrm-mailbox.c (c->flags & BRCM_SBA_CMD_TYPE_C)) { c 824 drivers/mailbox/bcm-flexrm-mailbox.c d = flexrm_srct_desc(c->data, c->data_len); c 136 drivers/mcb/mcb-parse.c struct chameleon_bar *c; c 156 drivers/mcb/mcb-parse.c c = kcalloc(bar_count, sizeof(struct chameleon_bar), c 158 drivers/mcb/mcb-parse.c if (!c) c 161 drivers/mcb/mcb-parse.c chameleon_parse_bar(*base, c, bar_count); c 164 drivers/mcb/mcb-parse.c c = kzalloc(sizeof(struct chameleon_bar), GFP_KERNEL); c 165 drivers/mcb/mcb-parse.c if (!c) c 169 drivers/mcb/mcb-parse.c c->addr = mapbase; c 172 drivers/mcb/mcb-parse.c *cb = c; c 86 drivers/md/bcache/alloc.c void bch_rescale_priorities(struct cache_set *c, int sectors) c 90 drivers/md/bcache/alloc.c unsigned int next = c->nbuckets * c->sb.bucket_size / 1024; c 94 drivers/md/bcache/alloc.c atomic_sub(sectors, &c->rescale); c 97 drivers/md/bcache/alloc.c r = atomic_read(&c->rescale); c 101 drivers/md/bcache/alloc.c } while (atomic_cmpxchg(&c->rescale, r, r + next) != r); c 103 drivers/md/bcache/alloc.c mutex_lock(&c->bucket_lock); c 105 drivers/md/bcache/alloc.c c->min_prio = USHRT_MAX; c 107 drivers/md/bcache/alloc.c for_each_cache(ca, c, i) c 113 drivers/md/bcache/alloc.c c->min_prio = min(c->min_prio, b->prio); c 116 drivers/md/bcache/alloc.c mutex_unlock(&c->bucket_lock); c 481 drivers/md/bcache/alloc.c void bch_bucket_free(struct cache_set *c, struct bkey *k) c 486 drivers/md/bcache/alloc.c __bch_bucket_free(PTR_CACHE(c, k, i), c 487 drivers/md/bcache/alloc.c PTR_BUCKET(c, k, i)); c 490 drivers/md/bcache/alloc.c int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve, c 496 drivers/md/bcache/alloc.c if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags))) c 499 drivers/md/bcache/alloc.c lockdep_assert_held(&c->bucket_lock); c 500 drivers/md/bcache/alloc.c BUG_ON(!n || n > c->caches_loaded || n > MAX_CACHES_PER_SET); c 507 drivers/md/bcache/alloc.c struct cache *ca = c->cache_by_alloc[i]; c 514 drivers/md/bcache/alloc.c bucket_to_sector(c, b), c 522 drivers/md/bcache/alloc.c bch_bucket_free(c, k); c 523 drivers/md/bcache/alloc.c bkey_put(c, k); c 527 drivers/md/bcache/alloc.c int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve, c 532 drivers/md/bcache/alloc.c mutex_lock(&c->bucket_lock); c 533 drivers/md/bcache/alloc.c ret = __bch_bucket_alloc_set(c, reserve, k, n, wait); c 534 drivers/md/bcache/alloc.c mutex_unlock(&c->bucket_lock); c 572 drivers/md/bcache/alloc.c static struct open_bucket *pick_data_bucket(struct cache_set *c, c 579 drivers/md/bcache/alloc.c list_for_each_entry_reverse(ret, &c->data_buckets, list) c 580 drivers/md/bcache/alloc.c if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) != c 581 drivers/md/bcache/alloc.c UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)])) c 588 drivers/md/bcache/alloc.c ret = ret_task ?: list_first_entry(&c->data_buckets, c 592 drivers/md/bcache/alloc.c ret->sectors_free = c->sb.bucket_size; c 613 drivers/md/bcache/alloc.c bool bch_alloc_sectors(struct cache_set *c, c 632 drivers/md/bcache/alloc.c spin_lock(&c->data_bucket_lock); c 634 drivers/md/bcache/alloc.c while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) { c 639 drivers/md/bcache/alloc.c spin_unlock(&c->data_bucket_lock); c 641 drivers/md/bcache/alloc.c if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait)) c 644 drivers/md/bcache/alloc.c 
spin_lock(&c->data_bucket_lock); c 653 drivers/md/bcache/alloc.c bkey_put(c, &alloc.key); c 656 drivers/md/bcache/alloc.c EBUG_ON(ptr_stale(c, &b->key, i)); c 673 drivers/md/bcache/alloc.c list_move_tail(&b->list, &c->data_buckets); c 683 drivers/md/bcache/alloc.c &PTR_CACHE(c, &b->key, i)->sectors_written); c 686 drivers/md/bcache/alloc.c if (b->sectors_free < c->sb.block_size) c 696 drivers/md/bcache/alloc.c atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin); c 698 drivers/md/bcache/alloc.c spin_unlock(&c->data_bucket_lock); c 704 drivers/md/bcache/alloc.c void bch_open_buckets_free(struct cache_set *c) c 708 drivers/md/bcache/alloc.c while (!list_empty(&c->data_buckets)) { c 709 drivers/md/bcache/alloc.c b = list_first_entry(&c->data_buckets, c 716 drivers/md/bcache/alloc.c int bch_open_buckets_alloc(struct cache_set *c) c 720 drivers/md/bcache/alloc.c spin_lock_init(&c->data_bucket_lock); c 728 drivers/md/bcache/alloc.c list_add(&b->list, &c->data_buckets); c 254 drivers/md/bcache/bcache.h struct cache_set *c; c 750 drivers/md/bcache/bcache.h #define btree_bytes(c) ((c)->btree_pages * PAGE_SIZE) c 752 drivers/md/bcache/bcache.h ((unsigned int) (KEY_SIZE(&b->key) >> (b)->c->block_bits)) c 754 drivers/md/bcache/bcache.h #define btree_default_blocks(c) \ c 755 drivers/md/bcache/bcache.h ((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits)) c 757 drivers/md/bcache/bcache.h #define bucket_pages(c) ((c)->sb.bucket_size / PAGE_SECTORS) c 758 drivers/md/bcache/bcache.h #define bucket_bytes(c) ((c)->sb.bucket_size << 9) c 759 drivers/md/bcache/bcache.h #define block_bytes(c) ((c)->sb.block_size << 9) c 761 drivers/md/bcache/bcache.h #define prios_per_bucket(c) \ c 762 drivers/md/bcache/bcache.h ((bucket_bytes(c) - sizeof(struct prio_set)) / \ c 764 drivers/md/bcache/bcache.h #define prio_buckets(c) \ c 765 drivers/md/bcache/bcache.h DIV_ROUND_UP((size_t) (c)->sb.nbuckets, prios_per_bucket(c)) c 767 drivers/md/bcache/bcache.h static inline size_t sector_to_bucket(struct cache_set *c, sector_t s) c 769 drivers/md/bcache/bcache.h return s >> c->bucket_bits; c 772 drivers/md/bcache/bcache.h static inline sector_t bucket_to_sector(struct cache_set *c, size_t b) c 774 drivers/md/bcache/bcache.h return ((sector_t) b) << c->bucket_bits; c 777 drivers/md/bcache/bcache.h static inline sector_t bucket_remainder(struct cache_set *c, sector_t s) c 779 drivers/md/bcache/bcache.h return s & (c->sb.bucket_size - 1); c 782 drivers/md/bcache/bcache.h static inline struct cache *PTR_CACHE(struct cache_set *c, c 786 drivers/md/bcache/bcache.h return c->cache[PTR_DEV(k, ptr)]; c 789 drivers/md/bcache/bcache.h static inline size_t PTR_BUCKET_NR(struct cache_set *c, c 793 drivers/md/bcache/bcache.h return sector_to_bucket(c, PTR_OFFSET(k, ptr)); c 796 drivers/md/bcache/bcache.h static inline struct bucket *PTR_BUCKET(struct cache_set *c, c 800 drivers/md/bcache/bcache.h return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr); c 810 drivers/md/bcache/bcache.h static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k, c 813 drivers/md/bcache/bcache.h return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i)); c 816 drivers/md/bcache/bcache.h static inline bool ptr_available(struct cache_set *c, const struct bkey *k, c 819 drivers/md/bcache/bcache.h return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i); c 837 drivers/md/bcache/bcache.h if (bch_cache_set_error((b)->c, __VA_ARGS__)) \ c 841 drivers/md/bcache/bcache.h #define cache_bug(c, ...) 
\ c 843 drivers/md/bcache/bcache.h if (bch_cache_set_error(c, __VA_ARGS__)) \ c 853 drivers/md/bcache/bcache.h #define cache_bug_on(cond, c, ...) \ c 856 drivers/md/bcache/bcache.h cache_bug(c, __VA_ARGS__); \ c 859 drivers/md/bcache/bcache.h #define cache_set_err_on(cond, c, ...) \ c 862 drivers/md/bcache/bcache.h bch_cache_set_error(c, __VA_ARGS__); \ c 909 drivers/md/bcache/bcache.h static inline void wake_up_allocators(struct cache_set *c) c 914 drivers/md/bcache/bcache.h for_each_cache(ca, c, i) c 918 drivers/md/bcache/bcache.h static inline void closure_bio_submit(struct cache_set *c, c 923 drivers/md/bcache/bcache.h if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags))) { c 950 drivers/md/bcache/bcache.h void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio, c 952 drivers/md/bcache/bcache.h void bch_bbio_endio(struct cache_set *c, struct bio *bio, c 954 drivers/md/bcache/bcache.h void bch_bbio_free(struct bio *bio, struct cache_set *c); c 955 drivers/md/bcache/bcache.h struct bio *bch_bbio_alloc(struct cache_set *c); c 957 drivers/md/bcache/bcache.h void __bch_submit_bbio(struct bio *bio, struct cache_set *c); c 958 drivers/md/bcache/bcache.h void bch_submit_bbio(struct bio *bio, struct cache_set *c, c 962 drivers/md/bcache/bcache.h void bch_rescale_priorities(struct cache_set *c, int sectors); c 968 drivers/md/bcache/bcache.h void bch_bucket_free(struct cache_set *c, struct bkey *k); c 971 drivers/md/bcache/bcache.h int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve, c 973 drivers/md/bcache/bcache.h int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve, c 975 drivers/md/bcache/bcache.h bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, c 981 drivers/md/bcache/bcache.h bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...); c 1002 drivers/md/bcache/bcache.h int bch_uuid_write(struct cache_set *c); c 1003 drivers/md/bcache/bcache.h void bcache_write_super(struct cache_set *c); c 1005 drivers/md/bcache/bcache.h int bch_flash_dev_create(struct cache_set *c, uint64_t size); c 1007 drivers/md/bcache/bcache.h int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, c 1013 drivers/md/bcache/bcache.h void bch_cache_set_unregister(struct cache_set *c); c 1014 drivers/md/bcache/bcache.h void bch_cache_set_stop(struct cache_set *c); c 1017 drivers/md/bcache/bcache.h void bch_btree_cache_free(struct cache_set *c); c 1018 drivers/md/bcache/bcache.h int bch_btree_cache_alloc(struct cache_set *c); c 1019 drivers/md/bcache/bcache.h void bch_moving_init_cache_set(struct cache_set *c); c 1020 drivers/md/bcache/bcache.h int bch_open_buckets_alloc(struct cache_set *c); c 1021 drivers/md/bcache/bcache.h void bch_open_buckets_free(struct cache_set *c); c 99 drivers/md/bcache/btree.c #define PTR_HASH(c, k) \ c 100 drivers/md/bcache/btree.c (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0)) c 126 drivers/md/bcache/btree.c struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \ c 142 drivers/md/bcache/btree.c #define btree_root(fn, c, op, ...) 
\ c 146 drivers/md/bcache/btree.c struct btree *_b = (c)->root; \ c 149 drivers/md/bcache/btree.c if (_b == (c)->root && \ c 154 drivers/md/bcache/btree.c bch_cannibalize_unlock(c); \ c 159 drivers/md/bcache/btree.c finish_wait(&(c)->btree_cache_wait, &(op)->wait); \ c 165 drivers/md/bcache/btree.c return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c); c 172 drivers/md/bcache/btree.c bch_btree_sort(&b->keys, &b->c->sort); c 174 drivers/md/bcache/btree.c bch_btree_sort_lazy(&b->keys, &b->c->sort); c 178 drivers/md/bcache/btree.c bset_magic(&b->c->sb)); c 184 drivers/md/bcache/btree.c void bkey_put(struct cache_set *c, struct bkey *k) c 189 drivers/md/bcache/btree.c if (ptr_available(c, k, i)) c 190 drivers/md/bcache/btree.c atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin); c 215 drivers/md/bcache/btree.c iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO); c 216 drivers/md/bcache/btree.c iter->size = b->c->sb.bucket_size / b->c->sb.block_size; c 234 drivers/md/bcache/btree.c if (b->written + set_blocks(i, block_bytes(b->c)) > c 239 drivers/md/bcache/btree.c if (i->magic != bset_magic(&b->c->sb)) c 260 drivers/md/bcache/btree.c b->written += set_blocks(i, block_bytes(b->c)); c 266 drivers/md/bcache/btree.c i = ((void *) i) + block_bytes(b->c)) c 270 drivers/md/bcache/btree.c bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort); c 280 drivers/md/bcache/btree.c bset_magic(&b->c->sb)); c 282 drivers/md/bcache/btree.c mempool_free(iter, &b->c->fill_iter); c 286 drivers/md/bcache/btree.c bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys", c 287 drivers/md/bcache/btree.c err, PTR_BUCKET_NR(b->c, &b->key, 0), c 309 drivers/md/bcache/btree.c bio = bch_bbio_alloc(b->c); c 317 drivers/md/bcache/btree.c bch_submit_bbio(bio, b->c, &b->key, 0); c 323 drivers/md/bcache/btree.c bch_bbio_free(bio, b->c); c 329 drivers/md/bcache/btree.c bch_time_stats_update(&b->c->btree_read_time, start_time); c 333 drivers/md/bcache/btree.c bch_cache_set_error(b->c, "io error reading bucket %zu", c 334 drivers/md/bcache/btree.c PTR_BUCKET_NR(b->c, &b->key, 0)); c 340 drivers/md/bcache/btree.c !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked)) c 341 drivers/md/bcache/btree.c wake_up_allocators(b->c); c 345 drivers/md/bcache/btree.c __closure_wake_up(&b->c->journal.wait); c 364 drivers/md/bcache/btree.c bch_bbio_free(b->bio, b->c); c 390 drivers/md/bcache/btree.c bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree"); c 404 drivers/md/bcache/btree.c b->bio = bch_bbio_alloc(b->c); c 408 drivers/md/bcache/btree.c b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c)); c 441 drivers/md/bcache/btree.c bch_submit_bbio(b->bio, b->c, &k.key, 0); c 452 drivers/md/bcache/btree.c bch_submit_bbio(b->bio, b->c, &k.key, 0); c 477 drivers/md/bcache/btree.c closure_init(&b->io, parent ?: &b->c->cl); c 484 drivers/md/bcache/btree.c atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size, c 485 drivers/md/bcache/btree.c &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written); c 487 drivers/md/bcache/btree.c b->written += set_blocks(i, block_bytes(b->c)); c 548 drivers/md/bcache/btree.c journal_pin_cmp(b->c, w->journal, journal_ref)) { c 570 drivers/md/bcache/btree.c #define mca_reserve(c) (((c->root && c->root->level) \ c 571 drivers/md/bcache/btree.c ? 
c->root->level : 1) * 8 + 16) c 572 drivers/md/bcache/btree.c #define mca_can_free(c) \ c 573 drivers/md/bcache/btree.c max_t(int, 0, c->btree_cache_used - mca_reserve(c)) c 581 drivers/md/bcache/btree.c b->c->btree_cache_used--; c 582 drivers/md/bcache/btree.c list_move(&b->list, &b->c->btree_cache_freed); c 591 drivers/md/bcache/btree.c list_move(&b->list, &b->c->btree_cache_freeable); c 603 drivers/md/bcache/btree.c ilog2(b->c->btree_pages), c 606 drivers/md/bcache/btree.c b->c->btree_cache_used++; c 607 drivers/md/bcache/btree.c list_move(&b->list, &b->c->btree_cache); c 609 drivers/md/bcache/btree.c list_move(&b->list, &b->c->btree_cache_freed); c 613 drivers/md/bcache/btree.c static struct btree *mca_bucket_alloc(struct cache_set *c, c 631 drivers/md/bcache/btree.c b->c = c; c 643 drivers/md/bcache/btree.c lockdep_assert_held(&b->c->bucket_lock); c 700 drivers/md/bcache/btree.c struct cache_set *c = container_of(shrink, struct cache_set, shrink); c 706 drivers/md/bcache/btree.c if (c->shrinker_disabled) c 709 drivers/md/bcache/btree.c if (c->btree_cache_alloc_lock) c 714 drivers/md/bcache/btree.c mutex_lock(&c->bucket_lock); c 715 drivers/md/bcache/btree.c else if (!mutex_trylock(&c->bucket_lock)) c 725 drivers/md/bcache/btree.c nr /= c->btree_pages; c 728 drivers/md/bcache/btree.c nr = min_t(unsigned long, nr, mca_can_free(c)); c 731 drivers/md/bcache/btree.c btree_cache_used = c->btree_cache_used; c 732 drivers/md/bcache/btree.c list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) { c 746 drivers/md/bcache/btree.c if (list_empty(&c->btree_cache)) c 749 drivers/md/bcache/btree.c b = list_first_entry(&c->btree_cache, struct btree, list); c 750 drivers/md/bcache/btree.c list_rotate_left(&c->btree_cache); c 762 drivers/md/bcache/btree.c mutex_unlock(&c->bucket_lock); c 763 drivers/md/bcache/btree.c return freed * c->btree_pages; c 769 drivers/md/bcache/btree.c struct cache_set *c = container_of(shrink, struct cache_set, shrink); c 771 drivers/md/bcache/btree.c if (c->shrinker_disabled) c 774 drivers/md/bcache/btree.c if (c->btree_cache_alloc_lock) c 777 drivers/md/bcache/btree.c return mca_can_free(c) * c->btree_pages; c 780 drivers/md/bcache/btree.c void bch_btree_cache_free(struct cache_set *c) c 787 drivers/md/bcache/btree.c if (c->shrink.list.next) c 788 drivers/md/bcache/btree.c unregister_shrinker(&c->shrink); c 790 drivers/md/bcache/btree.c mutex_lock(&c->bucket_lock); c 793 drivers/md/bcache/btree.c if (c->verify_data) c 794 drivers/md/bcache/btree.c list_move(&c->verify_data->list, &c->btree_cache); c 796 drivers/md/bcache/btree.c free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c))); c 799 drivers/md/bcache/btree.c list_splice(&c->btree_cache_freeable, c 800 drivers/md/bcache/btree.c &c->btree_cache); c 802 drivers/md/bcache/btree.c while (!list_empty(&c->btree_cache)) { c 803 drivers/md/bcache/btree.c b = list_first_entry(&c->btree_cache, struct btree, list); c 817 drivers/md/bcache/btree.c while (!list_empty(&c->btree_cache_freed)) { c 818 drivers/md/bcache/btree.c b = list_first_entry(&c->btree_cache_freed, c 825 drivers/md/bcache/btree.c mutex_unlock(&c->bucket_lock); c 828 drivers/md/bcache/btree.c int bch_btree_cache_alloc(struct cache_set *c) c 832 drivers/md/bcache/btree.c for (i = 0; i < mca_reserve(c); i++) c 833 drivers/md/bcache/btree.c if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL)) c 836 drivers/md/bcache/btree.c list_splice_init(&c->btree_cache, c 837 drivers/md/bcache/btree.c &c->btree_cache_freeable); c 840 drivers/md/bcache/btree.c 
mutex_init(&c->verify_lock); c 842 drivers/md/bcache/btree.c c->verify_ondisk = (void *) c 843 drivers/md/bcache/btree.c __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c))); c 845 drivers/md/bcache/btree.c c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL); c 847 drivers/md/bcache/btree.c if (c->verify_data && c 848 drivers/md/bcache/btree.c c->verify_data->keys.set->data) c 849 drivers/md/bcache/btree.c list_del_init(&c->verify_data->list); c 851 drivers/md/bcache/btree.c c->verify_data = NULL; c 854 drivers/md/bcache/btree.c c->shrink.count_objects = bch_mca_count; c 855 drivers/md/bcache/btree.c c->shrink.scan_objects = bch_mca_scan; c 856 drivers/md/bcache/btree.c c->shrink.seeks = 4; c 857 drivers/md/bcache/btree.c c->shrink.batch = c->btree_pages * 2; c 859 drivers/md/bcache/btree.c if (register_shrinker(&c->shrink)) c 868 drivers/md/bcache/btree.c static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k) c 870 drivers/md/bcache/btree.c return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)]; c 873 drivers/md/bcache/btree.c static struct btree *mca_find(struct cache_set *c, struct bkey *k) c 878 drivers/md/bcache/btree.c hlist_for_each_entry_rcu(b, mca_hash(c, k), hash) c 879 drivers/md/bcache/btree.c if (PTR_HASH(c, &b->key) == PTR_HASH(c, k)) c 887 drivers/md/bcache/btree.c static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op) c 891 drivers/md/bcache/btree.c old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current); c 894 drivers/md/bcache/btree.c prepare_to_wait(&c->btree_cache_wait, &op->wait, c 902 drivers/md/bcache/btree.c static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op, c 907 drivers/md/bcache/btree.c trace_bcache_btree_cache_cannibalize(c); c 909 drivers/md/bcache/btree.c if (mca_cannibalize_lock(c, op)) c 912 drivers/md/bcache/btree.c list_for_each_entry_reverse(b, &c->btree_cache, list) c 916 drivers/md/bcache/btree.c list_for_each_entry_reverse(b, &c->btree_cache, list) c 930 drivers/md/bcache/btree.c static void bch_cannibalize_unlock(struct cache_set *c) c 932 drivers/md/bcache/btree.c if (c->btree_cache_alloc_lock == current) { c 933 drivers/md/bcache/btree.c c->btree_cache_alloc_lock = NULL; c 934 drivers/md/bcache/btree.c wake_up(&c->btree_cache_wait); c 938 drivers/md/bcache/btree.c static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op, c 945 drivers/md/bcache/btree.c lockdep_assert_held(&c->bucket_lock); c 947 drivers/md/bcache/btree.c if (mca_find(c, k)) c 953 drivers/md/bcache/btree.c list_for_each_entry(b, &c->btree_cache_freeable, list) c 960 drivers/md/bcache/btree.c list_for_each_entry(b, &c->btree_cache_freed, list) c 969 drivers/md/bcache/btree.c b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO); c 980 drivers/md/bcache/btree.c list_move(&b->list, &c->btree_cache); c 982 drivers/md/bcache/btree.c hlist_add_head_rcu(&b->hash, mca_hash(c, k)); c 992 drivers/md/bcache/btree.c &b->c->expensive_debug_checks); c 995 drivers/md/bcache/btree.c &b->c->expensive_debug_checks); c 1002 drivers/md/bcache/btree.c b = mca_cannibalize(c, op, k); c 1018 drivers/md/bcache/btree.c struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op, c 1027 drivers/md/bcache/btree.c b = mca_find(c, k); c 1033 drivers/md/bcache/btree.c mutex_lock(&c->bucket_lock); c 1034 drivers/md/bcache/btree.c b = mca_alloc(c, op, k, level); c 1035 drivers/md/bcache/btree.c mutex_unlock(&c->bucket_lock); c 1048 drivers/md/bcache/btree.c if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) { c 1080 
drivers/md/bcache/btree.c mutex_lock(&parent->c->bucket_lock); c 1081 drivers/md/bcache/btree.c b = mca_alloc(parent->c, NULL, k, parent->level - 1); c 1082 drivers/md/bcache/btree.c mutex_unlock(&parent->c->bucket_lock); c 1097 drivers/md/bcache/btree.c BUG_ON(b == b->c->root); c 1123 drivers/md/bcache/btree.c mutex_lock(&b->c->bucket_lock); c 1124 drivers/md/bcache/btree.c bch_bucket_free(b->c, &b->key); c 1126 drivers/md/bcache/btree.c mutex_unlock(&b->c->bucket_lock); c 1129 drivers/md/bcache/btree.c struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op, c 1136 drivers/md/bcache/btree.c mutex_lock(&c->bucket_lock); c 1138 drivers/md/bcache/btree.c if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait)) c 1141 drivers/md/bcache/btree.c bkey_put(c, &k.key); c 1142 drivers/md/bcache/btree.c SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS); c 1144 drivers/md/bcache/btree.c b = mca_alloc(c, op, &k.key, level); c 1149 drivers/md/bcache/btree.c cache_bug(c, c 1156 drivers/md/bcache/btree.c bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb)); c 1158 drivers/md/bcache/btree.c mutex_unlock(&c->bucket_lock); c 1163 drivers/md/bcache/btree.c bch_bucket_free(c, &k.key); c 1165 drivers/md/bcache/btree.c mutex_unlock(&c->bucket_lock); c 1167 drivers/md/bcache/btree.c trace_bcache_btree_node_alloc_fail(c); c 1171 drivers/md/bcache/btree.c static struct btree *bch_btree_node_alloc(struct cache_set *c, c 1175 drivers/md/bcache/btree.c return __bch_btree_node_alloc(c, op, level, op != NULL, parent); c 1181 drivers/md/bcache/btree.c struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent); c 1185 drivers/md/bcache/btree.c bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort); c 1197 drivers/md/bcache/btree.c mutex_lock(&b->c->bucket_lock); c 1199 drivers/md/bcache/btree.c atomic_inc(&b->c->prio_blocked); c 1206 drivers/md/bcache/btree.c bch_inc_gen(PTR_CACHE(b->c, &b->key, i), c 1207 drivers/md/bcache/btree.c PTR_BUCKET(b->c, &b->key, i))); c 1209 drivers/md/bcache/btree.c mutex_unlock(&b->c->bucket_lock); c 1214 drivers/md/bcache/btree.c struct cache_set *c = b->c; c 1216 drivers/md/bcache/btree.c unsigned int i, reserve = (c->root->level - b->level) * 2 + 1; c 1218 drivers/md/bcache/btree.c mutex_lock(&c->bucket_lock); c 1220 drivers/md/bcache/btree.c for_each_cache(ca, c, i) c 1223 drivers/md/bcache/btree.c prepare_to_wait(&c->btree_cache_wait, &op->wait, c 1225 drivers/md/bcache/btree.c mutex_unlock(&c->bucket_lock); c 1229 drivers/md/bcache/btree.c mutex_unlock(&c->bucket_lock); c 1231 drivers/md/bcache/btree.c return mca_cannibalize_lock(b->c, op); c 1236 drivers/md/bcache/btree.c static uint8_t __bch_btree_mark_key(struct cache_set *c, int level, c 1252 drivers/md/bcache/btree.c if (!ptr_available(c, k, i)) c 1255 drivers/md/bcache/btree.c g = PTR_BUCKET(c, k, i); c 1260 drivers/md/bcache/btree.c if (ptr_stale(c, k, i)) { c 1261 drivers/md/bcache/btree.c stale = max(stale, ptr_stale(c, k, i)); c 1267 drivers/md/bcache/btree.c c, "inconsistent ptrs: mark = %llu, level = %i", c 1288 drivers/md/bcache/btree.c #define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k) c 1290 drivers/md/bcache/btree.c void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k) c 1295 drivers/md/bcache/btree.c if (ptr_available(c, k, i) && c 1296 drivers/md/bcache/btree.c !ptr_stale(c, k, i)) { c 1297 drivers/md/bcache/btree.c struct bucket *b = PTR_BUCKET(c, k, i); c 1307 drivers/md/bcache/btree.c __bch_btree_mark_key(c, level, k); 
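The bcache.h entries indexed above (sector_to_bucket, bucket_to_sector, bucket_remainder, bucket_bytes) boil down to shift-and-mask arithmetic over a power-of-two bucket size. The following is a minimal standalone sketch of that arithmetic, not bcache's own code: the bucket size of 1024 sectors and the sample sector number are illustrative values chosen here, and the helpers take the geometry as constants instead of a struct cache_set.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative geometry: 1024 sectors (512 KiB) per bucket. */
#define BUCKET_SIZE_SECTORS 1024UL
#define BUCKET_BITS 10               /* log2(BUCKET_SIZE_SECTORS) */

/* Bucket index containing sector s (cf. sector_to_bucket). */
static inline size_t sector_to_bucket(uint64_t s)
{
	return s >> BUCKET_BITS;
}

/* First sector of bucket b (cf. bucket_to_sector). */
static inline uint64_t bucket_to_sector(size_t b)
{
	return (uint64_t)b << BUCKET_BITS;
}

/* Offset of sector s within its bucket (cf. bucket_remainder). */
static inline uint64_t bucket_remainder(uint64_t s)
{
	return s & (BUCKET_SIZE_SECTORS - 1);
}

int main(void)
{
	uint64_t s = 123456;	/* arbitrary sample sector */
	size_t b = sector_to_bucket(s);

	/* A sector is its bucket's base plus the in-bucket remainder. */
	assert(s == bucket_to_sector(b) + bucket_remainder(s));

	printf("sector %llu -> bucket %zu, offset %llu, bucket bytes %lu\n",
	       (unsigned long long)s, b,
	       (unsigned long long)bucket_remainder(s),
	       BUCKET_SIZE_SECTORS << 9);	/* cf. bucket_bytes: sectors * 512 */
	return 0;
}

Compiled on its own, it prints the bucket index, in-bucket offset, and bucket size in bytes for the sample sector; the assertion holds because the three helpers are inverse pieces of the same power-of-two split.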
c 1310 drivers/md/bcache/btree.c void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats) c 1312 drivers/md/bcache/btree.c stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets; c 1345 drivers/md/bcache/btree.c if (b->c->gc_always_rewrite) c 1389 drivers/md/bcache/btree.c blocks = btree_default_blocks(b->c) * 2 / 3; c 1393 drivers/md/bcache/btree.c block_bytes(b->c)) > blocks * (nodes - 1)) c 1427 drivers/md/bcache/btree.c block_bytes(b->c)) > blocks) c 1443 drivers/md/bcache/btree.c block_bytes(b->c)) > c 1452 drivers/md/bcache/btree.c BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) > c 1525 drivers/md/bcache/btree.c atomic_dec(&b->c->prio_blocked); c 1584 drivers/md/bcache/btree.c static size_t btree_gc_min_nodes(struct cache_set *c) c 1602 drivers/md/bcache/btree.c min_nodes = c->gc_stats.nodes / MAX_GC_TIMES; c 1620 drivers/md/bcache/btree.c bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done); c 1628 drivers/md/bcache/btree.c r->b = bch_btree_node_get(b->c, op, k, b->level - 1, c 1659 drivers/md/bcache/btree.c bkey_copy_key(&b->c->gc_done, &last->b->key); c 1675 drivers/md/bcache/btree.c if (atomic_read(&b->c->search_inflight) && c 1676 drivers/md/bcache/btree.c gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) { c 1722 drivers/md/bcache/btree.c __bch_btree_mark_key(b->c, b->level + 1, &b->key); c 1730 drivers/md/bcache/btree.c bkey_copy_key(&b->c->gc_done, &b->key); c 1735 drivers/md/bcache/btree.c static void btree_gc_start(struct cache_set *c) c 1741 drivers/md/bcache/btree.c if (!c->gc_mark_valid) c 1744 drivers/md/bcache/btree.c mutex_lock(&c->bucket_lock); c 1746 drivers/md/bcache/btree.c c->gc_mark_valid = 0; c 1747 drivers/md/bcache/btree.c c->gc_done = ZERO_KEY; c 1749 drivers/md/bcache/btree.c for_each_cache(ca, c, i) c 1758 drivers/md/bcache/btree.c mutex_unlock(&c->bucket_lock); c 1761 drivers/md/bcache/btree.c static void bch_btree_gc_finish(struct cache_set *c) c 1767 drivers/md/bcache/btree.c mutex_lock(&c->bucket_lock); c 1769 drivers/md/bcache/btree.c set_gc_sectors(c); c 1770 drivers/md/bcache/btree.c c->gc_mark_valid = 1; c 1771 drivers/md/bcache/btree.c c->need_gc = 0; c 1773 drivers/md/bcache/btree.c for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++) c 1774 drivers/md/bcache/btree.c SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i), c 1779 drivers/md/bcache/btree.c for (i = 0; i < c->devices_max_used; i++) { c 1780 drivers/md/bcache/btree.c struct bcache_device *d = c->devices[i]; c 1785 drivers/md/bcache/btree.c if (!d || UUID_FLASH_ONLY(&c->uuids[i])) c 1793 drivers/md/bcache/btree.c SET_GC_MARK(PTR_BUCKET(c, &w->key, j), c 1799 drivers/md/bcache/btree.c c->avail_nbuckets = 0; c 1800 drivers/md/bcache/btree.c for_each_cache(ca, c, i) { c 1813 drivers/md/bcache/btree.c c->need_gc = max(c->need_gc, bucket_gc_gen(b)); c 1821 drivers/md/bcache/btree.c c->avail_nbuckets++; c 1825 drivers/md/bcache/btree.c mutex_unlock(&c->bucket_lock); c 1828 drivers/md/bcache/btree.c static void bch_btree_gc(struct cache_set *c) c 1836 drivers/md/bcache/btree.c trace_bcache_gc_start(c); c 1842 drivers/md/bcache/btree.c btree_gc_start(c); c 1846 drivers/md/bcache/btree.c ret = btree_root(gc_root, c, &op, &writes, &stats); c 1855 drivers/md/bcache/btree.c } while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags)); c 1857 drivers/md/bcache/btree.c bch_btree_gc_finish(c); c 1858 drivers/md/bcache/btree.c wake_up_allocators(c); c 1860 drivers/md/bcache/btree.c bch_time_stats_update(&c->btree_gc_time, start_time); c 1864 
drivers/md/bcache/btree.c bch_update_bucket_in_use(c, &stats); c 1865 drivers/md/bcache/btree.c memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat)); c 1867 drivers/md/bcache/btree.c trace_bcache_gc_end(c); c 1869 drivers/md/bcache/btree.c bch_moving_gc(c); c 1872 drivers/md/bcache/btree.c static bool gc_should_run(struct cache_set *c) c 1877 drivers/md/bcache/btree.c for_each_cache(ca, c, i) c 1881 drivers/md/bcache/btree.c if (atomic_read(&c->sectors_to_gc) < 0) c 1889 drivers/md/bcache/btree.c struct cache_set *c = arg; c 1892 drivers/md/bcache/btree.c wait_event_interruptible(c->gc_wait, c 1894 drivers/md/bcache/btree.c test_bit(CACHE_SET_IO_DISABLE, &c->flags) || c 1895 drivers/md/bcache/btree.c gc_should_run(c)); c 1898 drivers/md/bcache/btree.c test_bit(CACHE_SET_IO_DISABLE, &c->flags)) c 1901 drivers/md/bcache/btree.c set_gc_sectors(c); c 1902 drivers/md/bcache/btree.c bch_btree_gc(c); c 1909 drivers/md/bcache/btree.c int bch_gc_thread_start(struct cache_set *c) c 1911 drivers/md/bcache/btree.c c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc"); c 1912 drivers/md/bcache/btree.c return PTR_ERR_OR_ZERO(c->gc_thread); c 1924 drivers/md/bcache/btree.c bch_initial_mark_key(b->c, b->level, k); c 1926 drivers/md/bcache/btree.c bch_initial_mark_key(b->c, b->level + 1, &b->key); c 1940 drivers/md/bcache/btree.c b->c->gc_stats.nodes++; c 1953 drivers/md/bcache/btree.c int bch_btree_check(struct cache_set *c) c 1959 drivers/md/bcache/btree.c return btree_root(check_recurse, c, &op); c 1962 drivers/md/bcache/btree.c void bch_initial_gc_finish(struct cache_set *c) c 1968 drivers/md/bcache/btree.c bch_btree_gc_finish(c); c 1970 drivers/md/bcache/btree.c mutex_lock(&c->bucket_lock); c 1981 drivers/md/bcache/btree.c for_each_cache(ca, c, i) { c 1998 drivers/md/bcache/btree.c mutex_unlock(&c->bucket_lock); c 2050 drivers/md/bcache/btree.c bkey_put(b->c, k); c 2102 drivers/md/bcache/btree.c block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5; c 2109 drivers/md/bcache/btree.c n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent); c 2114 drivers/md/bcache/btree.c n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL); c 2189 drivers/md/bcache/btree.c bch_time_stats_update(&b->c->btree_split_time, start_time); c 2193 drivers/md/bcache/btree.c bkey_put(b->c, &n2->key); c 2197 drivers/md/bcache/btree.c bkey_put(b->c, &n1->key); c 2250 drivers/md/bcache/btree.c op->lock = b->c->root->level + 1; c 2252 drivers/md/bcache/btree.c } else if (op->lock <= b->c->root->level) { c 2253 drivers/md/bcache/btree.c op->lock = b->c->root->level + 1; c 2325 drivers/md/bcache/btree.c int bch_btree_insert(struct cache_set *c, struct keylist *keys, c 2341 drivers/md/bcache/btree.c ret = bch_btree_map_leaf_nodes(&op.op, c, c 2352 drivers/md/bcache/btree.c bkey_put(c, k); c 2371 drivers/md/bcache/btree.c BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO); c 2373 drivers/md/bcache/btree.c mutex_lock(&b->c->bucket_lock); c 2375 drivers/md/bcache/btree.c mutex_unlock(&b->c->bucket_lock); c 2377 drivers/md/bcache/btree.c b->c->root = b; c 2379 drivers/md/bcache/btree.c bch_journal_meta(b->c, &cl); c 2414 drivers/md/bcache/btree.c int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c, c 2417 drivers/md/bcache/btree.c return btree_root(map_nodes_recurse, c, op, from, fn, flags); c 2447 drivers/md/bcache/btree.c int bch_btree_map_keys(struct btree_op *op, struct cache_set *c, c 2450 drivers/md/bcache/btree.c return btree_root(map_keys_recurse, c, op, from, fn, flags); c 2523 drivers/md/bcache/btree.c void 
bch_refill_keybuf(struct cache_set *c, struct keybuf *buf, c 2537 drivers/md/bcache/btree.c bch_btree_map_keys(&refill.op, c, &buf->last_scanned, c 2623 drivers/md/bcache/btree.c struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c, c 2640 drivers/md/bcache/btree.c bch_refill_keybuf(c, buf, end, pred); c 128 drivers/md/bcache/btree.h struct cache_set *c; c 191 drivers/md/bcache/btree.h return bset_sector_offset(&b->keys, i) >> b->c->block_bits; c 194 drivers/md/bcache/btree.h static inline void set_gc_sectors(struct cache_set *c) c 196 drivers/md/bcache/btree.h atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16); c 199 drivers/md/bcache/btree.h void bkey_put(struct cache_set *c, struct bkey *k); c 203 drivers/md/bcache/btree.h #define for_each_cached_btree(b, c, iter) \ c 205 drivers/md/bcache/btree.h iter < ARRAY_SIZE((c)->bucket_hash); \ c 207 drivers/md/bcache/btree.h hlist_for_each_entry_rcu((b), (c)->bucket_hash + iter, hash) c 248 drivers/md/bcache/btree.h struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op, c 251 drivers/md/bcache/btree.h struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op, c 257 drivers/md/bcache/btree.h int bch_btree_insert(struct cache_set *c, struct keylist *keys, c 260 drivers/md/bcache/btree.h int bch_gc_thread_start(struct cache_set *c); c 261 drivers/md/bcache/btree.h void bch_initial_gc_finish(struct cache_set *c); c 262 drivers/md/bcache/btree.h void bch_moving_gc(struct cache_set *c); c 263 drivers/md/bcache/btree.h int bch_btree_check(struct cache_set *c); c 264 drivers/md/bcache/btree.h void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k); c 266 drivers/md/bcache/btree.h static inline void wake_up_gc(struct cache_set *c) c 268 drivers/md/bcache/btree.h wake_up(&c->gc_wait); c 271 drivers/md/bcache/btree.h static inline void force_wake_up_gc(struct cache_set *c) c 285 drivers/md/bcache/btree.h atomic_set(&c->sectors_to_gc, -1); c 286 drivers/md/bcache/btree.h wake_up_gc(c); c 298 drivers/md/bcache/btree.h int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c, c 301 drivers/md/bcache/btree.h static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c, c 304 drivers/md/bcache/btree.h return __bch_btree_map_nodes(op, c, from, fn, MAP_ALL_NODES); c 308 drivers/md/bcache/btree.h struct cache_set *c, c 312 drivers/md/bcache/btree.h return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES); c 317 drivers/md/bcache/btree.h int bch_btree_map_keys(struct btree_op *op, struct cache_set *c, c 323 drivers/md/bcache/btree.h void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf, c 329 drivers/md/bcache/btree.h struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c, c 333 drivers/md/bcache/btree.h void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats); c 28 drivers/md/bcache/debug.c i = (void *) i + set_blocks(i, block_bytes(b->c)) * \ c 29 drivers/md/bcache/debug.c block_bytes(b->c)) c 33 drivers/md/bcache/debug.c struct btree *v = b->c->verify_data; c 37 drivers/md/bcache/debug.c if (!b->c->verify || !b->c->verify_ondisk) c 41 drivers/md/bcache/debug.c mutex_lock(&b->c->verify_lock); c 43 drivers/md/bcache/debug.c ondisk = b->c->verify_ondisk; c 44 drivers/md/bcache/debug.c sorted = b->c->verify_data->keys.set->data; c 52 drivers/md/bcache/debug.c bio = bch_bbio_alloc(b->c); c 53 drivers/md/bcache/debug.c bio_set_dev(bio, PTR_CACHE(b->c, &b->key, 0)->bdev); c 60 drivers/md/bcache/debug.c bch_bbio_free(bio, 
b->c); c 85 drivers/md/bcache/debug.c block_bytes(b->c); c 92 drivers/md/bcache/debug.c ((void *) i - (void *) ondisk) / block_bytes(b->c)); c 104 drivers/md/bcache/debug.c mutex_unlock(&b->c->verify_lock); c 139 drivers/md/bcache/debug.c dc->disk.c, c 162 drivers/md/bcache/debug.c struct cache_set *c; c 194 drivers/md/bcache/debug.c w = bch_keybuf_next_rescan(i->c, &i->keys, &MAX_KEY, dump_pred); c 208 drivers/md/bcache/debug.c struct cache_set *c = inode->i_private; c 216 drivers/md/bcache/debug.c i->c = c; c 236 drivers/md/bcache/debug.c void bch_debug_init_cache_set(struct cache_set *c) c 241 drivers/md/bcache/debug.c snprintf(name, 50, "bcache-%pU", c->sb.set_uuid); c 242 drivers/md/bcache/debug.c c->debug = debugfs_create_file(name, 0400, bcache_debug, c, c 14 drivers/md/bcache/debug.h #define expensive_debug_checks(c) ((c)->expensive_debug_checks) c 15 drivers/md/bcache/debug.h #define key_merging_disabled(c) ((c)->key_merging_disabled) c 23 drivers/md/bcache/debug.h #define expensive_debug_checks(c) 0 c 24 drivers/md/bcache/debug.h #define key_merging_disabled(c) 0 c 30 drivers/md/bcache/debug.h void bch_debug_init_cache_set(struct cache_set *c); c 32 drivers/md/bcache/debug.h static inline void bch_debug_init_cache_set(struct cache_set *c) {} c 42 drivers/md/bcache/extents.c int64_t c = bkey_cmp(l.k, r.k); c 44 drivers/md/bcache/extents.c return c ? c > 0 : l.k < r.k; c 47 drivers/md/bcache/extents.c static bool __ptr_invalid(struct cache_set *c, const struct bkey *k) c 52 drivers/md/bcache/extents.c if (ptr_available(c, k, i)) { c 53 drivers/md/bcache/extents.c struct cache *ca = PTR_CACHE(c, k, i); c 54 drivers/md/bcache/extents.c size_t bucket = PTR_BUCKET_NR(c, k, i); c 55 drivers/md/bcache/extents.c size_t r = bucket_remainder(c, PTR_OFFSET(k, i)); c 57 drivers/md/bcache/extents.c if (KEY_SIZE(k) + r > c->sb.bucket_size || c 68 drivers/md/bcache/extents.c static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k) c 73 drivers/md/bcache/extents.c if (ptr_available(c, k, i)) { c 74 drivers/md/bcache/extents.c struct cache *ca = PTR_CACHE(c, k, i); c 75 drivers/md/bcache/extents.c size_t bucket = PTR_BUCKET_NR(c, k, i); c 76 drivers/md/bcache/extents.c size_t r = bucket_remainder(c, PTR_OFFSET(k, i)); c 78 drivers/md/bcache/extents.c if (KEY_SIZE(k) + r > c->sb.bucket_size) c 84 drivers/md/bcache/extents.c if (ptr_stale(c, k, i)) c 136 drivers/md/bcache/extents.c size_t n = PTR_BUCKET_NR(b->c, k, j); c 139 drivers/md/bcache/extents.c if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets) c 141 drivers/md/bcache/extents.c PTR_BUCKET(b->c, k, j)->prio); c 144 drivers/md/bcache/extents.c pr_err(" %s\n", bch_ptr_status(b->c, k)); c 149 drivers/md/bcache/extents.c bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k) c 156 drivers/md/bcache/extents.c if (__ptr_invalid(c, k)) c 162 drivers/md/bcache/extents.c cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k)); c 170 drivers/md/bcache/extents.c return __bch_btree_ptr_invalid(b->c, k); c 179 drivers/md/bcache/extents.c if (mutex_trylock(&b->c->bucket_lock)) { c 181 drivers/md/bcache/extents.c if (ptr_available(b->c, k, i)) { c 182 drivers/md/bcache/extents.c g = PTR_BUCKET(b->c, k, i); c 186 drivers/md/bcache/extents.c (b->c->gc_mark_valid && c 191 drivers/md/bcache/extents.c mutex_unlock(&b->c->bucket_lock); c 196 drivers/md/bcache/extents.c mutex_unlock(&b->c->bucket_lock); c 200 drivers/md/bcache/extents.c buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin), c 216 
drivers/md/bcache/extents.c if (!ptr_available(b->c, k, i) || c 217 drivers/md/bcache/extents.c ptr_stale(b->c, k, i)) c 220 drivers/md/bcache/extents.c if (expensive_debug_checks(b->c) && c 261 drivers/md/bcache/extents.c int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k)); c 263 drivers/md/bcache/extents.c return c ? c > 0 : l.k < r.k; c 314 drivers/md/bcache/extents.c struct cache_set *c, c 319 drivers/md/bcache/extents.c bcache_dev_sectors_dirty_add(c, KEY_INODE(k), c 328 drivers/md/bcache/extents.c struct cache_set *c = container_of(b, struct btree, keys)->c; c 407 drivers/md/bcache/extents.c bch_subtract_dirty(k, c, KEY_START(insert), c 458 drivers/md/bcache/extents.c bch_subtract_dirty(k, c, old_offset, old_size - KEY_SIZE(k)); c 473 drivers/md/bcache/extents.c bcache_dev_sectors_dirty_add(c, KEY_INODE(insert), c 480 drivers/md/bcache/extents.c bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k) c 490 drivers/md/bcache/extents.c if (__ptr_invalid(c, k)) c 496 drivers/md/bcache/extents.c cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k)); c 504 drivers/md/bcache/extents.c return __bch_extent_invalid(b->c, k); c 510 drivers/md/bcache/extents.c struct bucket *g = PTR_BUCKET(b->c, k, ptr); c 513 drivers/md/bcache/extents.c if (mutex_trylock(&b->c->bucket_lock)) { c 514 drivers/md/bcache/extents.c if (b->c->gc_mark_valid && c 523 drivers/md/bcache/extents.c mutex_unlock(&b->c->bucket_lock); c 528 drivers/md/bcache/extents.c mutex_unlock(&b->c->bucket_lock); c 532 drivers/md/bcache/extents.c buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin), c 548 drivers/md/bcache/extents.c if (!ptr_available(b->c, k, i)) c 552 drivers/md/bcache/extents.c stale = ptr_stale(b->c, k, i); c 562 drivers/md/bcache/extents.c stale, b->c->need_gc); c 567 drivers/md/bcache/extents.c if (expensive_debug_checks(b->c) && c 588 drivers/md/bcache/extents.c if (key_merging_disabled(b->c)) c 593 drivers/md/bcache/extents.c PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i)) c 12 drivers/md/bcache/extents.h bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k); c 13 drivers/md/bcache/extents.h bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k); c 17 drivers/md/bcache/io.c void bch_bbio_free(struct bio *bio, struct cache_set *c) c 21 drivers/md/bcache/io.c mempool_free(b, &c->bio_meta); c 24 drivers/md/bcache/io.c struct bio *bch_bbio_alloc(struct cache_set *c) c 26 drivers/md/bcache/io.c struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO); c 29 drivers/md/bcache/io.c bio_init(bio, bio->bi_inline_vecs, bucket_pages(c)); c 34 drivers/md/bcache/io.c void __bch_submit_bbio(struct bio *bio, struct cache_set *c) c 39 drivers/md/bcache/io.c bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev); c 42 drivers/md/bcache/io.c closure_bio_submit(c, bio, bio->bi_private); c 45 drivers/md/bcache/io.c void bch_submit_bbio(struct bio *bio, struct cache_set *c, c 51 drivers/md/bcache/io.c __bch_submit_bbio(bio, c); c 136 drivers/md/bcache/io.c void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio, c 140 drivers/md/bcache/io.c struct cache *ca = PTR_CACHE(c, &b->key, 0); c 144 drivers/md/bcache/io.c ? 
c->congested_write_threshold_us c 145 drivers/md/bcache/io.c : c->congested_read_threshold_us; c 150 drivers/md/bcache/io.c int congested = atomic_read(&c->congested); c 155 drivers/md/bcache/io.c c->congested_last_us = t; c 158 drivers/md/bcache/io.c atomic_sub(ms, &c->congested); c 160 drivers/md/bcache/io.c atomic_inc(&c->congested); c 166 drivers/md/bcache/io.c void bch_bbio_endio(struct cache_set *c, struct bio *bio, c 171 drivers/md/bcache/io.c bch_bbio_count_io_errors(c, bio, error, m); c 171 drivers/md/bcache/journal.c int bch_journal_read(struct cache_set *c, struct list_head *list) c 186 drivers/md/bcache/journal.c for_each_cache(ca, c, iter) { c 292 drivers/md/bcache/journal.c c->journal.seq = list_entry(list->prev, c 300 drivers/md/bcache/journal.c void bch_journal_mark(struct cache_set *c, struct list_head *list) c 305 drivers/md/bcache/journal.c struct journal *j = &c->journal; c 334 drivers/md/bcache/journal.c if (!__bch_extent_invalid(c, k)) { c 338 drivers/md/bcache/journal.c if (ptr_available(c, k, j)) c 339 drivers/md/bcache/journal.c atomic_inc(&PTR_BUCKET(c, k, j)->pin); c 341 drivers/md/bcache/journal.c bch_initial_mark_key(c, 0, k); c 422 drivers/md/bcache/journal.c static void btree_flush_write(struct cache_set *c) c 430 drivers/md/bcache/journal.c if (c->journal.btree_flushing) c 433 drivers/md/bcache/journal.c spin_lock(&c->journal.flush_write_lock); c 434 drivers/md/bcache/journal.c if (c->journal.btree_flushing) { c 435 drivers/md/bcache/journal.c spin_unlock(&c->journal.flush_write_lock); c 438 drivers/md/bcache/journal.c c->journal.btree_flushing = true; c 439 drivers/md/bcache/journal.c spin_unlock(&c->journal.flush_write_lock); c 442 drivers/md/bcache/journal.c spin_lock(&c->journal.lock); c 443 drivers/md/bcache/journal.c fifo_front_p = &fifo_front(&c->journal.pin); c 450 drivers/md/bcache/journal.c spin_unlock(&c->journal.lock); c 453 drivers/md/bcache/journal.c spin_unlock(&c->journal.lock); c 455 drivers/md/bcache/journal.c mask = c->journal.pin.mask; c 457 drivers/md/bcache/journal.c atomic_long_inc(&c->flush_write); c 460 drivers/md/bcache/journal.c mutex_lock(&c->bucket_lock); c 461 drivers/md/bcache/journal.c list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) { c 468 drivers/md/bcache/journal.c now_fifo_front_p = &fifo_front(&c->journal.pin); c 535 drivers/md/bcache/journal.c mutex_unlock(&c->bucket_lock); c 571 drivers/md/bcache/journal.c spin_lock(&c->journal.flush_write_lock); c 572 drivers/md/bcache/journal.c c->journal.btree_flushing = false; c 573 drivers/md/bcache/journal.c spin_unlock(&c->journal.flush_write_lock); c 639 drivers/md/bcache/journal.c static void journal_reclaim(struct cache_set *c) c 641 drivers/md/bcache/journal.c struct bkey *k = &c->journal.key; c 647 drivers/md/bcache/journal.c atomic_long_inc(&c->reclaim); c 649 drivers/md/bcache/journal.c while (!atomic_read(&fifo_front(&c->journal.pin))) c 650 drivers/md/bcache/journal.c fifo_pop(&c->journal.pin, p); c 652 drivers/md/bcache/journal.c last_seq = last_seq(&c->journal); c 656 drivers/md/bcache/journal.c for_each_cache(ca, c, iter) { c 665 drivers/md/bcache/journal.c for_each_cache(ca, c, iter) c 668 drivers/md/bcache/journal.c if (c->journal.blocks_free) c 676 drivers/md/bcache/journal.c for_each_cache(ca, c, iter) { c 686 drivers/md/bcache/journal.c bucket_to_sector(c, ca->sb.d[ja->cur_idx]), c 688 drivers/md/bcache/journal.c atomic_long_inc(&c->reclaimed_journal_buckets); c 694 drivers/md/bcache/journal.c c->journal.blocks_free = c->sb.bucket_size >> 
c->block_bits; c 697 drivers/md/bcache/journal.c if (!journal_full(&c->journal)) c 698 drivers/md/bcache/journal.c __closure_wake_up(&c->journal.wait); c 729 drivers/md/bcache/journal.c cache_set_err_on(bio->bi_status, w->c, "journal io error"); c 730 drivers/md/bcache/journal.c closure_put(&w->c->journal.io); c 747 drivers/md/bcache/journal.c __releases(&c->journal.lock) c 749 drivers/md/bcache/journal.c struct cache_set *c = container_of(cl, struct cache_set, journal.io); c 751 drivers/md/bcache/journal.c c->journal.io_in_flight = 0; c 752 drivers/md/bcache/journal.c spin_unlock(&c->journal.lock); c 756 drivers/md/bcache/journal.c __releases(c->journal.lock) c 758 drivers/md/bcache/journal.c struct cache_set *c = container_of(cl, struct cache_set, journal.io); c 760 drivers/md/bcache/journal.c struct journal_write *w = c->journal.cur; c 761 drivers/md/bcache/journal.c struct bkey *k = &c->journal.key; c 762 drivers/md/bcache/journal.c unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) * c 763 drivers/md/bcache/journal.c c->sb.block_size; c 773 drivers/md/bcache/journal.c } else if (journal_full(&c->journal)) { c 774 drivers/md/bcache/journal.c journal_reclaim(c); c 775 drivers/md/bcache/journal.c spin_unlock(&c->journal.lock); c 777 drivers/md/bcache/journal.c btree_flush_write(c); c 782 drivers/md/bcache/journal.c c->journal.blocks_free -= set_blocks(w->data, block_bytes(c)); c 784 drivers/md/bcache/journal.c w->data->btree_level = c->root->level; c 786 drivers/md/bcache/journal.c bkey_copy(&w->data->btree_root, &c->root->key); c 787 drivers/md/bcache/journal.c bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket); c 789 drivers/md/bcache/journal.c for_each_cache(ca, c, i) c 792 drivers/md/bcache/journal.c w->data->magic = jset_magic(&c->sb); c 794 drivers/md/bcache/journal.c w->data->last_seq = last_seq(&c->journal); c 798 drivers/md/bcache/journal.c ca = PTR_CACHE(c, k, i); c 825 drivers/md/bcache/journal.c atomic_dec_bug(&fifo_back(&c->journal.pin)); c 826 drivers/md/bcache/journal.c bch_journal_next(&c->journal); c 827 drivers/md/bcache/journal.c journal_reclaim(c); c 829 drivers/md/bcache/journal.c spin_unlock(&c->journal.lock); c 832 drivers/md/bcache/journal.c closure_bio_submit(c, bio, cl); c 839 drivers/md/bcache/journal.c struct cache_set *c = container_of(cl, struct cache_set, journal.io); c 841 drivers/md/bcache/journal.c spin_lock(&c->journal.lock); c 845 drivers/md/bcache/journal.c static void journal_try_write(struct cache_set *c) c 846 drivers/md/bcache/journal.c __releases(c->journal.lock) c 848 drivers/md/bcache/journal.c struct closure *cl = &c->journal.io; c 849 drivers/md/bcache/journal.c struct journal_write *w = c->journal.cur; c 853 drivers/md/bcache/journal.c if (!c->journal.io_in_flight) { c 854 drivers/md/bcache/journal.c c->journal.io_in_flight = 1; c 855 drivers/md/bcache/journal.c closure_call(cl, journal_write_unlocked, NULL, &c->cl); c 857 drivers/md/bcache/journal.c spin_unlock(&c->journal.lock); c 861 drivers/md/bcache/journal.c static struct journal_write *journal_wait_for_write(struct cache_set *c, c 863 drivers/md/bcache/journal.c __acquires(&c->journal.lock) c 871 drivers/md/bcache/journal.c spin_lock(&c->journal.lock); c 874 drivers/md/bcache/journal.c struct journal_write *w = c->journal.cur; c 877 drivers/md/bcache/journal.c block_bytes(c)) * c->sb.block_size; c 880 drivers/md/bcache/journal.c c->journal.blocks_free * c->sb.block_size, c 885 drivers/md/bcache/journal.c closure_wait(&c->journal.wait, &cl); c 887 drivers/md/bcache/journal.c 
if (!journal_full(&c->journal)) { c 889 drivers/md/bcache/journal.c trace_bcache_journal_entry_full(c); c 899 drivers/md/bcache/journal.c journal_try_write(c); /* unlocks */ c 902 drivers/md/bcache/journal.c trace_bcache_journal_full(c); c 904 drivers/md/bcache/journal.c journal_reclaim(c); c 905 drivers/md/bcache/journal.c spin_unlock(&c->journal.lock); c 907 drivers/md/bcache/journal.c btree_flush_write(c); c 911 drivers/md/bcache/journal.c spin_lock(&c->journal.lock); c 918 drivers/md/bcache/journal.c struct cache_set *c = container_of(to_delayed_work(work), c 921 drivers/md/bcache/journal.c spin_lock(&c->journal.lock); c 922 drivers/md/bcache/journal.c if (c->journal.cur->dirty) c 923 drivers/md/bcache/journal.c journal_try_write(c); c 925 drivers/md/bcache/journal.c spin_unlock(&c->journal.lock); c 934 drivers/md/bcache/journal.c atomic_t *bch_journal(struct cache_set *c, c 942 drivers/md/bcache/journal.c if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags))) c 945 drivers/md/bcache/journal.c if (!CACHE_SYNC(&c->sb)) c 948 drivers/md/bcache/journal.c w = journal_wait_for_write(c, bch_keylist_nkeys(keys)); c 953 drivers/md/bcache/journal.c ret = &fifo_back(&c->journal.pin); c 958 drivers/md/bcache/journal.c journal_try_write(c); c 961 drivers/md/bcache/journal.c schedule_delayed_work(&c->journal.work, c 962 drivers/md/bcache/journal.c msecs_to_jiffies(c->journal_delay_ms)); c 963 drivers/md/bcache/journal.c spin_unlock(&c->journal.lock); c 965 drivers/md/bcache/journal.c spin_unlock(&c->journal.lock); c 972 drivers/md/bcache/journal.c void bch_journal_meta(struct cache_set *c, struct closure *cl) c 979 drivers/md/bcache/journal.c ref = bch_journal(c, &keys, cl); c 984 drivers/md/bcache/journal.c void bch_journal_free(struct cache_set *c) c 986 drivers/md/bcache/journal.c free_pages((unsigned long) c->journal.w[1].data, JSET_BITS); c 987 drivers/md/bcache/journal.c free_pages((unsigned long) c->journal.w[0].data, JSET_BITS); c 988 drivers/md/bcache/journal.c free_fifo(&c->journal.pin); c 991 drivers/md/bcache/journal.c int bch_journal_alloc(struct cache_set *c) c 993 drivers/md/bcache/journal.c struct journal *j = &c->journal; c 999 drivers/md/bcache/journal.c c->journal_delay_ms = 100; c 1001 drivers/md/bcache/journal.c j->w[0].c = c; c 1002 drivers/md/bcache/journal.c j->w[1].c = c; c 97 drivers/md/bcache/journal.h struct cache_set *c; c 161 drivers/md/bcache/journal.h #define journal_pin_cmp(c, l, r) \ c 162 drivers/md/bcache/journal.h (fifo_idx(&(c)->journal.pin, (l)) > fifo_idx(&(c)->journal.pin, (r))) c 174 drivers/md/bcache/journal.h atomic_t *bch_journal(struct cache_set *c, c 178 drivers/md/bcache/journal.h void bch_journal_mark(struct cache_set *c, struct list_head *list); c 179 drivers/md/bcache/journal.h void bch_journal_meta(struct cache_set *c, struct closure *cl); c 180 drivers/md/bcache/journal.h int bch_journal_read(struct cache_set *c, struct list_head *list); c 181 drivers/md/bcache/journal.h int bch_journal_replay(struct cache_set *c, struct list_head *list); c 183 drivers/md/bcache/journal.h void bch_journal_free(struct cache_set *c); c 184 drivers/md/bcache/journal.h int bch_journal_alloc(struct cache_set *c); c 24 drivers/md/bcache/movinggc.c struct cache_set *c = container_of(buf, struct cache_set, c 29 drivers/md/bcache/movinggc.c if (ptr_available(c, k, i) && c 30 drivers/md/bcache/movinggc.c GC_MOVE(PTR_BUCKET(c, k, i))) c 55 drivers/md/bcache/movinggc.c bch_keybuf_del(&io->op.c->moving_gc_keys, io->w); c 57 drivers/md/bcache/movinggc.c 
up(&io->op.c->moving_in_flight); c 71 drivers/md/bcache/movinggc.c ptr_stale(io->op.c, &b->key, 0)) { c 75 drivers/md/bcache/movinggc.c bch_bbio_endio(io->op.c, bio, bio->bi_status, "reading data to move"); c 121 drivers/md/bcache/movinggc.c bch_submit_bbio(bio, io->op.c, &io->w->key, 0); c 126 drivers/md/bcache/movinggc.c static void read_moving(struct cache_set *c) c 137 drivers/md/bcache/movinggc.c while (!test_bit(CACHE_SET_STOPPING, &c->flags)) { c 138 drivers/md/bcache/movinggc.c w = bch_keybuf_next_rescan(c, &c->moving_gc_keys, c 143 drivers/md/bcache/movinggc.c if (ptr_stale(c, &w->key, 0)) { c 144 drivers/md/bcache/movinggc.c bch_keybuf_del(&c->moving_gc_keys, w); c 157 drivers/md/bcache/movinggc.c io->op.c = c; c 158 drivers/md/bcache/movinggc.c io->op.wq = c->moving_gc_wq; c 171 drivers/md/bcache/movinggc.c down(&c->moving_in_flight); c 179 drivers/md/bcache/movinggc.c bch_keybuf_del(&c->moving_gc_keys, w); c 197 drivers/md/bcache/movinggc.c void bch_moving_gc(struct cache_set *c) c 203 drivers/md/bcache/movinggc.c if (!c->copy_gc_enabled) c 206 drivers/md/bcache/movinggc.c mutex_lock(&c->bucket_lock); c 208 drivers/md/bcache/movinggc.c for_each_cache(ca, c, i) { c 243 drivers/md/bcache/movinggc.c mutex_unlock(&c->bucket_lock); c 245 drivers/md/bcache/movinggc.c c->moving_gc_keys.last_scanned = ZERO_KEY; c 247 drivers/md/bcache/movinggc.c read_moving(c); c 250 drivers/md/bcache/movinggc.c void bch_moving_init_cache_set(struct cache_set *c) c 252 drivers/md/bcache/movinggc.c bch_keybuf_init(&c->moving_gc_keys); c 253 drivers/md/bcache/movinggc.c sema_init(&c->moving_in_flight, 64); c 78 drivers/md/bcache/request.c journal_ref = bch_journal(op->c, &op->insert_keys, c 81 drivers/md/bcache/request.c ret = bch_btree_insert(op->c, &op->insert_keys, c 103 drivers/md/bcache/request.c struct cache_set *c) c 114 drivers/md/bcache/request.c if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset)) c 132 drivers/md/bcache/request.c if (bch_keylist_realloc(&op->insert_keys, 2, op->c)) c 196 drivers/md/bcache/request.c bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache"); c 207 drivers/md/bcache/request.c if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) c 208 drivers/md/bcache/request.c wake_up_gc(op->c); c 219 drivers/md/bcache/request.c struct bio_set *split = &op->c->bio_split; c 224 drivers/md/bcache/request.c op->c)) { c 234 drivers/md/bcache/request.c if (!bch_alloc_sectors(op->c, k, bio_sectors(bio), c 248 drivers/md/bcache/request.c SET_GC_MARK(PTR_BUCKET(op->c, k, i), c 260 drivers/md/bcache/request.c bch_submit_bbio(n, op->c, k, 0); c 324 drivers/md/bcache/request.c trace_bcache_write(op->c, op->inode, op->bio, c 336 drivers/md/bcache/request.c unsigned int bch_get_congested(const struct cache_set *c) c 340 drivers/md/bcache/request.c if (!c->congested_read_threshold_us && c 341 drivers/md/bcache/request.c !c->congested_write_threshold_us) c 344 drivers/md/bcache/request.c i = (local_clock_us() - c->congested_last_us) / 1024; c 348 drivers/md/bcache/request.c i += atomic_read(&c->congested); c 377 drivers/md/bcache/request.c struct cache_set *c = dc->disk.c; c 384 drivers/md/bcache/request.c c->gc_stats.in_use > CUTOFF_CACHE_ADD || c 409 drivers/md/bcache/request.c if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) || c 410 drivers/md/bcache/request.c bio_sectors(bio) & (c->sb.block_size - 1)) { c 422 drivers/md/bcache/request.c congested = bch_get_congested(c); c 466 drivers/md/bcache/request.c bch_rescale_priorities(c, 
bio_sectors(bio)); c 469 drivers/md/bcache/request.c bch_mark_sectors_bypassed(c, dc, bio_sectors(bio)); c 512 drivers/md/bcache/request.c ptr_stale(s->iop.c, &b->key, 0)) { c 513 drivers/md/bcache/request.c atomic_long_inc(&s->iop.c->cache_read_races); c 517 drivers/md/bcache/request.c bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache"); c 556 drivers/md/bcache/request.c PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO; c 585 drivers/md/bcache/request.c __bch_submit_bbio(n, b->c); c 598 drivers/md/bcache/request.c ret = bch_btree_map_keys(&s->op, s->iop.c, c 617 drivers/md/bcache/request.c if (s->d && s->d->c && c 618 drivers/md/bcache/request.c !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) { c 717 drivers/md/bcache/request.c atomic_dec(&s->iop.c->search_inflight); c 724 drivers/md/bcache/request.c mempool_free(s, &s->iop.c->search); c 732 drivers/md/bcache/request.c s = mempool_alloc(&d->c->search, GFP_NOIO); c 736 drivers/md/bcache/request.c atomic_inc(&d->c->search_inflight); c 747 drivers/md/bcache/request.c s->iop.c = d->c; c 778 drivers/md/bcache/request.c bch_mark_cache_miss_collision(s->iop.c, s->d); c 808 drivers/md/bcache/request.c closure_bio_submit(s->iop.c, bio, cl); c 820 drivers/md/bcache/request.c bch_mark_cache_miss_collision(s->iop.c, s->d); c 863 drivers/md/bcache/request.c !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) { c 876 drivers/md/bcache/request.c bch_mark_cache_accounting(s->iop.c, s->d, c 906 drivers/md/bcache/request.c s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA) c 945 drivers/md/bcache/request.c bch_mark_cache_readahead(s->iop.c, s->d); c 951 drivers/md/bcache/request.c closure_bio_submit(s->iop.c, cache_bio, &s->cl); c 960 drivers/md/bcache/request.c closure_bio_submit(s->iop.c, miss, &s->cl); c 990 drivers/md/bcache/request.c bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end); c 1029 drivers/md/bcache/request.c closure_bio_submit(s->iop.c, bio, cl); c 1053 drivers/md/bcache/request.c closure_bio_submit(s->iop.c, flush, cl); c 1059 drivers/md/bcache/request.c closure_bio_submit(s->iop.c, bio, cl); c 1073 drivers/md/bcache/request.c bch_journal_meta(s->iop.c, cl); c 1077 drivers/md/bcache/request.c closure_bio_submit(s->iop.c, bio, cl); c 1136 drivers/md/bcache/request.c static void quit_max_writeback_rate(struct cache_set *c, c 1153 drivers/md/bcache/request.c for (i = 0; i < c->devices_max_used; i++) { c 1154 drivers/md/bcache/request.c if (!c->devices[i]) c 1157 drivers/md/bcache/request.c if (UUID_FLASH_ONLY(&c->uuids[i])) c 1160 drivers/md/bcache/request.c d = c->devices[i]; c 1184 drivers/md/bcache/request.c if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) || c 1191 drivers/md/bcache/request.c if (likely(d->c)) { c 1192 drivers/md/bcache/request.c if (atomic_read(&d->c->idle_counter)) c 1193 drivers/md/bcache/request.c atomic_set(&d->c->idle_counter, 0); c 1200 drivers/md/bcache/request.c if (unlikely(atomic_read(&d->c->at_max_writeback_rate) == 1)) { c 1201 drivers/md/bcache/request.c atomic_set(&d->c->at_max_writeback_rate, 0); c 1202 drivers/md/bcache/request.c quit_max_writeback_rate(d->c, dc); c 1266 drivers/md/bcache/request.c for_each_cache(ca, d->c, i) { c 1311 drivers/md/bcache/request.c bch_journal_meta(s->iop.c, cl); c 1323 drivers/md/bcache/request.c if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) { c 1347 drivers/md/bcache/request.c bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, c 1378 drivers/md/bcache/request.c for_each_cache(ca, d->c, i) { c 7 
drivers/md/bcache/request.h struct cache_set *c; c 36 drivers/md/bcache/request.h unsigned int bch_get_congested(const struct cache_set *c); c 203 drivers/md/bcache/stats.c void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d, c 209 drivers/md/bcache/stats.c mark_cache_stats(&c->accounting.collector, hit, bypass); c 212 drivers/md/bcache/stats.c void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d) c 217 drivers/md/bcache/stats.c atomic_inc(&c->accounting.collector.cache_readaheads); c 220 drivers/md/bcache/stats.c void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d) c 225 drivers/md/bcache/stats.c atomic_inc(&c->accounting.collector.cache_miss_collisions); c 228 drivers/md/bcache/stats.c void bch_mark_sectors_bypassed(struct cache_set *c, struct cached_dev *dc, c 232 drivers/md/bcache/stats.c atomic_add(sectors, &c->accounting.collector.sectors_bypassed); c 56 drivers/md/bcache/stats.h void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d, c 58 drivers/md/bcache/stats.h void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d); c 59 drivers/md/bcache/stats.h void bch_mark_cache_miss_collision(struct cache_set *c, c 61 drivers/md/bcache/stats.h void bch_mark_sectors_bypassed(struct cache_set *c, c 283 drivers/md/bcache/super.c struct cache_set *c = container_of(cl, struct cache_set, sb_write); c 285 drivers/md/bcache/super.c up(&c->sb_write_mutex); c 288 drivers/md/bcache/super.c void bcache_write_super(struct cache_set *c) c 290 drivers/md/bcache/super.c struct closure *cl = &c->sb_write; c 294 drivers/md/bcache/super.c down(&c->sb_write_mutex); c 295 drivers/md/bcache/super.c closure_init(cl, &c->cl); c 297 drivers/md/bcache/super.c c->sb.seq++; c 299 drivers/md/bcache/super.c for_each_cache(ca, c, i) { c 303 drivers/md/bcache/super.c ca->sb.seq = c->sb.seq; c 304 drivers/md/bcache/super.c ca->sb.last_mount = c->sb.last_mount; c 306 drivers/md/bcache/super.c SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb)); c 325 drivers/md/bcache/super.c struct cache_set *c = container_of(cl, struct cache_set, uuid_write); c 327 drivers/md/bcache/super.c cache_set_err_on(bio->bi_status, c, "accessing uuids"); c 328 drivers/md/bcache/super.c bch_bbio_free(bio, c); c 334 drivers/md/bcache/super.c struct cache_set *c = container_of(cl, struct cache_set, uuid_write); c 336 drivers/md/bcache/super.c up(&c->uuid_write_mutex); c 339 drivers/md/bcache/super.c static void uuid_io(struct cache_set *c, int op, unsigned long op_flags, c 342 drivers/md/bcache/super.c struct closure *cl = &c->uuid_write; c 348 drivers/md/bcache/super.c down(&c->uuid_write_mutex); c 352 drivers/md/bcache/super.c struct bio *bio = bch_bbio_alloc(c); c 360 drivers/md/bcache/super.c bch_bio_map(bio, c->uuids); c 362 drivers/md/bcache/super.c bch_submit_bbio(bio, c, k, i); c 371 drivers/md/bcache/super.c for (u = c->uuids; u < c->uuids + c->nr_uuids; u++) c 374 drivers/md/bcache/super.c u - c->uuids, u->uuid, u->label, c 380 drivers/md/bcache/super.c static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl) c 384 drivers/md/bcache/super.c if (__bch_btree_ptr_invalid(c, k)) c 387 drivers/md/bcache/super.c bkey_copy(&c->uuid_bucket, k); c 388 drivers/md/bcache/super.c uuid_io(c, REQ_OP_READ, 0, k, cl); c 391 drivers/md/bcache/super.c struct uuid_entry_v0 *u0 = (void *) c->uuids; c 392 drivers/md/bcache/super.c struct uuid_entry *u1 = (void *) c->uuids; c 403 drivers/md/bcache/super.c for (i = c->nr_uuids - 1; c 
421 drivers/md/bcache/super.c static int __uuid_write(struct cache_set *c) c 430 drivers/md/bcache/super.c if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true)) c 433 drivers/md/bcache/super.c SET_KEY_SIZE(&k.key, c->sb.bucket_size); c 434 drivers/md/bcache/super.c uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl); c 438 drivers/md/bcache/super.c ca = PTR_CACHE(c, &k.key, 0); c 441 drivers/md/bcache/super.c bkey_copy(&c->uuid_bucket, &k.key); c 442 drivers/md/bcache/super.c bkey_put(c, &k.key); c 446 drivers/md/bcache/super.c int bch_uuid_write(struct cache_set *c) c 448 drivers/md/bcache/super.c int ret = __uuid_write(c); c 451 drivers/md/bcache/super.c bch_journal_meta(c, NULL); c 456 drivers/md/bcache/super.c static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid) c 460 drivers/md/bcache/super.c for (u = c->uuids; c 461 drivers/md/bcache/super.c u < c->uuids + c->nr_uuids; u++) c 468 drivers/md/bcache/super.c static struct uuid_entry *uuid_find_empty(struct cache_set *c) c 472 drivers/md/bcache/super.c return uuid_find(c, zero_uuid); c 696 drivers/md/bcache/super.c if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) { c 700 drivers/md/bcache/super.c sysfs_remove_link(&d->c->kobj, d->name); c 703 drivers/md/bcache/super.c for_each_cache(ca, d->c, i) c 708 drivers/md/bcache/super.c static void bcache_device_link(struct bcache_device *d, struct cache_set *c, c 715 drivers/md/bcache/super.c for_each_cache(ca, d->c, i) c 721 drivers/md/bcache/super.c ret = sysfs_create_link(&d->kobj, &c->kobj, "cache"); c 725 drivers/md/bcache/super.c ret = sysfs_create_link(&c->kobj, &d->kobj, d->name); c 736 drivers/md/bcache/super.c atomic_dec(&d->c->attached_dev_nr); c 739 drivers/md/bcache/super.c struct uuid_entry *u = d->c->uuids + d->id; c 744 drivers/md/bcache/super.c bch_uuid_write(d->c); c 749 drivers/md/bcache/super.c d->c->devices[d->id] = NULL; c 750 drivers/md/bcache/super.c closure_put(&d->c->caching); c 751 drivers/md/bcache/super.c d->c = NULL; c 754 drivers/md/bcache/super.c static void bcache_device_attach(struct bcache_device *d, struct cache_set *c, c 758 drivers/md/bcache/super.c d->c = c; c 759 drivers/md/bcache/super.c c->devices[id] = d; c 761 drivers/md/bcache/super.c if (id >= c->devices_max_used) c 762 drivers/md/bcache/super.c c->devices_max_used = id + 1; c 764 drivers/md/bcache/super.c closure_get(&c->caching); c 788 drivers/md/bcache/super.c if (d->c) c 894 drivers/md/bcache/super.c static void calc_cached_dev_sectors(struct cache_set *c) c 899 drivers/md/bcache/super.c list_for_each_entry(dc, &c->cached_devs, list) c 902 drivers/md/bcache/super.c c->cached_dev_sectors = sectors; c 972 drivers/md/bcache/super.c if (!d->c && c 1064 drivers/md/bcache/super.c calc_cached_dev_sectors(dc->disk.c); c 1100 drivers/md/bcache/super.c int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, c 1108 drivers/md/bcache/super.c if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) || c 1109 drivers/md/bcache/super.c (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))) c 1112 drivers/md/bcache/super.c if (dc->disk.c) { c 1118 drivers/md/bcache/super.c if (test_bit(CACHE_SET_STOPPING, &c->flags)) { c 1124 drivers/md/bcache/super.c if (dc->sb.block_size < c->sb.block_size) { c 1132 drivers/md/bcache/super.c list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) { c 1141 drivers/md/bcache/super.c u = uuid_find(c, dc->sb.uuid); c 1158 drivers/md/bcache/super.c u = uuid_find_empty(c); c 1179 drivers/md/bcache/super.c bch_uuid_write(c); c 
1181 drivers/md/bcache/super.c memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16); c 1188 drivers/md/bcache/super.c bch_uuid_write(c); c 1191 drivers/md/bcache/super.c bcache_device_attach(&dc->disk, c, u - c->uuids); c 1192 drivers/md/bcache/super.c list_move(&dc->list, &c->cached_devs); c 1193 drivers/md/bcache/super.c calc_cached_dev_sectors(c); c 1234 drivers/md/bcache/super.c bcache_device_link(&dc->disk, c, "bdev"); c 1235 drivers/md/bcache/super.c atomic_inc(&c->attached_dev_nr); c 1243 drivers/md/bcache/super.c dc->disk.c->sb.set_uuid); c 1360 drivers/md/bcache/super.c struct cache_set *c; c 1387 drivers/md/bcache/super.c list_for_each_entry(c, &bch_cache_sets, list) c 1388 drivers/md/bcache/super.c bch_cached_dev_attach(dc, c, NULL); c 1421 drivers/md/bcache/super.c &d->c->flash_dev_dirty_sectors); c 1438 drivers/md/bcache/super.c static int flash_dev_run(struct cache_set *c, struct uuid_entry *u) c 1450 drivers/md/bcache/super.c if (bcache_device_init(d, block_bytes(c), u->sectors)) c 1453 drivers/md/bcache/super.c bcache_device_attach(d, c, u - c->uuids); c 1461 drivers/md/bcache/super.c bcache_device_link(d, c, "volume"); c 1469 drivers/md/bcache/super.c static int flash_devs_run(struct cache_set *c) c 1474 drivers/md/bcache/super.c for (u = c->uuids; c 1475 drivers/md/bcache/super.c u < c->uuids + c->nr_uuids && !ret; c 1478 drivers/md/bcache/super.c ret = flash_dev_run(c, u); c 1483 drivers/md/bcache/super.c int bch_flash_dev_create(struct cache_set *c, uint64_t size) c 1487 drivers/md/bcache/super.c if (test_bit(CACHE_SET_STOPPING, &c->flags)) c 1490 drivers/md/bcache/super.c if (!test_bit(CACHE_SET_RUNNING, &c->flags)) c 1493 drivers/md/bcache/super.c u = uuid_find_empty(c); c 1506 drivers/md/bcache/super.c bch_uuid_write(c); c 1508 drivers/md/bcache/super.c return flash_dev_run(c, u); c 1530 drivers/md/bcache/super.c bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...) 
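The super.c entries just above show uuid_find() walking the c->uuids array with memcmp() and uuid_find_empty() reusing it with an all-zero UUID to locate a free slot. Below is a small self-contained sketch of that linear-scan pattern only; the four-entry table, the sample contents, and the reduced struct uuid_entry (just the 16-byte uuid field) are assumptions made for the illustration, not details taken from bcache.

#include <stdio.h>
#include <string.h>

/* Reduced stand-in for struct uuid_entry: only the UUID matters here. */
struct uuid_entry {
	char uuid[16];
};

#define NR_UUIDS 4	/* illustrative table size */

/* Linear scan for a matching UUID (cf. uuid_find in super.c). */
static struct uuid_entry *uuid_find(struct uuid_entry *tbl, const char *uuid)
{
	struct uuid_entry *u;

	for (u = tbl; u < tbl + NR_UUIDS; u++)
		if (!memcmp(u->uuid, uuid, 16))
			return u;
	return NULL;
}

/* A free slot is one whose UUID is all zeroes (cf. uuid_find_empty). */
static struct uuid_entry *uuid_find_empty(struct uuid_entry *tbl)
{
	static const char zero_uuid[16];

	return uuid_find(tbl, zero_uuid);
}

int main(void)
{
	struct uuid_entry tbl[NR_UUIDS] = { 0 };

	memset(tbl[0].uuid, 0xab, 16);	/* pretend slot 0 is already in use */

	printf("first free slot: %td\n", uuid_find_empty(tbl) - tbl);	/* prints 1 */
	printf("slot holding the 0xab UUID: %td\n",
	       uuid_find(tbl, tbl[0].uuid) - tbl);	/* prints 0 */
	return 0;
}

The same scan serves both lookups: searching for a device's own UUID first and then for an all-zero slot is consistent with the uuid_find()/uuid_find_empty() call sites visible in the bch_cached_dev_attach() entries above.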
c 1534 drivers/md/bcache/super.c if (c->on_error != ON_ERROR_PANIC && c 1535 drivers/md/bcache/super.c test_bit(CACHE_SET_STOPPING, &c->flags)) c 1538 drivers/md/bcache/super.c if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags)) c 1546 drivers/md/bcache/super.c pr_err("bcache: error on %pU: ", c->sb.set_uuid); c 1554 drivers/md/bcache/super.c if (c->on_error == ON_ERROR_PANIC) c 1557 drivers/md/bcache/super.c bch_cache_set_unregister(c); c 1564 drivers/md/bcache/super.c struct cache_set *c = container_of(kobj, struct cache_set, kobj); c 1566 drivers/md/bcache/super.c kfree(c); c 1572 drivers/md/bcache/super.c struct cache_set *c = container_of(cl, struct cache_set, cl); c 1576 drivers/md/bcache/super.c debugfs_remove(c->debug); c 1578 drivers/md/bcache/super.c bch_open_buckets_free(c); c 1579 drivers/md/bcache/super.c bch_btree_cache_free(c); c 1580 drivers/md/bcache/super.c bch_journal_free(c); c 1583 drivers/md/bcache/super.c for_each_cache(ca, c, i) c 1586 drivers/md/bcache/super.c c->cache[ca->sb.nr_this_dev] = NULL; c 1590 drivers/md/bcache/super.c bch_bset_sort_state_free(&c->sort); c 1591 drivers/md/bcache/super.c free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c))); c 1593 drivers/md/bcache/super.c if (c->moving_gc_wq) c 1594 drivers/md/bcache/super.c destroy_workqueue(c->moving_gc_wq); c 1595 drivers/md/bcache/super.c bioset_exit(&c->bio_split); c 1596 drivers/md/bcache/super.c mempool_exit(&c->fill_iter); c 1597 drivers/md/bcache/super.c mempool_exit(&c->bio_meta); c 1598 drivers/md/bcache/super.c mempool_exit(&c->search); c 1599 drivers/md/bcache/super.c kfree(c->devices); c 1601 drivers/md/bcache/super.c list_del(&c->list); c 1604 drivers/md/bcache/super.c pr_info("Cache set %pU unregistered", c->sb.set_uuid); c 1607 drivers/md/bcache/super.c closure_debug_destroy(&c->cl); c 1608 drivers/md/bcache/super.c kobject_put(&c->kobj); c 1613 drivers/md/bcache/super.c struct cache_set *c = container_of(cl, struct cache_set, caching); c 1618 drivers/md/bcache/super.c bch_cache_accounting_destroy(&c->accounting); c 1620 drivers/md/bcache/super.c kobject_put(&c->internal); c 1621 drivers/md/bcache/super.c kobject_del(&c->kobj); c 1623 drivers/md/bcache/super.c if (!IS_ERR_OR_NULL(c->gc_thread)) c 1624 drivers/md/bcache/super.c kthread_stop(c->gc_thread); c 1626 drivers/md/bcache/super.c if (!IS_ERR_OR_NULL(c->root)) c 1627 drivers/md/bcache/super.c list_add(&c->root->list, &c->btree_cache); c 1633 drivers/md/bcache/super.c if (!test_bit(CACHE_SET_IO_DISABLE, &c->flags)) c 1634 drivers/md/bcache/super.c list_for_each_entry(b, &c->btree_cache, list) { c 1641 drivers/md/bcache/super.c for_each_cache(ca, c, i) c 1645 drivers/md/bcache/super.c if (c->journal.cur) { c 1646 drivers/md/bcache/super.c cancel_delayed_work_sync(&c->journal.work); c 1648 drivers/md/bcache/super.c c->journal.work.work.func(&c->journal.work.work); c 1670 drivers/md/bcache/super.c static void conditional_stop_bcache_device(struct cache_set *c, c 1676 drivers/md/bcache/super.c d->disk->disk_name, c->sb.set_uuid); c 1712 drivers/md/bcache/super.c struct cache_set *c = container_of(cl, struct cache_set, caching); c 1719 drivers/md/bcache/super.c for (i = 0; i < c->devices_max_used; i++) { c 1720 drivers/md/bcache/super.c d = c->devices[i]; c 1724 drivers/md/bcache/super.c if (!UUID_FLASH_ONLY(&c->uuids[i]) && c 1725 drivers/md/bcache/super.c test_bit(CACHE_SET_UNREGISTERING, &c->flags)) { c 1728 drivers/md/bcache/super.c if (test_bit(CACHE_SET_IO_DISABLE, &c->flags)) c 1729 drivers/md/bcache/super.c 
conditional_stop_bcache_device(c, d, dc); c 1740 drivers/md/bcache/super.c void bch_cache_set_stop(struct cache_set *c) c 1742 drivers/md/bcache/super.c if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags)) c 1744 drivers/md/bcache/super.c closure_queue(&c->caching); c 1747 drivers/md/bcache/super.c void bch_cache_set_unregister(struct cache_set *c) c 1749 drivers/md/bcache/super.c set_bit(CACHE_SET_UNREGISTERING, &c->flags); c 1750 drivers/md/bcache/super.c bch_cache_set_stop(c); c 1753 drivers/md/bcache/super.c #define alloc_bucket_pages(gfp, c) \ c 1754 drivers/md/bcache/super.c ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c)))) c 1759 drivers/md/bcache/super.c struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL); c 1761 drivers/md/bcache/super.c if (!c) c 1765 drivers/md/bcache/super.c closure_init(&c->cl, NULL); c 1766 drivers/md/bcache/super.c set_closure_fn(&c->cl, cache_set_free, system_wq); c 1768 drivers/md/bcache/super.c closure_init(&c->caching, &c->cl); c 1769 drivers/md/bcache/super.c set_closure_fn(&c->caching, __cache_set_unregister, system_wq); c 1772 drivers/md/bcache/super.c closure_set_stopped(&c->cl); c 1773 drivers/md/bcache/super.c closure_put(&c->cl); c 1775 drivers/md/bcache/super.c kobject_init(&c->kobj, &bch_cache_set_ktype); c 1776 drivers/md/bcache/super.c kobject_init(&c->internal, &bch_cache_set_internal_ktype); c 1778 drivers/md/bcache/super.c bch_cache_accounting_init(&c->accounting, &c->cl); c 1780 drivers/md/bcache/super.c memcpy(c->sb.set_uuid, sb->set_uuid, 16); c 1781 drivers/md/bcache/super.c c->sb.block_size = sb->block_size; c 1782 drivers/md/bcache/super.c c->sb.bucket_size = sb->bucket_size; c 1783 drivers/md/bcache/super.c c->sb.nr_in_set = sb->nr_in_set; c 1784 drivers/md/bcache/super.c c->sb.last_mount = sb->last_mount; c 1785 drivers/md/bcache/super.c c->bucket_bits = ilog2(sb->bucket_size); c 1786 drivers/md/bcache/super.c c->block_bits = ilog2(sb->block_size); c 1787 drivers/md/bcache/super.c c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry); c 1788 drivers/md/bcache/super.c c->devices_max_used = 0; c 1789 drivers/md/bcache/super.c atomic_set(&c->attached_dev_nr, 0); c 1790 drivers/md/bcache/super.c c->btree_pages = bucket_pages(c); c 1791 drivers/md/bcache/super.c if (c->btree_pages > BTREE_MAX_PAGES) c 1792 drivers/md/bcache/super.c c->btree_pages = max_t(int, c->btree_pages / 4, c 1795 drivers/md/bcache/super.c sema_init(&c->sb_write_mutex, 1); c 1796 drivers/md/bcache/super.c mutex_init(&c->bucket_lock); c 1797 drivers/md/bcache/super.c init_waitqueue_head(&c->btree_cache_wait); c 1798 drivers/md/bcache/super.c init_waitqueue_head(&c->bucket_wait); c 1799 drivers/md/bcache/super.c init_waitqueue_head(&c->gc_wait); c 1800 drivers/md/bcache/super.c sema_init(&c->uuid_write_mutex, 1); c 1802 drivers/md/bcache/super.c spin_lock_init(&c->btree_gc_time.lock); c 1803 drivers/md/bcache/super.c spin_lock_init(&c->btree_split_time.lock); c 1804 drivers/md/bcache/super.c spin_lock_init(&c->btree_read_time.lock); c 1806 drivers/md/bcache/super.c bch_moving_init_cache_set(c); c 1808 drivers/md/bcache/super.c INIT_LIST_HEAD(&c->list); c 1809 drivers/md/bcache/super.c INIT_LIST_HEAD(&c->cached_devs); c 1810 drivers/md/bcache/super.c INIT_LIST_HEAD(&c->btree_cache); c 1811 drivers/md/bcache/super.c INIT_LIST_HEAD(&c->btree_cache_freeable); c 1812 drivers/md/bcache/super.c INIT_LIST_HEAD(&c->btree_cache_freed); c 1813 drivers/md/bcache/super.c INIT_LIST_HEAD(&c->data_buckets); c 1818 drivers/md/bcache/super.c if 
(!(c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL)) || c 1819 drivers/md/bcache/super.c mempool_init_slab_pool(&c->search, 32, bch_search_cache) || c 1820 drivers/md/bcache/super.c mempool_init_kmalloc_pool(&c->bio_meta, 2, c 1822 drivers/md/bcache/super.c bucket_pages(c)) || c 1823 drivers/md/bcache/super.c mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) || c 1824 drivers/md/bcache/super.c bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio), c 1826 drivers/md/bcache/super.c !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) || c 1827 drivers/md/bcache/super.c !(c->moving_gc_wq = alloc_workqueue("bcache_gc", c 1829 drivers/md/bcache/super.c bch_journal_alloc(c) || c 1830 drivers/md/bcache/super.c bch_btree_cache_alloc(c) || c 1831 drivers/md/bcache/super.c bch_open_buckets_alloc(c) || c 1832 drivers/md/bcache/super.c bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages))) c 1835 drivers/md/bcache/super.c c->congested_read_threshold_us = 2000; c 1836 drivers/md/bcache/super.c c->congested_write_threshold_us = 20000; c 1837 drivers/md/bcache/super.c c->error_limit = DEFAULT_IO_ERROR_LIMIT; c 1838 drivers/md/bcache/super.c WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE, &c->flags)); c 1840 drivers/md/bcache/super.c return c; c 1842 drivers/md/bcache/super.c bch_cache_set_unregister(c); c 1846 drivers/md/bcache/super.c static int run_cache_set(struct cache_set *c) c 1858 drivers/md/bcache/super.c for_each_cache(ca, c, i) c 1859 drivers/md/bcache/super.c c->nbuckets += ca->sb.nbuckets; c 1860 drivers/md/bcache/super.c set_gc_sectors(c); c 1862 drivers/md/bcache/super.c if (CACHE_SYNC(&c->sb)) { c 1867 drivers/md/bcache/super.c if (bch_journal_read(c, &journal)) c 1879 drivers/md/bcache/super.c for_each_cache(ca, c, i) c 1891 drivers/md/bcache/super.c if (__bch_btree_ptr_invalid(c, k)) c 1895 drivers/md/bcache/super.c c->root = bch_btree_node_get(c, NULL, k, c 1898 drivers/md/bcache/super.c if (IS_ERR_OR_NULL(c->root)) c 1901 drivers/md/bcache/super.c list_del_init(&c->root->list); c 1902 drivers/md/bcache/super.c rw_unlock(true, c->root); c 1904 drivers/md/bcache/super.c err = uuid_read(c, j, &cl); c 1909 drivers/md/bcache/super.c if (bch_btree_check(c)) c 1918 drivers/md/bcache/super.c if (!c->shrinker_disabled) { c 1922 drivers/md/bcache/super.c sc.nr_to_scan = c->btree_cache_used * c->btree_pages; c 1924 drivers/md/bcache/super.c c->shrink.scan_objects(&c->shrink, &sc); c 1926 drivers/md/bcache/super.c c->shrink.scan_objects(&c->shrink, &sc); c 1929 drivers/md/bcache/super.c bch_journal_mark(c, &journal); c 1930 drivers/md/bcache/super.c bch_initial_gc_finish(c); c 1938 drivers/md/bcache/super.c bch_journal_next(&c->journal); c 1941 drivers/md/bcache/super.c for_each_cache(ca, c, i) c 1956 drivers/md/bcache/super.c __uuid_write(c); c 1959 drivers/md/bcache/super.c if (bch_journal_replay(c, &journal)) c 1964 drivers/md/bcache/super.c for_each_cache(ca, c, i) { c 1974 drivers/md/bcache/super.c bch_initial_gc_finish(c); c 1977 drivers/md/bcache/super.c for_each_cache(ca, c, i) c 1981 drivers/md/bcache/super.c mutex_lock(&c->bucket_lock); c 1982 drivers/md/bcache/super.c for_each_cache(ca, c, i) c 1984 drivers/md/bcache/super.c mutex_unlock(&c->bucket_lock); c 1987 drivers/md/bcache/super.c if (__uuid_write(c)) c 1991 drivers/md/bcache/super.c c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL); c 1992 drivers/md/bcache/super.c if (IS_ERR_OR_NULL(c->root)) c 1995 drivers/md/bcache/super.c mutex_lock(&c->root->write_lock); c 1996 drivers/md/bcache/super.c 
bkey_copy_key(&c->root->key, &MAX_KEY); c 1997 drivers/md/bcache/super.c bch_btree_node_write(c->root, &cl); c 1998 drivers/md/bcache/super.c mutex_unlock(&c->root->write_lock); c 2000 drivers/md/bcache/super.c bch_btree_set_root(c->root); c 2001 drivers/md/bcache/super.c rw_unlock(true, c->root); c 2008 drivers/md/bcache/super.c SET_CACHE_SYNC(&c->sb, true); c 2010 drivers/md/bcache/super.c bch_journal_next(&c->journal); c 2011 drivers/md/bcache/super.c bch_journal_meta(c, &cl); c 2015 drivers/md/bcache/super.c if (bch_gc_thread_start(c)) c 2019 drivers/md/bcache/super.c c->sb.last_mount = (u32)ktime_get_real_seconds(); c 2020 drivers/md/bcache/super.c bcache_write_super(c); c 2023 drivers/md/bcache/super.c bch_cached_dev_attach(dc, c, NULL); c 2025 drivers/md/bcache/super.c flash_devs_run(c); c 2027 drivers/md/bcache/super.c set_bit(CACHE_SET_RUNNING, &c->flags); c 2038 drivers/md/bcache/super.c bch_cache_set_error(c, "%s", err); c 2043 drivers/md/bcache/super.c static bool can_attach_cache(struct cache *ca, struct cache_set *c) c 2045 drivers/md/bcache/super.c return ca->sb.block_size == c->sb.block_size && c 2046 drivers/md/bcache/super.c ca->sb.bucket_size == c->sb.bucket_size && c 2047 drivers/md/bcache/super.c ca->sb.nr_in_set == c->sb.nr_in_set; c 2054 drivers/md/bcache/super.c struct cache_set *c; c 2056 drivers/md/bcache/super.c list_for_each_entry(c, &bch_cache_sets, list) c 2057 drivers/md/bcache/super.c if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) { c 2058 drivers/md/bcache/super.c if (c->cache[ca->sb.nr_this_dev]) c 2061 drivers/md/bcache/super.c if (!can_attach_cache(ca, c)) c 2065 drivers/md/bcache/super.c SET_CACHE_SYNC(&c->sb, false); c 2070 drivers/md/bcache/super.c c = bch_cache_set_alloc(&ca->sb); c 2071 drivers/md/bcache/super.c if (!c) c 2075 drivers/md/bcache/super.c if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) || c 2076 drivers/md/bcache/super.c kobject_add(&c->internal, &c->kobj, "internal")) c 2079 drivers/md/bcache/super.c if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj)) c 2082 drivers/md/bcache/super.c bch_debug_init_cache_set(c); c 2084 drivers/md/bcache/super.c list_add(&c->list, &bch_cache_sets); c 2087 drivers/md/bcache/super.c if (sysfs_create_link(&ca->kobj, &c->kobj, "set") || c 2088 drivers/md/bcache/super.c sysfs_create_link(&c->kobj, &ca->kobj, buf)) c 2091 drivers/md/bcache/super.c if (ca->sb.seq > c->sb.seq) { c 2092 drivers/md/bcache/super.c c->sb.version = ca->sb.version; c 2093 drivers/md/bcache/super.c memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16); c 2094 drivers/md/bcache/super.c c->sb.flags = ca->sb.flags; c 2095 drivers/md/bcache/super.c c->sb.seq = ca->sb.seq; c 2096 drivers/md/bcache/super.c pr_debug("set version = %llu", c->sb.version); c 2100 drivers/md/bcache/super.c ca->set = c; c 2102 drivers/md/bcache/super.c c->cache_by_alloc[c->caches_loaded++] = ca; c 2104 drivers/md/bcache/super.c if (c->caches_loaded == c->sb.nr_in_set) { c 2106 drivers/md/bcache/super.c if (run_cache_set(c) < 0) c 2112 drivers/md/bcache/super.c bch_cache_set_unregister(c); c 2341 drivers/md/bcache/super.c struct cache_set *c, *tc; c 2344 drivers/md/bcache/super.c list_for_each_entry_safe(c, tc, &bch_cache_sets, list) c 2345 drivers/md/bcache/super.c list_for_each_entry_safe(dc, t, &c->cached_devs, list) c 2356 drivers/md/bcache/super.c struct cache_set *c, *tc; c 2360 drivers/md/bcache/super.c list_for_each_entry_safe(c, tc, &bch_cache_sets, list) c 2361 drivers/md/bcache/super.c for_each_cache(ca, c, i) c 2500 
drivers/md/bcache/super.c struct cache_set *c, *tc; c 2512 drivers/md/bcache/super.c list_for_each_entry_safe(c, tc, &bch_cache_sets, list) { c 2514 drivers/md/bcache/super.c char *set_uuid = c->sb.uuid; c 2547 drivers/md/bcache/super.c struct cache_set *c, *tc; c 2585 drivers/md/bcache/super.c list_for_each_entry_safe(c, tc, &bch_cache_sets, list) c 2586 drivers/md/bcache/super.c bch_cache_set_stop(c); c 285 drivers/md/bcache/sysfs.c struct cache_set *c; c 393 drivers/md/bcache/sysfs.c if (dc->disk.c) { c 394 drivers/md/bcache/sysfs.c memcpy(dc->disk.c->uuids[dc->disk.id].label, c 396 drivers/md/bcache/sysfs.c bch_uuid_write(dc->disk.c); c 417 drivers/md/bcache/sysfs.c list_for_each_entry(c, &bch_cache_sets, list) { c 418 drivers/md/bcache/sysfs.c v = bch_cached_dev_attach(dc, c, set_uuid); c 427 drivers/md/bcache/sysfs.c if (attr == &sysfs_detach && dc->disk.c) c 473 drivers/md/bcache/sysfs.c if ((dc->disk.c != NULL) && c 528 drivers/md/bcache/sysfs.c struct uuid_entry *u = &d->c->uuids[d->id]; c 547 drivers/md/bcache/sysfs.c struct uuid_entry *u = &d->c->uuids[d->id]; c 561 drivers/md/bcache/sysfs.c bch_uuid_write(d->c); c 567 drivers/md/bcache/sysfs.c bch_uuid_write(d->c); c 606 drivers/md/bcache/sysfs.c static int bch_bset_print_stats(struct cache_set *c, char *buf) c 614 drivers/md/bcache/sysfs.c ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats); c 632 drivers/md/bcache/sysfs.c static unsigned int bch_root_usage(struct cache_set *c) c 644 drivers/md/bcache/sysfs.c b = c->root; c 646 drivers/md/bcache/sysfs.c } while (b != c->root); c 653 drivers/md/bcache/sysfs.c return (bytes * 100) / btree_bytes(c); c 656 drivers/md/bcache/sysfs.c static size_t bch_cache_size(struct cache_set *c) c 661 drivers/md/bcache/sysfs.c mutex_lock(&c->bucket_lock); c 662 drivers/md/bcache/sysfs.c list_for_each_entry(b, &c->btree_cache, list) c 665 drivers/md/bcache/sysfs.c mutex_unlock(&c->bucket_lock); c 669 drivers/md/bcache/sysfs.c static unsigned int bch_cache_max_chain(struct cache_set *c) c 674 drivers/md/bcache/sysfs.c mutex_lock(&c->bucket_lock); c 676 drivers/md/bcache/sysfs.c for (h = c->bucket_hash; c 677 drivers/md/bcache/sysfs.c h < c->bucket_hash + (1 << BUCKET_HASH_BITS); c 688 drivers/md/bcache/sysfs.c mutex_unlock(&c->bucket_lock); c 692 drivers/md/bcache/sysfs.c static unsigned int bch_btree_used(struct cache_set *c) c 694 drivers/md/bcache/sysfs.c return div64_u64(c->gc_stats.key_bytes * 100, c 695 drivers/md/bcache/sysfs.c (c->gc_stats.nodes ?: 1) * btree_bytes(c)); c 698 drivers/md/bcache/sysfs.c static unsigned int bch_average_key_size(struct cache_set *c) c 700 drivers/md/bcache/sysfs.c return c->gc_stats.nkeys c 701 drivers/md/bcache/sysfs.c ? 
div64_u64(c->gc_stats.data, c->gc_stats.nkeys) c 707 drivers/md/bcache/sysfs.c struct cache_set *c = container_of(kobj, struct cache_set, kobj); c 709 drivers/md/bcache/sysfs.c sysfs_print(synchronous, CACHE_SYNC(&c->sb)); c 710 drivers/md/bcache/sysfs.c sysfs_print(journal_delay_ms, c->journal_delay_ms); c 711 drivers/md/bcache/sysfs.c sysfs_hprint(bucket_size, bucket_bytes(c)); c 712 drivers/md/bcache/sysfs.c sysfs_hprint(block_size, block_bytes(c)); c 713 drivers/md/bcache/sysfs.c sysfs_print(tree_depth, c->root->level); c 714 drivers/md/bcache/sysfs.c sysfs_print(root_usage_percent, bch_root_usage(c)); c 716 drivers/md/bcache/sysfs.c sysfs_hprint(btree_cache_size, bch_cache_size(c)); c 717 drivers/md/bcache/sysfs.c sysfs_print(btree_cache_max_chain, bch_cache_max_chain(c)); c 718 drivers/md/bcache/sysfs.c sysfs_print(cache_available_percent, 100 - c->gc_stats.in_use); c 720 drivers/md/bcache/sysfs.c sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms); c 721 drivers/md/bcache/sysfs.c sysfs_print_time_stats(&c->btree_split_time, btree_split, sec, us); c 722 drivers/md/bcache/sysfs.c sysfs_print_time_stats(&c->sort.time, btree_sort, ms, us); c 723 drivers/md/bcache/sysfs.c sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us); c 725 drivers/md/bcache/sysfs.c sysfs_print(btree_used_percent, bch_btree_used(c)); c 726 drivers/md/bcache/sysfs.c sysfs_print(btree_nodes, c->gc_stats.nodes); c 727 drivers/md/bcache/sysfs.c sysfs_hprint(average_key_size, bch_average_key_size(c)); c 730 drivers/md/bcache/sysfs.c atomic_long_read(&c->cache_read_races)); c 733 drivers/md/bcache/sysfs.c atomic_long_read(&c->reclaim)); c 736 drivers/md/bcache/sysfs.c atomic_long_read(&c->reclaimed_journal_buckets)); c 739 drivers/md/bcache/sysfs.c atomic_long_read(&c->flush_write)); c 742 drivers/md/bcache/sysfs.c atomic_long_read(&c->writeback_keys_done)); c 744 drivers/md/bcache/sysfs.c atomic_long_read(&c->writeback_keys_failed)); c 748 drivers/md/bcache/sysfs.c c->on_error); c 751 drivers/md/bcache/sysfs.c sysfs_print(io_error_halflife, c->error_decay * 88); c 752 drivers/md/bcache/sysfs.c sysfs_print(io_error_limit, c->error_limit); c 755 drivers/md/bcache/sysfs.c ((uint64_t) bch_get_congested(c)) << 9); c 757 drivers/md/bcache/sysfs.c c->congested_read_threshold_us); c 759 drivers/md/bcache/sysfs.c c->congested_write_threshold_us); c 764 drivers/md/bcache/sysfs.c sysfs_print(active_journal_entries, fifo_used(&c->journal.pin)); c 765 drivers/md/bcache/sysfs.c sysfs_printf(verify, "%i", c->verify); c 766 drivers/md/bcache/sysfs.c sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled); c 768 drivers/md/bcache/sysfs.c "%i", c->expensive_debug_checks); c 769 drivers/md/bcache/sysfs.c sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite); c 770 drivers/md/bcache/sysfs.c sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled); c 771 drivers/md/bcache/sysfs.c sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled); c 772 drivers/md/bcache/sysfs.c sysfs_printf(gc_after_writeback, "%i", c->gc_after_writeback); c 774 drivers/md/bcache/sysfs.c test_bit(CACHE_SET_IO_DISABLE, &c->flags)); c 777 drivers/md/bcache/sysfs.c return bch_bset_print_stats(c, buf); c 785 drivers/md/bcache/sysfs.c struct cache_set *c = container_of(kobj, struct cache_set, kobj); c 793 drivers/md/bcache/sysfs.c bch_cache_set_unregister(c); c 796 drivers/md/bcache/sysfs.c bch_cache_set_stop(c); c 801 drivers/md/bcache/sysfs.c if (sync != CACHE_SYNC(&c->sb)) { c 802 drivers/md/bcache/sysfs.c 
SET_CACHE_SYNC(&c->sb, sync); c 803 drivers/md/bcache/sysfs.c bcache_write_super(c); c 813 drivers/md/bcache/sysfs.c r = bch_flash_dev_create(c, v); c 819 drivers/md/bcache/sysfs.c atomic_long_set(&c->writeback_keys_done, 0); c 820 drivers/md/bcache/sysfs.c atomic_long_set(&c->writeback_keys_failed, 0); c 822 drivers/md/bcache/sysfs.c memset(&c->gc_stats, 0, sizeof(struct gc_stat)); c 823 drivers/md/bcache/sysfs.c bch_cache_accounting_clear(&c->accounting); c 827 drivers/md/bcache/sysfs.c force_wake_up_gc(c); c 834 drivers/md/bcache/sysfs.c c->shrink.scan_objects(&c->shrink, &sc); c 838 drivers/md/bcache/sysfs.c c->congested_read_threshold_us, c 841 drivers/md/bcache/sysfs.c c->congested_write_threshold_us, c 849 drivers/md/bcache/sysfs.c c->on_error = v; c 852 drivers/md/bcache/sysfs.c sysfs_strtoul_clamp(io_error_limit, c->error_limit, 0, UINT_MAX); c 861 drivers/md/bcache/sysfs.c c->error_decay = v / 88; c 871 drivers/md/bcache/sysfs.c &c->flags)) c 875 drivers/md/bcache/sysfs.c &c->flags)) c 881 drivers/md/bcache/sysfs.c c->journal_delay_ms, c 883 drivers/md/bcache/sysfs.c sysfs_strtoul_bool(verify, c->verify); c 884 drivers/md/bcache/sysfs.c sysfs_strtoul_bool(key_merging_disabled, c->key_merging_disabled); c 885 drivers/md/bcache/sysfs.c sysfs_strtoul(expensive_debug_checks, c->expensive_debug_checks); c 886 drivers/md/bcache/sysfs.c sysfs_strtoul_bool(gc_always_rewrite, c->gc_always_rewrite); c 887 drivers/md/bcache/sysfs.c sysfs_strtoul_bool(btree_shrinker_disabled, c->shrinker_disabled); c 888 drivers/md/bcache/sysfs.c sysfs_strtoul_bool(copy_gc_enabled, c->copy_gc_enabled); c 894 drivers/md/bcache/sysfs.c sysfs_strtoul_clamp(gc_after_writeback, c->gc_after_writeback, 0, 1); c 902 drivers/md/bcache/sysfs.c struct cache_set *c = container_of(kobj, struct cache_set, internal); c 904 drivers/md/bcache/sysfs.c return bch_cache_set_show(&c->kobj, attr, buf); c 909 drivers/md/bcache/sysfs.c struct cache_set *c = container_of(kobj, struct cache_set, internal); c 915 drivers/md/bcache/sysfs.c return bch_cache_set_store(&c->kobj, attr, buf, size); c 20 drivers/md/bcache/util.c #define simple_strtoint(c, end, base) simple_strtol(c, end, base) c 21 drivers/md/bcache/util.c #define simple_strtouint(c, end, base) simple_strtoul(c, end, base) c 122 drivers/md/bcache/util.h #define fifo_for_each(c, fifo, iter) \ c 124 drivers/md/bcache/util.h c = (fifo)->data[iter], iter != (fifo)->back; \ c 20 drivers/md/bcache/writeback.c static void update_gc_after_writeback(struct cache_set *c) c 22 drivers/md/bcache/writeback.c if (c->gc_after_writeback != (BCH_ENABLE_AUTO_GC) || c 23 drivers/md/bcache/writeback.c c->gc_stats.in_use < BCH_AUTO_GC_DIRTY_THRESHOLD) c 26 drivers/md/bcache/writeback.c c->gc_after_writeback |= BCH_DO_AUTO_GC; c 32 drivers/md/bcache/writeback.c struct cache_set *c = dc->disk.c; c 38 drivers/md/bcache/writeback.c uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size - c 39 drivers/md/bcache/writeback.c atomic_long_read(&c->flash_dev_dirty_sectors); c 49 drivers/md/bcache/writeback.c c->cached_dev_sectors); c 122 drivers/md/bcache/writeback.c static bool set_at_max_writeback_rate(struct cache_set *c, c 126 drivers/md/bcache/writeback.c if (!c->gc_mark_valid) c 141 drivers/md/bcache/writeback.c if (atomic_inc_return(&c->idle_counter) < c 142 drivers/md/bcache/writeback.c atomic_read(&c->attached_dev_nr) * 6) c 145 drivers/md/bcache/writeback.c if (atomic_read(&c->at_max_writeback_rate) != 1) c 146 drivers/md/bcache/writeback.c atomic_set(&c->at_max_writeback_rate, 1); c 161 
drivers/md/bcache/writeback.c if ((atomic_read(&c->idle_counter) < c 162 drivers/md/bcache/writeback.c atomic_read(&c->attached_dev_nr) * 6) || c 163 drivers/md/bcache/writeback.c !atomic_read(&c->at_max_writeback_rate)) c 174 drivers/md/bcache/writeback.c struct cache_set *c = dc->disk.c; c 189 drivers/md/bcache/writeback.c test_bit(CACHE_SET_IO_DISABLE, &c->flags)) { c 203 drivers/md/bcache/writeback.c if (!set_at_max_writeback_rate(c, dc)) { c 206 drivers/md/bcache/writeback.c update_gc_after_writeback(c); c 217 drivers/md/bcache/writeback.c !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) { c 291 drivers/md/bcache/writeback.c atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin); c 293 drivers/md/bcache/writeback.c ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key); c 299 drivers/md/bcache/writeback.c ? &dc->disk.c->writeback_keys_failed c 300 drivers/md/bcache/writeback.c : &dc->disk.c->writeback_keys_done); c 362 drivers/md/bcache/writeback.c closure_bio_submit(io->dc->disk.c, &io->bio, cl); c 377 drivers/md/bcache/writeback.c bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0), c 388 drivers/md/bcache/writeback.c closure_bio_submit(io->dc->disk.c, &io->bio, cl); c 415 drivers/md/bcache/writeback.c !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) && c 421 drivers/md/bcache/writeback.c BUG_ON(ptr_stale(dc->disk.c, &next->key, 0)); c 474 drivers/md/bcache/writeback.c PTR_CACHE(dc->disk.c, &w->key, 0)->bdev); c 495 drivers/md/bcache/writeback.c !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) && c 518 drivers/md/bcache/writeback.c void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode, c 521 drivers/md/bcache/writeback.c struct bcache_device *d = c->devices[inode]; c 527 drivers/md/bcache/writeback.c if (UUID_FLASH_ONLY(&c->uuids[inode])) c 528 drivers/md/bcache/writeback.c atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors); c 593 drivers/md/bcache/writeback.c bch_refill_keybuf(dc->disk.c, buf, c 639 drivers/md/bcache/writeback.c bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred); c 649 drivers/md/bcache/writeback.c bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred); c 657 drivers/md/bcache/writeback.c struct cache_set *c = dc->disk.c; c 663 drivers/md/bcache/writeback.c !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) { c 678 drivers/md/bcache/writeback.c test_bit(CACHE_SET_IO_DISABLE, &c->flags)) { c 717 drivers/md/bcache/writeback.c if (c->gc_after_writeback == c 719 drivers/md/bcache/writeback.c c->gc_after_writeback &= ~BCH_DO_AUTO_GC; c 720 drivers/md/bcache/writeback.c force_wake_up_gc(c); c 733 drivers/md/bcache/writeback.c !test_bit(CACHE_SET_IO_DISABLE, &c->flags) && c 771 drivers/md/bcache/writeback.c bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k), c 775 drivers/md/bcache/writeback.c if (atomic_read(&b->c->search_inflight) && c 795 drivers/md/bcache/writeback.c ret = bch_btree_map_keys(&op.op, d->c, &op.start, c 67 drivers/md/bcache/writeback.h unsigned int in_use = dc->disk.c->gc_stats.in_use; c 110 drivers/md/bcache/writeback.h void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode, c 151 drivers/md/dm-bufio.c struct dm_bufio_client *c; c 165 drivers/md/dm-bufio.c static void dm_bufio_lock(struct dm_bufio_client *c) c 167 drivers/md/dm-bufio.c mutex_lock_nested(&c->lock, dm_bufio_in_request()); c 170 drivers/md/dm-bufio.c static int dm_bufio_trylock(struct dm_bufio_client *c) c 172 drivers/md/dm-bufio.c return mutex_trylock(&c->lock); c 175 drivers/md/dm-bufio.c static void dm_bufio_unlock(struct dm_bufio_client 
*c) c 177 drivers/md/dm-bufio.c mutex_unlock(&c->lock); c 248 drivers/md/dm-bufio.c static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block) c 250 drivers/md/dm-bufio.c struct rb_node *n = c->buffer_tree.rb_node; c 265 drivers/md/dm-bufio.c static void __insert(struct dm_bufio_client *c, struct dm_buffer *b) c 267 drivers/md/dm-bufio.c struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL; c 284 drivers/md/dm-bufio.c rb_insert_color(&b->node, &c->buffer_tree); c 287 drivers/md/dm-bufio.c static void __remove(struct dm_bufio_client *c, struct dm_buffer *b) c 289 drivers/md/dm-bufio.c rb_erase(&b->node, &c->buffer_tree); c 306 drivers/md/dm-bufio.c diff = (long)b->c->block_size; c 375 drivers/md/dm-bufio.c static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, c 378 drivers/md/dm-bufio.c if (unlikely(c->slab_cache != NULL)) { c 380 drivers/md/dm-bufio.c return kmem_cache_alloc(c->slab_cache, gfp_mask); c 383 drivers/md/dm-bufio.c if (c->block_size <= KMALLOC_MAX_SIZE && c 387 drivers/md/dm-bufio.c c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT)); c 403 drivers/md/dm-bufio.c void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); c 409 drivers/md/dm-bufio.c return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); c 415 drivers/md/dm-bufio.c static void free_buffer_data(struct dm_bufio_client *c, c 420 drivers/md/dm-bufio.c kmem_cache_free(c->slab_cache, data); c 425 drivers/md/dm-bufio.c c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT)); c 442 drivers/md/dm-bufio.c static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask) c 444 drivers/md/dm-bufio.c struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask); c 449 drivers/md/dm-bufio.c b->c = c; c 451 drivers/md/dm-bufio.c b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode); c 453 drivers/md/dm-bufio.c kmem_cache_free(c->slab_buffer, b); c 468 drivers/md/dm-bufio.c struct dm_bufio_client *c = b->c; c 470 drivers/md/dm-bufio.c free_buffer_data(c, b->data, b->data_mode); c 471 drivers/md/dm-bufio.c kmem_cache_free(c->slab_buffer, b); c 479 drivers/md/dm-bufio.c struct dm_bufio_client *c = b->c; c 481 drivers/md/dm-bufio.c c->n_buffers[dirty]++; c 484 drivers/md/dm-bufio.c list_add(&b->lru_list, &c->lru[dirty]); c 485 drivers/md/dm-bufio.c __insert(b->c, b); c 496 drivers/md/dm-bufio.c struct dm_bufio_client *c = b->c; c 498 drivers/md/dm-bufio.c BUG_ON(!c->n_buffers[b->list_mode]); c 500 drivers/md/dm-bufio.c c->n_buffers[b->list_mode]--; c 501 drivers/md/dm-bufio.c __remove(b->c, b); c 512 drivers/md/dm-bufio.c struct dm_bufio_client *c = b->c; c 516 drivers/md/dm-bufio.c BUG_ON(!c->n_buffers[b->list_mode]); c 518 drivers/md/dm-bufio.c c->n_buffers[b->list_mode]--; c 519 drivers/md/dm-bufio.c c->n_buffers[dirty]++; c 521 drivers/md/dm-bufio.c list_move(&b->lru_list, &c->lru[dirty]); c 563 drivers/md/dm-bufio.c .client = b->c->dm_io, c 566 drivers/md/dm-bufio.c .bdev = b->c->bdev, c 599 drivers/md/dm-bufio.c vec_size = b->c->block_size >> PAGE_SHIFT; c 600 drivers/md/dm-bufio.c if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT)) c 611 drivers/md/dm-bufio.c bio_set_dev(bio, b->c->bdev); c 642 drivers/md/dm-bufio.c if (likely(b->c->sectors_per_block_bits >= 0)) c 643 drivers/md/dm-bufio.c sector = b->block << b->c->sectors_per_block_bits; c 645 drivers/md/dm-bufio.c sector = b->block * (b->c->block_size >> SECTOR_SHIFT); c 646 drivers/md/dm-bufio.c sector += b->c->start; c 649 drivers/md/dm-bufio.c n_sectors = b->c->block_size >> 
SECTOR_SHIFT; c 652 drivers/md/dm-bufio.c if (b->c->write_callback) c 653 drivers/md/dm-bufio.c b->c->write_callback(b); c 659 drivers/md/dm-bufio.c if (unlikely(end > b->c->block_size)) c 660 drivers/md/dm-bufio.c end = b->c->block_size; c 686 drivers/md/dm-bufio.c struct dm_bufio_client *c = b->c; c 688 drivers/md/dm-bufio.c (void)cmpxchg(&c->async_write_error, 0, c 763 drivers/md/dm-bufio.c static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c) c 767 drivers/md/dm-bufio.c list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) { c 779 drivers/md/dm-bufio.c list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) { c 800 drivers/md/dm-bufio.c static void __wait_for_free_buffer(struct dm_bufio_client *c) c 804 drivers/md/dm-bufio.c add_wait_queue(&c->free_buffer_wait, &wait); c 806 drivers/md/dm-bufio.c dm_bufio_unlock(c); c 810 drivers/md/dm-bufio.c remove_wait_queue(&c->free_buffer_wait, &wait); c 812 drivers/md/dm-bufio.c dm_bufio_lock(c); c 828 drivers/md/dm-bufio.c static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf) c 848 drivers/md/dm-bufio.c b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); c 857 drivers/md/dm-bufio.c dm_bufio_unlock(c); c 858 drivers/md/dm-bufio.c b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); c 859 drivers/md/dm-bufio.c dm_bufio_lock(c); c 865 drivers/md/dm-bufio.c if (!list_empty(&c->reserved_buffers)) { c 866 drivers/md/dm-bufio.c b = list_entry(c->reserved_buffers.next, c 869 drivers/md/dm-bufio.c c->need_reserved_buffers++; c 874 drivers/md/dm-bufio.c b = __get_unclaimed_buffer(c); c 878 drivers/md/dm-bufio.c __wait_for_free_buffer(c); c 882 drivers/md/dm-bufio.c static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf) c 884 drivers/md/dm-bufio.c struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf); c 889 drivers/md/dm-bufio.c if (c->alloc_callback) c 890 drivers/md/dm-bufio.c c->alloc_callback(b); c 900 drivers/md/dm-bufio.c struct dm_bufio_client *c = b->c; c 902 drivers/md/dm-bufio.c if (!c->need_reserved_buffers) c 905 drivers/md/dm-bufio.c list_add(&b->lru_list, &c->reserved_buffers); c 906 drivers/md/dm-bufio.c c->need_reserved_buffers--; c 909 drivers/md/dm-bufio.c wake_up(&c->free_buffer_wait); c 912 drivers/md/dm-bufio.c static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait, c 917 drivers/md/dm-bufio.c list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { c 939 drivers/md/dm-bufio.c static void __check_watermark(struct dm_bufio_client *c, c 942 drivers/md/dm-bufio.c if (c->n_buffers[LIST_DIRTY] > c->n_buffers[LIST_CLEAN] * DM_BUFIO_WRITEBACK_RATIO) c 943 drivers/md/dm-bufio.c __write_dirty_buffers_async(c, 1, write_list); c 950 drivers/md/dm-bufio.c static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block, c 958 drivers/md/dm-bufio.c b = __find(c, block); c 965 drivers/md/dm-bufio.c new_b = __alloc_buffer_wait(c, nf); c 973 drivers/md/dm-bufio.c b = __find(c, block); c 979 drivers/md/dm-bufio.c __check_watermark(c, write_list); c 1039 drivers/md/dm-bufio.c static void *new_read(struct dm_bufio_client *c, sector_t block, c 1047 drivers/md/dm-bufio.c dm_bufio_lock(c); c 1048 drivers/md/dm-bufio.c b = __bufio_new(c, block, nf, &need_submit, &write_list); c 1053 drivers/md/dm-bufio.c dm_bufio_unlock(c); c 1078 drivers/md/dm-bufio.c void *dm_bufio_get(struct dm_bufio_client *c, sector_t block, c 1081 
drivers/md/dm-bufio.c return new_read(c, block, NF_GET, bp); c 1085 drivers/md/dm-bufio.c void *dm_bufio_read(struct dm_bufio_client *c, sector_t block, c 1090 drivers/md/dm-bufio.c return new_read(c, block, NF_READ, bp); c 1094 drivers/md/dm-bufio.c void *dm_bufio_new(struct dm_bufio_client *c, sector_t block, c 1099 drivers/md/dm-bufio.c return new_read(c, block, NF_FRESH, bp); c 1103 drivers/md/dm-bufio.c void dm_bufio_prefetch(struct dm_bufio_client *c, c 1113 drivers/md/dm-bufio.c dm_bufio_lock(c); c 1118 drivers/md/dm-bufio.c b = __bufio_new(c, block, NF_PREFETCH, &need_submit, c 1121 drivers/md/dm-bufio.c dm_bufio_unlock(c); c 1125 drivers/md/dm-bufio.c dm_bufio_lock(c); c 1128 drivers/md/dm-bufio.c dm_bufio_unlock(c); c 1138 drivers/md/dm-bufio.c dm_bufio_lock(c); c 1142 drivers/md/dm-bufio.c dm_bufio_unlock(c); c 1151 drivers/md/dm-bufio.c struct dm_bufio_client *c = b->c; c 1153 drivers/md/dm-bufio.c dm_bufio_lock(c); c 1159 drivers/md/dm-bufio.c wake_up(&c->free_buffer_wait); c 1175 drivers/md/dm-bufio.c dm_bufio_unlock(c); c 1182 drivers/md/dm-bufio.c struct dm_bufio_client *c = b->c; c 1185 drivers/md/dm-bufio.c BUG_ON(end > b->c->block_size); c 1187 drivers/md/dm-bufio.c dm_bufio_lock(c); c 1202 drivers/md/dm-bufio.c dm_bufio_unlock(c); c 1208 drivers/md/dm-bufio.c dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size); c 1212 drivers/md/dm-bufio.c void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c) c 1218 drivers/md/dm-bufio.c dm_bufio_lock(c); c 1219 drivers/md/dm-bufio.c __write_dirty_buffers_async(c, 0, &write_list); c 1220 drivers/md/dm-bufio.c dm_bufio_unlock(c); c 1232 drivers/md/dm-bufio.c int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c) c 1240 drivers/md/dm-bufio.c dm_bufio_lock(c); c 1241 drivers/md/dm-bufio.c __write_dirty_buffers_async(c, 0, &write_list); c 1242 drivers/md/dm-bufio.c dm_bufio_unlock(c); c 1244 drivers/md/dm-bufio.c dm_bufio_lock(c); c 1247 drivers/md/dm-bufio.c list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { c 1250 drivers/md/dm-bufio.c if (buffers_processed < c->n_buffers[LIST_DIRTY]) c 1256 drivers/md/dm-bufio.c if (buffers_processed < c->n_buffers[LIST_DIRTY]) { c 1259 drivers/md/dm-bufio.c dm_bufio_unlock(c); c 1262 drivers/md/dm-bufio.c dm_bufio_lock(c); c 1292 drivers/md/dm-bufio.c wake_up(&c->free_buffer_wait); c 1293 drivers/md/dm-bufio.c dm_bufio_unlock(c); c 1295 drivers/md/dm-bufio.c a = xchg(&c->async_write_error, 0); c 1296 drivers/md/dm-bufio.c f = dm_bufio_issue_flush(c); c 1307 drivers/md/dm-bufio.c int dm_bufio_issue_flush(struct dm_bufio_client *c) c 1314 drivers/md/dm-bufio.c .client = c->dm_io, c 1317 drivers/md/dm-bufio.c .bdev = c->bdev, c 1342 drivers/md/dm-bufio.c struct dm_bufio_client *c = b->c; c 1347 drivers/md/dm-bufio.c dm_bufio_lock(c); c 1350 drivers/md/dm-bufio.c new = __find(c, new_block); c 1353 drivers/md/dm-bufio.c __wait_for_free_buffer(c); c 1375 drivers/md/dm-bufio.c b->dirty_end = c->block_size; c 1399 drivers/md/dm-bufio.c dm_bufio_unlock(c); c 1410 drivers/md/dm-bufio.c void dm_bufio_forget(struct dm_bufio_client *c, sector_t block) c 1414 drivers/md/dm-bufio.c dm_bufio_lock(c); c 1416 drivers/md/dm-bufio.c b = __find(c, block); c 1422 drivers/md/dm-bufio.c dm_bufio_unlock(c); c 1426 drivers/md/dm-bufio.c void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n) c 1428 drivers/md/dm-bufio.c c->minimum_buffers = n; c 1432 drivers/md/dm-bufio.c unsigned dm_bufio_get_block_size(struct dm_bufio_client *c) c 1434 drivers/md/dm-bufio.c 
return c->block_size; c 1438 drivers/md/dm-bufio.c sector_t dm_bufio_get_device_size(struct dm_bufio_client *c) c 1440 drivers/md/dm-bufio.c sector_t s = i_size_read(c->bdev->bd_inode) >> SECTOR_SHIFT; c 1441 drivers/md/dm-bufio.c if (likely(c->sectors_per_block_bits >= 0)) c 1442 drivers/md/dm-bufio.c s >>= c->sectors_per_block_bits; c 1444 drivers/md/dm-bufio.c sector_div(s, c->block_size >> SECTOR_SHIFT); c 1469 drivers/md/dm-bufio.c return b->c; c 1473 drivers/md/dm-bufio.c static void drop_buffers(struct dm_bufio_client *c) c 1484 drivers/md/dm-bufio.c dm_bufio_write_dirty_buffers_async(c); c 1486 drivers/md/dm-bufio.c dm_bufio_lock(c); c 1488 drivers/md/dm-bufio.c while ((b = __get_unclaimed_buffer(c))) c 1492 drivers/md/dm-bufio.c list_for_each_entry(b, &c->lru[i], lru_list) { c 1505 drivers/md/dm-bufio.c while ((b = __get_unclaimed_buffer(c))) c 1510 drivers/md/dm-bufio.c BUG_ON(!list_empty(&c->lru[i])); c 1512 drivers/md/dm-bufio.c dm_bufio_unlock(c); c 1542 drivers/md/dm-bufio.c static unsigned long get_retain_buffers(struct dm_bufio_client *c) c 1545 drivers/md/dm-bufio.c if (likely(c->sectors_per_block_bits >= 0)) c 1546 drivers/md/dm-bufio.c retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT; c 1548 drivers/md/dm-bufio.c retain_bytes /= c->block_size; c 1552 drivers/md/dm-bufio.c static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan, c 1558 drivers/md/dm-bufio.c unsigned long count = c->n_buffers[LIST_CLEAN] + c 1559 drivers/md/dm-bufio.c c->n_buffers[LIST_DIRTY]; c 1560 drivers/md/dm-bufio.c unsigned long retain_target = get_retain_buffers(c); c 1563 drivers/md/dm-bufio.c list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) { c 1577 drivers/md/dm-bufio.c struct dm_bufio_client *c; c 1580 drivers/md/dm-bufio.c c = container_of(shrink, struct dm_bufio_client, shrinker); c 1582 drivers/md/dm-bufio.c dm_bufio_lock(c); c 1583 drivers/md/dm-bufio.c else if (!dm_bufio_trylock(c)) c 1586 drivers/md/dm-bufio.c freed = __scan(c, sc->nr_to_scan, sc->gfp_mask); c 1587 drivers/md/dm-bufio.c dm_bufio_unlock(c); c 1594 drivers/md/dm-bufio.c struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker); c 1595 drivers/md/dm-bufio.c unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) + c 1596 drivers/md/dm-bufio.c READ_ONCE(c->n_buffers[LIST_DIRTY]); c 1597 drivers/md/dm-bufio.c unsigned long retain_target = get_retain_buffers(c); c 1611 drivers/md/dm-bufio.c struct dm_bufio_client *c; c 1621 drivers/md/dm-bufio.c c = kzalloc(sizeof(*c), GFP_KERNEL); c 1622 drivers/md/dm-bufio.c if (!c) { c 1626 drivers/md/dm-bufio.c c->buffer_tree = RB_ROOT; c 1628 drivers/md/dm-bufio.c c->bdev = bdev; c 1629 drivers/md/dm-bufio.c c->block_size = block_size; c 1631 drivers/md/dm-bufio.c c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT; c 1633 drivers/md/dm-bufio.c c->sectors_per_block_bits = -1; c 1635 drivers/md/dm-bufio.c c->alloc_callback = alloc_callback; c 1636 drivers/md/dm-bufio.c c->write_callback = write_callback; c 1639 drivers/md/dm-bufio.c INIT_LIST_HEAD(&c->lru[i]); c 1640 drivers/md/dm-bufio.c c->n_buffers[i] = 0; c 1643 drivers/md/dm-bufio.c mutex_init(&c->lock); c 1644 drivers/md/dm-bufio.c INIT_LIST_HEAD(&c->reserved_buffers); c 1645 drivers/md/dm-bufio.c c->need_reserved_buffers = reserved_buffers; c 1647 drivers/md/dm-bufio.c dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS); c 1649 drivers/md/dm-bufio.c init_waitqueue_head(&c->free_buffer_wait); c 1650 drivers/md/dm-bufio.c c->async_write_error = 0; c 
1652 drivers/md/dm-bufio.c c->dm_io = dm_io_client_create(); c 1653 drivers/md/dm-bufio.c if (IS_ERR(c->dm_io)) { c 1654 drivers/md/dm-bufio.c r = PTR_ERR(c->dm_io); c 1662 drivers/md/dm-bufio.c c->slab_cache = kmem_cache_create(slab_name, block_size, align, c 1664 drivers/md/dm-bufio.c if (!c->slab_cache) { c 1673 drivers/md/dm-bufio.c c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size, c 1675 drivers/md/dm-bufio.c if (!c->slab_buffer) { c 1680 drivers/md/dm-bufio.c while (c->need_reserved_buffers) { c 1681 drivers/md/dm-bufio.c struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL); c 1690 drivers/md/dm-bufio.c c->shrinker.count_objects = dm_bufio_shrink_count; c 1691 drivers/md/dm-bufio.c c->shrinker.scan_objects = dm_bufio_shrink_scan; c 1692 drivers/md/dm-bufio.c c->shrinker.seeks = 1; c 1693 drivers/md/dm-bufio.c c->shrinker.batch = 0; c 1694 drivers/md/dm-bufio.c r = register_shrinker(&c->shrinker); c 1700 drivers/md/dm-bufio.c list_add(&c->client_list, &dm_bufio_all_clients); c 1704 drivers/md/dm-bufio.c return c; c 1707 drivers/md/dm-bufio.c while (!list_empty(&c->reserved_buffers)) { c 1708 drivers/md/dm-bufio.c struct dm_buffer *b = list_entry(c->reserved_buffers.next, c 1713 drivers/md/dm-bufio.c kmem_cache_destroy(c->slab_cache); c 1714 drivers/md/dm-bufio.c kmem_cache_destroy(c->slab_buffer); c 1715 drivers/md/dm-bufio.c dm_io_client_destroy(c->dm_io); c 1717 drivers/md/dm-bufio.c mutex_destroy(&c->lock); c 1718 drivers/md/dm-bufio.c kfree(c); c 1728 drivers/md/dm-bufio.c void dm_bufio_client_destroy(struct dm_bufio_client *c) c 1732 drivers/md/dm-bufio.c drop_buffers(c); c 1734 drivers/md/dm-bufio.c unregister_shrinker(&c->shrinker); c 1738 drivers/md/dm-bufio.c list_del(&c->client_list); c 1744 drivers/md/dm-bufio.c BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree)); c 1745 drivers/md/dm-bufio.c BUG_ON(c->need_reserved_buffers); c 1747 drivers/md/dm-bufio.c while (!list_empty(&c->reserved_buffers)) { c 1748 drivers/md/dm-bufio.c struct dm_buffer *b = list_entry(c->reserved_buffers.next, c 1755 drivers/md/dm-bufio.c if (c->n_buffers[i]) c 1756 drivers/md/dm-bufio.c DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]); c 1759 drivers/md/dm-bufio.c BUG_ON(c->n_buffers[i]); c 1761 drivers/md/dm-bufio.c kmem_cache_destroy(c->slab_cache); c 1762 drivers/md/dm-bufio.c kmem_cache_destroy(c->slab_buffer); c 1763 drivers/md/dm-bufio.c dm_io_client_destroy(c->dm_io); c 1764 drivers/md/dm-bufio.c mutex_destroy(&c->lock); c 1765 drivers/md/dm-bufio.c kfree(c); c 1769 drivers/md/dm-bufio.c void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start) c 1771 drivers/md/dm-bufio.c c->start = start; c 1790 drivers/md/dm-bufio.c static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz) c 1793 drivers/md/dm-bufio.c unsigned long retain_target = get_retain_buffers(c); c 1797 drivers/md/dm-bufio.c dm_bufio_lock(c); c 1799 drivers/md/dm-bufio.c __check_watermark(c, &write_list); c 1801 drivers/md/dm-bufio.c dm_bufio_unlock(c); c 1803 drivers/md/dm-bufio.c dm_bufio_lock(c); c 1806 drivers/md/dm-bufio.c count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY]; c 1807 drivers/md/dm-bufio.c list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) { c 1820 drivers/md/dm-bufio.c dm_bufio_unlock(c); c 1859 drivers/md/dm-bufio.c current_client = b->c; c 1894 drivers/md/dm-bufio.c struct dm_bufio_client *c; c 1900 drivers/md/dm-bufio.c list_for_each_entry(c, &dm_bufio_all_clients, client_list) c 1901 drivers/md/dm-bufio.c 
__evict_old_buffers(c, max_age_hz); c 1154 drivers/md/dm-cache-metadata.c struct dm_bitset_cursor c; c 1166 drivers/md/dm-cache-metadata.c from_dblock(cmd->discard_nr_blocks), &c); c 1172 drivers/md/dm-cache-metadata.c dm_bitset_cursor_get_value(&c)); c 1179 drivers/md/dm-cache-metadata.c r = dm_bitset_cursor_next(&c); c 1184 drivers/md/dm-cache-metadata.c dm_bitset_cursor_end(&c); c 532 drivers/md/dm-clone-metadata.c struct dm_bitset_cursor c; c 539 drivers/md/dm-clone-metadata.c r = dm_bitset_cursor_begin(&cmd->bitset_info, cmd->bitset_root, cmd->nr_regions, &c); c 544 drivers/md/dm-clone-metadata.c if (dm_bitset_cursor_get_value(&c)) c 552 drivers/md/dm-clone-metadata.c r = dm_bitset_cursor_next(&c); c 558 drivers/md/dm-clone-metadata.c dm_bitset_cursor_end(&c); c 139 drivers/md/dm-delay.c static int delay_class_ctr(struct dm_target *ti, struct delay_class *c, char **argv) c 149 drivers/md/dm-delay.c c->start = tmpll; c 151 drivers/md/dm-delay.c if (sscanf(argv[2], "%u%c", &c->delay, &dummy) != 1) { c 156 drivers/md/dm-delay.c ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &c->dev); c 243 drivers/md/dm-delay.c static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio) c 248 drivers/md/dm-delay.c if (!c->delay || !atomic_read(&dc->may_delay)) c 254 drivers/md/dm-delay.c delayed->expires = expires = jiffies + msecs_to_jiffies(c->delay); c 257 drivers/md/dm-delay.c c->ops++; c 285 drivers/md/dm-delay.c struct delay_class *c; c 290 drivers/md/dm-delay.c c = &dc->flush; c 292 drivers/md/dm-delay.c c = &dc->write; c 294 drivers/md/dm-delay.c c = &dc->read; c 296 drivers/md/dm-delay.c delayed->class = c; c 297 drivers/md/dm-delay.c bio_set_dev(bio, c->dev->bdev); c 299 drivers/md/dm-delay.c bio->bi_iter.bi_sector = c->start + dm_target_offset(ti, bio->bi_iter.bi_sector); c 301 drivers/md/dm-delay.c return delay_bio(dc, c, bio); c 304 drivers/md/dm-delay.c #define DMEMIT_DELAY_CLASS(c) \ c 305 drivers/md/dm-delay.c DMEMIT("%s %llu %u", (c)->dev->name, (unsigned long long)(c)->start, (c)->delay) c 246 drivers/md/dm-snap.c struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); c 247 drivers/md/dm-snap.c INIT_HLIST_NODE(&c->node); c 252 drivers/md/dm-snap.c struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); c 253 drivers/md/dm-snap.c return !hlist_unhashed(&c->node); c 258 drivers/md/dm-snap.c struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); c 260 drivers/md/dm-snap.c c->chunk = chunk; c 263 drivers/md/dm-snap.c hlist_add_head(&c->node, c 270 drivers/md/dm-snap.c struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); c 274 drivers/md/dm-snap.c hlist_del(&c->node); c 280 drivers/md/dm-snap.c struct dm_snap_tracked_chunk *c; c 285 drivers/md/dm-snap.c hlist_for_each_entry(c, c 287 drivers/md/dm-snap.c if (c->chunk == chunk) { c 430 drivers/md/dm-writecache.c struct completion c; c 442 drivers/md/dm-writecache.c complete(&endio->c); c 457 drivers/md/dm-writecache.c COMPLETION_INITIALIZER_ONSTACK(endio.c), c 495 drivers/md/dm-writecache.c wait_for_completion_io(&endio.c); c 1345 drivers/md/dm-writecache.c struct copy_struct *c = ptr; c 1346 drivers/md/dm-writecache.c struct dm_writecache *wc = c->wc; c 1348 drivers/md/dm-writecache.c c->error = likely(!(read_err | write_err)) ? 
0 : -EIO; c 1353 drivers/md/dm-writecache.c list_add_tail(&c->endio_entry, &wc->endio_list); c 1398 drivers/md/dm-writecache.c struct copy_struct *c; c 1402 drivers/md/dm-writecache.c c = list_entry(list->next, struct copy_struct, endio_entry); c 1403 drivers/md/dm-writecache.c list_del(&c->endio_entry); c 1405 drivers/md/dm-writecache.c if (unlikely(c->error)) c 1406 drivers/md/dm-writecache.c writecache_error(wc, c->error, "copy error"); c 1408 drivers/md/dm-writecache.c e = c->e; c 1419 drivers/md/dm-writecache.c } while (--c->n_entries); c 1420 drivers/md/dm-writecache.c mempool_free(c, &wc->copy_pool); c 1562 drivers/md/dm-writecache.c struct copy_struct *c; c 1580 drivers/md/dm-writecache.c c = mempool_alloc(&wc->copy_pool, GFP_NOIO); c 1581 drivers/md/dm-writecache.c c->wc = wc; c 1582 drivers/md/dm-writecache.c c->e = e; c 1583 drivers/md/dm-writecache.c c->n_entries = e->wc_list_contiguous; c 1593 drivers/md/dm-writecache.c dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c); c 2463 drivers/md/dm.c struct bio *c; c 2471 drivers/md/dm.c c = bio_list_pop(&md->deferred); c 2474 drivers/md/dm.c if (!c) c 2478 drivers/md/dm.c (void) generic_make_request(c); c 2480 drivers/md/dm.c (void) dm_process_bio(md, map, c); c 901 drivers/md/persistent-data/dm-array.c static int load_ablock(struct dm_array_cursor *c) c 907 drivers/md/persistent-data/dm-array.c if (c->block) c 908 drivers/md/persistent-data/dm-array.c unlock_ablock(c->info, c->block); c 910 drivers/md/persistent-data/dm-array.c c->block = NULL; c 911 drivers/md/persistent-data/dm-array.c c->ab = NULL; c 912 drivers/md/persistent-data/dm-array.c c->index = 0; c 914 drivers/md/persistent-data/dm-array.c r = dm_btree_cursor_get_value(&c->cursor, &key, &value_le); c 917 drivers/md/persistent-data/dm-array.c dm_btree_cursor_end(&c->cursor); c 920 drivers/md/persistent-data/dm-array.c r = get_ablock(c->info, le64_to_cpu(value_le), &c->block, &c->ab); c 923 drivers/md/persistent-data/dm-array.c dm_btree_cursor_end(&c->cursor); c 931 drivers/md/persistent-data/dm-array.c struct dm_array_cursor *c) c 935 drivers/md/persistent-data/dm-array.c memset(c, 0, sizeof(*c)); c 936 drivers/md/persistent-data/dm-array.c c->info = info; c 937 drivers/md/persistent-data/dm-array.c r = dm_btree_cursor_begin(&info->btree_info, root, true, &c->cursor); c 943 drivers/md/persistent-data/dm-array.c return load_ablock(c); c 947 drivers/md/persistent-data/dm-array.c void dm_array_cursor_end(struct dm_array_cursor *c) c 949 drivers/md/persistent-data/dm-array.c if (c->block) { c 950 drivers/md/persistent-data/dm-array.c unlock_ablock(c->info, c->block); c 951 drivers/md/persistent-data/dm-array.c dm_btree_cursor_end(&c->cursor); c 956 drivers/md/persistent-data/dm-array.c int dm_array_cursor_next(struct dm_array_cursor *c) c 960 drivers/md/persistent-data/dm-array.c if (!c->block) c 963 drivers/md/persistent-data/dm-array.c c->index++; c 965 drivers/md/persistent-data/dm-array.c if (c->index >= le32_to_cpu(c->ab->nr_entries)) { c 966 drivers/md/persistent-data/dm-array.c r = dm_btree_cursor_next(&c->cursor); c 970 drivers/md/persistent-data/dm-array.c r = load_ablock(c); c 979 drivers/md/persistent-data/dm-array.c int dm_array_cursor_skip(struct dm_array_cursor *c, uint32_t count) c 984 drivers/md/persistent-data/dm-array.c uint32_t remaining = le32_to_cpu(c->ab->nr_entries) - c->index; c 987 drivers/md/persistent-data/dm-array.c c->index += count; c 992 drivers/md/persistent-data/dm-array.c r = dm_array_cursor_next(c); c 1000 
drivers/md/persistent-data/dm-array.c void dm_array_cursor_get_value(struct dm_array_cursor *c, void **value_le) c 1002 drivers/md/persistent-data/dm-array.c *value_le = element_at(c->info, c->ab, c->index); c 205 drivers/md/persistent-data/dm-array.h dm_block_t root, struct dm_array_cursor *c); c 206 drivers/md/persistent-data/dm-array.h void dm_array_cursor_end(struct dm_array_cursor *c); c 208 drivers/md/persistent-data/dm-array.h uint32_t dm_array_cursor_index(struct dm_array_cursor *c); c 209 drivers/md/persistent-data/dm-array.h int dm_array_cursor_next(struct dm_array_cursor *c); c 210 drivers/md/persistent-data/dm-array.h int dm_array_cursor_skip(struct dm_array_cursor *c, uint32_t count); c 215 drivers/md/persistent-data/dm-array.h void dm_array_cursor_get_value(struct dm_array_cursor *c, void **value_le); c 213 drivers/md/persistent-data/dm-bitset.c static int cursor_next_array_entry(struct dm_bitset_cursor *c) c 218 drivers/md/persistent-data/dm-bitset.c r = dm_array_cursor_next(&c->cursor); c 222 drivers/md/persistent-data/dm-bitset.c dm_array_cursor_get_value(&c->cursor, (void **) &value); c 223 drivers/md/persistent-data/dm-bitset.c c->array_index++; c 224 drivers/md/persistent-data/dm-bitset.c c->bit_index = 0; c 225 drivers/md/persistent-data/dm-bitset.c c->current_bits = le64_to_cpu(*value); c 231 drivers/md/persistent-data/dm-bitset.c struct dm_bitset_cursor *c) c 239 drivers/md/persistent-data/dm-bitset.c c->info = info; c 240 drivers/md/persistent-data/dm-bitset.c c->entries_remaining = nr_entries; c 242 drivers/md/persistent-data/dm-bitset.c r = dm_array_cursor_begin(&info->array_info, root, &c->cursor); c 246 drivers/md/persistent-data/dm-bitset.c dm_array_cursor_get_value(&c->cursor, (void **) &value); c 247 drivers/md/persistent-data/dm-bitset.c c->array_index = 0; c 248 drivers/md/persistent-data/dm-bitset.c c->bit_index = 0; c 249 drivers/md/persistent-data/dm-bitset.c c->current_bits = le64_to_cpu(*value); c 255 drivers/md/persistent-data/dm-bitset.c void dm_bitset_cursor_end(struct dm_bitset_cursor *c) c 257 drivers/md/persistent-data/dm-bitset.c return dm_array_cursor_end(&c->cursor); c 261 drivers/md/persistent-data/dm-bitset.c int dm_bitset_cursor_next(struct dm_bitset_cursor *c) c 265 drivers/md/persistent-data/dm-bitset.c if (!c->entries_remaining) c 268 drivers/md/persistent-data/dm-bitset.c c->entries_remaining--; c 269 drivers/md/persistent-data/dm-bitset.c if (++c->bit_index > 63) c 270 drivers/md/persistent-data/dm-bitset.c r = cursor_next_array_entry(c); c 276 drivers/md/persistent-data/dm-bitset.c int dm_bitset_cursor_skip(struct dm_bitset_cursor *c, uint32_t count) c 281 drivers/md/persistent-data/dm-bitset.c uint32_t remaining_in_word = 64 - c->bit_index; c 283 drivers/md/persistent-data/dm-bitset.c if (c->entries_remaining < count) c 287 drivers/md/persistent-data/dm-bitset.c c->bit_index += count; c 288 drivers/md/persistent-data/dm-bitset.c c->entries_remaining -= count; c 292 drivers/md/persistent-data/dm-bitset.c c->entries_remaining -= remaining_in_word; c 297 drivers/md/persistent-data/dm-bitset.c r = dm_array_cursor_skip(&c->cursor, nr_array_skip); c 301 drivers/md/persistent-data/dm-bitset.c dm_array_cursor_get_value(&c->cursor, (void **) &value); c 302 drivers/md/persistent-data/dm-bitset.c c->entries_remaining -= count; c 303 drivers/md/persistent-data/dm-bitset.c c->array_index += nr_array_skip; c 304 drivers/md/persistent-data/dm-bitset.c c->bit_index = count & 63; c 305 drivers/md/persistent-data/dm-bitset.c c->current_bits = 
le64_to_cpu(*value); c 311 drivers/md/persistent-data/dm-bitset.c bool dm_bitset_cursor_get_value(struct dm_bitset_cursor *c) c 313 drivers/md/persistent-data/dm-bitset.c return test_bit(c->bit_index, (unsigned long *) &c->current_bits); c 196 drivers/md/persistent-data/dm-bitset.h struct dm_bitset_cursor *c); c 197 drivers/md/persistent-data/dm-bitset.h void dm_bitset_cursor_end(struct dm_bitset_cursor *c); c 199 drivers/md/persistent-data/dm-bitset.h int dm_bitset_cursor_next(struct dm_bitset_cursor *c); c 200 drivers/md/persistent-data/dm-bitset.h int dm_bitset_cursor_skip(struct dm_bitset_cursor *c, uint32_t count); c 201 drivers/md/persistent-data/dm-bitset.h bool dm_bitset_cursor_get_value(struct dm_bitset_cursor *c); c 168 drivers/md/persistent-data/dm-btree-remove.c static void exit_child(struct dm_btree_info *info, struct child *c) c 170 drivers/md/persistent-data/dm-btree-remove.c dm_tm_unlock(info->tm, c->block); c 270 drivers/md/persistent-data/dm-btree-remove.c struct child *l, struct child *c, struct child *r, c 290 drivers/md/persistent-data/dm-btree-remove.c delete_at(parent, c->index); c 293 drivers/md/persistent-data/dm-btree-remove.c dm_tm_dec(info->tm, dm_block_location(c->block)); c 301 drivers/md/persistent-data/dm-btree-remove.c struct child *l, struct child *c, struct child *r, c 343 drivers/md/persistent-data/dm-btree-remove.c *key_ptr(parent, c->index) = center->keys[0]; c 348 drivers/md/persistent-data/dm-btree-remove.c struct child *l, struct child *c, struct child *r) c 351 drivers/md/persistent-data/dm-btree-remove.c struct btree_node *center = c->n; c 364 drivers/md/persistent-data/dm-btree-remove.c delete_center_node(info, parent, l, c, r, left, center, right, c 367 drivers/md/persistent-data/dm-btree-remove.c redistribute3(info, parent, l, c, r, left, center, right, c 995 drivers/md/persistent-data/dm-btree.c static void prefetch_values(struct dm_btree_cursor *c) c 999 drivers/md/persistent-data/dm-btree.c struct cursor_node *n = c->nodes + c->depth - 1; c 1001 drivers/md/persistent-data/dm-btree.c struct dm_block_manager *bm = dm_tm_get_bm(c->info->tm); c 1003 drivers/md/persistent-data/dm-btree.c BUG_ON(c->info->value_type.size != sizeof(value_le)); c 1012 drivers/md/persistent-data/dm-btree.c static bool leaf_node(struct dm_btree_cursor *c) c 1014 drivers/md/persistent-data/dm-btree.c struct cursor_node *n = c->nodes + c->depth - 1; c 1020 drivers/md/persistent-data/dm-btree.c static int push_node(struct dm_btree_cursor *c, dm_block_t b) c 1023 drivers/md/persistent-data/dm-btree.c struct cursor_node *n = c->nodes + c->depth; c 1025 drivers/md/persistent-data/dm-btree.c if (c->depth >= DM_BTREE_CURSOR_MAX_DEPTH - 1) { c 1030 drivers/md/persistent-data/dm-btree.c r = bn_read_lock(c->info, b, &n->b); c 1035 drivers/md/persistent-data/dm-btree.c c->depth++; c 1037 drivers/md/persistent-data/dm-btree.c if (c->prefetch_leaves || !leaf_node(c)) c 1038 drivers/md/persistent-data/dm-btree.c prefetch_values(c); c 1043 drivers/md/persistent-data/dm-btree.c static void pop_node(struct dm_btree_cursor *c) c 1045 drivers/md/persistent-data/dm-btree.c c->depth--; c 1046 drivers/md/persistent-data/dm-btree.c unlock_block(c->info, c->nodes[c->depth].b); c 1049 drivers/md/persistent-data/dm-btree.c static int inc_or_backtrack(struct dm_btree_cursor *c) c 1055 drivers/md/persistent-data/dm-btree.c if (!c->depth) c 1058 drivers/md/persistent-data/dm-btree.c n = c->nodes + c->depth - 1; c 1065 drivers/md/persistent-data/dm-btree.c pop_node(c); c 1071 
drivers/md/persistent-data/dm-btree.c static int find_leaf(struct dm_btree_cursor *c) c 1079 drivers/md/persistent-data/dm-btree.c n = c->nodes + c->depth - 1; c 1086 drivers/md/persistent-data/dm-btree.c r = push_node(c, le64_to_cpu(value_le)); c 1100 drivers/md/persistent-data/dm-btree.c bool prefetch_leaves, struct dm_btree_cursor *c) c 1104 drivers/md/persistent-data/dm-btree.c c->info = info; c 1105 drivers/md/persistent-data/dm-btree.c c->root = root; c 1106 drivers/md/persistent-data/dm-btree.c c->depth = 0; c 1107 drivers/md/persistent-data/dm-btree.c c->prefetch_leaves = prefetch_leaves; c 1109 drivers/md/persistent-data/dm-btree.c r = push_node(c, root); c 1113 drivers/md/persistent-data/dm-btree.c return find_leaf(c); c 1117 drivers/md/persistent-data/dm-btree.c void dm_btree_cursor_end(struct dm_btree_cursor *c) c 1119 drivers/md/persistent-data/dm-btree.c while (c->depth) c 1120 drivers/md/persistent-data/dm-btree.c pop_node(c); c 1124 drivers/md/persistent-data/dm-btree.c int dm_btree_cursor_next(struct dm_btree_cursor *c) c 1126 drivers/md/persistent-data/dm-btree.c int r = inc_or_backtrack(c); c 1128 drivers/md/persistent-data/dm-btree.c r = find_leaf(c); c 1137 drivers/md/persistent-data/dm-btree.c int dm_btree_cursor_skip(struct dm_btree_cursor *c, uint32_t count) c 1142 drivers/md/persistent-data/dm-btree.c r = dm_btree_cursor_next(c); c 1148 drivers/md/persistent-data/dm-btree.c int dm_btree_cursor_get_value(struct dm_btree_cursor *c, uint64_t *key, void *value_le) c 1150 drivers/md/persistent-data/dm-btree.c if (c->depth) { c 1151 drivers/md/persistent-data/dm-btree.c struct cursor_node *n = c->nodes + c->depth - 1; c 1158 drivers/md/persistent-data/dm-btree.c memcpy(value_le, value_ptr(bn, n->index), c->info->value_type.size); c 209 drivers/md/persistent-data/dm-btree.h bool prefetch_leaves, struct dm_btree_cursor *c); c 210 drivers/md/persistent-data/dm-btree.h void dm_btree_cursor_end(struct dm_btree_cursor *c); c 211 drivers/md/persistent-data/dm-btree.h int dm_btree_cursor_next(struct dm_btree_cursor *c); c 212 drivers/md/persistent-data/dm-btree.h int dm_btree_cursor_skip(struct dm_btree_cursor *c, uint32_t count); c 213 drivers/md/persistent-data/dm-btree.h int dm_btree_cursor_get_value(struct dm_btree_cursor *c, uint64_t *key, void *value_le); c 82 drivers/md/raid0.c int i, c, err; c 99 drivers/md/raid0.c c = 0; c 129 drivers/md/raid0.c c = 1; c 135 drivers/md/raid0.c if (!c) { c 253 drivers/md/raid0.c c = 0; c 266 drivers/md/raid0.c bdevname(rdev->bdev, b), c); c 267 drivers/md/raid0.c dev[c] = rdev; c 268 drivers/md/raid0.c c++; c 277 drivers/md/raid0.c zone->nb_dev = c; c 278 drivers/md/raid0.c sectors = (smallest->sectors - zone->dev_start) * c; c 351 drivers/media/cec/cec-adap.c complete(&data->c); c 891 drivers/media/cec/cec-adap.c init_completion(&data->c); c 910 drivers/media/cec/cec-adap.c wait_for_completion_killable(&data->c); c 366 drivers/media/common/saa7146/saa7146_hlp.c x[i] = vv->ov.clips[i].c.left; c 367 drivers/media/common/saa7146/saa7146_hlp.c y[i] = vv->ov.clips[i].c.top; c 368 drivers/media/common/saa7146/saa7146_hlp.c w[i] = vv->ov.clips[i].c.width; c 369 drivers/media/common/saa7146/saa7146_hlp.c h[i] = vv->ov.clips[i].c.height; c 121 drivers/media/common/siano/smsdvb-main.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 134 drivers/media/common/siano/smsdvb-main.c c->strength.len = 1; c 135 drivers/media/common/siano/smsdvb-main.c c->cnr.len = 1; c 136 drivers/media/common/siano/smsdvb-main.c c->strength.stat[0].scale = 
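The dm-btree cursor API declared in dm-btree.h above (begin/next/skip/get_value/end, with push_node/pop_node/inc_or_backtrack as internals) can be exercised along these lines. A minimal sketch, assuming 64-bit little-endian values (value_type.size == sizeof(__le64)) and a caller-supplied dm_btree_info and root; scan_btree() and its visit() callback are illustrative only, not part of the API listed here.

/*
 * Hedged sketch: in-order scan of a persistent btree via the cursor API
 * shown above.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include "persistent-data/dm-btree.h"

static int scan_btree(struct dm_btree_info *info, dm_block_t root,
                      void (*visit)(uint64_t key, uint64_t value))
{
        struct dm_btree_cursor c;
        int r;

        /* true => prefetch leaf blocks as well as internal nodes */
        r = dm_btree_cursor_begin(info, root, true, &c);
        if (r)
                return r;

        do {
                uint64_t key;
                __le64 value_le;

                r = dm_btree_cursor_get_value(&c, &key, &value_le);
                if (r)
                        break;

                visit(key, le64_to_cpu(value_le));

                r = dm_btree_cursor_next(&c);
        } while (!r);

        dm_btree_cursor_end(&c);

        /* the cursor reports -ENODATA once the last leaf entry is consumed */
        return (r == -ENODATA) ? 0 : r;
}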
FE_SCALE_DECIBEL; c 137 drivers/media/common/siano/smsdvb-main.c c->cnr.stat[0].scale = FE_SCALE_DECIBEL; c 140 drivers/media/common/siano/smsdvb-main.c c->post_bit_error.len = n_layers; c 141 drivers/media/common/siano/smsdvb-main.c c->post_bit_count.len = n_layers; c 142 drivers/media/common/siano/smsdvb-main.c c->block_error.len = n_layers; c 143 drivers/media/common/siano/smsdvb-main.c c->block_count.len = n_layers; c 150 drivers/media/common/siano/smsdvb-main.c c->post_bit_error.stat[i].scale = FE_SCALE_NOT_AVAILABLE; c 151 drivers/media/common/siano/smsdvb-main.c c->post_bit_count.stat[i].scale = FE_SCALE_NOT_AVAILABLE; c 152 drivers/media/common/siano/smsdvb-main.c c->block_error.stat[i].scale = FE_SCALE_NOT_AVAILABLE; c 153 drivers/media/common/siano/smsdvb-main.c c->block_count.stat[i].scale = FE_SCALE_NOT_AVAILABLE; c 216 drivers/media/common/siano/smsdvb-main.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 218 drivers/media/common/siano/smsdvb-main.c c->frequency = p->frequency; c 220 drivers/media/common/siano/smsdvb-main.c c->bandwidth_hz = sms_to_bw(p->bandwidth); c 221 drivers/media/common/siano/smsdvb-main.c c->transmission_mode = sms_to_mode(p->transmission_mode); c 222 drivers/media/common/siano/smsdvb-main.c c->guard_interval = sms_to_guard_interval(p->guard_interval); c 223 drivers/media/common/siano/smsdvb-main.c c->code_rate_HP = sms_to_code_rate(p->code_rate); c 224 drivers/media/common/siano/smsdvb-main.c c->code_rate_LP = sms_to_code_rate(p->lp_code_rate); c 225 drivers/media/common/siano/smsdvb-main.c c->hierarchy = sms_to_hierarchy(p->hierarchy); c 226 drivers/media/common/siano/smsdvb-main.c c->modulation = sms_to_modulation(p->constellation); c 233 drivers/media/common/siano/smsdvb-main.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 237 drivers/media/common/siano/smsdvb-main.c c->modulation = sms_to_modulation(p->constellation); c 240 drivers/media/common/siano/smsdvb-main.c c->strength.stat[0].uvalue = p->in_band_power * 1000; c 243 drivers/media/common/siano/smsdvb-main.c c->cnr.stat[0].svalue = p->snr * 1000; c 250 drivers/media/common/siano/smsdvb-main.c client->last_per = c->block_error.stat[0].uvalue; c 251 drivers/media/common/siano/smsdvb-main.c c->block_error.stat[0].scale = FE_SCALE_COUNTER; c 252 drivers/media/common/siano/smsdvb-main.c c->block_count.stat[0].scale = FE_SCALE_COUNTER; c 253 drivers/media/common/siano/smsdvb-main.c c->block_error.stat[0].uvalue += p->ets_packets; c 254 drivers/media/common/siano/smsdvb-main.c c->block_count.stat[0].uvalue += p->ets_packets + p->ts_packets; c 257 drivers/media/common/siano/smsdvb-main.c c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c 258 drivers/media/common/siano/smsdvb-main.c c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; c 259 drivers/media/common/siano/smsdvb-main.c c->post_bit_error.stat[0].uvalue += p->ber_error_count; c 260 drivers/media/common/siano/smsdvb-main.c c->post_bit_count.stat[0].uvalue += p->ber_bit_count; c 273 drivers/media/common/siano/smsdvb-main.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 281 drivers/media/common/siano/smsdvb-main.c c->frequency = p->frequency; c 283 drivers/media/common/siano/smsdvb-main.c c->bandwidth_hz = sms_to_bw(p->bandwidth); c 284 drivers/media/common/siano/smsdvb-main.c c->transmission_mode = sms_to_mode(p->transmission_mode); c 285 drivers/media/common/siano/smsdvb-main.c c->guard_interval = sms_to_guard_interval(p->guard_interval); c 286 drivers/media/common/siano/smsdvb-main.c 
c->code_rate_HP = sms_to_code_rate(p->code_rate); c 287 drivers/media/common/siano/smsdvb-main.c c->code_rate_LP = sms_to_code_rate(p->lp_code_rate); c 288 drivers/media/common/siano/smsdvb-main.c c->hierarchy = sms_to_hierarchy(p->hierarchy); c 289 drivers/media/common/siano/smsdvb-main.c c->modulation = sms_to_modulation(p->constellation); c 292 drivers/media/common/siano/smsdvb-main.c c->lna = p->is_external_lna_on ? 1 : 0; c 295 drivers/media/common/siano/smsdvb-main.c c->cnr.stat[0].svalue = p->SNR * 1000; c 298 drivers/media/common/siano/smsdvb-main.c c->strength.stat[0].uvalue = p->in_band_pwr * 1000; c 305 drivers/media/common/siano/smsdvb-main.c client->last_per = c->block_error.stat[0].uvalue; c 306 drivers/media/common/siano/smsdvb-main.c c->block_error.stat[0].scale = FE_SCALE_COUNTER; c 307 drivers/media/common/siano/smsdvb-main.c c->block_count.stat[0].scale = FE_SCALE_COUNTER; c 308 drivers/media/common/siano/smsdvb-main.c c->block_error.stat[0].uvalue += p->error_ts_packets; c 309 drivers/media/common/siano/smsdvb-main.c c->block_count.stat[0].uvalue += p->total_ts_packets; c 312 drivers/media/common/siano/smsdvb-main.c c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c 313 drivers/media/common/siano/smsdvb-main.c c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; c 314 drivers/media/common/siano/smsdvb-main.c c->post_bit_error.stat[0].uvalue += p->ber_error_count; c 315 drivers/media/common/siano/smsdvb-main.c c->post_bit_count.stat[0].uvalue += p->ber_bit_count; c 325 drivers/media/common/siano/smsdvb-main.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 340 drivers/media/common/siano/smsdvb-main.c c->strength.stat[0].uvalue = ((s32)p->transmission_mode) * 1000; c 341 drivers/media/common/siano/smsdvb-main.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 346 drivers/media/common/siano/smsdvb-main.c c->frequency = p->frequency; c 347 drivers/media/common/siano/smsdvb-main.c c->bandwidth_hz = sms_to_bw(p->bandwidth); c 348 drivers/media/common/siano/smsdvb-main.c c->transmission_mode = sms_to_mode(p->transmission_mode); c 349 drivers/media/common/siano/smsdvb-main.c c->guard_interval = sms_to_guard_interval(p->guard_interval); c 350 drivers/media/common/siano/smsdvb-main.c c->isdbt_partial_reception = p->partial_reception ? 1 : 0; c 356 drivers/media/common/siano/smsdvb-main.c c->isdbt_layer_enabled = 0; c 359 drivers/media/common/siano/smsdvb-main.c c->lna = p->is_external_lna_on ? 
1 : 0; c 362 drivers/media/common/siano/smsdvb-main.c c->cnr.stat[0].svalue = p->SNR * 1000; c 365 drivers/media/common/siano/smsdvb-main.c c->strength.stat[0].uvalue = p->in_band_pwr * 1000; c 371 drivers/media/common/siano/smsdvb-main.c client->last_per = c->block_error.stat[0].uvalue; c 374 drivers/media/common/siano/smsdvb-main.c c->block_error.stat[0].uvalue = 0; c 375 drivers/media/common/siano/smsdvb-main.c c->block_count.stat[0].uvalue = 0; c 376 drivers/media/common/siano/smsdvb-main.c c->block_error.stat[0].scale = FE_SCALE_COUNTER; c 377 drivers/media/common/siano/smsdvb-main.c c->block_count.stat[0].scale = FE_SCALE_COUNTER; c 378 drivers/media/common/siano/smsdvb-main.c c->post_bit_error.stat[0].uvalue = 0; c 379 drivers/media/common/siano/smsdvb-main.c c->post_bit_count.stat[0].uvalue = 0; c 380 drivers/media/common/siano/smsdvb-main.c c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c 381 drivers/media/common/siano/smsdvb-main.c c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; c 388 drivers/media/common/siano/smsdvb-main.c c->isdbt_layer_enabled |= 1 << i; c 389 drivers/media/common/siano/smsdvb-main.c c->layer[i].segment_count = lr->number_of_segments; c 393 drivers/media/common/siano/smsdvb-main.c c->layer[i].modulation = sms_to_modulation(lr->constellation); c 396 drivers/media/common/siano/smsdvb-main.c c->block_error.stat[i + 1].scale = FE_SCALE_COUNTER; c 397 drivers/media/common/siano/smsdvb-main.c c->block_count.stat[i + 1].scale = FE_SCALE_COUNTER; c 398 drivers/media/common/siano/smsdvb-main.c c->block_error.stat[i + 1].uvalue += lr->error_ts_packets; c 399 drivers/media/common/siano/smsdvb-main.c c->block_count.stat[i + 1].uvalue += lr->total_ts_packets; c 402 drivers/media/common/siano/smsdvb-main.c c->block_error.stat[0].uvalue += lr->error_ts_packets; c 403 drivers/media/common/siano/smsdvb-main.c c->block_count.stat[0].uvalue += lr->total_ts_packets; c 406 drivers/media/common/siano/smsdvb-main.c c->post_bit_error.stat[i + 1].scale = FE_SCALE_COUNTER; c 407 drivers/media/common/siano/smsdvb-main.c c->post_bit_count.stat[i + 1].scale = FE_SCALE_COUNTER; c 408 drivers/media/common/siano/smsdvb-main.c c->post_bit_error.stat[i + 1].uvalue += lr->ber_error_count; c 409 drivers/media/common/siano/smsdvb-main.c c->post_bit_count.stat[i + 1].uvalue += lr->ber_bit_count; c 412 drivers/media/common/siano/smsdvb-main.c c->post_bit_error.stat[0].uvalue += lr->ber_error_count; c 413 drivers/media/common/siano/smsdvb-main.c c->post_bit_count.stat[0].uvalue += lr->ber_bit_count; c 421 drivers/media/common/siano/smsdvb-main.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 429 drivers/media/common/siano/smsdvb-main.c c->frequency = p->frequency; c 431 drivers/media/common/siano/smsdvb-main.c c->bandwidth_hz = sms_to_bw(p->bandwidth); c 432 drivers/media/common/siano/smsdvb-main.c c->transmission_mode = sms_to_mode(p->transmission_mode); c 433 drivers/media/common/siano/smsdvb-main.c c->guard_interval = sms_to_guard_interval(p->guard_interval); c 434 drivers/media/common/siano/smsdvb-main.c c->isdbt_partial_reception = p->partial_reception ? 1 : 0; c 440 drivers/media/common/siano/smsdvb-main.c c->isdbt_layer_enabled = 0; c 443 drivers/media/common/siano/smsdvb-main.c c->lna = p->is_external_lna_on ? 
1 : 0; c 446 drivers/media/common/siano/smsdvb-main.c c->cnr.stat[0].svalue = p->SNR * 1000; c 449 drivers/media/common/siano/smsdvb-main.c c->strength.stat[0].uvalue = p->in_band_pwr * 1000; c 455 drivers/media/common/siano/smsdvb-main.c client->last_per = c->block_error.stat[0].uvalue; c 458 drivers/media/common/siano/smsdvb-main.c c->block_error.stat[0].uvalue = 0; c 459 drivers/media/common/siano/smsdvb-main.c c->block_count.stat[0].uvalue = 0; c 460 drivers/media/common/siano/smsdvb-main.c c->block_error.stat[0].scale = FE_SCALE_COUNTER; c 461 drivers/media/common/siano/smsdvb-main.c c->block_count.stat[0].scale = FE_SCALE_COUNTER; c 462 drivers/media/common/siano/smsdvb-main.c c->post_bit_error.stat[0].uvalue = 0; c 463 drivers/media/common/siano/smsdvb-main.c c->post_bit_count.stat[0].uvalue = 0; c 464 drivers/media/common/siano/smsdvb-main.c c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c 465 drivers/media/common/siano/smsdvb-main.c c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; c 467 drivers/media/common/siano/smsdvb-main.c c->post_bit_error.len = n_layers + 1; c 468 drivers/media/common/siano/smsdvb-main.c c->post_bit_count.len = n_layers + 1; c 469 drivers/media/common/siano/smsdvb-main.c c->block_error.len = n_layers + 1; c 470 drivers/media/common/siano/smsdvb-main.c c->block_count.len = n_layers + 1; c 476 drivers/media/common/siano/smsdvb-main.c c->isdbt_layer_enabled |= 1 << i; c 477 drivers/media/common/siano/smsdvb-main.c c->layer[i].segment_count = lr->number_of_segments; c 481 drivers/media/common/siano/smsdvb-main.c c->layer[i].modulation = sms_to_modulation(lr->constellation); c 484 drivers/media/common/siano/smsdvb-main.c c->block_error.stat[i + 1].scale = FE_SCALE_COUNTER; c 485 drivers/media/common/siano/smsdvb-main.c c->block_count.stat[i + 1].scale = FE_SCALE_COUNTER; c 486 drivers/media/common/siano/smsdvb-main.c c->block_error.stat[i + 1].uvalue += lr->error_ts_packets; c 487 drivers/media/common/siano/smsdvb-main.c c->block_count.stat[i + 1].uvalue += lr->total_ts_packets; c 490 drivers/media/common/siano/smsdvb-main.c c->block_error.stat[0].uvalue += lr->error_ts_packets; c 491 drivers/media/common/siano/smsdvb-main.c c->block_count.stat[0].uvalue += lr->total_ts_packets; c 494 drivers/media/common/siano/smsdvb-main.c c->post_bit_error.stat[i + 1].scale = FE_SCALE_COUNTER; c 495 drivers/media/common/siano/smsdvb-main.c c->post_bit_count.stat[i + 1].scale = FE_SCALE_COUNTER; c 496 drivers/media/common/siano/smsdvb-main.c c->post_bit_error.stat[i + 1].uvalue += lr->ber_error_count; c 497 drivers/media/common/siano/smsdvb-main.c c->post_bit_count.stat[i + 1].uvalue += lr->ber_bit_count; c 500 drivers/media/common/siano/smsdvb-main.c c->post_bit_error.stat[0].uvalue += lr->ber_error_count; c 501 drivers/media/common/siano/smsdvb-main.c c->post_bit_count.stat[0].uvalue += lr->ber_bit_count; c 512 drivers/media/common/siano/smsdvb-main.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 585 drivers/media/common/siano/smsdvb-main.c if (client->last_per == c->block_error.stat[0].uvalue) c 779 drivers/media/common/siano/smsdvb-main.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 781 drivers/media/common/siano/smsdvb-main.c s32 power = (s32) c->strength.stat[0].uvalue; c 802 drivers/media/common/siano/smsdvb-main.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 811 drivers/media/common/siano/smsdvb-main.c *snr = ((u32)c->cnr.stat[0].svalue) / 100; c 821 drivers/media/common/siano/smsdvb-main.c struct 
dtv_frontend_properties *c = &fe->dtv_property_cache; c 828 drivers/media/common/siano/smsdvb-main.c *ucblocks = c->block_error.stat[0].uvalue; c 848 drivers/media/common/siano/smsdvb-main.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 869 drivers/media/common/siano/smsdvb-main.c msg.Data[0] = c->frequency; c 872 drivers/media/common/siano/smsdvb-main.c pr_debug("%s: freq %d band %d\n", __func__, c->frequency, c 873 drivers/media/common/siano/smsdvb-main.c c->bandwidth_hz); c 875 drivers/media/common/siano/smsdvb-main.c switch (c->bandwidth_hz / 1000000) { c 914 drivers/media/common/siano/smsdvb-main.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 935 drivers/media/common/siano/smsdvb-main.c if (c->isdbt_sb_segment_idx == -1) c 936 drivers/media/common/siano/smsdvb-main.c c->isdbt_sb_segment_idx = 0; c 938 drivers/media/common/siano/smsdvb-main.c if (!c->isdbt_layer_enabled) c 939 drivers/media/common/siano/smsdvb-main.c c->isdbt_layer_enabled = 7; c 941 drivers/media/common/siano/smsdvb-main.c msg.Data[0] = c->frequency; c 944 drivers/media/common/siano/smsdvb-main.c msg.Data[3] = c->isdbt_sb_segment_idx; c 946 drivers/media/common/siano/smsdvb-main.c if (c->isdbt_partial_reception) { c 948 drivers/media/common/siano/smsdvb-main.c c->isdbt_sb_segment_count > 3) c 950 drivers/media/common/siano/smsdvb-main.c else if (c->isdbt_sb_segment_count > 1) c 955 drivers/media/common/siano/smsdvb-main.c c->bandwidth_hz = 6000000; c 958 drivers/media/common/siano/smsdvb-main.c c->frequency, c->isdbt_sb_segment_count, c 959 drivers/media/common/siano/smsdvb-main.c c->isdbt_sb_segment_idx); c 984 drivers/media/common/siano/smsdvb-main.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 990 drivers/media/common/siano/smsdvb-main.c c->strength.stat[0].uvalue = 0; c 991 drivers/media/common/siano/smsdvb-main.c c->cnr.stat[0].uvalue = 0; c 738 drivers/media/common/tveeprom.c int tveeprom_read(struct i2c_client *c, unsigned char *eedata, int len) c 744 drivers/media/common/tveeprom.c err = i2c_master_send(c, &buf, 1); c 749 drivers/media/common/tveeprom.c err = i2c_master_recv(c, eedata, len); c 1356 drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c int c; c 1384 drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c for (c = 0; c <= V4L2_COLORSPACE_DCI_P3; c++) { c 1389 drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c if (colorspaces[c] == 0) c 1396 drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c csc(c, x, &r, &g, &b); c 1399 drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c colorspace_names[c], c 2291 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c const struct v4l2_rect *c = &tpg->crop; c 2315 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c if (b->left >= c->left && c 2316 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b->left < c->left + c->width) c 2319 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c if (b->left + b->width > c->left && c 2320 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b->left + b->width <= c->left + c->width) c 2333 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c sq->left < c->left + c->width && c 2334 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c sq->left + sq->width >= c->left) { c 2338 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c if (c->left > left) { c 2339 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c width -= c->left - left; c 2340 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c left = c->left; c 2342 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c if (c->left + c->width < left + width) c 2343 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c width -= left 
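The smsdvb entries above repeat one pattern: the demodulator fills fe->dtv_property_cache, giving each DVBv5 statistic a scale (FE_SCALE_DECIBEL, FE_SCALE_COUNTER or FE_SCALE_NOT_AVAILABLE) and a value. A hedged sketch of that pattern follows; the helper name and its raw-reading parameters are hypothetical, not taken from any driver listed here.

/*
 * Hedged sketch of the DVBv5 statistics pattern visible in the siano
 * entries: set each statistic's scale and value when locked, or mark it
 * FE_SCALE_NOT_AVAILABLE otherwise.
 */
#include <media/dvb_frontend.h>

static void mydemod_update_stats(struct dvb_frontend *fe, bool locked,
                                 s64 snr_mdb, u64 new_block_errors,
                                 u64 new_blocks)
{
        struct dtv_frontend_properties *c = &fe->dtv_property_cache;

        c->cnr.len = 1;
        c->block_error.len = 1;
        c->block_count.len = 1;

        if (!locked) {
                c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
                c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
                c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
                return;
        }

        /* FE_SCALE_DECIBEL values are expressed in 0.001 dB units */
        c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
        c->cnr.stat[0].svalue = snr_mdb;

        /* FE_SCALE_COUNTER statistics accumulate monotonically */
        c->block_error.stat[0].scale = FE_SCALE_COUNTER;
        c->block_error.stat[0].uvalue += new_block_errors;
        c->block_count.stat[0].scale = FE_SCALE_COUNTER;
        c->block_count.stat[0].uvalue += new_blocks;
}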
+ width - c->left - c->width; c 2344 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c left -= c->left; c 175 drivers/media/dvb-core/dvb_frontend.c struct dtv_frontend_properties *c, c 179 drivers/media/dvb-core/dvb_frontend.c const struct dtv_frontend_properties *c, c 240 drivers/media/dvb-core/dvb_frontend.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 248 drivers/media/dvb-core/dvb_frontend.c dtv_get_frontend(fe, c, &fepriv->parameters_out); c 385 drivers/media/dvb-core/dvb_frontend.c struct dtv_frontend_properties *c = &fe->dtv_property_cache, tmp; c 386 drivers/media/dvb-core/dvb_frontend.c int original_inversion = c->inversion; c 387 drivers/media/dvb-core/dvb_frontend.c u32 original_frequency = c->frequency; c 391 drivers/media/dvb-core/dvb_frontend.c (c->inversion == INVERSION_AUTO)); c 458 drivers/media/dvb-core/dvb_frontend.c c->frequency += fepriv->lnb_drift; c 460 drivers/media/dvb-core/dvb_frontend.c c->inversion = fepriv->inversion; c 461 drivers/media/dvb-core/dvb_frontend.c tmp = *c; c 464 drivers/media/dvb-core/dvb_frontend.c *c = tmp; c 470 drivers/media/dvb-core/dvb_frontend.c c->frequency = original_frequency; c 471 drivers/media/dvb-core/dvb_frontend.c c->inversion = original_inversion; c 482 drivers/media/dvb-core/dvb_frontend.c struct dtv_frontend_properties *c = &fe->dtv_property_cache, tmp; c 494 drivers/media/dvb-core/dvb_frontend.c tmp = *c; c 497 drivers/media/dvb-core/dvb_frontend.c *c = tmp; c 527 drivers/media/dvb-core/dvb_frontend.c (c->inversion == INVERSION_AUTO)) { c 528 drivers/media/dvb-core/dvb_frontend.c c->inversion = fepriv->inversion; c 648 drivers/media/dvb-core/dvb_frontend.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 752 drivers/media/dvb-core/dvb_frontend.c dtv_property_legacy_params_sync(fe, c, &fepriv->parameters_out); c 891 drivers/media/dvb-core/dvb_frontend.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 915 drivers/media/dvb-core/dvb_frontend.c switch (c->delivery_system) { c 935 drivers/media/dvb-core/dvb_frontend.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 940 drivers/media/dvb-core/dvb_frontend.c switch (c->delivery_system) { c 956 drivers/media/dvb-core/dvb_frontend.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 962 drivers/media/dvb-core/dvb_frontend.c if ((freq_min && c->frequency < freq_min) || c 963 drivers/media/dvb-core/dvb_frontend.c (freq_max && c->frequency > freq_max)) { c 965 drivers/media/dvb-core/dvb_frontend.c fe->dvb->num, fe->id, c->frequency, c 971 drivers/media/dvb-core/dvb_frontend.c switch (c->delivery_system) { c 978 drivers/media/dvb-core/dvb_frontend.c c->symbol_rate < fe->ops.info.symbol_rate_min) || c 980 drivers/media/dvb-core/dvb_frontend.c c->symbol_rate > fe->ops.info.symbol_rate_max)) { c 982 drivers/media/dvb-core/dvb_frontend.c fe->dvb->num, fe->id, c->symbol_rate, c 996 drivers/media/dvb-core/dvb_frontend.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 1000 drivers/media/dvb-core/dvb_frontend.c delsys = c->delivery_system; c 1001 drivers/media/dvb-core/dvb_frontend.c memset(c, 0, offsetof(struct dtv_frontend_properties, strength)); c 1002 drivers/media/dvb-core/dvb_frontend.c c->delivery_system = delsys; c 1005 drivers/media/dvb-core/dvb_frontend.c __func__, c->delivery_system); c 1007 drivers/media/dvb-core/dvb_frontend.c c->transmission_mode = TRANSMISSION_MODE_AUTO; c 1008 drivers/media/dvb-core/dvb_frontend.c c->bandwidth_hz = 0; /* AUTO */ c 1009 drivers/media/dvb-core/dvb_frontend.c 
c->guard_interval = GUARD_INTERVAL_AUTO; c 1010 drivers/media/dvb-core/dvb_frontend.c c->hierarchy = HIERARCHY_AUTO; c 1011 drivers/media/dvb-core/dvb_frontend.c c->symbol_rate = 0; c 1012 drivers/media/dvb-core/dvb_frontend.c c->code_rate_HP = FEC_AUTO; c 1013 drivers/media/dvb-core/dvb_frontend.c c->code_rate_LP = FEC_AUTO; c 1014 drivers/media/dvb-core/dvb_frontend.c c->fec_inner = FEC_AUTO; c 1015 drivers/media/dvb-core/dvb_frontend.c c->rolloff = ROLLOFF_AUTO; c 1016 drivers/media/dvb-core/dvb_frontend.c c->voltage = SEC_VOLTAGE_OFF; c 1017 drivers/media/dvb-core/dvb_frontend.c c->sectone = SEC_TONE_OFF; c 1018 drivers/media/dvb-core/dvb_frontend.c c->pilot = PILOT_AUTO; c 1020 drivers/media/dvb-core/dvb_frontend.c c->isdbt_partial_reception = 0; c 1021 drivers/media/dvb-core/dvb_frontend.c c->isdbt_sb_mode = 0; c 1022 drivers/media/dvb-core/dvb_frontend.c c->isdbt_sb_subchannel = 0; c 1023 drivers/media/dvb-core/dvb_frontend.c c->isdbt_sb_segment_idx = 0; c 1024 drivers/media/dvb-core/dvb_frontend.c c->isdbt_sb_segment_count = 0; c 1025 drivers/media/dvb-core/dvb_frontend.c c->isdbt_layer_enabled = 7; /* All layers (A,B,C) */ c 1027 drivers/media/dvb-core/dvb_frontend.c c->layer[i].fec = FEC_AUTO; c 1028 drivers/media/dvb-core/dvb_frontend.c c->layer[i].modulation = QAM_AUTO; c 1029 drivers/media/dvb-core/dvb_frontend.c c->layer[i].interleaving = 0; c 1030 drivers/media/dvb-core/dvb_frontend.c c->layer[i].segment_count = 0; c 1033 drivers/media/dvb-core/dvb_frontend.c c->stream_id = NO_STREAM_ID_FILTER; c 1034 drivers/media/dvb-core/dvb_frontend.c c->scrambling_sequence_index = 0;/* default sequence */ c 1036 drivers/media/dvb-core/dvb_frontend.c switch (c->delivery_system) { c 1040 drivers/media/dvb-core/dvb_frontend.c c->modulation = QPSK; /* implied for DVB-S in legacy API */ c 1041 drivers/media/dvb-core/dvb_frontend.c c->rolloff = ROLLOFF_35;/* implied for DVB-S */ c 1044 drivers/media/dvb-core/dvb_frontend.c c->modulation = VSB_8; c 1047 drivers/media/dvb-core/dvb_frontend.c c->symbol_rate = 28860000; c 1048 drivers/media/dvb-core/dvb_frontend.c c->rolloff = ROLLOFF_35; c 1049 drivers/media/dvb-core/dvb_frontend.c c->bandwidth_hz = c->symbol_rate / 100 * 135; c 1052 drivers/media/dvb-core/dvb_frontend.c c->modulation = QAM_AUTO; c 1056 drivers/media/dvb-core/dvb_frontend.c c->lna = LNA_AUTO; c 1167 drivers/media/dvb-core/dvb_frontend.c struct dtv_frontend_properties *c, c 1170 drivers/media/dvb-core/dvb_frontend.c c->frequency = p->frequency; c 1171 drivers/media/dvb-core/dvb_frontend.c c->inversion = p->inversion; c 1173 drivers/media/dvb-core/dvb_frontend.c switch (dvbv3_type(c->delivery_system)) { c 1176 drivers/media/dvb-core/dvb_frontend.c c->symbol_rate = p->u.qpsk.symbol_rate; c 1177 drivers/media/dvb-core/dvb_frontend.c c->fec_inner = p->u.qpsk.fec_inner; c 1181 drivers/media/dvb-core/dvb_frontend.c c->symbol_rate = p->u.qam.symbol_rate; c 1182 drivers/media/dvb-core/dvb_frontend.c c->fec_inner = p->u.qam.fec_inner; c 1183 drivers/media/dvb-core/dvb_frontend.c c->modulation = p->u.qam.modulation; c 1190 drivers/media/dvb-core/dvb_frontend.c c->bandwidth_hz = 10000000; c 1193 drivers/media/dvb-core/dvb_frontend.c c->bandwidth_hz = 8000000; c 1196 drivers/media/dvb-core/dvb_frontend.c c->bandwidth_hz = 7000000; c 1199 drivers/media/dvb-core/dvb_frontend.c c->bandwidth_hz = 6000000; c 1202 drivers/media/dvb-core/dvb_frontend.c c->bandwidth_hz = 5000000; c 1205 drivers/media/dvb-core/dvb_frontend.c c->bandwidth_hz = 1712000; c 1208 drivers/media/dvb-core/dvb_frontend.c 
c->bandwidth_hz = 0; c 1211 drivers/media/dvb-core/dvb_frontend.c c->code_rate_HP = p->u.ofdm.code_rate_HP; c 1212 drivers/media/dvb-core/dvb_frontend.c c->code_rate_LP = p->u.ofdm.code_rate_LP; c 1213 drivers/media/dvb-core/dvb_frontend.c c->modulation = p->u.ofdm.constellation; c 1214 drivers/media/dvb-core/dvb_frontend.c c->transmission_mode = p->u.ofdm.transmission_mode; c 1215 drivers/media/dvb-core/dvb_frontend.c c->guard_interval = p->u.ofdm.guard_interval; c 1216 drivers/media/dvb-core/dvb_frontend.c c->hierarchy = p->u.ofdm.hierarchy_information; c 1220 drivers/media/dvb-core/dvb_frontend.c c->modulation = p->u.vsb.modulation; c 1221 drivers/media/dvb-core/dvb_frontend.c if (c->delivery_system == SYS_ATSCMH) c 1223 drivers/media/dvb-core/dvb_frontend.c if ((c->modulation == VSB_8) || (c->modulation == VSB_16)) c 1224 drivers/media/dvb-core/dvb_frontend.c c->delivery_system = SYS_ATSC; c 1226 drivers/media/dvb-core/dvb_frontend.c c->delivery_system = SYS_DVBC_ANNEX_B; c 1231 drivers/media/dvb-core/dvb_frontend.c __func__, c->delivery_system); c 1243 drivers/media/dvb-core/dvb_frontend.c const struct dtv_frontend_properties *c, c 1246 drivers/media/dvb-core/dvb_frontend.c p->frequency = c->frequency; c 1247 drivers/media/dvb-core/dvb_frontend.c p->inversion = c->inversion; c 1249 drivers/media/dvb-core/dvb_frontend.c switch (dvbv3_type(c->delivery_system)) { c 1253 drivers/media/dvb-core/dvb_frontend.c __func__, c->delivery_system); c 1257 drivers/media/dvb-core/dvb_frontend.c p->u.qpsk.symbol_rate = c->symbol_rate; c 1258 drivers/media/dvb-core/dvb_frontend.c p->u.qpsk.fec_inner = c->fec_inner; c 1262 drivers/media/dvb-core/dvb_frontend.c p->u.qam.symbol_rate = c->symbol_rate; c 1263 drivers/media/dvb-core/dvb_frontend.c p->u.qam.fec_inner = c->fec_inner; c 1264 drivers/media/dvb-core/dvb_frontend.c p->u.qam.modulation = c->modulation; c 1268 drivers/media/dvb-core/dvb_frontend.c switch (c->bandwidth_hz) { c 1291 drivers/media/dvb-core/dvb_frontend.c p->u.ofdm.code_rate_HP = c->code_rate_HP; c 1292 drivers/media/dvb-core/dvb_frontend.c p->u.ofdm.code_rate_LP = c->code_rate_LP; c 1293 drivers/media/dvb-core/dvb_frontend.c p->u.ofdm.constellation = c->modulation; c 1294 drivers/media/dvb-core/dvb_frontend.c p->u.ofdm.transmission_mode = c->transmission_mode; c 1295 drivers/media/dvb-core/dvb_frontend.c p->u.ofdm.guard_interval = c->guard_interval; c 1296 drivers/media/dvb-core/dvb_frontend.c p->u.ofdm.hierarchy_information = c->hierarchy; c 1300 drivers/media/dvb-core/dvb_frontend.c p->u.vsb.modulation = c->modulation; c 1317 drivers/media/dvb-core/dvb_frontend.c struct dtv_frontend_properties *c, c 1323 drivers/media/dvb-core/dvb_frontend.c r = fe->ops.get_frontend(fe, c); c 1327 drivers/media/dvb-core/dvb_frontend.c dtv_property_legacy_params_sync(fe, c, p_out); c 1339 drivers/media/dvb-core/dvb_frontend.c const struct dtv_frontend_properties *c, c 1355 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->frequency; c 1358 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->modulation; c 1361 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->bandwidth_hz; c 1364 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->inversion; c 1367 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->symbol_rate; c 1370 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->fec_inner; c 1373 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->pilot; c 1376 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->rolloff; c 1379 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = 
c->delivery_system; c 1382 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->voltage; c 1385 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->sectone; c 1391 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->code_rate_HP; c 1394 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->code_rate_LP; c 1397 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->guard_interval; c 1400 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->transmission_mode; c 1403 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->hierarchy; c 1406 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->interleaving; c 1411 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->isdbt_partial_reception; c 1414 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->isdbt_sb_mode; c 1417 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->isdbt_sb_subchannel; c 1420 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->isdbt_sb_segment_idx; c 1423 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->isdbt_sb_segment_count; c 1426 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->isdbt_layer_enabled; c 1429 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->layer[0].fec; c 1432 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->layer[0].modulation; c 1435 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->layer[0].segment_count; c 1438 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->layer[0].interleaving; c 1441 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->layer[1].fec; c 1444 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->layer[1].modulation; c 1447 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->layer[1].segment_count; c 1450 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->layer[1].interleaving; c 1453 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->layer[2].fec; c 1456 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->layer[2].modulation; c 1459 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->layer[2].segment_count; c 1462 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->layer[2].interleaving; c 1468 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->stream_id; c 1473 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->scrambling_sequence_index; c 1524 drivers/media/dvb-core/dvb_frontend.c tvp->u.data = c->lna; c 1529 drivers/media/dvb-core/dvb_frontend.c tvp->u.st = c->strength; c 1532 drivers/media/dvb-core/dvb_frontend.c tvp->u.st = c->cnr; c 1535 drivers/media/dvb-core/dvb_frontend.c tvp->u.st = c->pre_bit_error; c 1538 drivers/media/dvb-core/dvb_frontend.c tvp->u.st = c->pre_bit_count; c 1541 drivers/media/dvb-core/dvb_frontend.c tvp->u.st = c->post_bit_error; c 1544 drivers/media/dvb-core/dvb_frontend.c tvp->u.st = c->post_bit_count; c 1547 drivers/media/dvb-core/dvb_frontend.c tvp->u.st = c->block_error; c 1550 drivers/media/dvb-core/dvb_frontend.c tvp->u.st = c->block_count; c 1596 drivers/media/dvb-core/dvb_frontend.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 1598 drivers/media/dvb-core/dvb_frontend.c c->delivery_system = delsys; c 1603 drivers/media/dvb-core/dvb_frontend.c if (c->delivery_system == SYS_ISDBT) { c 1608 drivers/media/dvb-core/dvb_frontend.c if (!c->bandwidth_hz) c 1609 drivers/media/dvb-core/dvb_frontend.c c->bandwidth_hz = 6000000; c 1611 drivers/media/dvb-core/dvb_frontend.c c->isdbt_partial_reception = 0; c 1612 drivers/media/dvb-core/dvb_frontend.c c->isdbt_sb_mode = 0; c 1613 drivers/media/dvb-core/dvb_frontend.c c->isdbt_sb_subchannel = 0; c 1614 
drivers/media/dvb-core/dvb_frontend.c c->isdbt_sb_segment_idx = 0; c 1615 drivers/media/dvb-core/dvb_frontend.c c->isdbt_sb_segment_count = 0; c 1616 drivers/media/dvb-core/dvb_frontend.c c->isdbt_layer_enabled = 7; c 1618 drivers/media/dvb-core/dvb_frontend.c c->layer[i].fec = FEC_AUTO; c 1619 drivers/media/dvb-core/dvb_frontend.c c->layer[i].modulation = QAM_AUTO; c 1620 drivers/media/dvb-core/dvb_frontend.c c->layer[i].interleaving = 0; c 1621 drivers/media/dvb-core/dvb_frontend.c c->layer[i].segment_count = 0; c 1625 drivers/media/dvb-core/dvb_frontend.c __func__, c->delivery_system); c 1652 drivers/media/dvb-core/dvb_frontend.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 1672 drivers/media/dvb-core/dvb_frontend.c c->delivery_system = desired_system; c 1755 drivers/media/dvb-core/dvb_frontend.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 1758 drivers/media/dvb-core/dvb_frontend.c if (c->delivery_system == SYS_UNDEFINED) c 1759 drivers/media/dvb-core/dvb_frontend.c c->delivery_system = fe->ops.delsys[0]; c 1765 drivers/media/dvb-core/dvb_frontend.c if (is_dvbv3_delsys(c->delivery_system)) { c 1768 drivers/media/dvb-core/dvb_frontend.c __func__, c->delivery_system); c 1812 drivers/media/dvb-core/dvb_frontend.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 1842 drivers/media/dvb-core/dvb_frontend.c c->frequency = data; c 1845 drivers/media/dvb-core/dvb_frontend.c c->modulation = data; c 1848 drivers/media/dvb-core/dvb_frontend.c c->bandwidth_hz = data; c 1851 drivers/media/dvb-core/dvb_frontend.c c->inversion = data; c 1854 drivers/media/dvb-core/dvb_frontend.c c->symbol_rate = data; c 1857 drivers/media/dvb-core/dvb_frontend.c c->fec_inner = data; c 1860 drivers/media/dvb-core/dvb_frontend.c c->pilot = data; c 1863 drivers/media/dvb-core/dvb_frontend.c c->rolloff = data; c 1869 drivers/media/dvb-core/dvb_frontend.c c->voltage = data; c 1871 drivers/media/dvb-core/dvb_frontend.c (void *)c->voltage); c 1874 drivers/media/dvb-core/dvb_frontend.c c->sectone = data; c 1876 drivers/media/dvb-core/dvb_frontend.c (void *)c->sectone); c 1879 drivers/media/dvb-core/dvb_frontend.c c->code_rate_HP = data; c 1882 drivers/media/dvb-core/dvb_frontend.c c->code_rate_LP = data; c 1885 drivers/media/dvb-core/dvb_frontend.c c->guard_interval = data; c 1888 drivers/media/dvb-core/dvb_frontend.c c->transmission_mode = data; c 1891 drivers/media/dvb-core/dvb_frontend.c c->hierarchy = data; c 1894 drivers/media/dvb-core/dvb_frontend.c c->interleaving = data; c 1899 drivers/media/dvb-core/dvb_frontend.c c->isdbt_partial_reception = data; c 1902 drivers/media/dvb-core/dvb_frontend.c c->isdbt_sb_mode = data; c 1905 drivers/media/dvb-core/dvb_frontend.c c->isdbt_sb_subchannel = data; c 1908 drivers/media/dvb-core/dvb_frontend.c c->isdbt_sb_segment_idx = data; c 1911 drivers/media/dvb-core/dvb_frontend.c c->isdbt_sb_segment_count = data; c 1914 drivers/media/dvb-core/dvb_frontend.c c->isdbt_layer_enabled = data; c 1917 drivers/media/dvb-core/dvb_frontend.c c->layer[0].fec = data; c 1920 drivers/media/dvb-core/dvb_frontend.c c->layer[0].modulation = data; c 1923 drivers/media/dvb-core/dvb_frontend.c c->layer[0].segment_count = data; c 1926 drivers/media/dvb-core/dvb_frontend.c c->layer[0].interleaving = data; c 1929 drivers/media/dvb-core/dvb_frontend.c c->layer[1].fec = data; c 1932 drivers/media/dvb-core/dvb_frontend.c c->layer[1].modulation = data; c 1935 drivers/media/dvb-core/dvb_frontend.c c->layer[1].segment_count = data; c 1938 
drivers/media/dvb-core/dvb_frontend.c c->layer[1].interleaving = data; c 1941 drivers/media/dvb-core/dvb_frontend.c c->layer[2].fec = data; c 1944 drivers/media/dvb-core/dvb_frontend.c c->layer[2].modulation = data; c 1947 drivers/media/dvb-core/dvb_frontend.c c->layer[2].segment_count = data; c 1950 drivers/media/dvb-core/dvb_frontend.c c->layer[2].interleaving = data; c 1956 drivers/media/dvb-core/dvb_frontend.c c->stream_id = data; c 1961 drivers/media/dvb-core/dvb_frontend.c c->scrambling_sequence_index = data; c 1973 drivers/media/dvb-core/dvb_frontend.c c->lna = data; c 1977 drivers/media/dvb-core/dvb_frontend.c c->lna = LNA_AUTO; c 2184 drivers/media/dvb-core/dvb_frontend.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 2196 drivers/media/dvb-core/dvb_frontend.c dtv_property_legacy_params_sync(fe, c, &fepriv->parameters_out); c 2217 drivers/media/dvb-core/dvb_frontend.c switch (c->delivery_system) { c 2220 drivers/media/dvb-core/dvb_frontend.c c->bandwidth_hz = 6000000; c 2234 drivers/media/dvb-core/dvb_frontend.c switch (c->rolloff) { c 2250 drivers/media/dvb-core/dvb_frontend.c c->bandwidth_hz = mult_frac(c->symbol_rate, rolloff, 100); c 2254 drivers/media/dvb-core/dvb_frontend.c c->inversion = INVERSION_AUTO; c 2260 drivers/media/dvb-core/dvb_frontend.c if (c->hierarchy == HIERARCHY_NONE && c->code_rate_LP == FEC_NONE) c 2261 drivers/media/dvb-core/dvb_frontend.c c->code_rate_LP = FEC_AUTO; c 2271 drivers/media/dvb-core/dvb_frontend.c switch (c->delivery_system) { c 2279 drivers/media/dvb-core/dvb_frontend.c fepriv->step_size = c->symbol_rate / 16000; c 2280 drivers/media/dvb-core/dvb_frontend.c fepriv->max_drift = c->symbol_rate / 2000; c 2395 drivers/media/dvb-core/dvb_frontend.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 2463 drivers/media/dvb-core/dvb_frontend.c switch (dvbv3_type(c->delivery_system)) { c 2479 drivers/media/dvb-core/dvb_frontend.c __func__, c->delivery_system); c 2483 drivers/media/dvb-core/dvb_frontend.c __func__, c->delivery_system, info->type); c 2687 drivers/media/dvb-core/dvb_frontend.c err = dtv_property_cache_sync(fe, c, parg); c 59 drivers/media/dvb-core/dvb_net.c static inline __u32 iov_crc32( __u32 c, struct kvec *iov, unsigned int cnt ) c 63 drivers/media/dvb-core/dvb_net.c c = crc32_be( c, iov[j].iov_base, iov[j].iov_len ); c 64 drivers/media/dvb-core/dvb_net.c return c; c 109 drivers/media/dvb-frontends/af9013.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 116 drivers/media/dvb-frontends/af9013.c c->frequency, c->bandwidth_hz); c 126 drivers/media/dvb-frontends/af9013.c if (c->bandwidth_hz != state->bandwidth_hz) { c 129 drivers/media/dvb-frontends/af9013.c coeff_lut[i].bandwidth_hz == c->bandwidth_hz) { c 147 drivers/media/dvb-frontends/af9013.c if (c->bandwidth_hz != state->bandwidth_hz || state->first_tune) { c 221 drivers/media/dvb-frontends/af9013.c switch (c->transmission_mode) { c 235 drivers/media/dvb-frontends/af9013.c switch (c->guard_interval) { c 255 drivers/media/dvb-frontends/af9013.c switch (c->hierarchy) { c 275 drivers/media/dvb-frontends/af9013.c switch (c->modulation) { c 295 drivers/media/dvb-frontends/af9013.c switch (c->code_rate_HP) { c 318 drivers/media/dvb-frontends/af9013.c switch (c->code_rate_LP) { c 343 drivers/media/dvb-frontends/af9013.c switch (c->bandwidth_hz) { c 387 drivers/media/dvb-frontends/af9013.c state->bandwidth_hz = c->bandwidth_hz; c 398 drivers/media/dvb-frontends/af9013.c struct dtv_frontend_properties *c) c 413 drivers/media/dvb-frontends/af9013.c 
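The dvb_frontend.c entries above (dtv_property_process_set/get, the dtv_property_cache defaults and the legacy-parameter sync) are the kernel half of the DVBv5 property interface. For orientation, a hedged userspace sketch of the matching FE_SET_PROPERTY call; the device path and DVB-T tuning values are illustrative assumptions, not taken from this listing.

/*
 * Hedged userspace sketch: each property lands in the kernel's
 * dtv_property_cache shown above, and DTV_TUNE triggers set_frontend.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dvb/frontend.h>

int main(void)
{
        struct dtv_property props[] = {
                { .cmd = DTV_DELIVERY_SYSTEM, .u.data = SYS_DVBT },
                { .cmd = DTV_FREQUENCY,       .u.data = 474000000 }, /* Hz */
                { .cmd = DTV_BANDWIDTH_HZ,    .u.data = 8000000 },
                { .cmd = DTV_TUNE,            .u.data = 0 },
        };
        struct dtv_properties cmdseq = {
                .num = sizeof(props) / sizeof(props[0]),
                .props = props,
        };
        int fd = open("/dev/dvb/adapter0/frontend0", O_RDWR);

        if (fd < 0) {
                perror("open frontend");
                return 1;
        }

        if (ioctl(fd, FE_SET_PROPERTY, &cmdseq) < 0)
                perror("FE_SET_PROPERTY");

        close(fd);
        return 0;
}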
c->modulation = QPSK; c 416 drivers/media/dvb-frontends/af9013.c c->modulation = QAM_16; c 419 drivers/media/dvb-frontends/af9013.c c->modulation = QAM_64; c 425 drivers/media/dvb-frontends/af9013.c c->transmission_mode = TRANSMISSION_MODE_2K; c 428 drivers/media/dvb-frontends/af9013.c c->transmission_mode = TRANSMISSION_MODE_8K; c 433 drivers/media/dvb-frontends/af9013.c c->guard_interval = GUARD_INTERVAL_1_32; c 436 drivers/media/dvb-frontends/af9013.c c->guard_interval = GUARD_INTERVAL_1_16; c 439 drivers/media/dvb-frontends/af9013.c c->guard_interval = GUARD_INTERVAL_1_8; c 442 drivers/media/dvb-frontends/af9013.c c->guard_interval = GUARD_INTERVAL_1_4; c 448 drivers/media/dvb-frontends/af9013.c c->hierarchy = HIERARCHY_NONE; c 451 drivers/media/dvb-frontends/af9013.c c->hierarchy = HIERARCHY_1; c 454 drivers/media/dvb-frontends/af9013.c c->hierarchy = HIERARCHY_2; c 457 drivers/media/dvb-frontends/af9013.c c->hierarchy = HIERARCHY_4; c 463 drivers/media/dvb-frontends/af9013.c c->code_rate_HP = FEC_1_2; c 466 drivers/media/dvb-frontends/af9013.c c->code_rate_HP = FEC_2_3; c 469 drivers/media/dvb-frontends/af9013.c c->code_rate_HP = FEC_3_4; c 472 drivers/media/dvb-frontends/af9013.c c->code_rate_HP = FEC_5_6; c 475 drivers/media/dvb-frontends/af9013.c c->code_rate_HP = FEC_7_8; c 481 drivers/media/dvb-frontends/af9013.c c->code_rate_LP = FEC_1_2; c 484 drivers/media/dvb-frontends/af9013.c c->code_rate_LP = FEC_2_3; c 487 drivers/media/dvb-frontends/af9013.c c->code_rate_LP = FEC_3_4; c 490 drivers/media/dvb-frontends/af9013.c c->code_rate_LP = FEC_5_6; c 493 drivers/media/dvb-frontends/af9013.c c->code_rate_LP = FEC_7_8; c 499 drivers/media/dvb-frontends/af9013.c c->bandwidth_hz = 6000000; c 502 drivers/media/dvb-frontends/af9013.c c->bandwidth_hz = 7000000; c 505 drivers/media/dvb-frontends/af9013.c c->bandwidth_hz = 8000000; c 519 drivers/media/dvb-frontends/af9013.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 629 drivers/media/dvb-frontends/af9013.c c->strength.stat[0].scale = FE_SCALE_DECIBEL; c 630 drivers/media/dvb-frontends/af9013.c c->strength.stat[0].svalue = stmp1; c 633 drivers/media/dvb-frontends/af9013.c c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 711 drivers/media/dvb-frontends/af9013.c c->cnr.stat[0].scale = FE_SCALE_DECIBEL; c 712 drivers/media/dvb-frontends/af9013.c c->cnr.stat[0].svalue = utmp1; c 715 drivers/media/dvb-frontends/af9013.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 768 drivers/media/dvb-frontends/af9013.c c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c 769 drivers/media/dvb-frontends/af9013.c c->post_bit_error.stat[0].uvalue += utmp1; c 770 drivers/media/dvb-frontends/af9013.c c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; c 771 drivers/media/dvb-frontends/af9013.c c->post_bit_count.stat[0].uvalue += utmp2; c 773 drivers/media/dvb-frontends/af9013.c c->block_error.stat[0].scale = FE_SCALE_COUNTER; c 774 drivers/media/dvb-frontends/af9013.c c->block_error.stat[0].uvalue += utmp3; c 775 drivers/media/dvb-frontends/af9013.c c->block_count.stat[0].scale = FE_SCALE_COUNTER; c 776 drivers/media/dvb-frontends/af9013.c c->block_count.stat[0].uvalue += utmp4; c 779 drivers/media/dvb-frontends/af9013.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 780 drivers/media/dvb-frontends/af9013.c c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 782 drivers/media/dvb-frontends/af9013.c c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 783 drivers/media/dvb-frontends/af9013.c 
c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1438 drivers/media/dvb-frontends/af9013.c struct dtv_frontend_properties *c; c 1519 drivers/media/dvb-frontends/af9013.c c = &state->fe.dtv_property_cache; c 1520 drivers/media/dvb-frontends/af9013.c c->strength.len = 1; c 1521 drivers/media/dvb-frontends/af9013.c c->cnr.len = 1; c 1522 drivers/media/dvb-frontends/af9013.c c->post_bit_error.len = 1; c 1523 drivers/media/dvb-frontends/af9013.c c->post_bit_count.len = 1; c 1524 drivers/media/dvb-frontends/af9013.c c->block_error.len = 1; c 1525 drivers/media/dvb-frontends/af9013.c c->block_count.len = 1; c 72 drivers/media/dvb-frontends/af9033.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 284 drivers/media/dvb-frontends/af9033.c c->strength.len = 1; c 285 drivers/media/dvb-frontends/af9033.c c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 286 drivers/media/dvb-frontends/af9033.c c->cnr.len = 1; c 287 drivers/media/dvb-frontends/af9033.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 288 drivers/media/dvb-frontends/af9033.c c->block_count.len = 1; c 289 drivers/media/dvb-frontends/af9033.c c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 290 drivers/media/dvb-frontends/af9033.c c->block_error.len = 1; c 291 drivers/media/dvb-frontends/af9033.c c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 292 drivers/media/dvb-frontends/af9033.c c->post_bit_count.len = 1; c 293 drivers/media/dvb-frontends/af9033.c c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 294 drivers/media/dvb-frontends/af9033.c c->post_bit_error.len = 1; c 295 drivers/media/dvb-frontends/af9033.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 358 drivers/media/dvb-frontends/af9033.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 365 drivers/media/dvb-frontends/af9033.c c->frequency, c->bandwidth_hz); c 368 drivers/media/dvb-frontends/af9033.c switch (c->bandwidth_hz) { c 389 drivers/media/dvb-frontends/af9033.c if (c->bandwidth_hz != dev->bandwidth_hz) { c 392 drivers/media/dvb-frontends/af9033.c coeff_lut[i].bandwidth_hz == c->bandwidth_hz) { c 411 drivers/media/dvb-frontends/af9033.c if (c->bandwidth_hz != dev->bandwidth_hz) { c 449 drivers/media/dvb-frontends/af9033.c dev->bandwidth_hz = c->bandwidth_hz; c 466 drivers/media/dvb-frontends/af9033.c if (c->frequency <= 230000000) c 486 drivers/media/dvb-frontends/af9033.c struct dtv_frontend_properties *c) c 502 drivers/media/dvb-frontends/af9033.c c->transmission_mode = TRANSMISSION_MODE_2K; c 505 drivers/media/dvb-frontends/af9033.c c->transmission_mode = TRANSMISSION_MODE_8K; c 511 drivers/media/dvb-frontends/af9033.c c->guard_interval = GUARD_INTERVAL_1_32; c 514 drivers/media/dvb-frontends/af9033.c c->guard_interval = GUARD_INTERVAL_1_16; c 517 drivers/media/dvb-frontends/af9033.c c->guard_interval = GUARD_INTERVAL_1_8; c 520 drivers/media/dvb-frontends/af9033.c c->guard_interval = GUARD_INTERVAL_1_4; c 526 drivers/media/dvb-frontends/af9033.c c->hierarchy = HIERARCHY_NONE; c 529 drivers/media/dvb-frontends/af9033.c c->hierarchy = HIERARCHY_1; c 532 drivers/media/dvb-frontends/af9033.c c->hierarchy = HIERARCHY_2; c 535 drivers/media/dvb-frontends/af9033.c c->hierarchy = HIERARCHY_4; c 541 drivers/media/dvb-frontends/af9033.c c->modulation = QPSK; c 544 drivers/media/dvb-frontends/af9033.c c->modulation = QAM_16; c 547 drivers/media/dvb-frontends/af9033.c c->modulation = QAM_64; c 553 drivers/media/dvb-frontends/af9033.c c->bandwidth_hz = 6000000; c 556 drivers/media/dvb-frontends/af9033.c 
c->bandwidth_hz = 7000000; c 559 drivers/media/dvb-frontends/af9033.c c->bandwidth_hz = 8000000; c 565 drivers/media/dvb-frontends/af9033.c c->code_rate_HP = FEC_1_2; c 568 drivers/media/dvb-frontends/af9033.c c->code_rate_HP = FEC_2_3; c 571 drivers/media/dvb-frontends/af9033.c c->code_rate_HP = FEC_3_4; c 574 drivers/media/dvb-frontends/af9033.c c->code_rate_HP = FEC_5_6; c 577 drivers/media/dvb-frontends/af9033.c c->code_rate_HP = FEC_7_8; c 580 drivers/media/dvb-frontends/af9033.c c->code_rate_HP = FEC_NONE; c 586 drivers/media/dvb-frontends/af9033.c c->code_rate_LP = FEC_1_2; c 589 drivers/media/dvb-frontends/af9033.c c->code_rate_LP = FEC_2_3; c 592 drivers/media/dvb-frontends/af9033.c c->code_rate_LP = FEC_3_4; c 595 drivers/media/dvb-frontends/af9033.c c->code_rate_LP = FEC_5_6; c 598 drivers/media/dvb-frontends/af9033.c c->code_rate_LP = FEC_7_8; c 601 drivers/media/dvb-frontends/af9033.c c->code_rate_LP = FEC_NONE; c 615 drivers/media/dvb-frontends/af9033.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 670 drivers/media/dvb-frontends/af9033.c c->strength.len = 1; c 671 drivers/media/dvb-frontends/af9033.c c->strength.stat[0].scale = FE_SCALE_DECIBEL; c 672 drivers/media/dvb-frontends/af9033.c c->strength.stat[0].svalue = tmp; c 674 drivers/media/dvb-frontends/af9033.c c->strength.len = 1; c 675 drivers/media/dvb-frontends/af9033.c c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 764 drivers/media/dvb-frontends/af9033.c c->cnr.stat[0].scale = FE_SCALE_DECIBEL; c 765 drivers/media/dvb-frontends/af9033.c c->cnr.stat[0].svalue = utmp1; c 767 drivers/media/dvb-frontends/af9033.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 795 drivers/media/dvb-frontends/af9033.c c->block_count.len = 1; c 796 drivers/media/dvb-frontends/af9033.c c->block_count.stat[0].scale = FE_SCALE_COUNTER; c 797 drivers/media/dvb-frontends/af9033.c c->block_count.stat[0].uvalue = dev->total_block_count; c 799 drivers/media/dvb-frontends/af9033.c c->block_error.len = 1; c 800 drivers/media/dvb-frontends/af9033.c c->block_error.stat[0].scale = FE_SCALE_COUNTER; c 801 drivers/media/dvb-frontends/af9033.c c->block_error.stat[0].uvalue = dev->error_block_count; c 803 drivers/media/dvb-frontends/af9033.c c->post_bit_count.len = 1; c 804 drivers/media/dvb-frontends/af9033.c c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; c 805 drivers/media/dvb-frontends/af9033.c c->post_bit_count.stat[0].uvalue = dev->post_bit_count; c 807 drivers/media/dvb-frontends/af9033.c c->post_bit_error.len = 1; c 808 drivers/media/dvb-frontends/af9033.c c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c 809 drivers/media/dvb-frontends/af9033.c c->post_bit_error.stat[0].uvalue = dev->post_bit_error; c 822 drivers/media/dvb-frontends/af9033.c struct dtv_frontend_properties *c = &dev->fe.dtv_property_cache; c 829 drivers/media/dvb-frontends/af9033.c if (c->cnr.stat[0].scale == FE_SCALE_DECIBEL) { c 833 drivers/media/dvb-frontends/af9033.c *snr = div_s64(c->cnr.stat[0].svalue, 100); c 836 drivers/media/dvb-frontends/af9033.c *snr = div_s64(c->cnr.stat[0].svalue, 1000); c 872 drivers/media/dvb-frontends/af9033.c struct dtv_frontend_properties *c = &dev->fe.dtv_property_cache; c 896 drivers/media/dvb-frontends/af9033.c if (c->frequency <= 300000000) c 28 drivers/media/dvb-frontends/as102_fe.c uint8_t c; c 32 drivers/media/dvb-frontends/as102_fe.c c = CODE_RATE_1_2; c 35 drivers/media/dvb-frontends/as102_fe.c c = CODE_RATE_2_3; c 38 drivers/media/dvb-frontends/as102_fe.c c = CODE_RATE_3_4; c 41 
drivers/media/dvb-frontends/as102_fe.c c = CODE_RATE_5_6; c 44 drivers/media/dvb-frontends/as102_fe.c c = CODE_RATE_7_8; c 47 drivers/media/dvb-frontends/as102_fe.c c = CODE_RATE_UNKNOWN; c 51 drivers/media/dvb-frontends/as102_fe.c return c; c 57 drivers/media/dvb-frontends/as102_fe.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 61 drivers/media/dvb-frontends/as102_fe.c tune_args.freq = c->frequency / 1000; c 66 drivers/media/dvb-frontends/as102_fe.c switch (c->bandwidth_hz) { c 80 drivers/media/dvb-frontends/as102_fe.c switch (c->guard_interval) { c 99 drivers/media/dvb-frontends/as102_fe.c switch (c->modulation) { c 114 drivers/media/dvb-frontends/as102_fe.c switch (c->transmission_mode) { c 125 drivers/media/dvb-frontends/as102_fe.c switch (c->hierarchy) { c 144 drivers/media/dvb-frontends/as102_fe.c c->frequency, c 153 drivers/media/dvb-frontends/as102_fe.c ((c->code_rate_LP == FEC_NONE) || c 154 drivers/media/dvb-frontends/as102_fe.c (c->code_rate_HP == FEC_NONE))) { c 156 drivers/media/dvb-frontends/as102_fe.c if (c->code_rate_LP == FEC_NONE) { c 159 drivers/media/dvb-frontends/as102_fe.c as102_fe_get_code_rate(c->code_rate_HP); c 162 drivers/media/dvb-frontends/as102_fe.c if (c->code_rate_HP == FEC_NONE) { c 165 drivers/media/dvb-frontends/as102_fe.c as102_fe_get_code_rate(c->code_rate_LP); c 177 drivers/media/dvb-frontends/as102_fe.c as102_fe_get_code_rate(c->code_rate_HP); c 185 drivers/media/dvb-frontends/as102_fe.c struct dtv_frontend_properties *c) c 199 drivers/media/dvb-frontends/as102_fe.c c->modulation = QPSK; c 202 drivers/media/dvb-frontends/as102_fe.c c->modulation = QAM_16; c 205 drivers/media/dvb-frontends/as102_fe.c c->modulation = QAM_64; c 212 drivers/media/dvb-frontends/as102_fe.c c->hierarchy = HIERARCHY_NONE; c 215 drivers/media/dvb-frontends/as102_fe.c c->hierarchy = HIERARCHY_1; c 218 drivers/media/dvb-frontends/as102_fe.c c->hierarchy = HIERARCHY_2; c 221 drivers/media/dvb-frontends/as102_fe.c c->hierarchy = HIERARCHY_4; c 228 drivers/media/dvb-frontends/as102_fe.c c->code_rate_HP = FEC_1_2; c 231 drivers/media/dvb-frontends/as102_fe.c c->code_rate_HP = FEC_2_3; c 234 drivers/media/dvb-frontends/as102_fe.c c->code_rate_HP = FEC_3_4; c 237 drivers/media/dvb-frontends/as102_fe.c c->code_rate_HP = FEC_5_6; c 240 drivers/media/dvb-frontends/as102_fe.c c->code_rate_HP = FEC_7_8; c 247 drivers/media/dvb-frontends/as102_fe.c c->code_rate_LP = FEC_1_2; c 250 drivers/media/dvb-frontends/as102_fe.c c->code_rate_LP = FEC_2_3; c 253 drivers/media/dvb-frontends/as102_fe.c c->code_rate_LP = FEC_3_4; c 256 drivers/media/dvb-frontends/as102_fe.c c->code_rate_LP = FEC_5_6; c 259 drivers/media/dvb-frontends/as102_fe.c c->code_rate_LP = FEC_7_8; c 266 drivers/media/dvb-frontends/as102_fe.c c->guard_interval = GUARD_INTERVAL_1_32; c 269 drivers/media/dvb-frontends/as102_fe.c c->guard_interval = GUARD_INTERVAL_1_16; c 272 drivers/media/dvb-frontends/as102_fe.c c->guard_interval = GUARD_INTERVAL_1_8; c 275 drivers/media/dvb-frontends/as102_fe.c c->guard_interval = GUARD_INTERVAL_1_4; c 282 drivers/media/dvb-frontends/as102_fe.c c->transmission_mode = TRANSMISSION_MODE_2K; c 285 drivers/media/dvb-frontends/as102_fe.c c->transmission_mode = TRANSMISSION_MODE_8K; c 288 drivers/media/dvb-frontends/atbm8830.c struct dtv_frontend_properties *c) c 294 drivers/media/dvb-frontends/atbm8830.c c->inversion = INVERSION_OFF; c 297 drivers/media/dvb-frontends/atbm8830.c c->bandwidth_hz = 8000000; c 299 drivers/media/dvb-frontends/atbm8830.c c->code_rate_HP = FEC_AUTO; c 300 
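The af9013/af9033/as102 entries show the driver-side counterpart: set_frontend() reads the already-validated cache (c->frequency, c->bandwidth_hz, c->modulation, ...) and maps it onto chip-specific codes. A hedged sketch of that shape; the register address, the bandwidth codes and mydemod_wr_reg() are hypothetical and stubbed so the sketch stays self-contained.

/*
 * Hedged sketch of the demod set_frontend() pattern in the af9013/af9033/
 * as102 entries: pull parameters from fe->dtv_property_cache, map them to
 * chip codes, program tuner then demodulator.
 */
#include <linux/errno.h>
#include <media/dvb_frontend.h>

/* hypothetical register-write helper, stubbed for the sketch */
static int mydemod_wr_reg(struct dvb_frontend *fe, u8 reg, u8 val)
{
        (void)fe;
        (void)reg;
        (void)val;
        return 0;
}

static int mydemod_set_frontend(struct dvb_frontend *fe)
{
        struct dtv_frontend_properties *c = &fe->dtv_property_cache;
        u8 bw_code;

        /* map the cached bandwidth onto a chip-specific code */
        switch (c->bandwidth_hz) {
        case 6000000:
                bw_code = 0;
                break;
        case 7000000:
                bw_code = 1;
                break;
        case 8000000:
                bw_code = 2;
                break;
        default:
                return -EINVAL;
        }

        /* program the tuner first, then the demodulator */
        if (fe->ops.tuner_ops.set_params)
                fe->ops.tuner_ops.set_params(fe);

        return mydemod_wr_reg(fe, 0x10, bw_code);
}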
drivers/media/dvb-frontends/atbm8830.c c->code_rate_LP = FEC_AUTO; c 302 drivers/media/dvb-frontends/atbm8830.c c->modulation = QAM_AUTO; c 305 drivers/media/dvb-frontends/atbm8830.c c->transmission_mode = TRANSMISSION_MODE_AUTO; c 308 drivers/media/dvb-frontends/atbm8830.c c->guard_interval = GUARD_INTERVAL_AUTO; c 311 drivers/media/dvb-frontends/atbm8830.c c->hierarchy = HIERARCHY_NONE; c 748 drivers/media/dvb-frontends/au8522_decoder.c state->c = client; c 598 drivers/media/dvb-frontends/au8522_dig.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 602 drivers/media/dvb-frontends/au8522_dig.c dprintk("%s(frequency=%d)\n", __func__, c->frequency); c 604 drivers/media/dvb-frontends/au8522_dig.c if ((state->current_frequency == c->frequency) && c 605 drivers/media/dvb-frontends/au8522_dig.c (state->current_modulation == c->modulation)) c 627 drivers/media/dvb-frontends/au8522_dig.c au8522_enable_modulation(fe, c->modulation); c 629 drivers/media/dvb-frontends/au8522_dig.c state->current_frequency = c->frequency; c 807 drivers/media/dvb-frontends/au8522_dig.c struct dtv_frontend_properties *c) c 811 drivers/media/dvb-frontends/au8522_dig.c c->frequency = state->current_frequency; c 812 drivers/media/dvb-frontends/au8522_dig.c c->modulation = state->current_modulation; c 39 drivers/media/dvb-frontends/au8522_priv.h struct i2c_client *c; c 365 drivers/media/dvb-frontends/bcm3510.c struct bcm3510_hab_cmd_tune c; c 366 drivers/media/dvb-frontends/bcm3510.c memset(&c,0,sizeof(struct bcm3510_hab_cmd_tune)); c 369 drivers/media/dvb-frontends/bcm3510.c c.length = 0x10; c 370 drivers/media/dvb-frontends/bcm3510.c c.clock_width = 0; c 373 drivers/media/dvb-frontends/bcm3510.c c.misc = 0x10; c 375 drivers/media/dvb-frontends/bcm3510.c c.TUNCTL_state = 0x40; c 378 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[0].ctrl.size = BITS_8; c 379 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[0].data = 0x80 | bc; c 382 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[1].ctrl.size = BITS_8; c 383 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[1].data = 4; c 386 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[2].ctrl.size = BITS_3; c 387 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[2].data = 0x20; c 390 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[3].ctrl.size = BITS_3; c 391 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[3].ctrl.clk_off = 1; c 392 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[3].ctrl.cs0 = 1; c 393 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[3].data = 0x40; c 396 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[4].ctrl.size = BITS_8; c 397 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[4].data = n >> 3; c 400 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[5].ctrl.size = BITS_8; c 401 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[5].data = ((n & 0x7) << 5) | (a >> 2); c 404 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[6].ctrl.size = BITS_3; c 405 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[6].data = (a << 6) & 0xdf; c 408 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[7].ctrl.size = BITS_3; c 409 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[7].ctrl.clk_off = 1; c 410 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[7].ctrl.cs0 = 1; c 411 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[7].data = 0x40; c 414 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[8].ctrl.size = BITS_8; c 415 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[8].data = 0x80; c 418 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[9].ctrl.size = BITS_8; c 419 
drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[9].data = 0x10; c 422 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[10].ctrl.size = BITS_3; c 423 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[10].data = 0x20; c 426 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[11].ctrl.size = BITS_3; c 427 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[11].ctrl.clk_off = 1; c 428 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[11].ctrl.cs1 = 1; c 429 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[11].data = 0x40; c 432 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[12].ctrl.size = BITS_8; c 433 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[12].data = 0x2a; c 436 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[13].ctrl.size = BITS_8; c 437 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[13].data = 0x8e; c 440 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[14].ctrl.size = BITS_3; c 441 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[14].data = 0; c 444 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[15].ctrl.size = BITS_3; c 445 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[15].ctrl.clk_off = 1; c 446 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[15].ctrl.cs1 = 1; c 447 drivers/media/dvb-frontends/bcm3510.c c.ctl_dat[15].data = 0x40; c 449 drivers/media/dvb-frontends/bcm3510.c return bcm3510_do_hab_cmd(st,CMD_TUNE, MSGID_TUNE,(u8 *) &c,sizeof(c), NULL, 0); c 497 drivers/media/dvb-frontends/bcm3510.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 504 drivers/media/dvb-frontends/bcm3510.c switch (c->modulation) { c 571 drivers/media/dvb-frontends/bcm3510.c ret = bcm3510_set_freq(st, c->frequency); c 763 drivers/media/dvb-frontends/bcm3510.c struct bcm3510_hab_cmd_set_agc c; c 785 drivers/media/dvb-frontends/bcm3510.c memset(&c,0,1); c 786 drivers/media/dvb-frontends/bcm3510.c c.SEL = 1; c 787 drivers/media/dvb-frontends/bcm3510.c bcm3510_do_hab_cmd(st,CMD_AUTO_PARAM,MSGID_SET_RF_AGC_SEL,(u8 *)&c,sizeof(c),NULL,0); c 317 drivers/media/dvb-frontends/cx22700.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 328 drivers/media/dvb-frontends/cx22700.c cx22700_set_inversion(state, c->inversion); c 329 drivers/media/dvb-frontends/cx22700.c cx22700_set_tps(state, c); c 337 drivers/media/dvb-frontends/cx22700.c struct dtv_frontend_properties *c) c 342 drivers/media/dvb-frontends/cx22700.c c->inversion = reg09 & 0x1 ? INVERSION_ON : INVERSION_OFF; c 343 drivers/media/dvb-frontends/cx22700.c return cx22700_get_tps(state, c); c 554 drivers/media/dvb-frontends/cx22702.c struct dtv_frontend_properties *c) c 560 drivers/media/dvb-frontends/cx22702.c c->inversion = reg0C & 0x1 ? 
INVERSION_ON : INVERSION_OFF; c 561 drivers/media/dvb-frontends/cx22702.c return cx22702_get_tps(state, c); c 467 drivers/media/dvb-frontends/cx24113.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 473 drivers/media/dvb-frontends/cx24113.c bw = ((c->symbol_rate/100) * roll_off) / 1000; c 479 drivers/media/dvb-frontends/cx24113.c cx24113_set_frequency(state, c->frequency); c 1205 drivers/media/dvb-frontends/cx24116.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 1212 drivers/media/dvb-frontends/cx24116.c switch (c->delivery_system) { c 1217 drivers/media/dvb-frontends/cx24116.c if (c->modulation != QPSK) { c 1219 drivers/media/dvb-frontends/cx24116.c __func__, c->modulation); c 1227 drivers/media/dvb-frontends/cx24116.c if (c->rolloff != ROLLOFF_35) { c 1229 drivers/media/dvb-frontends/cx24116.c __func__, c->rolloff); c 1242 drivers/media/dvb-frontends/cx24116.c if (c->modulation != PSK_8 && c->modulation != QPSK) { c 1244 drivers/media/dvb-frontends/cx24116.c __func__, c->modulation); c 1248 drivers/media/dvb-frontends/cx24116.c switch (c->pilot) { c 1250 drivers/media/dvb-frontends/cx24116.c state->dnxt.pilot_val = (c->modulation == QPSK) c 1262 drivers/media/dvb-frontends/cx24116.c __func__, c->pilot); c 1266 drivers/media/dvb-frontends/cx24116.c switch (c->rolloff) { c 1279 drivers/media/dvb-frontends/cx24116.c __func__, c->rolloff); c 1286 drivers/media/dvb-frontends/cx24116.c __func__, c->delivery_system); c 1289 drivers/media/dvb-frontends/cx24116.c state->dnxt.delsys = c->delivery_system; c 1290 drivers/media/dvb-frontends/cx24116.c state->dnxt.modulation = c->modulation; c 1291 drivers/media/dvb-frontends/cx24116.c state->dnxt.frequency = c->frequency; c 1292 drivers/media/dvb-frontends/cx24116.c state->dnxt.pilot = c->pilot; c 1293 drivers/media/dvb-frontends/cx24116.c state->dnxt.rolloff = c->rolloff; c 1295 drivers/media/dvb-frontends/cx24116.c ret = cx24116_set_inversion(state, c->inversion); c 1300 drivers/media/dvb-frontends/cx24116.c ret = cx24116_set_fec(state, c->delivery_system, c->modulation, c->fec_inner); c 1304 drivers/media/dvb-frontends/cx24116.c ret = cx24116_set_symbolrate(state, c->symbol_rate); c 1299 drivers/media/dvb-frontends/cx24117.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 1308 drivers/media/dvb-frontends/cx24117.c switch (c->delivery_system) { c 1314 drivers/media/dvb-frontends/cx24117.c if (c->modulation != QPSK) { c 1317 drivers/media/dvb-frontends/cx24117.c __func__, state->demod, c->modulation); c 1336 drivers/media/dvb-frontends/cx24117.c if (c->modulation != PSK_8 && c->modulation != QPSK) { c 1339 drivers/media/dvb-frontends/cx24117.c __func__, state->demod, c->modulation); c 1343 drivers/media/dvb-frontends/cx24117.c switch (c->pilot) { c 1356 drivers/media/dvb-frontends/cx24117.c __func__, state->demod, c->pilot); c 1360 drivers/media/dvb-frontends/cx24117.c switch (c->rolloff) { c 1378 drivers/media/dvb-frontends/cx24117.c KBUILD_MODNAME, state->demod, c->rolloff); c 1386 drivers/media/dvb-frontends/cx24117.c KBUILD_MODNAME, state->demod, c->delivery_system); c 1390 drivers/media/dvb-frontends/cx24117.c state->dnxt.delsys = c->delivery_system; c 1391 drivers/media/dvb-frontends/cx24117.c state->dnxt.modulation = c->modulation; c 1392 drivers/media/dvb-frontends/cx24117.c state->dnxt.frequency = c->frequency; c 1393 drivers/media/dvb-frontends/cx24117.c state->dnxt.pilot = c->pilot; c 1394 drivers/media/dvb-frontends/cx24117.c state->dnxt.rolloff = c->rolloff; c 1396 
drivers/media/dvb-frontends/cx24117.c ret = cx24117_set_inversion(state, c->inversion); c 1401 drivers/media/dvb-frontends/cx24117.c c->delivery_system, c->modulation, c->fec_inner); c 1405 drivers/media/dvb-frontends/cx24117.c ret = cx24117_set_symbolrate(state, c->symbol_rate); c 1554 drivers/media/dvb-frontends/cx24117.c struct dtv_frontend_properties *c) c 1584 drivers/media/dvb-frontends/cx24117.c c->inversion = INVERSION_OFF; c 1586 drivers/media/dvb-frontends/cx24117.c c->inversion = INVERSION_ON; c 1590 drivers/media/dvb-frontends/cx24117.c if (c->delivery_system == SYS_DVBS2) { c 1597 drivers/media/dvb-frontends/cx24117.c c->modulation = cx24117_modfec_modes[idx].modulation; c 1598 drivers/media/dvb-frontends/cx24117.c c->fec_inner = cx24117_modfec_modes[idx].fec; c 1603 drivers/media/dvb-frontends/cx24117.c c->frequency = freq + freq_os; c 1607 drivers/media/dvb-frontends/cx24117.c c->symbol_rate = -1000 * srate_os + state->dcur.symbol_rate; c 324 drivers/media/dvb-frontends/cx24120.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 326 drivers/media/dvb-frontends/cx24120.c if (c->cnr.stat[0].scale != FE_SCALE_DECIBEL) c 329 drivers/media/dvb-frontends/cx24120.c *snr = div_s64(c->cnr.stat[0].svalue, 100); c 337 drivers/media/dvb-frontends/cx24120.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 339 drivers/media/dvb-frontends/cx24120.c if (c->post_bit_error.stat[0].scale != FE_SCALE_COUNTER) { c 344 drivers/media/dvb-frontends/cx24120.c *ber = c->post_bit_error.stat[0].uvalue - state->ber_prev; c 345 drivers/media/dvb-frontends/cx24120.c state->ber_prev = c->post_bit_error.stat[0].uvalue; c 429 drivers/media/dvb-frontends/cx24120.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 431 drivers/media/dvb-frontends/cx24120.c if (c->strength.stat[0].scale != FE_SCALE_RELATIVE) c 434 drivers/media/dvb-frontends/cx24120.c *signal_strength = c->strength.stat[0].uvalue; c 607 drivers/media/dvb-frontends/cx24120.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 637 drivers/media/dvb-frontends/cx24120.c c->strength.stat[0].scale = FE_SCALE_RELATIVE; c 638 drivers/media/dvb-frontends/cx24120.c c->strength.stat[0].uvalue = sig; c 640 drivers/media/dvb-frontends/cx24120.c c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 652 drivers/media/dvb-frontends/cx24120.c c->cnr.stat[0].scale = FE_SCALE_DECIBEL; c 653 drivers/media/dvb-frontends/cx24120.c c->cnr.stat[0].svalue = cnr; c 655 drivers/media/dvb-frontends/cx24120.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 660 drivers/media/dvb-frontends/cx24120.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 661 drivers/media/dvb-frontends/cx24120.c c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 662 drivers/media/dvb-frontends/cx24120.c c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 663 drivers/media/dvb-frontends/cx24120.c c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 678 drivers/media/dvb-frontends/cx24120.c c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c 679 drivers/media/dvb-frontends/cx24120.c c->post_bit_error.stat[0].uvalue += ber; c 681 drivers/media/dvb-frontends/cx24120.c c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; c 682 drivers/media/dvb-frontends/cx24120.c c->post_bit_count.stat[0].uvalue += CX24120_BER_WSIZE; c 695 drivers/media/dvb-frontends/cx24120.c state->ucb_offset = c->block_error.stat[0].uvalue; c 697 drivers/media/dvb-frontends/cx24120.c c->block_error.stat[0].scale = FE_SCALE_COUNTER; c 698 
drivers/media/dvb-frontends/cx24120.c c->block_error.stat[0].uvalue = ucb + state->ucb_offset; c 700 drivers/media/dvb-frontends/cx24120.c c->block_count.stat[0].scale = FE_SCALE_COUNTER; c 701 drivers/media/dvb-frontends/cx24120.c c->block_count.stat[0].uvalue += state->bitrate / 8 / 208; c 796 drivers/media/dvb-frontends/cx24120.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 822 drivers/media/dvb-frontends/cx24120.c c->modulation = modfec_lookup_table[idx].mod; c 823 drivers/media/dvb-frontends/cx24120.c c->fec_inner = modfec_lookup_table[idx].fec; c 824 drivers/media/dvb-frontends/cx24120.c c->pilot = (ret & 0x80) ? PILOT_ON : PILOT_OFF; c 827 drivers/media/dvb-frontends/cx24120.c c->modulation, c->fec_inner, c->pilot); c 836 drivers/media/dvb-frontends/cx24120.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 843 drivers/media/dvb-frontends/cx24120.c tmp = (u64)c->symbol_rate * rate; c 915 drivers/media/dvb-frontends/cx24120.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 929 drivers/media/dvb-frontends/cx24120.c if (clock_ratios_table[idx].mod != c->modulation) c 931 drivers/media/dvb-frontends/cx24120.c if (clock_ratios_table[idx].fec != c->fec_inner) c 933 drivers/media/dvb-frontends/cx24120.c if (clock_ratios_table[idx].pilot != c->pilot) c 1133 drivers/media/dvb-frontends/cx24120.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 1138 drivers/media/dvb-frontends/cx24120.c switch (c->delivery_system) { c 1148 drivers/media/dvb-frontends/cx24120.c c->delivery_system); c 1152 drivers/media/dvb-frontends/cx24120.c state->dnxt.delsys = c->delivery_system; c 1153 drivers/media/dvb-frontends/cx24120.c state->dnxt.modulation = c->modulation; c 1154 drivers/media/dvb-frontends/cx24120.c state->dnxt.frequency = c->frequency; c 1155 drivers/media/dvb-frontends/cx24120.c state->dnxt.pilot = c->pilot; c 1157 drivers/media/dvb-frontends/cx24120.c ret = cx24120_set_inversion(state, c->inversion); c 1161 drivers/media/dvb-frontends/cx24120.c ret = cx24120_set_fec(state, c->modulation, c->fec_inner); c 1165 drivers/media/dvb-frontends/cx24120.c ret = cx24120_set_pilot(state, c->pilot); c 1169 drivers/media/dvb-frontends/cx24120.c ret = cx24120_set_symbolrate(state, c->symbol_rate); c 1269 drivers/media/dvb-frontends/cx24120.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 1448 drivers/media/dvb-frontends/cx24120.c c->strength.len = 1; c 1449 drivers/media/dvb-frontends/cx24120.c c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1450 drivers/media/dvb-frontends/cx24120.c c->cnr.len = 1; c 1451 drivers/media/dvb-frontends/cx24120.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1452 drivers/media/dvb-frontends/cx24120.c c->post_bit_error.len = 1; c 1453 drivers/media/dvb-frontends/cx24120.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1454 drivers/media/dvb-frontends/cx24120.c c->post_bit_count.len = 1; c 1455 drivers/media/dvb-frontends/cx24120.c c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1456 drivers/media/dvb-frontends/cx24120.c c->block_error.len = 1; c 1457 drivers/media/dvb-frontends/cx24120.c c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1458 drivers/media/dvb-frontends/cx24120.c c->block_count.len = 1; c 1459 drivers/media/dvb-frontends/cx24120.c c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1497 drivers/media/dvb-frontends/cx24120.c struct dtv_frontend_properties *c) c 1514 drivers/media/dvb-frontends/cx24120.c c->frequency = (freq3 << 16) | (freq2 << 
8) | freq1; c 1515 drivers/media/dvb-frontends/cx24120.c dev_dbg(&state->i2c->dev, "frequency = %d\n", c->frequency); c 1534 drivers/media/dvb-frontends/cx24120.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 1536 drivers/media/dvb-frontends/cx24120.c if (c->block_error.stat[0].scale != FE_SCALE_COUNTER) { c 1541 drivers/media/dvb-frontends/cx24120.c *ucblocks = c->block_error.stat[0].uvalue - state->ucb_offset; c 15 drivers/media/dvb-frontends/cxd2820r_c.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 40 drivers/media/dvb-frontends/cxd2820r_c.c c->delivery_system, c->modulation, c->frequency, c 41 drivers/media/dvb-frontends/cxd2820r_c.c c->symbol_rate, c->inversion); c 89 drivers/media/dvb-frontends/cxd2820r_c.c struct dtv_frontend_properties *c) c 103 drivers/media/dvb-frontends/cxd2820r_c.c c->symbol_rate = 2500 * ((buf[0] & 0x0f) << 8 | buf[1]); c 111 drivers/media/dvb-frontends/cxd2820r_c.c c->modulation = QAM_16; c 114 drivers/media/dvb-frontends/cxd2820r_c.c c->modulation = QAM_32; c 117 drivers/media/dvb-frontends/cxd2820r_c.c c->modulation = QAM_64; c 120 drivers/media/dvb-frontends/cxd2820r_c.c c->modulation = QAM_128; c 123 drivers/media/dvb-frontends/cxd2820r_c.c c->modulation = QAM_256; c 129 drivers/media/dvb-frontends/cxd2820r_c.c c->inversion = INVERSION_OFF; c 132 drivers/media/dvb-frontends/cxd2820r_c.c c->inversion = INVERSION_ON; c 146 drivers/media/dvb-frontends/cxd2820r_c.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 188 drivers/media/dvb-frontends/cxd2820r_c.c c->strength.len = 1; c 189 drivers/media/dvb-frontends/cxd2820r_c.c c->strength.stat[0].scale = FE_SCALE_RELATIVE; c 190 drivers/media/dvb-frontends/cxd2820r_c.c c->strength.stat[0].uvalue = strength; c 192 drivers/media/dvb-frontends/cxd2820r_c.c c->strength.len = 1; c 193 drivers/media/dvb-frontends/cxd2820r_c.c c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 223 drivers/media/dvb-frontends/cxd2820r_c.c c->cnr.len = 1; c 224 drivers/media/dvb-frontends/cxd2820r_c.c c->cnr.stat[0].scale = FE_SCALE_DECIBEL; c 225 drivers/media/dvb-frontends/cxd2820r_c.c c->cnr.stat[0].svalue = cnr; c 227 drivers/media/dvb-frontends/cxd2820r_c.c c->cnr.len = 1; c 228 drivers/media/dvb-frontends/cxd2820r_c.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 264 drivers/media/dvb-frontends/cxd2820r_c.c c->post_bit_error.len = 1; c 265 drivers/media/dvb-frontends/cxd2820r_c.c c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c 266 drivers/media/dvb-frontends/cxd2820r_c.c c->post_bit_error.stat[0].uvalue = priv->post_bit_error; c 268 drivers/media/dvb-frontends/cxd2820r_c.c c->post_bit_error.len = 1; c 269 drivers/media/dvb-frontends/cxd2820r_c.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 50 drivers/media/dvb-frontends/cxd2820r_core.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 54 drivers/media/dvb-frontends/cxd2820r_core.c dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system); c 108 drivers/media/dvb-frontends/cxd2820r_core.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 111 drivers/media/dvb-frontends/cxd2820r_core.c dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system); c 113 drivers/media/dvb-frontends/cxd2820r_core.c switch (c->delivery_system) { c 151 drivers/media/dvb-frontends/cxd2820r_core.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 154 drivers/media/dvb-frontends/cxd2820r_core.c dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system); c 156 
drivers/media/dvb-frontends/cxd2820r_core.c switch (c->delivery_system) { c 178 drivers/media/dvb-frontends/cxd2820r_core.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 181 drivers/media/dvb-frontends/cxd2820r_core.c dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system); c 186 drivers/media/dvb-frontends/cxd2820r_core.c switch (c->delivery_system) { c 207 drivers/media/dvb-frontends/cxd2820r_core.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 209 drivers/media/dvb-frontends/cxd2820r_core.c dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system); c 221 drivers/media/dvb-frontends/cxd2820r_core.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 223 drivers/media/dvb-frontends/cxd2820r_core.c dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system); c 225 drivers/media/dvb-frontends/cxd2820r_core.c if (c->strength.stat[0].scale == FE_SCALE_RELATIVE) c 226 drivers/media/dvb-frontends/cxd2820r_core.c *strength = c->strength.stat[0].uvalue; c 237 drivers/media/dvb-frontends/cxd2820r_core.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 239 drivers/media/dvb-frontends/cxd2820r_core.c dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system); c 241 drivers/media/dvb-frontends/cxd2820r_core.c if (c->cnr.stat[0].scale == FE_SCALE_DECIBEL) c 242 drivers/media/dvb-frontends/cxd2820r_core.c *snr = div_s64(c->cnr.stat[0].svalue, 100); c 253 drivers/media/dvb-frontends/cxd2820r_core.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 255 drivers/media/dvb-frontends/cxd2820r_core.c dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system); c 271 drivers/media/dvb-frontends/cxd2820r_core.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 274 drivers/media/dvb-frontends/cxd2820r_core.c dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system); c 276 drivers/media/dvb-frontends/cxd2820r_core.c switch (c->delivery_system) { c 298 drivers/media/dvb-frontends/cxd2820r_core.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 301 drivers/media/dvb-frontends/cxd2820r_core.c dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system); c 303 drivers/media/dvb-frontends/cxd2820r_core.c switch (c->delivery_system) { c 324 drivers/media/dvb-frontends/cxd2820r_core.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 328 drivers/media/dvb-frontends/cxd2820r_core.c dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system); c 337 drivers/media/dvb-frontends/cxd2820r_core.c c->delivery_system = SYS_DVBT2; c 343 drivers/media/dvb-frontends/cxd2820r_core.c c->delivery_system = SYS_DVBT; c 15 drivers/media/dvb-frontends/cxd2820r_t.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 48 drivers/media/dvb-frontends/cxd2820r_t.c c->delivery_system, c->modulation, c->frequency, c 49 drivers/media/dvb-frontends/cxd2820r_t.c c->bandwidth_hz, c->inversion); c 51 drivers/media/dvb-frontends/cxd2820r_t.c switch (c->bandwidth_hz) { c 127 drivers/media/dvb-frontends/cxd2820r_t.c struct dtv_frontend_properties *c) c 143 drivers/media/dvb-frontends/cxd2820r_t.c c->modulation = QPSK; c 146 drivers/media/dvb-frontends/cxd2820r_t.c c->modulation = QAM_16; c 149 drivers/media/dvb-frontends/cxd2820r_t.c c->modulation = QAM_64; c 155 drivers/media/dvb-frontends/cxd2820r_t.c c->transmission_mode = TRANSMISSION_MODE_2K; c 158 drivers/media/dvb-frontends/cxd2820r_t.c c->transmission_mode = TRANSMISSION_MODE_8K; c 164 
drivers/media/dvb-frontends/cxd2820r_t.c c->guard_interval = GUARD_INTERVAL_1_32; c 167 drivers/media/dvb-frontends/cxd2820r_t.c c->guard_interval = GUARD_INTERVAL_1_16; c 170 drivers/media/dvb-frontends/cxd2820r_t.c c->guard_interval = GUARD_INTERVAL_1_8; c 173 drivers/media/dvb-frontends/cxd2820r_t.c c->guard_interval = GUARD_INTERVAL_1_4; c 179 drivers/media/dvb-frontends/cxd2820r_t.c c->hierarchy = HIERARCHY_NONE; c 182 drivers/media/dvb-frontends/cxd2820r_t.c c->hierarchy = HIERARCHY_1; c 185 drivers/media/dvb-frontends/cxd2820r_t.c c->hierarchy = HIERARCHY_2; c 188 drivers/media/dvb-frontends/cxd2820r_t.c c->hierarchy = HIERARCHY_4; c 194 drivers/media/dvb-frontends/cxd2820r_t.c c->code_rate_HP = FEC_1_2; c 197 drivers/media/dvb-frontends/cxd2820r_t.c c->code_rate_HP = FEC_2_3; c 200 drivers/media/dvb-frontends/cxd2820r_t.c c->code_rate_HP = FEC_3_4; c 203 drivers/media/dvb-frontends/cxd2820r_t.c c->code_rate_HP = FEC_5_6; c 206 drivers/media/dvb-frontends/cxd2820r_t.c c->code_rate_HP = FEC_7_8; c 212 drivers/media/dvb-frontends/cxd2820r_t.c c->code_rate_LP = FEC_1_2; c 215 drivers/media/dvb-frontends/cxd2820r_t.c c->code_rate_LP = FEC_2_3; c 218 drivers/media/dvb-frontends/cxd2820r_t.c c->code_rate_LP = FEC_3_4; c 221 drivers/media/dvb-frontends/cxd2820r_t.c c->code_rate_LP = FEC_5_6; c 224 drivers/media/dvb-frontends/cxd2820r_t.c c->code_rate_LP = FEC_7_8; c 234 drivers/media/dvb-frontends/cxd2820r_t.c c->inversion = INVERSION_OFF; c 237 drivers/media/dvb-frontends/cxd2820r_t.c c->inversion = INVERSION_ON; c 251 drivers/media/dvb-frontends/cxd2820r_t.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 293 drivers/media/dvb-frontends/cxd2820r_t.c c->strength.len = 1; c 294 drivers/media/dvb-frontends/cxd2820r_t.c c->strength.stat[0].scale = FE_SCALE_RELATIVE; c 295 drivers/media/dvb-frontends/cxd2820r_t.c c->strength.stat[0].uvalue = strength; c 297 drivers/media/dvb-frontends/cxd2820r_t.c c->strength.len = 1; c 298 drivers/media/dvb-frontends/cxd2820r_t.c c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 317 drivers/media/dvb-frontends/cxd2820r_t.c c->cnr.len = 1; c 318 drivers/media/dvb-frontends/cxd2820r_t.c c->cnr.stat[0].scale = FE_SCALE_DECIBEL; c 319 drivers/media/dvb-frontends/cxd2820r_t.c c->cnr.stat[0].svalue = cnr; c 321 drivers/media/dvb-frontends/cxd2820r_t.c c->cnr.len = 1; c 322 drivers/media/dvb-frontends/cxd2820r_t.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 358 drivers/media/dvb-frontends/cxd2820r_t.c c->post_bit_error.len = 1; c 359 drivers/media/dvb-frontends/cxd2820r_t.c c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c 360 drivers/media/dvb-frontends/cxd2820r_t.c c->post_bit_error.stat[0].uvalue = priv->post_bit_error; c 362 drivers/media/dvb-frontends/cxd2820r_t.c c->post_bit_error.len = 1; c 363 drivers/media/dvb-frontends/cxd2820r_t.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 15 drivers/media/dvb-frontends/cxd2820r_t2.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 62 drivers/media/dvb-frontends/cxd2820r_t2.c c->delivery_system, c->modulation, c->frequency, c 63 drivers/media/dvb-frontends/cxd2820r_t2.c c->bandwidth_hz, c->inversion, c->stream_id); c 65 drivers/media/dvb-frontends/cxd2820r_t2.c switch (c->bandwidth_hz) { c 118 drivers/media/dvb-frontends/cxd2820r_t2.c if (c->stream_id > 255) { c 125 drivers/media/dvb-frontends/cxd2820r_t2.c ret = regmap_write(priv->regmap[0], 0x23af, c->stream_id & 0xff); c 157 drivers/media/dvb-frontends/cxd2820r_t2.c struct dtv_frontend_properties *c) c 173 
drivers/media/dvb-frontends/cxd2820r_t2.c c->transmission_mode = TRANSMISSION_MODE_2K; c 176 drivers/media/dvb-frontends/cxd2820r_t2.c c->transmission_mode = TRANSMISSION_MODE_8K; c 179 drivers/media/dvb-frontends/cxd2820r_t2.c c->transmission_mode = TRANSMISSION_MODE_4K; c 182 drivers/media/dvb-frontends/cxd2820r_t2.c c->transmission_mode = TRANSMISSION_MODE_1K; c 185 drivers/media/dvb-frontends/cxd2820r_t2.c c->transmission_mode = TRANSMISSION_MODE_16K; c 188 drivers/media/dvb-frontends/cxd2820r_t2.c c->transmission_mode = TRANSMISSION_MODE_32K; c 194 drivers/media/dvb-frontends/cxd2820r_t2.c c->guard_interval = GUARD_INTERVAL_1_32; c 197 drivers/media/dvb-frontends/cxd2820r_t2.c c->guard_interval = GUARD_INTERVAL_1_16; c 200 drivers/media/dvb-frontends/cxd2820r_t2.c c->guard_interval = GUARD_INTERVAL_1_8; c 203 drivers/media/dvb-frontends/cxd2820r_t2.c c->guard_interval = GUARD_INTERVAL_1_4; c 206 drivers/media/dvb-frontends/cxd2820r_t2.c c->guard_interval = GUARD_INTERVAL_1_128; c 209 drivers/media/dvb-frontends/cxd2820r_t2.c c->guard_interval = GUARD_INTERVAL_19_128; c 212 drivers/media/dvb-frontends/cxd2820r_t2.c c->guard_interval = GUARD_INTERVAL_19_256; c 222 drivers/media/dvb-frontends/cxd2820r_t2.c c->fec_inner = FEC_1_2; c 225 drivers/media/dvb-frontends/cxd2820r_t2.c c->fec_inner = FEC_3_5; c 228 drivers/media/dvb-frontends/cxd2820r_t2.c c->fec_inner = FEC_2_3; c 231 drivers/media/dvb-frontends/cxd2820r_t2.c c->fec_inner = FEC_3_4; c 234 drivers/media/dvb-frontends/cxd2820r_t2.c c->fec_inner = FEC_4_5; c 237 drivers/media/dvb-frontends/cxd2820r_t2.c c->fec_inner = FEC_5_6; c 243 drivers/media/dvb-frontends/cxd2820r_t2.c c->modulation = QPSK; c 246 drivers/media/dvb-frontends/cxd2820r_t2.c c->modulation = QAM_16; c 249 drivers/media/dvb-frontends/cxd2820r_t2.c c->modulation = QAM_64; c 252 drivers/media/dvb-frontends/cxd2820r_t2.c c->modulation = QAM_256; c 262 drivers/media/dvb-frontends/cxd2820r_t2.c c->inversion = INVERSION_OFF; c 265 drivers/media/dvb-frontends/cxd2820r_t2.c c->inversion = INVERSION_ON; c 278 drivers/media/dvb-frontends/cxd2820r_t2.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 318 drivers/media/dvb-frontends/cxd2820r_t2.c c->strength.len = 1; c 319 drivers/media/dvb-frontends/cxd2820r_t2.c c->strength.stat[0].scale = FE_SCALE_RELATIVE; c 320 drivers/media/dvb-frontends/cxd2820r_t2.c c->strength.stat[0].uvalue = strength; c 322 drivers/media/dvb-frontends/cxd2820r_t2.c c->strength.len = 1; c 323 drivers/media/dvb-frontends/cxd2820r_t2.c c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 344 drivers/media/dvb-frontends/cxd2820r_t2.c c->cnr.len = 1; c 345 drivers/media/dvb-frontends/cxd2820r_t2.c c->cnr.stat[0].scale = FE_SCALE_DECIBEL; c 346 drivers/media/dvb-frontends/cxd2820r_t2.c c->cnr.stat[0].svalue = cnr; c 348 drivers/media/dvb-frontends/cxd2820r_t2.c c->cnr.len = 1; c 349 drivers/media/dvb-frontends/cxd2820r_t2.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 370 drivers/media/dvb-frontends/cxd2820r_t2.c c->post_bit_error.len = 1; c 371 drivers/media/dvb-frontends/cxd2820r_t2.c c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c 372 drivers/media/dvb-frontends/cxd2820r_t2.c c->post_bit_error.stat[0].uvalue = priv->post_bit_error; c 374 drivers/media/dvb-frontends/cxd2820r_t2.c c->post_bit_error.len = 1; c 375 drivers/media/dvb-frontends/cxd2820r_t2.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 565 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c struct dtv_frontend_properties *c = NULL; c 574 
drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c = &fe->dtv_property_cache; c 577 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c if (c->delivery_system == SYS_DVBT || c 578 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->delivery_system == SYS_DVBT2) { c 608 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c struct dtv_frontend_properties *c = NULL; c 616 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c = &fe->dtv_property_cache; c 619 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c if (c->delivery_system == SYS_DVBT) { c 622 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c } else if (c->delivery_system == SYS_DVBT2) { c 646 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c struct dtv_frontend_properties *c = NULL; c 654 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c = &fe->dtv_property_cache; c 657 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c if (c->delivery_system == SYS_DVBT) { c 660 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c } else if (c->delivery_system == SYS_DVBT2) { c 1037 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c struct dtv_frontend_properties *c; c 1047 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c = &fe->dtv_property_cache; c 1049 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1050 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->pre_bit_error.stat[0].uvalue = 0; c 1051 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->pre_bit_error.len = 1; c 1052 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1053 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->pre_bit_count.stat[0].uvalue = 0; c 1054 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->pre_bit_count.len = 1; c 1055 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1056 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->post_bit_error.stat[0].uvalue = 0; c 1057 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->post_bit_error.len = 1; c 1058 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1059 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->post_bit_count.stat[0].uvalue = 0; c 1060 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->post_bit_count.len = 1; c 1061 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1062 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->block_error.stat[0].uvalue = 0; c 1063 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->block_error.len = 1; c 1064 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1065 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->block_count.stat[0].uvalue = 0; c 1066 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->block_count.len = 1; c 1068 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c switch (c->bandwidth_hz) { c 1091 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->delivery_system, c->frequency, bw); c 1093 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c if (c->delivery_system == SYS_DVBT) { c 1095 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c priv->dvbt_tune_param.center_freq_khz = c->frequency / 1000; c 1100 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c } else if (c->delivery_system == SYS_DVBT2) { c 1102 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c priv->dvbt2_tune_param.center_freq_khz = c->frequency / 1000; 
c 1104 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c priv->dvbt2_tune_param.data_plp_id = (u16)c->stream_id; c 1124 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c struct dtv_frontend_properties *c = NULL; c 1136 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c = &fe->dtv_property_cache; c 1139 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->pre_bit_error.len = 1; c 1140 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1141 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->pre_bit_count.len = 1; c 1142 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1143 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->post_bit_error.len = 1; c 1144 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1145 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->post_bit_count.len = 1; c 1146 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1147 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->block_error.len = 1; c 1148 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1149 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->block_count.len = 1; c 1150 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1158 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c if (c->delivery_system == SYS_DVBT) { c 1164 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c } else if (c->delivery_system == SYS_DVBT2) { c 1175 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->pre_bit_error.len = 1; c 1176 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->pre_bit_error.stat[0].scale = FE_SCALE_COUNTER; c 1177 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->pre_bit_error.stat[0].uvalue += pre_bit_err; c 1178 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->pre_bit_count.len = 1; c 1179 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER; c 1180 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->pre_bit_count.stat[0].uvalue += pre_bit_count; c 1182 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->pre_bit_error.len = 1; c 1183 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1184 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->pre_bit_count.len = 1; c 1185 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1193 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c if (c->delivery_system == SYS_DVBT) { c 1199 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c } else if (c->delivery_system == SYS_DVBT2) { c 1210 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->post_bit_error.len = 1; c 1211 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c 1212 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->post_bit_error.stat[0].uvalue += post_bit_err; c 1213 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->post_bit_count.len = 1; c 1214 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; c 1215 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->post_bit_count.stat[0].uvalue += post_bit_count; c 1217 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->post_bit_error.len = 1; c 1218 
drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->post_bit_error.stat[0].scale = c 1220 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->post_bit_count.len = 1; c 1221 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->post_bit_count.stat[0].scale = c 1230 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c if (c->delivery_system == SYS_DVBT) { c 1236 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c } else if (c->delivery_system == SYS_DVBT2) { c 1246 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->block_error.len = 1; c 1247 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->block_error.stat[0].scale = FE_SCALE_COUNTER; c 1248 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->block_error.stat[0].uvalue += block_err; c 1249 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->block_count.len = 1; c 1250 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->block_count.stat[0].scale = FE_SCALE_COUNTER; c 1251 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->block_count.stat[0].uvalue += block_count; c 1253 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->block_error.len = 1; c 1254 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1255 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->block_count.len = 1; c 1256 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1310 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c struct dtv_frontend_properties *c = NULL; c 1318 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c = &fe->dtv_property_cache; c 1323 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c if (c->delivery_system == SYS_DVBT) { c 1328 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c } else if (c->delivery_system == SYS_DVBT2) { c 1360 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c if (c->delivery_system == SYS_DVBT) { c 1363 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c } else if (c->delivery_system == SYS_DVBT2) { c 1408 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c struct dtv_frontend_properties *c) c 1419 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c if (!fe || !c) { c 1433 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->transmission_mode = TRANSMISSION_MODE_2K; c 1436 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->transmission_mode = TRANSMISSION_MODE_8K; c 1439 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->transmission_mode = TRANSMISSION_MODE_2K; c 1445 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->guard_interval = GUARD_INTERVAL_1_32; c 1448 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->guard_interval = GUARD_INTERVAL_1_16; c 1451 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->guard_interval = GUARD_INTERVAL_1_8; c 1454 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->guard_interval = GUARD_INTERVAL_1_4; c 1457 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->guard_interval = GUARD_INTERVAL_1_32; c 1463 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->transmission_mode = TRANSMISSION_MODE_2K; c 1464 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->guard_interval = GUARD_INTERVAL_1_32; c 1474 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->hierarchy = HIERARCHY_NONE; c 1477 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->hierarchy = HIERARCHY_1; c 1480 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->hierarchy = HIERARCHY_2; c 1483 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->hierarchy = HIERARCHY_4; c 1486 
drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->hierarchy = HIERARCHY_NONE; c 1494 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->code_rate_HP = FEC_1_2; c 1497 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->code_rate_HP = FEC_2_3; c 1500 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->code_rate_HP = FEC_3_4; c 1503 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->code_rate_HP = FEC_5_6; c 1506 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->code_rate_HP = FEC_7_8; c 1509 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->code_rate_HP = FEC_NONE; c 1516 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->code_rate_LP = FEC_1_2; c 1519 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->code_rate_LP = FEC_2_3; c 1522 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->code_rate_LP = FEC_3_4; c 1525 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->code_rate_LP = FEC_5_6; c 1528 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->code_rate_LP = FEC_7_8; c 1531 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->code_rate_LP = FEC_NONE; c 1538 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->modulation = QPSK; c 1541 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->modulation = QAM_16; c 1544 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->modulation = QAM_64; c 1547 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->modulation = QPSK; c 1553 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->hierarchy = HIERARCHY_NONE; c 1554 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->code_rate_HP = FEC_NONE; c 1555 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->code_rate_LP = FEC_NONE; c 1556 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->modulation = QPSK; c 1566 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->inversion = INVERSION_OFF; c 1569 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->inversion = INVERSION_ON; c 1572 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->inversion = INVERSION_OFF; c 1577 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->inversion = INVERSION_OFF; c 1585 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->strength.len = 1; c 1586 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->strength.stat[0].scale = FE_SCALE_DECIBEL; c 1587 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->strength.stat[0].svalue = strength; c 1589 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->strength.len = 1; c 1590 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1596 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->cnr.len = 1; c 1597 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->cnr.stat[0].scale = FE_SCALE_DECIBEL; c 1598 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->cnr.stat[0].svalue = snr; c 1600 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->cnr.len = 1; c 1601 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1609 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c struct dtv_frontend_properties *c) c 1620 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c if (!fe || !c) { c 1633 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->transmission_mode = TRANSMISSION_MODE_2K; c 1636 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->transmission_mode = TRANSMISSION_MODE_8K; c 1639 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->transmission_mode = TRANSMISSION_MODE_4K; c 1642 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->transmission_mode = 
TRANSMISSION_MODE_1K; c 1645 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->transmission_mode = TRANSMISSION_MODE_16K; c 1648 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->transmission_mode = TRANSMISSION_MODE_32K; c 1651 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->transmission_mode = TRANSMISSION_MODE_2K; c 1658 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->guard_interval = GUARD_INTERVAL_1_32; c 1661 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->guard_interval = GUARD_INTERVAL_1_16; c 1664 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->guard_interval = GUARD_INTERVAL_1_8; c 1667 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->guard_interval = GUARD_INTERVAL_1_4; c 1670 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->guard_interval = GUARD_INTERVAL_1_128; c 1673 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->guard_interval = GUARD_INTERVAL_19_128; c 1676 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->guard_interval = GUARD_INTERVAL_19_256; c 1679 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->guard_interval = GUARD_INTERVAL_1_32; c 1685 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->transmission_mode = TRANSMISSION_MODE_2K; c 1686 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->guard_interval = GUARD_INTERVAL_1_32; c 1698 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->fec_inner = FEC_1_2; c 1701 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->fec_inner = FEC_3_5; c 1704 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->fec_inner = FEC_2_3; c 1707 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->fec_inner = FEC_3_4; c 1710 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->fec_inner = FEC_4_5; c 1713 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->fec_inner = FEC_5_6; c 1716 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->fec_inner = FEC_NONE; c 1721 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->fec_inner = FEC_NONE; c 1733 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->modulation = QPSK; c 1736 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->modulation = QAM_16; c 1739 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->modulation = QAM_64; c 1742 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->modulation = QAM_256; c 1745 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->modulation = QPSK; c 1750 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->modulation = QPSK; c 1760 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->inversion = INVERSION_OFF; c 1763 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->inversion = INVERSION_ON; c 1766 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->inversion = INVERSION_OFF; c 1771 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->inversion = INVERSION_OFF; c 1779 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->strength.len = 1; c 1780 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->strength.stat[0].scale = FE_SCALE_DECIBEL; c 1781 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->strength.stat[0].svalue = strength; c 1783 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->strength.len = 1; c 1784 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1790 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->cnr.len = 1; c 1791 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->cnr.stat[0].scale = FE_SCALE_DECIBEL; c 1792 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->cnr.stat[0].svalue = snr; c 1794 
drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->cnr.len = 1; c 1795 drivers/media/dvb-frontends/cxd2880/cxd2880_top.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 320 drivers/media/dvb-frontends/dib0090.c static void dib0090_write_regs(struct dib0090_state *state, u8 r, const u16 * b, u8 c) c 324 drivers/media/dvb-frontends/dib0090.c } while (--c); c 1503 drivers/media/dvb-frontends/dib0090.c u8 c, h, n; c 1526 drivers/media/dvb-frontends/dib0090.c c = e2 & 0x3f; c 1530 drivers/media/dvb-frontends/dib0090.c if ((c >= CAP_VALUE_MAX) || (c <= CAP_VALUE_MIN)) c 1531 drivers/media/dvb-frontends/dib0090.c c = 32; c 1533 drivers/media/dvb-frontends/dib0090.c c += 14; c 1540 drivers/media/dvb-frontends/dib0090.c e2 = (n << 11) | ((h >> 2)<<6) | c; c 2239 drivers/media/dvb-frontends/dib0090.c u8 c, i; c 2470 drivers/media/dvb-frontends/dib0090.c c = 4; c 2474 drivers/media/dvb-frontends/dib0090.c c = wbd->wbd_gain; c 2476 drivers/media/dvb-frontends/dib0090.c state->wbdmux = (c << 13) | (i << 11) | (WBD | (state->config->use_pwm_agc << 1)); c 114 drivers/media/dvb-frontends/dib3000mb.c struct dtv_frontend_properties *c); c 119 drivers/media/dvb-frontends/dib3000mb.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 127 drivers/media/dvb-frontends/dib3000mb.c switch (c->bandwidth_hz) { c 146 drivers/media/dvb-frontends/dib3000mb.c deb_setf("bandwidth: %d MHZ\n", c->bandwidth_hz / 1000000); c 150 drivers/media/dvb-frontends/dib3000mb.c switch (c->transmission_mode) { c 166 drivers/media/dvb-frontends/dib3000mb.c switch (c->guard_interval) { c 190 drivers/media/dvb-frontends/dib3000mb.c switch (c->inversion) { c 206 drivers/media/dvb-frontends/dib3000mb.c switch (c->modulation) { c 224 drivers/media/dvb-frontends/dib3000mb.c switch (c->hierarchy) { c 247 drivers/media/dvb-frontends/dib3000mb.c if (c->hierarchy == HIERARCHY_NONE) { c 250 drivers/media/dvb-frontends/dib3000mb.c fe_cr = c->code_rate_HP; c 251 drivers/media/dvb-frontends/dib3000mb.c } else if (c->hierarchy != HIERARCHY_AUTO) { c 254 drivers/media/dvb-frontends/dib3000mb.c fe_cr = c->code_rate_LP; c 288 drivers/media/dvb-frontends/dib3000mb.c [c->transmission_mode == TRANSMISSION_MODE_AUTO] c 289 drivers/media/dvb-frontends/dib3000mb.c [c->guard_interval == GUARD_INTERVAL_AUTO] c 290 drivers/media/dvb-frontends/dib3000mb.c [c->inversion == INVERSION_AUTO]; c 298 drivers/media/dvb-frontends/dib3000mb.c if (c->transmission_mode == TRANSMISSION_MODE_2K) { c 299 drivers/media/dvb-frontends/dib3000mb.c if (c->guard_interval == GUARD_INTERVAL_1_8) { c 327 drivers/media/dvb-frontends/dib3000mb.c if (c->modulation == QAM_AUTO || c 328 drivers/media/dvb-frontends/dib3000mb.c c->hierarchy == HIERARCHY_AUTO || c 330 drivers/media/dvb-frontends/dib3000mb.c c->inversion == INVERSION_AUTO) { c 350 drivers/media/dvb-frontends/dib3000mb.c if (dib3000mb_get_frontend(fe, c) == 0) { c 442 drivers/media/dvb-frontends/dib3000mb.c struct dtv_frontend_properties *c) c 471 drivers/media/dvb-frontends/dib3000mb.c c->inversion = c 476 drivers/media/dvb-frontends/dib3000mb.c deb_getf("inversion %d %d, %d\n", inv_test2, inv_test1, c->inversion); c 481 drivers/media/dvb-frontends/dib3000mb.c c->modulation = QPSK; c 485 drivers/media/dvb-frontends/dib3000mb.c c->modulation = QAM_16; c 489 drivers/media/dvb-frontends/dib3000mb.c c->modulation = QAM_64; c 499 drivers/media/dvb-frontends/dib3000mb.c cr = &c->code_rate_LP; c 500 drivers/media/dvb-frontends/dib3000mb.c c->code_rate_HP = FEC_NONE; c 504 drivers/media/dvb-frontends/dib3000mb.c 
c->hierarchy = HIERARCHY_NONE; c 508 drivers/media/dvb-frontends/dib3000mb.c c->hierarchy = HIERARCHY_1; c 512 drivers/media/dvb-frontends/dib3000mb.c c->hierarchy = HIERARCHY_2; c 516 drivers/media/dvb-frontends/dib3000mb.c c->hierarchy = HIERARCHY_4; c 527 drivers/media/dvb-frontends/dib3000mb.c cr = &c->code_rate_HP; c 528 drivers/media/dvb-frontends/dib3000mb.c c->code_rate_LP = FEC_NONE; c 529 drivers/media/dvb-frontends/dib3000mb.c c->hierarchy = HIERARCHY_NONE; c 564 drivers/media/dvb-frontends/dib3000mb.c c->guard_interval = GUARD_INTERVAL_1_32; c 568 drivers/media/dvb-frontends/dib3000mb.c c->guard_interval = GUARD_INTERVAL_1_16; c 572 drivers/media/dvb-frontends/dib3000mb.c c->guard_interval = GUARD_INTERVAL_1_8; c 576 drivers/media/dvb-frontends/dib3000mb.c c->guard_interval = GUARD_INTERVAL_1_4; c 587 drivers/media/dvb-frontends/dib3000mb.c c->transmission_mode = TRANSMISSION_MODE_2K; c 591 drivers/media/dvb-frontends/dib3000mb.c c->transmission_mode = TRANSMISSION_MODE_8K; c 1667 drivers/media/dvb-frontends/dib7000p.c struct dtv_frontend_properties *c = &demod->dtv_property_cache; c 1670 drivers/media/dvb-frontends/dib7000p.c memset(&c->strength, 0, sizeof(c->strength)); c 1671 drivers/media/dvb-frontends/dib7000p.c memset(&c->cnr, 0, sizeof(c->cnr)); c 1672 drivers/media/dvb-frontends/dib7000p.c memset(&c->post_bit_error, 0, sizeof(c->post_bit_error)); c 1673 drivers/media/dvb-frontends/dib7000p.c memset(&c->post_bit_count, 0, sizeof(c->post_bit_count)); c 1674 drivers/media/dvb-frontends/dib7000p.c memset(&c->block_error, 0, sizeof(c->block_error)); c 1676 drivers/media/dvb-frontends/dib7000p.c c->strength.len = 1; c 1677 drivers/media/dvb-frontends/dib7000p.c c->cnr.len = 1; c 1678 drivers/media/dvb-frontends/dib7000p.c c->block_error.len = 1; c 1679 drivers/media/dvb-frontends/dib7000p.c c->block_count.len = 1; c 1680 drivers/media/dvb-frontends/dib7000p.c c->post_bit_error.len = 1; c 1681 drivers/media/dvb-frontends/dib7000p.c c->post_bit_count.len = 1; c 1683 drivers/media/dvb-frontends/dib7000p.c c->strength.stat[0].scale = FE_SCALE_DECIBEL; c 1684 drivers/media/dvb-frontends/dib7000p.c c->strength.stat[0].uvalue = 0; c 1686 drivers/media/dvb-frontends/dib7000p.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1687 drivers/media/dvb-frontends/dib7000p.c c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1688 drivers/media/dvb-frontends/dib7000p.c c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1689 drivers/media/dvb-frontends/dib7000p.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1690 drivers/media/dvb-frontends/dib7000p.c c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1798 drivers/media/dvb-frontends/dib7000p.c struct dtv_frontend_properties *c = &demod->dtv_property_cache; c 1804 drivers/media/dvb-frontends/dib7000p.c switch (c->guard_interval) { c 1820 drivers/media/dvb-frontends/dib7000p.c switch (c->transmission_mode) { c 1833 drivers/media/dvb-frontends/dib7000p.c switch (c->modulation) { c 1847 drivers/media/dvb-frontends/dib7000p.c switch ((c->hierarchy == 0 || 1 == 1) ? 
c->code_rate_HP : c->code_rate_LP) { c 1899 drivers/media/dvb-frontends/dib7000p.c struct dtv_frontend_properties *c = &demod->dtv_property_cache; c 1912 drivers/media/dvb-frontends/dib7000p.c c->strength.stat[0].svalue = db; c 1916 drivers/media/dvb-frontends/dib7000p.c c->cnr.len = 1; c 1917 drivers/media/dvb-frontends/dib7000p.c c->block_count.len = 1; c 1918 drivers/media/dvb-frontends/dib7000p.c c->block_error.len = 1; c 1919 drivers/media/dvb-frontends/dib7000p.c c->post_bit_error.len = 1; c 1920 drivers/media/dvb-frontends/dib7000p.c c->post_bit_count.len = 1; c 1921 drivers/media/dvb-frontends/dib7000p.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1922 drivers/media/dvb-frontends/dib7000p.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1923 drivers/media/dvb-frontends/dib7000p.c c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1924 drivers/media/dvb-frontends/dib7000p.c c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1925 drivers/media/dvb-frontends/dib7000p.c c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1939 drivers/media/dvb-frontends/dib7000p.c c->cnr.stat[0].svalue = snr; c 1940 drivers/media/dvb-frontends/dib7000p.c c->cnr.stat[0].scale = FE_SCALE_DECIBEL; c 1948 drivers/media/dvb-frontends/dib7000p.c c->block_error.stat[0].scale = FE_SCALE_COUNTER; c 1949 drivers/media/dvb-frontends/dib7000p.c c->block_error.stat[0].uvalue = ucb; c 1958 drivers/media/dvb-frontends/dib7000p.c c->block_count.stat[0].scale = FE_SCALE_COUNTER; c 1959 drivers/media/dvb-frontends/dib7000p.c c->block_count.stat[0].uvalue += blocks; c 1973 drivers/media/dvb-frontends/dib7000p.c c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c 1974 drivers/media/dvb-frontends/dib7000p.c c->post_bit_error.stat[0].uvalue += val; c 1976 drivers/media/dvb-frontends/dib7000p.c c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; c 1977 drivers/media/dvb-frontends/dib7000p.c c->post_bit_count.stat[0].uvalue += 100000000; c 1984 drivers/media/dvb-frontends/dib7000p.c c->block_error.stat[0].scale = FE_SCALE_COUNTER; c 1985 drivers/media/dvb-frontends/dib7000p.c c->block_error.stat[0].uvalue += val; c 1991 drivers/media/dvb-frontends/dib7000p.c c->block_count.stat[0].scale = FE_SCALE_COUNTER; c 1992 drivers/media/dvb-frontends/dib7000p.c c->block_count.stat[0].uvalue += blocks; c 998 drivers/media/dvb-frontends/dib8000.c struct dtv_frontend_properties *c = &state->fe[0]->dtv_property_cache; c 1001 drivers/media/dvb-frontends/dib8000.c memset(&c->strength, 0, sizeof(c->strength)); c 1002 drivers/media/dvb-frontends/dib8000.c memset(&c->cnr, 0, sizeof(c->cnr)); c 1003 drivers/media/dvb-frontends/dib8000.c memset(&c->post_bit_error, 0, sizeof(c->post_bit_error)); c 1004 drivers/media/dvb-frontends/dib8000.c memset(&c->post_bit_count, 0, sizeof(c->post_bit_count)); c 1005 drivers/media/dvb-frontends/dib8000.c memset(&c->block_error, 0, sizeof(c->block_error)); c 1007 drivers/media/dvb-frontends/dib8000.c c->strength.len = 1; c 1008 drivers/media/dvb-frontends/dib8000.c c->cnr.len = 1; c 1009 drivers/media/dvb-frontends/dib8000.c c->block_error.len = 1; c 1010 drivers/media/dvb-frontends/dib8000.c c->block_count.len = 1; c 1011 drivers/media/dvb-frontends/dib8000.c c->post_bit_error.len = 1; c 1012 drivers/media/dvb-frontends/dib8000.c c->post_bit_count.len = 1; c 1014 drivers/media/dvb-frontends/dib8000.c c->strength.stat[0].scale = FE_SCALE_DECIBEL; c 1015 drivers/media/dvb-frontends/dib8000.c c->strength.stat[0].uvalue = 0; c 1017 drivers/media/dvb-frontends/dib8000.c 
c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1018 drivers/media/dvb-frontends/dib8000.c c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1019 drivers/media/dvb-frontends/dib8000.c c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1020 drivers/media/dvb-frontends/dib8000.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1021 drivers/media/dvb-frontends/dib8000.c c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1996 drivers/media/dvb-frontends/dib8000.c struct dtv_frontend_properties *c = &state->fe[0]->dtv_property_cache; c 1998 drivers/media/dvb-frontends/dib8000.c switch (c->layer[layer_index].modulation) { c 2014 drivers/media/dvb-frontends/dib8000.c switch (c->layer[layer_index].fec) { c 2033 drivers/media/dvb-frontends/dib8000.c time_intlv = fls(c->layer[layer_index].interleaving); c 2034 drivers/media/dvb-frontends/dib8000.c if (time_intlv > 3 && !(time_intlv == 4 && c->isdbt_sb_mode == 1)) c 2037 drivers/media/dvb-frontends/dib8000.c dib8000_write_word(state, 2 + layer_index, (constellation << 10) | ((c->layer[layer_index].segment_count & 0xf) << 6) | (cr << 3) | time_intlv); c 2038 drivers/media/dvb-frontends/dib8000.c if (c->layer[layer_index].segment_count > 0) { c 2042 drivers/media/dvb-frontends/dib8000.c if (c->layer[layer_index].modulation == QAM_16 || c->layer[layer_index].modulation == QAM_64) c 2043 drivers/media/dvb-frontends/dib8000.c max_constellation = c->layer[layer_index].modulation; c 2046 drivers/media/dvb-frontends/dib8000.c if (c->layer[layer_index].modulation == QAM_64) c 2047 drivers/media/dvb-frontends/dib8000.c max_constellation = c->layer[layer_index].modulation; c 2190 drivers/media/dvb-frontends/dib8000.c struct dtv_frontend_properties *c = &state->fe[0]->dtv_property_cache; c 2196 drivers/media/dvb-frontends/dib8000.c dib8000_write_word(state, 351, (c->isdbt_sb_mode << 9) | (c->isdbt_sb_mode << 8) | (13 << 4) | 5); c 2198 drivers/media/dvb-frontends/dib8000.c if (c->isdbt_sb_mode) { c 2200 drivers/media/dvb-frontends/dib8000.c switch (c->transmission_mode) { c 2202 drivers/media/dvb-frontends/dib8000.c if (c->isdbt_partial_reception == 0) { /* 1-seg */ c 2203 drivers/media/dvb-frontends/dib8000.c if (c->layer[0].modulation == DQPSK) /* DQPSK */ c 2208 drivers/media/dvb-frontends/dib8000.c if (c->layer[0].modulation == DQPSK) { /* DQPSK on central segment */ c 2209 drivers/media/dvb-frontends/dib8000.c if (c->layer[1].modulation == DQPSK) /* DQPSK on external segments */ c 2214 drivers/media/dvb-frontends/dib8000.c if (c->layer[1].modulation == DQPSK) /* DQPSK on external segments */ c 2222 drivers/media/dvb-frontends/dib8000.c if (c->isdbt_partial_reception == 0) { /* 1-seg */ c 2223 drivers/media/dvb-frontends/dib8000.c if (c->layer[0].modulation == DQPSK) /* DQPSK */ c 2228 drivers/media/dvb-frontends/dib8000.c if (c->layer[0].modulation == DQPSK) { /* DQPSK on central segment */ c 2229 drivers/media/dvb-frontends/dib8000.c if (c->layer[1].modulation == DQPSK) /* DQPSK on external segments */ c 2234 drivers/media/dvb-frontends/dib8000.c if (c->layer[1].modulation == DQPSK) /* DQPSK on external segments */ c 2244 drivers/media/dvb-frontends/dib8000.c if (c->isdbt_partial_reception == 0) { /* 1-seg */ c 2245 drivers/media/dvb-frontends/dib8000.c if (c->layer[0].modulation == DQPSK) /* DQPSK */ c 2250 drivers/media/dvb-frontends/dib8000.c if (c->layer[0].modulation == DQPSK) { /* DQPSK on central segment */ c 2251 drivers/media/dvb-frontends/dib8000.c if (c->layer[1].modulation == DQPSK) /* DQPSK on external 
segments */ c 2256 drivers/media/dvb-frontends/dib8000.c if (c->layer[1].modulation == DQPSK) /* DQPSK on external segments */ c 2274 drivers/media/dvb-frontends/dib8000.c struct dtv_frontend_properties *c = &state->fe[0]->dtv_property_cache; c 2278 drivers/media/dvb-frontends/dib8000.c if (c->transmission_mode == TRANSMISSION_MODE_2K || c->transmission_mode == TRANSMISSION_MODE_4K) { c 2286 drivers/media/dvb-frontends/dib8000.c if (c->isdbt_partial_reception == 1) /* 3-segments */ c 2295 drivers/media/dvb-frontends/dib8000.c dib8000_write_word(state, 187, (4 << 12) | (0 << 11) | (63 << 5) | (0x3 << 3) | ((~c->isdbt_partial_reception & 1) << 2) | 0x3); c 2301 drivers/media/dvb-frontends/dib8000.c if (c->isdbt_partial_reception == 0) { c 2321 drivers/media/dvb-frontends/dib8000.c if (c->isdbt_partial_reception == 0 && c->transmission_mode == TRANSMISSION_MODE_2K) c 2337 drivers/media/dvb-frontends/dib8000.c if (c->isdbt_partial_reception == 0) c 2349 drivers/media/dvb-frontends/dib8000.c struct dtv_frontend_properties *c = &state->fe[0]->dtv_property_cache; c 2352 drivers/media/dvb-frontends/dib8000.c c->isdbt_partial_reception = 1; c 2362 drivers/media/dvb-frontends/dib8000.c dib8000_write_word(state, 1, (tmp&0xfffc) | (c->guard_interval & 0x3)); c 2364 drivers/media/dvb-frontends/dib8000.c dib8000_write_word(state, 274, (dib8000_read_word(state, 274) & 0xffcf) | ((c->isdbt_partial_reception & 1) << 5) | ((c->isdbt_sb_mode & 1) << 4)); c 2367 drivers/media/dvb-frontends/dib8000.c if (c->isdbt_partial_reception) { c 2368 drivers/media/dvb-frontends/dib8000.c state->seg_diff_mask = (c->layer[0].modulation == DQPSK) << permu_seg[0]; c 2370 drivers/media/dvb-frontends/dib8000.c nbseg_diff += (c->layer[i].modulation == DQPSK) * c->layer[i].segment_count; c 2375 drivers/media/dvb-frontends/dib8000.c nbseg_diff += (c->layer[i].modulation == DQPSK) * c->layer[i].segment_count; c 2388 drivers/media/dvb-frontends/dib8000.c state->layer_b_nb_seg = c->layer[1].segment_count; c 2389 drivers/media/dvb-frontends/dib8000.c state->layer_c_nb_seg = c->layer[2].segment_count; c 2404 drivers/media/dvb-frontends/dib8000.c if (c->isdbt_partial_reception) /* 3-segments */ c 2410 drivers/media/dvb-frontends/dib8000.c if (c->isdbt_sb_mode) { c 2412 drivers/media/dvb-frontends/dib8000.c if (c->isdbt_sb_subchannel < 14) c 2413 drivers/media/dvb-frontends/dib8000.c init_prbs = dib8000_get_init_prbs(state, c->isdbt_sb_subchannel); c 2453 drivers/media/dvb-frontends/dib8000.c tmcc_pow += (((c->layer[i].modulation == DQPSK) * 4 + 1) * c->layer[i].segment_count) ; c 2495 drivers/media/dvb-frontends/dib8000.c struct dtv_frontend_properties *c = &state->fe[0]->dtv_property_cache; c 2541 drivers/media/dvb-frontends/dib8000.c c->transmission_mode = TRANSMISSION_MODE_8K; c 2542 drivers/media/dvb-frontends/dib8000.c c->guard_interval = GUARD_INTERVAL_1_8; c 2543 drivers/media/dvb-frontends/dib8000.c c->inversion = 0; c 2544 drivers/media/dvb-frontends/dib8000.c c->layer[0].modulation = QAM_64; c 2545 drivers/media/dvb-frontends/dib8000.c c->layer[0].fec = FEC_2_3; c 2546 drivers/media/dvb-frontends/dib8000.c c->layer[0].interleaving = 0; c 2547 drivers/media/dvb-frontends/dib8000.c c->layer[0].segment_count = 13; c 2550 drivers/media/dvb-frontends/dib8000.c c->transmission_mode = state->found_nfft; c 2579 drivers/media/dvb-frontends/dib8000.c c->inversion = 0; c 2580 drivers/media/dvb-frontends/dib8000.c c->layer[0].modulation = QAM_64; c 2581 drivers/media/dvb-frontends/dib8000.c c->layer[0].fec = FEC_2_3; c 2582 
drivers/media/dvb-frontends/dib8000.c c->layer[0].interleaving = 0; c 2583 drivers/media/dvb-frontends/dib8000.c c->layer[0].segment_count = 13; c 2584 drivers/media/dvb-frontends/dib8000.c if (!c->isdbt_sb_mode) c 2585 drivers/media/dvb-frontends/dib8000.c c->layer[0].segment_count = 13; c 2588 drivers/media/dvb-frontends/dib8000.c if (c->isdbt_sb_mode) { c 2592 drivers/media/dvb-frontends/dib8000.c if (c->guard_interval == GUARD_INTERVAL_AUTO) { c 2593 drivers/media/dvb-frontends/dib8000.c if (c->transmission_mode == TRANSMISSION_MODE_AUTO) { c 2594 drivers/media/dvb-frontends/dib8000.c c->transmission_mode = TRANSMISSION_MODE_8K; c 2595 drivers/media/dvb-frontends/dib8000.c c->guard_interval = GUARD_INTERVAL_1_8; c 2599 drivers/media/dvb-frontends/dib8000.c c->guard_interval = GUARD_INTERVAL_1_8; c 2603 drivers/media/dvb-frontends/dib8000.c if (c->transmission_mode == TRANSMISSION_MODE_AUTO) { c 2604 drivers/media/dvb-frontends/dib8000.c c->transmission_mode = TRANSMISSION_MODE_8K; c 2718 drivers/media/dvb-frontends/dib8000.c struct dtv_frontend_properties *c = &state->fe[0]->dtv_property_cache; c 2726 drivers/media/dvb-frontends/dib8000.c current_rf = c->frequency; c 2728 drivers/media/dvb-frontends/dib8000.c total_dds_offset_khz = (int)current_rf - (int)c->frequency / 1000; c 2730 drivers/media/dvb-frontends/dib8000.c if (c->isdbt_sb_mode) { c 2731 drivers/media/dvb-frontends/dib8000.c state->subchannel = c->isdbt_sb_subchannel; c 2734 drivers/media/dvb-frontends/dib8000.c dib8000_write_word(state, 26, c->inversion ^ i); c 2737 drivers/media/dvb-frontends/dib8000.c if ((c->inversion ^ i) == 0) c 2740 drivers/media/dvb-frontends/dib8000.c if ((c->inversion ^ i) == 0) c 2745 drivers/media/dvb-frontends/dib8000.c dprintk("%dkhz tuner offset (frequency = %dHz & current_rf = %dHz) total_dds_offset_hz = %d\n", c->frequency - current_rf, c->frequency, current_rf, total_dds_offset_khz); c 2755 drivers/media/dvb-frontends/dib8000.c struct dtv_frontend_properties *c = &state->fe[0]->dtv_property_cache; c 2758 drivers/media/dvb-frontends/dib8000.c switch (c->transmission_mode) { c 2772 drivers/media/dvb-frontends/dib8000.c return (LUT_isdbt_symbol_duration[i] / (c->bandwidth_hz / 1000)) + 1; c 2777 drivers/media/dvb-frontends/dib8000.c struct dtv_frontend_properties *c = &state->fe[0]->dtv_property_cache; c 2782 drivers/media/dvb-frontends/dib8000.c if (c->isdbt_sb_mode) { c 2783 drivers/media/dvb-frontends/dib8000.c if (c->isdbt_partial_reception == 0) { c 2796 drivers/media/dvb-frontends/dib8000.c if (c->isdbt_sb_mode) { c 2797 drivers/media/dvb-frontends/dib8000.c if (c->isdbt_partial_reception == 0) { /* Sound Broadcasting mode 1 seg */ c 2823 drivers/media/dvb-frontends/dib8000.c struct dtv_frontend_properties *c = &state->fe[0]->dtv_property_cache; c 2827 drivers/media/dvb-frontends/dib8000.c switch (c->transmission_mode) { c 2841 drivers/media/dvb-frontends/dib8000.c sync_wait = (sync_wait * (1 << (c->guard_interval)) * 3) / 2 + 48; /* add 50% SFN margin + compensate for one DVSY-fifo */ c 2843 drivers/media/dvb-frontends/dib8000.c sync_wait = (sync_wait * (1 << (c->guard_interval)) * 3) / 2 + state->cfg.diversity_delay; /* add 50% SFN margin + compensate for DVSY-fifo */ c 2926 drivers/media/dvb-frontends/dib8000.c static int is_manual_mode(struct dtv_frontend_properties *c) c 2931 drivers/media/dvb-frontends/dib8000.c if (c->delivery_system != SYS_ISDBT) c 2937 drivers/media/dvb-frontends/dib8000.c if (c->transmission_mode == TRANSMISSION_MODE_AUTO) { c 2945 
drivers/media/dvb-frontends/dib8000.c if (c->guard_interval == GUARD_INTERVAL_AUTO) { c 2954 drivers/media/dvb-frontends/dib8000.c if (!c->isdbt_layer_enabled) { c 2964 drivers/media/dvb-frontends/dib8000.c if (!(c->isdbt_layer_enabled & 1 << i)) c 2967 drivers/media/dvb-frontends/dib8000.c if ((c->layer[i].segment_count > 13) || c 2968 drivers/media/dvb-frontends/dib8000.c (c->layer[i].segment_count == 0)) { c 2969 drivers/media/dvb-frontends/dib8000.c c->isdbt_layer_enabled &= ~(1 << i); c 2973 drivers/media/dvb-frontends/dib8000.c n_segs += c->layer[i].segment_count; c 2975 drivers/media/dvb-frontends/dib8000.c if ((c->layer[i].modulation == QAM_AUTO) || c 2976 drivers/media/dvb-frontends/dib8000.c (c->layer[i].fec == FEC_AUTO)) { c 2999 drivers/media/dvb-frontends/dib8000.c struct dtv_frontend_properties *c = &state->fe[0]->dtv_property_cache; c 3027 drivers/media/dvb-frontends/dib8000.c state->channel_parameters_set = is_manual_mode(c); c 3038 drivers/media/dvb-frontends/dib8000.c dib8000_set_bandwidth(fe, c->bandwidth_hz / 1000); c 3224 drivers/media/dvb-frontends/dib8000.c if (c->isdbt_sb_mode c 3225 drivers/media/dvb-frontends/dib8000.c && c->isdbt_sb_subchannel < 14 c 3239 drivers/media/dvb-frontends/dib8000.c if (c->layer[i].interleaving >= deeper_interleaver) { c 3240 drivers/media/dvb-frontends/dib8000.c dprintk("layer%i: time interleaver = %d\n", i, c->layer[i].interleaving); c 3241 drivers/media/dvb-frontends/dib8000.c if (c->layer[i].segment_count > 0) { /* valid layer */ c 3242 drivers/media/dvb-frontends/dib8000.c deeper_interleaver = c->layer[0].interleaving; c 3271 drivers/media/dvb-frontends/dib8000.c c->layer[0].segment_count ? (locks >> 7) & 0x1 ? "locked" : "NOT LOCKED" : "not enabled", c 3272 drivers/media/dvb-frontends/dib8000.c c->layer[1].segment_count ? (locks >> 6) & 0x1 ? "locked" : "NOT LOCKED" : "not enabled", c 3273 drivers/media/dvb-frontends/dib8000.c c->layer[2].segment_count ? (locks >> 5) & 0x1 ? "locked" : "NOT LOCKED" : "not enabled"); c 3274 drivers/media/dvb-frontends/dib8000.c if (c->isdbt_sb_mode c 3275 drivers/media/dvb-frontends/dib8000.c && c->isdbt_sb_subchannel < 14 c 3283 drivers/media/dvb-frontends/dib8000.c if (c->isdbt_sb_mode c 3284 drivers/media/dvb-frontends/dib8000.c && c->isdbt_sb_subchannel < 14 c 3292 drivers/media/dvb-frontends/dib8000.c c->layer[0].segment_count ? (locks >> 7) & 0x1 ? "locked" : "NOT LOCKED" : "not enabled", c 3293 drivers/media/dvb-frontends/dib8000.c c->layer[1].segment_count ? (locks >> 6) & 0x1 ? "locked" : "NOT LOCKED" : "not enabled", c 3294 drivers/media/dvb-frontends/dib8000.c c->layer[2].segment_count ? (locks >> 5) & 0x1 ? 
"locked" : "NOT LOCKED" : "not enabled"); c 3390 drivers/media/dvb-frontends/dib8000.c struct dtv_frontend_properties *c) c 3397 drivers/media/dvb-frontends/dib8000.c c->bandwidth_hz = 6000000; c 3414 drivers/media/dvb-frontends/dib8000.c state->fe[index_frontend]->ops.get_frontend(state->fe[index_frontend], c); c 3434 drivers/media/dvb-frontends/dib8000.c c->isdbt_sb_mode = dib8000_read_word(state, 508) & 0x1; c 3440 drivers/media/dvb-frontends/dib8000.c c->inversion = (val & 0x40) >> 6; c 3443 drivers/media/dvb-frontends/dib8000.c c->transmission_mode = TRANSMISSION_MODE_2K; c 3447 drivers/media/dvb-frontends/dib8000.c c->transmission_mode = TRANSMISSION_MODE_4K; c 3452 drivers/media/dvb-frontends/dib8000.c c->transmission_mode = TRANSMISSION_MODE_8K; c 3459 drivers/media/dvb-frontends/dib8000.c c->guard_interval = GUARD_INTERVAL_1_32; c 3463 drivers/media/dvb-frontends/dib8000.c c->guard_interval = GUARD_INTERVAL_1_16; c 3468 drivers/media/dvb-frontends/dib8000.c c->guard_interval = GUARD_INTERVAL_1_8; c 3472 drivers/media/dvb-frontends/dib8000.c c->guard_interval = GUARD_INTERVAL_1_4; c 3477 drivers/media/dvb-frontends/dib8000.c c->isdbt_partial_reception = val & 1; c 3478 drivers/media/dvb-frontends/dib8000.c dprintk("dib8000_get_frontend: partial_reception = %d\n", c->isdbt_partial_reception); c 3484 drivers/media/dvb-frontends/dib8000.c c->layer[i].segment_count = val; c 3493 drivers/media/dvb-frontends/dib8000.c i, c->layer[i].segment_count); c 3499 drivers/media/dvb-frontends/dib8000.c c->layer[i].interleaving = val; c 3502 drivers/media/dvb-frontends/dib8000.c i, c->layer[i].interleaving); c 3507 drivers/media/dvb-frontends/dib8000.c c->layer[i].fec = FEC_1_2; c 3512 drivers/media/dvb-frontends/dib8000.c c->layer[i].fec = FEC_2_3; c 3517 drivers/media/dvb-frontends/dib8000.c c->layer[i].fec = FEC_3_4; c 3522 drivers/media/dvb-frontends/dib8000.c c->layer[i].fec = FEC_5_6; c 3527 drivers/media/dvb-frontends/dib8000.c c->layer[i].fec = FEC_7_8; c 3536 drivers/media/dvb-frontends/dib8000.c c->layer[i].modulation = DQPSK; c 3541 drivers/media/dvb-frontends/dib8000.c c->layer[i].modulation = QPSK; c 3546 drivers/media/dvb-frontends/dib8000.c c->layer[i].modulation = QAM_16; c 3552 drivers/media/dvb-frontends/dib8000.c c->layer[i].modulation = QAM_64; c 3561 drivers/media/dvb-frontends/dib8000.c state->fe[index_frontend]->dtv_property_cache.isdbt_sb_mode = c->isdbt_sb_mode; c 3562 drivers/media/dvb-frontends/dib8000.c state->fe[index_frontend]->dtv_property_cache.inversion = c->inversion; c 3563 drivers/media/dvb-frontends/dib8000.c state->fe[index_frontend]->dtv_property_cache.transmission_mode = c->transmission_mode; c 3564 drivers/media/dvb-frontends/dib8000.c state->fe[index_frontend]->dtv_property_cache.guard_interval = c->guard_interval; c 3565 drivers/media/dvb-frontends/dib8000.c state->fe[index_frontend]->dtv_property_cache.isdbt_partial_reception = c->isdbt_partial_reception; c 3567 drivers/media/dvb-frontends/dib8000.c state->fe[index_frontend]->dtv_property_cache.layer[i].segment_count = c->layer[i].segment_count; c 3568 drivers/media/dvb-frontends/dib8000.c state->fe[index_frontend]->dtv_property_cache.layer[i].interleaving = c->layer[i].interleaving; c 3569 drivers/media/dvb-frontends/dib8000.c state->fe[index_frontend]->dtv_property_cache.layer[i].fec = c->layer[i].fec; c 3570 drivers/media/dvb-frontends/dib8000.c state->fe[index_frontend]->dtv_property_cache.layer[i].modulation = c->layer[i].modulation; c 3579 drivers/media/dvb-frontends/dib8000.c struct 
dtv_frontend_properties *c = &state->fe[0]->dtv_property_cache; c 3584 drivers/media/dvb-frontends/dib8000.c if (c->frequency == 0) { c 3589 drivers/media/dvb-frontends/dib8000.c if (c->bandwidth_hz == 0) { c 3591 drivers/media/dvb-frontends/dib8000.c c->bandwidth_hz = 6000000; c 3679 drivers/media/dvb-frontends/dib8000.c dib8000_get_frontend(state->fe[index_frontend], c); /* we read the channel parameters from the frontend which was successful */ c 3986 drivers/media/dvb-frontends/dib8000.c struct dtv_frontend_properties *c = &state->fe[0]->dtv_property_cache; c 4001 drivers/media/dvb-frontends/dib8000.c switch (c->guard_interval) { c 4017 drivers/media/dvb-frontends/dib8000.c switch (c->transmission_mode) { c 4032 drivers/media/dvb-frontends/dib8000.c nsegs = c->layer[i].segment_count; c 4036 drivers/media/dvb-frontends/dib8000.c switch (c->layer[i].modulation) { c 4050 drivers/media/dvb-frontends/dib8000.c switch (c->layer[i].fec) { c 4074 drivers/media/dvb-frontends/dib8000.c interleaving = c->layer[i].interleaving; c 4100 drivers/media/dvb-frontends/dib8000.c struct dtv_frontend_properties *c = &state->fe[0]->dtv_property_cache; c 4114 drivers/media/dvb-frontends/dib8000.c c->strength.stat[0].svalue = db; c 4118 drivers/media/dvb-frontends/dib8000.c c->cnr.len = 1; c 4119 drivers/media/dvb-frontends/dib8000.c c->block_count.len = 1; c 4120 drivers/media/dvb-frontends/dib8000.c c->block_error.len = 1; c 4121 drivers/media/dvb-frontends/dib8000.c c->post_bit_error.len = 1; c 4122 drivers/media/dvb-frontends/dib8000.c c->post_bit_count.len = 1; c 4123 drivers/media/dvb-frontends/dib8000.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 4124 drivers/media/dvb-frontends/dib8000.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 4125 drivers/media/dvb-frontends/dib8000.c c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 4126 drivers/media/dvb-frontends/dib8000.c c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 4127 drivers/media/dvb-frontends/dib8000.c c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 4149 drivers/media/dvb-frontends/dib8000.c c->cnr.stat[0].svalue = snr; c 4150 drivers/media/dvb-frontends/dib8000.c c->cnr.stat[0].scale = FE_SCALE_DECIBEL; c 4157 drivers/media/dvb-frontends/dib8000.c c->block_error.stat[0].scale = FE_SCALE_COUNTER; c 4158 drivers/media/dvb-frontends/dib8000.c c->block_error.stat[0].uvalue = val + state->init_ucb; c 4167 drivers/media/dvb-frontends/dib8000.c c->block_count.stat[0].scale = FE_SCALE_COUNTER; c 4168 drivers/media/dvb-frontends/dib8000.c c->block_count.stat[0].uvalue += blocks; c 4182 drivers/media/dvb-frontends/dib8000.c c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c 4183 drivers/media/dvb-frontends/dib8000.c c->post_bit_error.stat[0].uvalue += val; c 4185 drivers/media/dvb-frontends/dib8000.c c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; c 4186 drivers/media/dvb-frontends/dib8000.c c->post_bit_count.stat[0].uvalue += 100000000; c 4192 drivers/media/dvb-frontends/dib8000.c c->block_error.len = 4; c 4193 drivers/media/dvb-frontends/dib8000.c c->post_bit_error.len = 4; c 4194 drivers/media/dvb-frontends/dib8000.c c->post_bit_count.len = 4; c 4197 drivers/media/dvb-frontends/dib8000.c unsigned nsegs = c->layer[i].segment_count; c 4212 drivers/media/dvb-frontends/dib8000.c c->post_bit_error.stat[1 + i].scale = FE_SCALE_COUNTER; c 4213 drivers/media/dvb-frontends/dib8000.c c->post_bit_error.stat[1 + i].uvalue += val; c 4215 drivers/media/dvb-frontends/dib8000.c c->post_bit_count.stat[1 + i].scale = 
FE_SCALE_COUNTER; c 4216 drivers/media/dvb-frontends/dib8000.c c->post_bit_count.stat[1 + i].uvalue += 100000000; c 4222 drivers/media/dvb-frontends/dib8000.c c->block_error.stat[1 + i].scale = FE_SCALE_COUNTER; c 4223 drivers/media/dvb-frontends/dib8000.c c->block_error.stat[1 + i].uvalue += val; c 4230 drivers/media/dvb-frontends/dib8000.c c->block_count.stat[0].scale = FE_SCALE_COUNTER; c 4231 drivers/media/dvb-frontends/dib8000.c c->block_count.stat[0].uvalue += blocks; c 1897 drivers/media/dvb-frontends/dib9000.c struct dtv_frontend_properties *c) c 1917 drivers/media/dvb-frontends/dib9000.c state->fe[index_frontend]->ops.get_frontend(state->fe[index_frontend], c); c 1951 drivers/media/dvb-frontends/dib9000.c state->fe[index_frontend]->dtv_property_cache.inversion = c->inversion; c 1952 drivers/media/dvb-frontends/dib9000.c state->fe[index_frontend]->dtv_property_cache.transmission_mode = c->transmission_mode; c 1953 drivers/media/dvb-frontends/dib9000.c state->fe[index_frontend]->dtv_property_cache.guard_interval = c->guard_interval; c 1954 drivers/media/dvb-frontends/dib9000.c state->fe[index_frontend]->dtv_property_cache.modulation = c->modulation; c 1955 drivers/media/dvb-frontends/dib9000.c state->fe[index_frontend]->dtv_property_cache.hierarchy = c->hierarchy; c 1956 drivers/media/dvb-frontends/dib9000.c state->fe[index_frontend]->dtv_property_cache.code_rate_HP = c->code_rate_HP; c 1957 drivers/media/dvb-frontends/dib9000.c state->fe[index_frontend]->dtv_property_cache.code_rate_LP = c->code_rate_LP; c 1958 drivers/media/dvb-frontends/dib9000.c state->fe[index_frontend]->dtv_property_cache.rolloff = c->rolloff; c 2205 drivers/media/dvb-frontends/dib9000.c u16 *c; c 2226 drivers/media/dvb-frontends/dib9000.c c = (u16 *)state->i2c_read_buffer; c 2228 drivers/media/dvb-frontends/dib9000.c *ber = c[10] << 16 | c[11]; c 2239 drivers/media/dvb-frontends/dib9000.c u16 *c = (u16 *)state->i2c_read_buffer; c 2266 drivers/media/dvb-frontends/dib9000.c dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2); c 2269 drivers/media/dvb-frontends/dib9000.c val = 65535 - c[4]; c 2283 drivers/media/dvb-frontends/dib9000.c u16 *c = (u16 *)state->i2c_read_buffer; c 2295 drivers/media/dvb-frontends/dib9000.c dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2); c 2298 drivers/media/dvb-frontends/dib9000.c val = c[7]; c 2301 drivers/media/dvb-frontends/dib9000.c val = c[8]; c 2348 drivers/media/dvb-frontends/dib9000.c u16 *c = (u16 *)state->i2c_read_buffer; c 2365 drivers/media/dvb-frontends/dib9000.c dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2); c 2368 drivers/media/dvb-frontends/dib9000.c *unc = c[12]; c 12233 drivers/media/dvb-frontends/drx39xyj/drxj.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 12238 drivers/media/dvb-frontends/drx39xyj/drxj.c if (c->lna) { c 12246 drivers/media/dvb-frontends/drx39xyj/drxj.c return drxj_set_lna_state(demod, c->lna); c 215 drivers/media/dvb-frontends/drxd_hard.c static inline u32 MulDiv32(u32 a, u32 b, u32 c) c 220 drivers/media/dvb-frontends/drxd_hard.c do_div(tmp64, c); c 159 drivers/media/dvb-frontends/drxk_hard.c static inline u32 MulDiv32(u32 a, u32 b, u32 c) c 164 drivers/media/dvb-frontends/drxk_hard.c do_div(tmp64, c); c 169 drivers/media/dvb-frontends/drxk_hard.c static inline u32 Frac28a(u32 a, u32 c) c 175 drivers/media/dvb-frontends/drxk_hard.c R0 = (a % c) << 4; /* 32-28 == 4 shifts possible at max */ c 176 drivers/media/dvb-frontends/drxk_hard.c Q1 = a / c; /* c 183 
drivers/media/dvb-frontends/drxk_hard.c Q1 = (Q1 << 4) | (R0 / c); c 184 drivers/media/dvb-frontends/drxk_hard.c R0 = (R0 % c) << 4; c 187 drivers/media/dvb-frontends/drxk_hard.c if ((R0 >> 3) >= c) c 2491 drivers/media/dvb-frontends/drxk_hard.c u32 c = 0; c 2569 drivers/media/dvb-frontends/drxk_hard.c c = log10times100(sqr_err_iq); c 2571 drivers/media/dvb-frontends/drxk_hard.c i_mer = a + b - c; c 6508 drivers/media/dvb-frontends/drxk_hard.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 6540 drivers/media/dvb-frontends/drxk_hard.c get_strength(state, &c->strength.stat[0].uvalue); c 6541 drivers/media/dvb-frontends/drxk_hard.c c->strength.stat[0].scale = FE_SCALE_RELATIVE; c 6546 drivers/media/dvb-frontends/drxk_hard.c c->cnr.stat[0].svalue = cnr * 100; c 6547 drivers/media/dvb-frontends/drxk_hard.c c->cnr.stat[0].scale = FE_SCALE_DECIBEL; c 6549 drivers/media/dvb-frontends/drxk_hard.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 6553 drivers/media/dvb-frontends/drxk_hard.c c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 6554 drivers/media/dvb-frontends/drxk_hard.c c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 6555 drivers/media/dvb-frontends/drxk_hard.c c->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 6556 drivers/media/dvb-frontends/drxk_hard.c c->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 6557 drivers/media/dvb-frontends/drxk_hard.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 6558 drivers/media/dvb-frontends/drxk_hard.c c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 6610 drivers/media/dvb-frontends/drxk_hard.c c->block_error.stat[0].scale = FE_SCALE_COUNTER; c 6611 drivers/media/dvb-frontends/drxk_hard.c c->block_error.stat[0].uvalue += pkt_error_count; c 6612 drivers/media/dvb-frontends/drxk_hard.c c->block_count.stat[0].scale = FE_SCALE_COUNTER; c 6613 drivers/media/dvb-frontends/drxk_hard.c c->block_count.stat[0].uvalue += pkt_count; c 6615 drivers/media/dvb-frontends/drxk_hard.c c->pre_bit_error.stat[0].scale = FE_SCALE_COUNTER; c 6616 drivers/media/dvb-frontends/drxk_hard.c c->pre_bit_error.stat[0].uvalue += pre_bit_err_count; c 6617 drivers/media/dvb-frontends/drxk_hard.c c->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER; c 6618 drivers/media/dvb-frontends/drxk_hard.c c->pre_bit_count.stat[0].uvalue += pre_bit_count; c 6620 drivers/media/dvb-frontends/drxk_hard.c c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c 6621 drivers/media/dvb-frontends/drxk_hard.c c->post_bit_error.stat[0].uvalue += post_bit_err_count; c 6622 drivers/media/dvb-frontends/drxk_hard.c c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; c 6623 drivers/media/dvb-frontends/drxk_hard.c c->post_bit_count.stat[0].uvalue += post_bit_count; c 6650 drivers/media/dvb-frontends/drxk_hard.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 6659 drivers/media/dvb-frontends/drxk_hard.c *strength = c->strength.stat[0].uvalue; c 424 drivers/media/dvb-frontends/ds3000.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 429 drivers/media/dvb-frontends/ds3000.c switch (c->delivery_system) { c 462 drivers/media/dvb-frontends/ds3000.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 468 drivers/media/dvb-frontends/ds3000.c switch (c->delivery_system) { c 536 drivers/media/dvb-frontends/ds3000.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 561 drivers/media/dvb-frontends/ds3000.c switch (c->delivery_system) { c 622 drivers/media/dvb-frontends/ds3000.c struct 
dtv_frontend_properties *c = &fe->dtv_property_cache; c 628 drivers/media/dvb-frontends/ds3000.c switch (c->delivery_system) { c 886 drivers/media/dvb-frontends/ds3000.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 910 drivers/media/dvb-frontends/ds3000.c switch (c->delivery_system) { c 928 drivers/media/dvb-frontends/ds3000.c if (c->symbol_rate >= 30000000) c 942 drivers/media/dvb-frontends/ds3000.c if ((c->symbol_rate < ds3000_ops.info.symbol_rate_min) || c 943 drivers/media/dvb-frontends/ds3000.c (c->symbol_rate > ds3000_ops.info.symbol_rate_max)) { c 945 drivers/media/dvb-frontends/ds3000.c __func__, c->symbol_rate, c 952 drivers/media/dvb-frontends/ds3000.c if ((c->symbol_rate / 1000) <= 5000) { c 953 drivers/media/dvb-frontends/ds3000.c value = 29777 / (c->symbol_rate / 1000) + 1; c 960 drivers/media/dvb-frontends/ds3000.c } else if ((c->symbol_rate / 1000) <= 10000) { c 961 drivers/media/dvb-frontends/ds3000.c value = 92166 / (c->symbol_rate / 1000) + 1; c 968 drivers/media/dvb-frontends/ds3000.c } else if ((c->symbol_rate / 1000) <= 20000) { c 969 drivers/media/dvb-frontends/ds3000.c value = 64516 / (c->symbol_rate / 1000) + 1; c 975 drivers/media/dvb-frontends/ds3000.c value = 129032 / (c->symbol_rate / 1000) + 1; c 983 drivers/media/dvb-frontends/ds3000.c value = (((c->symbol_rate / 1000) << 16) + c 1001 drivers/media/dvb-frontends/ds3000.c switch (c->delivery_system) { c 1019 drivers/media/dvb-frontends/ds3000.c offset_khz = frequency - c->frequency; c 340 drivers/media/dvb-frontends/dvb-pll.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 342 drivers/media/dvb-frontends/dvb-pll.c u32 b_w = (c->symbol_rate * 27) / 32000; c 668 drivers/media/dvb-frontends/dvb-pll.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 680 drivers/media/dvb-frontends/dvb-pll.c result = dvb_pll_configure(fe, buf, c->frequency); c 693 drivers/media/dvb-frontends/dvb-pll.c priv->bandwidth = c->bandwidth_hz; c 701 drivers/media/dvb-frontends/dvb-pll.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 709 drivers/media/dvb-frontends/dvb-pll.c result = dvb_pll_configure(fe, buf + 1, c->frequency); c 718 drivers/media/dvb-frontends/dvb-pll.c priv->bandwidth = c->bandwidth_hz; c 77 drivers/media/dvb-frontends/ec100.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 83 drivers/media/dvb-frontends/ec100.c __func__, c->frequency, c->bandwidth_hz); c 107 drivers/media/dvb-frontends/ec100.c switch (c->bandwidth_hz) { c 133 drivers/media/dvb-frontends/gp8psk-fe.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 135 drivers/media/dvb-frontends/gp8psk-fe.c u32 freq = c->frequency * 1000; c 145 drivers/media/dvb-frontends/gp8psk-fe.c if (c->delivery_system == SYS_DVBS && c->modulation == PSK_8) c 146 drivers/media/dvb-frontends/gp8psk-fe.c c->delivery_system = SYS_TURBO; c 148 drivers/media/dvb-frontends/gp8psk-fe.c switch (c->delivery_system) { c 150 drivers/media/dvb-frontends/gp8psk-fe.c if (c->modulation != QPSK) { c 152 drivers/media/dvb-frontends/gp8psk-fe.c __func__, c->modulation); c 155 drivers/media/dvb-frontends/gp8psk-fe.c c->fec_inner = FEC_AUTO; c 166 drivers/media/dvb-frontends/gp8psk-fe.c __func__, c->delivery_system); c 170 drivers/media/dvb-frontends/gp8psk-fe.c cmd[0] = c->symbol_rate & 0xff; c 171 drivers/media/dvb-frontends/gp8psk-fe.c cmd[1] = (c->symbol_rate >> 8) & 0xff; c 172 drivers/media/dvb-frontends/gp8psk-fe.c cmd[2] = (c->symbol_rate >> 16) & 0xff; c 173 drivers/media/dvb-frontends/gp8psk-fe.c cmd[3] 
= (c->symbol_rate >> 24) & 0xff; c 174 drivers/media/dvb-frontends/gp8psk-fe.c switch (c->modulation) { c 179 drivers/media/dvb-frontends/gp8psk-fe.c switch (c->fec_inner) { c 195 drivers/media/dvb-frontends/gp8psk-fe.c if (c->delivery_system == SYS_TURBO) c 202 drivers/media/dvb-frontends/gp8psk-fe.c switch (c->fec_inner) { c 223 drivers/media/dvb-frontends/gp8psk-fe.c __func__, c->modulation); c 253 drivers/media/dvb-frontends/itd1000.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 257 drivers/media/dvb-frontends/itd1000.c itd1000_set_lo(state, c->frequency); c 258 drivers/media/dvb-frontends/itd1000.c itd1000_set_lpf_bw(state, c->symbol_rate); c 120 drivers/media/dvb-frontends/ix2505v.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 122 drivers/media/dvb-frontends/ix2505v.c u32 frequency = c->frequency; c 123 drivers/media/dvb-frontends/ix2505v.c u32 b_w = (c->symbol_rate * 27) / 32000; c 932 drivers/media/dvb-frontends/lg2160.c struct dtv_frontend_properties *c) c 939 drivers/media/dvb-frontends/lg2160.c c->modulation = VSB_8; c 940 drivers/media/dvb-frontends/lg2160.c c->frequency = state->current_frequency; c 941 drivers/media/dvb-frontends/lg2160.c c->delivery_system = SYS_ATSCMH; c 944 drivers/media/dvb-frontends/lg2160.c &c->atscmh_fic_ver); c 947 drivers/media/dvb-frontends/lg2160.c if (state->fic_ver != c->atscmh_fic_ver) { c 948 drivers/media/dvb-frontends/lg2160.c state->fic_ver = c->atscmh_fic_ver; c 952 drivers/media/dvb-frontends/lg2160.c &c->atscmh_parade_id); c 956 drivers/media/dvb-frontends/lg2160.c c->atscmh_parade_id = state->parade_id; c 959 drivers/media/dvb-frontends/lg2160.c &c->atscmh_nog); c 963 drivers/media/dvb-frontends/lg2160.c &c->atscmh_tnog); c 967 drivers/media/dvb-frontends/lg2160.c &c->atscmh_sgn); c 971 drivers/media/dvb-frontends/lg2160.c &c->atscmh_prc); c 977 drivers/media/dvb-frontends/lg2160.c &c->atscmh_rs_frame_mode); c 982 drivers/media/dvb-frontends/lg2160.c &c->atscmh_rs_frame_ensemble); c 987 drivers/media/dvb-frontends/lg2160.c &c->atscmh_rs_code_mode_pri, c 989 drivers/media/dvb-frontends/lg2160.c &c->atscmh_rs_code_mode_sec); c 994 drivers/media/dvb-frontends/lg2160.c &c->atscmh_sccc_block_mode); c 999 drivers/media/dvb-frontends/lg2160.c &c->atscmh_sccc_code_mode_a, c 1001 drivers/media/dvb-frontends/lg2160.c &c->atscmh_sccc_code_mode_b, c 1003 drivers/media/dvb-frontends/lg2160.c &c->atscmh_sccc_code_mode_c, c 1005 drivers/media/dvb-frontends/lg2160.c &c->atscmh_sccc_code_mode_d); c 1011 drivers/media/dvb-frontends/lg2160.c (u8 *)&c->atscmh_fic_err); c 1015 drivers/media/dvb-frontends/lg2160.c &c->atscmh_crc_err); c 1019 drivers/media/dvb-frontends/lg2160.c &c->atscmh_rs_err); c 1025 drivers/media/dvb-frontends/lg2160.c if (((c->atscmh_rs_err >= 240) && c 1026 drivers/media/dvb-frontends/lg2160.c (c->atscmh_crc_err >= 240)) && c 1044 drivers/media/dvb-frontends/lg2160.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 1112 drivers/media/dvb-frontends/lg2160.c lg216x_get_frontend(fe, c); c 968 drivers/media/dvb-frontends/lgdt3305.c static u32 calculate_snr(u32 mse, u32 c) c 974 drivers/media/dvb-frontends/lgdt3305.c if (mse > c) { c 980 drivers/media/dvb-frontends/lgdt3305.c return 10*(c - mse); c 987 drivers/media/dvb-frontends/lgdt3305.c u32 c; /* per-modulation SNR calculation constant */ c 997 drivers/media/dvb-frontends/lgdt3305.c c = 73957994; /* log10(25*32^2)*2^24 */ c 1004 drivers/media/dvb-frontends/lgdt3305.c c = 73957994; /* log10(25*32^2)*2^24 */ c 1012 
drivers/media/dvb-frontends/lgdt3305.c c = (state->current_modulation == QAM_64) ? c 1019 drivers/media/dvb-frontends/lgdt3305.c state->snr = calculate_snr(noise, c); c 475 drivers/media/dvb-frontends/lgdt330x.c static u32 calculate_snr(u32 mse, u32 c) c 481 drivers/media/dvb-frontends/lgdt330x.c if (mse > c) { c 489 drivers/media/dvb-frontends/lgdt330x.c return 10 * (c - mse); c 497 drivers/media/dvb-frontends/lgdt330x.c u32 c; /* per-modulation SNR calculation constant */ c 506 drivers/media/dvb-frontends/lgdt330x.c c = 69765745; /* log10(25*24^2)*2^24 */ c 511 drivers/media/dvb-frontends/lgdt330x.c c = 73957994; /* log10(25*32^2)*2^24 */ c 518 drivers/media/dvb-frontends/lgdt330x.c c = state->current_modulation == QAM_64 ? 97939837 : 98026066; c 531 drivers/media/dvb-frontends/lgdt330x.c state->snr = calculate_snr(noise, c); c 544 drivers/media/dvb-frontends/lgdt330x.c u32 c; /* per-modulation SNR calculation constant */ c 553 drivers/media/dvb-frontends/lgdt330x.c c = 73957994; /* log10(25*32^2)*2^24 */ c 558 drivers/media/dvb-frontends/lgdt330x.c c = 73957994; /* log10(25*32^2)*2^24 */ c 565 drivers/media/dvb-frontends/lgdt330x.c c = state->current_modulation == QAM_64 ? 97939837 : 98026066; c 576 drivers/media/dvb-frontends/lgdt330x.c state->snr = calculate_snr(noise, c); c 88 drivers/media/dvb-frontends/m88ds3103.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 100 drivers/media/dvb-frontends/m88ds3103.c switch (c->delivery_system) { c 138 drivers/media/dvb-frontends/m88ds3103.c switch (c->delivery_system) { c 191 drivers/media/dvb-frontends/m88ds3103.c c->cnr.stat[0].scale = FE_SCALE_DECIBEL; c 192 drivers/media/dvb-frontends/m88ds3103.c c->cnr.stat[0].svalue = cnr; c 194 drivers/media/dvb-frontends/m88ds3103.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 197 drivers/media/dvb-frontends/m88ds3103.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 204 drivers/media/dvb-frontends/m88ds3103.c switch (c->delivery_system) { c 276 drivers/media/dvb-frontends/m88ds3103.c c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c 277 drivers/media/dvb-frontends/m88ds3103.c c->post_bit_error.stat[0].uvalue = dev->post_bit_error; c 278 drivers/media/dvb-frontends/m88ds3103.c c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; c 279 drivers/media/dvb-frontends/m88ds3103.c c->post_bit_count.stat[0].uvalue = dev->post_bit_count; c 281 drivers/media/dvb-frontends/m88ds3103.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 282 drivers/media/dvb-frontends/m88ds3103.c c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 295 drivers/media/dvb-frontends/m88ds3103.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 309 drivers/media/dvb-frontends/m88ds3103.c c->delivery_system, c->modulation, c->frequency, c->symbol_rate, c 310 drivers/media/dvb-frontends/m88ds3103.c c->inversion, c->pilot, c->rolloff); c 346 drivers/media/dvb-frontends/m88ds3103.c tuner_frequency_khz = c->frequency; c 351 drivers/media/dvb-frontends/m88ds3103.c if (c->symbol_rate > 45010000) c 356 drivers/media/dvb-frontends/m88ds3103.c if (c->delivery_system == SYS_DVBS) c 377 drivers/media/dvb-frontends/m88ds3103.c if (c->delivery_system == SYS_DVBS) c 380 drivers/media/dvb-frontends/m88ds3103.c if (c->symbol_rate < 18000000) c 382 drivers/media/dvb-frontends/m88ds3103.c else if (c->symbol_rate < 28000000) c 424 drivers/media/dvb-frontends/m88ds3103.c switch (c->delivery_system) { c 450 drivers/media/dvb-frontends/m88ds3103.c if (c->delivery_system != dev->delivery_system) { c 457 
drivers/media/dvb-frontends/m88ds3103.c if (c->delivery_system == SYS_DVBS2 && c 458 drivers/media/dvb-frontends/m88ds3103.c c->symbol_rate <= 5000000) { c 539 drivers/media/dvb-frontends/m88ds3103.c if (c->symbol_rate <= 3000000) c 541 drivers/media/dvb-frontends/m88ds3103.c else if (c->symbol_rate <= 10000000) c 562 drivers/media/dvb-frontends/m88ds3103.c u16tmp = DIV_ROUND_CLOSEST_ULL((u64)c->symbol_rate * 0x10000, dev->mclk); c 582 drivers/media/dvb-frontends/m88ds3103.c (tuner_frequency_khz - c->frequency)); c 585 drivers/media/dvb-frontends/m88ds3103.c s32tmp = 0x10000 * (tuner_frequency_khz - c->frequency); c 601 drivers/media/dvb-frontends/m88ds3103.c dev->delivery_system = c->delivery_system; c 613 drivers/media/dvb-frontends/m88ds3103.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 712 drivers/media/dvb-frontends/m88ds3103.c c->cnr.len = 1; c 713 drivers/media/dvb-frontends/m88ds3103.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 714 drivers/media/dvb-frontends/m88ds3103.c c->post_bit_error.len = 1; c 715 drivers/media/dvb-frontends/m88ds3103.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 716 drivers/media/dvb-frontends/m88ds3103.c c->post_bit_count.len = 1; c 717 drivers/media/dvb-frontends/m88ds3103.c c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 766 drivers/media/dvb-frontends/m88ds3103.c struct dtv_frontend_properties *c) c 780 drivers/media/dvb-frontends/m88ds3103.c switch (c->delivery_system) { c 792 drivers/media/dvb-frontends/m88ds3103.c c->inversion = INVERSION_OFF; c 795 drivers/media/dvb-frontends/m88ds3103.c c->inversion = INVERSION_ON; c 801 drivers/media/dvb-frontends/m88ds3103.c c->fec_inner = FEC_7_8; c 804 drivers/media/dvb-frontends/m88ds3103.c c->fec_inner = FEC_5_6; c 807 drivers/media/dvb-frontends/m88ds3103.c c->fec_inner = FEC_3_4; c 810 drivers/media/dvb-frontends/m88ds3103.c c->fec_inner = FEC_2_3; c 813 drivers/media/dvb-frontends/m88ds3103.c c->fec_inner = FEC_1_2; c 819 drivers/media/dvb-frontends/m88ds3103.c c->modulation = QPSK; c 837 drivers/media/dvb-frontends/m88ds3103.c c->fec_inner = FEC_2_5; c 840 drivers/media/dvb-frontends/m88ds3103.c c->fec_inner = FEC_1_2; c 843 drivers/media/dvb-frontends/m88ds3103.c c->fec_inner = FEC_3_5; c 846 drivers/media/dvb-frontends/m88ds3103.c c->fec_inner = FEC_2_3; c 849 drivers/media/dvb-frontends/m88ds3103.c c->fec_inner = FEC_3_4; c 852 drivers/media/dvb-frontends/m88ds3103.c c->fec_inner = FEC_4_5; c 855 drivers/media/dvb-frontends/m88ds3103.c c->fec_inner = FEC_5_6; c 858 drivers/media/dvb-frontends/m88ds3103.c c->fec_inner = FEC_8_9; c 861 drivers/media/dvb-frontends/m88ds3103.c c->fec_inner = FEC_9_10; c 869 drivers/media/dvb-frontends/m88ds3103.c c->pilot = PILOT_OFF; c 872 drivers/media/dvb-frontends/m88ds3103.c c->pilot = PILOT_ON; c 878 drivers/media/dvb-frontends/m88ds3103.c c->modulation = QPSK; c 881 drivers/media/dvb-frontends/m88ds3103.c c->modulation = PSK_8; c 884 drivers/media/dvb-frontends/m88ds3103.c c->modulation = APSK_16; c 887 drivers/media/dvb-frontends/m88ds3103.c c->modulation = APSK_32; c 895 drivers/media/dvb-frontends/m88ds3103.c c->inversion = INVERSION_OFF; c 898 drivers/media/dvb-frontends/m88ds3103.c c->inversion = INVERSION_ON; c 904 drivers/media/dvb-frontends/m88ds3103.c c->rolloff = ROLLOFF_35; c 907 drivers/media/dvb-frontends/m88ds3103.c c->rolloff = ROLLOFF_25; c 910 drivers/media/dvb-frontends/m88ds3103.c c->rolloff = ROLLOFF_20; c 926 drivers/media/dvb-frontends/m88ds3103.c c->symbol_rate = 
DIV_ROUND_CLOSEST_ULL((u64)(buf[1] << 8 | buf[0] << 0) * dev->mclk, 0x10000); c 936 drivers/media/dvb-frontends/m88ds3103.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 938 drivers/media/dvb-frontends/m88ds3103.c if (c->cnr.stat[0].scale == FE_SCALE_DECIBEL) c 939 drivers/media/dvb-frontends/m88ds3103.c *snr = div_s64(c->cnr.stat[0].svalue, 100); c 599 drivers/media/dvb-frontends/m88rs2000.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 608 drivers/media/dvb-frontends/m88rs2000.c if (c->delivery_system != SYS_DVBS) { c 610 drivers/media/dvb-frontends/m88rs2000.c __func__, c->delivery_system); c 627 drivers/media/dvb-frontends/m88rs2000.c offset = (s16)((s32)tuner_freq - c->frequency); c 633 drivers/media/dvb-frontends/m88rs2000.c if (((c->frequency % 192857) >= (192857 - 3000)) || c 634 drivers/media/dvb-frontends/m88rs2000.c (c->frequency % 192857) <= 3000) c 644 drivers/media/dvb-frontends/m88rs2000.c if (c->symbol_rate > 27500000) c 654 drivers/media/dvb-frontends/m88rs2000.c ret = m88rs2000_set_fec(state, c->fec_inner); c 665 drivers/media/dvb-frontends/m88rs2000.c ret = m88rs2000_set_symbolrate(fe, c->symbol_rate); c 696 drivers/media/dvb-frontends/m88rs2000.c state->tuner_frequency = c->frequency; c 697 drivers/media/dvb-frontends/m88rs2000.c state->symbol_rate = c->symbol_rate; c 702 drivers/media/dvb-frontends/m88rs2000.c struct dtv_frontend_properties *c) c 706 drivers/media/dvb-frontends/m88rs2000.c c->fec_inner = state->fec_inner; c 707 drivers/media/dvb-frontends/m88rs2000.c c->frequency = state->tuner_frequency; c 708 drivers/media/dvb-frontends/m88rs2000.c c->symbol_rate = state->symbol_rate; c 715 drivers/media/dvb-frontends/m88rs2000.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 717 drivers/media/dvb-frontends/m88rs2000.c if (c->symbol_rate > 3000000) c 722 drivers/media/dvb-frontends/m88rs2000.c tune->step_size = c->symbol_rate / 16000; c 723 drivers/media/dvb-frontends/m88rs2000.c tune->max_drift = c->symbol_rate / 2000; c 321 drivers/media/dvb-frontends/mb86a20s.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 327 drivers/media/dvb-frontends/mb86a20s.c return c->strength.stat[0].uvalue; c 330 drivers/media/dvb-frontends/mb86a20s.c c->strength.stat[0].uvalue = 0; c 368 drivers/media/dvb-frontends/mb86a20s.c c->strength.stat[0].uvalue = rf; c 500 drivers/media/dvb-frontends/mb86a20s.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 505 drivers/media/dvb-frontends/mb86a20s.c c->delivery_system = SYS_ISDBT; c 506 drivers/media/dvb-frontends/mb86a20s.c c->bandwidth_hz = 6000000; c 509 drivers/media/dvb-frontends/mb86a20s.c c->isdbt_layer_enabled = 0; c 510 drivers/media/dvb-frontends/mb86a20s.c c->transmission_mode = TRANSMISSION_MODE_AUTO; c 511 drivers/media/dvb-frontends/mb86a20s.c c->guard_interval = GUARD_INTERVAL_AUTO; c 512 drivers/media/dvb-frontends/mb86a20s.c c->isdbt_sb_mode = 0; c 513 drivers/media/dvb-frontends/mb86a20s.c c->isdbt_sb_segment_count = 0; c 627 drivers/media/dvb-frontends/mb86a20s.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 642 drivers/media/dvb-frontends/mb86a20s.c c->isdbt_partial_reception = (rc & 0x10) ? 
1 : 0; c 654 drivers/media/dvb-frontends/mb86a20s.c c->layer[layer].segment_count = rc; c 656 drivers/media/dvb-frontends/mb86a20s.c c->layer[layer].segment_count = 0; c 660 drivers/media/dvb-frontends/mb86a20s.c c->isdbt_layer_enabled |= 1 << layer; c 666 drivers/media/dvb-frontends/mb86a20s.c c->layer[layer].modulation = rc; c 672 drivers/media/dvb-frontends/mb86a20s.c c->layer[layer].fec = rc; c 678 drivers/media/dvb-frontends/mb86a20s.c c->layer[layer].interleaving = rc; c 679 drivers/media/dvb-frontends/mb86a20s.c mb86a20s_layer_bitrate(fe, layer, c->layer[layer].modulation, c 680 drivers/media/dvb-frontends/mb86a20s.c c->layer[layer].fec, c 681 drivers/media/dvb-frontends/mb86a20s.c c->guard_interval, c 682 drivers/media/dvb-frontends/mb86a20s.c c->layer[layer].segment_count); c 689 drivers/media/dvb-frontends/mb86a20s.c c->isdbt_sb_mode = 1; c 691 drivers/media/dvb-frontends/mb86a20s.c if (!c->isdbt_sb_segment_count) c 692 drivers/media/dvb-frontends/mb86a20s.c c->isdbt_sb_segment_count = 1; c 699 drivers/media/dvb-frontends/mb86a20s.c c->transmission_mode = TRANSMISSION_MODE_AUTO; c 704 drivers/media/dvb-frontends/mb86a20s.c c->transmission_mode = TRANSMISSION_MODE_4K; c 707 drivers/media/dvb-frontends/mb86a20s.c c->transmission_mode = TRANSMISSION_MODE_8K; c 711 drivers/media/dvb-frontends/mb86a20s.c c->guard_interval = GUARD_INTERVAL_AUTO; c 716 drivers/media/dvb-frontends/mb86a20s.c c->guard_interval = GUARD_INTERVAL_1_4; c 719 drivers/media/dvb-frontends/mb86a20s.c c->guard_interval = GUARD_INTERVAL_1_8; c 722 drivers/media/dvb-frontends/mb86a20s.c c->guard_interval = GUARD_INTERVAL_1_16; c 731 drivers/media/dvb-frontends/mb86a20s.c c->isdbt_layer_enabled = 0; c 739 drivers/media/dvb-frontends/mb86a20s.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 745 drivers/media/dvb-frontends/mb86a20s.c if (state->last_frequency != c->frequency) { c 746 drivers/media/dvb-frontends/mb86a20s.c memset(&c->cnr, 0, sizeof(c->cnr)); c 747 drivers/media/dvb-frontends/mb86a20s.c memset(&c->pre_bit_error, 0, sizeof(c->pre_bit_error)); c 748 drivers/media/dvb-frontends/mb86a20s.c memset(&c->pre_bit_count, 0, sizeof(c->pre_bit_count)); c 749 drivers/media/dvb-frontends/mb86a20s.c memset(&c->post_bit_error, 0, sizeof(c->post_bit_error)); c 750 drivers/media/dvb-frontends/mb86a20s.c memset(&c->post_bit_count, 0, sizeof(c->post_bit_count)); c 751 drivers/media/dvb-frontends/mb86a20s.c memset(&c->block_error, 0, sizeof(c->block_error)); c 752 drivers/media/dvb-frontends/mb86a20s.c memset(&c->block_count, 0, sizeof(c->block_count)); c 754 drivers/media/dvb-frontends/mb86a20s.c state->last_frequency = c->frequency; c 1386 drivers/media/dvb-frontends/mb86a20s.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 1415 drivers/media/dvb-frontends/mb86a20s.c c->cnr.stat[0].scale = FE_SCALE_DECIBEL; c 1416 drivers/media/dvb-frontends/mb86a20s.c c->cnr.stat[0].svalue = cnr; c 1433 drivers/media/dvb-frontends/mb86a20s.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 1458 drivers/media/dvb-frontends/mb86a20s.c if (!(c->isdbt_layer_enabled & (1 << layer))) { c 1459 drivers/media/dvb-frontends/mb86a20s.c c->cnr.stat[1 + layer].scale = FE_SCALE_NOT_AVAILABLE; c 1485 drivers/media/dvb-frontends/mb86a20s.c switch (c->layer[layer].modulation) { c 1503 drivers/media/dvb-frontends/mb86a20s.c c->cnr.stat[1 + layer].scale = FE_SCALE_DECIBEL; c 1504 drivers/media/dvb-frontends/mb86a20s.c c->cnr.stat[1 + layer].svalue = cnr; c 1535 drivers/media/dvb-frontends/mb86a20s.c struct 
dtv_frontend_properties *c = &fe->dtv_property_cache; c 1543 drivers/media/dvb-frontends/mb86a20s.c c->strength.len = 1; c 1546 drivers/media/dvb-frontends/mb86a20s.c c->cnr.len = NUM_LAYERS + 1; c 1547 drivers/media/dvb-frontends/mb86a20s.c c->pre_bit_error.len = NUM_LAYERS + 1; c 1548 drivers/media/dvb-frontends/mb86a20s.c c->pre_bit_count.len = NUM_LAYERS + 1; c 1549 drivers/media/dvb-frontends/mb86a20s.c c->post_bit_error.len = NUM_LAYERS + 1; c 1550 drivers/media/dvb-frontends/mb86a20s.c c->post_bit_count.len = NUM_LAYERS + 1; c 1551 drivers/media/dvb-frontends/mb86a20s.c c->block_error.len = NUM_LAYERS + 1; c 1552 drivers/media/dvb-frontends/mb86a20s.c c->block_count.len = NUM_LAYERS + 1; c 1555 drivers/media/dvb-frontends/mb86a20s.c c->strength.stat[0].scale = FE_SCALE_RELATIVE; c 1556 drivers/media/dvb-frontends/mb86a20s.c c->strength.stat[0].uvalue = 0; c 1560 drivers/media/dvb-frontends/mb86a20s.c c->cnr.stat[layer].scale = FE_SCALE_NOT_AVAILABLE; c 1561 drivers/media/dvb-frontends/mb86a20s.c c->pre_bit_error.stat[layer].scale = FE_SCALE_NOT_AVAILABLE; c 1562 drivers/media/dvb-frontends/mb86a20s.c c->pre_bit_count.stat[layer].scale = FE_SCALE_NOT_AVAILABLE; c 1563 drivers/media/dvb-frontends/mb86a20s.c c->post_bit_error.stat[layer].scale = FE_SCALE_NOT_AVAILABLE; c 1564 drivers/media/dvb-frontends/mb86a20s.c c->post_bit_count.stat[layer].scale = FE_SCALE_NOT_AVAILABLE; c 1565 drivers/media/dvb-frontends/mb86a20s.c c->block_error.stat[layer].scale = FE_SCALE_NOT_AVAILABLE; c 1566 drivers/media/dvb-frontends/mb86a20s.c c->block_count.stat[layer].scale = FE_SCALE_NOT_AVAILABLE; c 1573 drivers/media/dvb-frontends/mb86a20s.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 1599 drivers/media/dvb-frontends/mb86a20s.c if (c->isdbt_layer_enabled & (1 << layer)) { c 1607 drivers/media/dvb-frontends/mb86a20s.c c->pre_bit_error.stat[1 + layer].scale = FE_SCALE_COUNTER; c 1608 drivers/media/dvb-frontends/mb86a20s.c c->pre_bit_error.stat[1 + layer].uvalue += bit_error; c 1609 drivers/media/dvb-frontends/mb86a20s.c c->pre_bit_count.stat[1 + layer].scale = FE_SCALE_COUNTER; c 1610 drivers/media/dvb-frontends/mb86a20s.c c->pre_bit_count.stat[1 + layer].uvalue += bit_count; c 1616 drivers/media/dvb-frontends/mb86a20s.c c->pre_bit_error.stat[1 + layer].scale = FE_SCALE_NOT_AVAILABLE; c 1617 drivers/media/dvb-frontends/mb86a20s.c c->pre_bit_count.stat[1 + layer].scale = FE_SCALE_NOT_AVAILABLE; c 1622 drivers/media/dvb-frontends/mb86a20s.c if (c->block_error.stat[1 + layer].scale != FE_SCALE_NOT_AVAILABLE) c 1629 drivers/media/dvb-frontends/mb86a20s.c c->post_bit_error.stat[1 + layer].scale = FE_SCALE_COUNTER; c 1630 drivers/media/dvb-frontends/mb86a20s.c c->post_bit_error.stat[1 + layer].uvalue += bit_error; c 1631 drivers/media/dvb-frontends/mb86a20s.c c->post_bit_count.stat[1 + layer].scale = FE_SCALE_COUNTER; c 1632 drivers/media/dvb-frontends/mb86a20s.c c->post_bit_count.stat[1 + layer].uvalue += bit_count; c 1638 drivers/media/dvb-frontends/mb86a20s.c c->post_bit_error.stat[1 + layer].scale = FE_SCALE_NOT_AVAILABLE; c 1639 drivers/media/dvb-frontends/mb86a20s.c c->post_bit_count.stat[1 + layer].scale = FE_SCALE_NOT_AVAILABLE; c 1644 drivers/media/dvb-frontends/mb86a20s.c if (c->block_error.stat[1 + layer].scale != FE_SCALE_NOT_AVAILABLE) c 1652 drivers/media/dvb-frontends/mb86a20s.c c->block_error.stat[1 + layer].scale = FE_SCALE_COUNTER; c 1653 drivers/media/dvb-frontends/mb86a20s.c c->block_error.stat[1 + layer].uvalue += block_error; c 1654 
drivers/media/dvb-frontends/mb86a20s.c c->block_count.stat[1 + layer].scale = FE_SCALE_COUNTER; c 1655 drivers/media/dvb-frontends/mb86a20s.c c->block_count.stat[1 + layer].uvalue += block_count; c 1661 drivers/media/dvb-frontends/mb86a20s.c c->block_error.stat[1 + layer].scale = FE_SCALE_NOT_AVAILABLE; c 1662 drivers/media/dvb-frontends/mb86a20s.c c->block_count.stat[1 + layer].scale = FE_SCALE_NOT_AVAILABLE; c 1668 drivers/media/dvb-frontends/mb86a20s.c if (c->block_error.stat[1 + layer].scale != FE_SCALE_NOT_AVAILABLE) c 1672 drivers/media/dvb-frontends/mb86a20s.c t_pre_bit_error += c->pre_bit_error.stat[1 + layer].uvalue; c 1673 drivers/media/dvb-frontends/mb86a20s.c t_pre_bit_count += c->pre_bit_count.stat[1 + layer].uvalue; c 1676 drivers/media/dvb-frontends/mb86a20s.c t_post_bit_error += c->post_bit_error.stat[1 + layer].uvalue; c 1677 drivers/media/dvb-frontends/mb86a20s.c t_post_bit_count += c->post_bit_count.stat[1 + layer].uvalue; c 1680 drivers/media/dvb-frontends/mb86a20s.c t_block_error += c->block_error.stat[1 + layer].uvalue; c 1681 drivers/media/dvb-frontends/mb86a20s.c t_block_count += c->block_count.stat[1 + layer].uvalue; c 1697 drivers/media/dvb-frontends/mb86a20s.c c->pre_bit_error.stat[0].scale = FE_SCALE_COUNTER; c 1698 drivers/media/dvb-frontends/mb86a20s.c c->pre_bit_error.stat[0].uvalue = t_pre_bit_error; c 1699 drivers/media/dvb-frontends/mb86a20s.c c->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER; c 1700 drivers/media/dvb-frontends/mb86a20s.c c->pre_bit_count.stat[0].uvalue = t_pre_bit_count; c 1702 drivers/media/dvb-frontends/mb86a20s.c c->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1703 drivers/media/dvb-frontends/mb86a20s.c c->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER; c 1718 drivers/media/dvb-frontends/mb86a20s.c c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c 1719 drivers/media/dvb-frontends/mb86a20s.c c->post_bit_error.stat[0].uvalue = t_post_bit_error; c 1720 drivers/media/dvb-frontends/mb86a20s.c c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; c 1721 drivers/media/dvb-frontends/mb86a20s.c c->post_bit_count.stat[0].uvalue = t_post_bit_count; c 1723 drivers/media/dvb-frontends/mb86a20s.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1724 drivers/media/dvb-frontends/mb86a20s.c c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; c 1735 drivers/media/dvb-frontends/mb86a20s.c c->block_error.stat[0].scale = FE_SCALE_COUNTER; c 1736 drivers/media/dvb-frontends/mb86a20s.c c->block_error.stat[0].uvalue = t_block_error; c 1737 drivers/media/dvb-frontends/mb86a20s.c c->block_count.stat[0].scale = FE_SCALE_COUNTER; c 1738 drivers/media/dvb-frontends/mb86a20s.c c->block_count.stat[0].uvalue = t_block_count; c 1740 drivers/media/dvb-frontends/mb86a20s.c c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1741 drivers/media/dvb-frontends/mb86a20s.c c->block_count.stat[0].scale = FE_SCALE_COUNTER; c 1871 drivers/media/dvb-frontends/mb86a20s.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 1875 drivers/media/dvb-frontends/mb86a20s.c if (!c->isdbt_layer_enabled) c 1876 drivers/media/dvb-frontends/mb86a20s.c c->isdbt_layer_enabled = 7; c 1878 drivers/media/dvb-frontends/mb86a20s.c if (c->isdbt_layer_enabled == 1) c 1880 drivers/media/dvb-frontends/mb86a20s.c else if (c->isdbt_partial_reception) c 1885 drivers/media/dvb-frontends/mb86a20s.c if (c->inversion == INVERSION_ON) c 1890 drivers/media/dvb-frontends/mb86a20s.c if (!c->isdbt_sb_mode) { c 1893 drivers/media/dvb-frontends/mb86a20s.c if 
(c->isdbt_sb_subchannel >= ARRAY_SIZE(mb86a20s_subchannel)) c 1894 drivers/media/dvb-frontends/mb86a20s.c c->isdbt_sb_subchannel = 0; c 1896 drivers/media/dvb-frontends/mb86a20s.c state->subchannel = mb86a20s_subchannel[c->isdbt_sb_subchannel]; c 2013 drivers/media/dvb-frontends/mb86a20s.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 2016 drivers/media/dvb-frontends/mb86a20s.c *strength = c->strength.stat[0].uvalue; c 250 drivers/media/dvb-frontends/mn88443x.c struct dtv_frontend_properties *c) c 254 drivers/media/dvb-frontends/mn88443x.c regmap_write(r_s, ATSIDU_S, c->stream_id >> 8); c 255 drivers/media/dvb-frontends/mn88443x.c regmap_write(r_s, ATSIDL_S, c->stream_id); c 260 drivers/media/dvb-frontends/mn88443x.c struct dtv_frontend_properties *c, c 277 drivers/media/dvb-frontends/mn88443x.c c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 285 drivers/media/dvb-frontends/mn88443x.c c->strength.len = 1; c 286 drivers/media/dvb-frontends/mn88443x.c c->strength.stat[0].scale = FE_SCALE_RELATIVE; c 287 drivers/media/dvb-frontends/mn88443x.c c->strength.stat[0].uvalue = agc; c 291 drivers/media/dvb-frontends/mn88443x.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 324 drivers/media/dvb-frontends/mn88443x.c c->cnr.len = 1; c 325 drivers/media/dvb-frontends/mn88443x.c c->cnr.stat[0].scale = FE_SCALE_DECIBEL; c 326 drivers/media/dvb-frontends/mn88443x.c c->cnr.stat[0].uvalue = cnr; c 331 drivers/media/dvb-frontends/mn88443x.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 332 drivers/media/dvb-frontends/mn88443x.c c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 345 drivers/media/dvb-frontends/mn88443x.c c->post_bit_error.len = 1; c 346 drivers/media/dvb-frontends/mn88443x.c c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c 347 drivers/media/dvb-frontends/mn88443x.c c->post_bit_error.stat[0].uvalue = bit_err; c 348 drivers/media/dvb-frontends/mn88443x.c c->post_bit_count.len = 1; c 349 drivers/media/dvb-frontends/mn88443x.c c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; c 350 drivers/media/dvb-frontends/mn88443x.c c->post_bit_count.stat[0].uvalue = bit_cnt; c 449 drivers/media/dvb-frontends/mn88443x.c struct dtv_frontend_properties *c) c 462 drivers/media/dvb-frontends/mn88443x.c struct dtv_frontend_properties *c, c 480 drivers/media/dvb-frontends/mn88443x.c c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 489 drivers/media/dvb-frontends/mn88443x.c c->strength.len = 1; c 490 drivers/media/dvb-frontends/mn88443x.c c->strength.stat[0].scale = FE_SCALE_RELATIVE; c 491 drivers/media/dvb-frontends/mn88443x.c c->strength.stat[0].uvalue = agc; c 495 drivers/media/dvb-frontends/mn88443x.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 513 drivers/media/dvb-frontends/mn88443x.c c->cnr.len = 1; c 514 drivers/media/dvb-frontends/mn88443x.c c->cnr.stat[0].scale = FE_SCALE_DECIBEL; c 515 drivers/media/dvb-frontends/mn88443x.c c->cnr.stat[0].uvalue = cnr; c 519 drivers/media/dvb-frontends/mn88443x.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 520 drivers/media/dvb-frontends/mn88443x.c c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 537 drivers/media/dvb-frontends/mn88443x.c c->post_bit_error.len = 1; c 538 drivers/media/dvb-frontends/mn88443x.c c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c 539 drivers/media/dvb-frontends/mn88443x.c c->post_bit_error.stat[0].uvalue = bit_err; c 540 drivers/media/dvb-frontends/mn88443x.c c->post_bit_count.len = 1; c 541 drivers/media/dvb-frontends/mn88443x.c 
c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; c 542 drivers/media/dvb-frontends/mn88443x.c c->post_bit_count.stat[0].uvalue = bit_cnt; c 562 drivers/media/dvb-frontends/mn88443x.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 567 drivers/media/dvb-frontends/mn88443x.c if (c->delivery_system == SYS_ISDBS) { c 573 drivers/media/dvb-frontends/mn88443x.c } else if (c->delivery_system == SYS_ISDBT) { c 594 drivers/media/dvb-frontends/mn88443x.c if (c->delivery_system == SYS_ISDBS) c 595 drivers/media/dvb-frontends/mn88443x.c mn88443x_s_tune(chip, c); c 596 drivers/media/dvb-frontends/mn88443x.c else if (c->delivery_system == SYS_ISDBT) c 597 drivers/media/dvb-frontends/mn88443x.c mn88443x_t_tune(chip, c); c 613 drivers/media/dvb-frontends/mn88443x.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 617 drivers/media/dvb-frontends/mn88443x.c if (c->delivery_system == SYS_ISDBS) { c 620 drivers/media/dvb-frontends/mn88443x.c } else if (c->delivery_system == SYS_ISDBT) { c 631 drivers/media/dvb-frontends/mn88443x.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 633 drivers/media/dvb-frontends/mn88443x.c if (c->delivery_system == SYS_ISDBS) c 634 drivers/media/dvb-frontends/mn88443x.c return mn88443x_s_read_status(chip, c, status); c 636 drivers/media/dvb-frontends/mn88443x.c if (c->delivery_system == SYS_ISDBT) c 637 drivers/media/dvb-frontends/mn88443x.c return mn88443x_t_read_status(chip, c, status); c 21 drivers/media/dvb-frontends/mn88472.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 31 drivers/media/dvb-frontends/mn88472.c switch (c->delivery_system) { c 84 drivers/media/dvb-frontends/mn88472.c c->strength.stat[0].scale = FE_SCALE_RELATIVE; c 85 drivers/media/dvb-frontends/mn88472.c c->strength.stat[0].uvalue = utmp1; c 87 drivers/media/dvb-frontends/mn88472.c c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 91 drivers/media/dvb-frontends/mn88472.c if (*status & FE_HAS_VITERBI && c->delivery_system == SYS_DVBT) { c 109 drivers/media/dvb-frontends/mn88472.c c->cnr.stat[0].svalue = stmp; c 110 drivers/media/dvb-frontends/mn88472.c c->cnr.stat[0].scale = FE_SCALE_DECIBEL; c 112 drivers/media/dvb-frontends/mn88472.c c->delivery_system == SYS_DVBT2) { c 144 drivers/media/dvb-frontends/mn88472.c c->cnr.stat[0].svalue = stmp; c 145 drivers/media/dvb-frontends/mn88472.c c->cnr.stat[0].scale = FE_SCALE_DECIBEL; c 147 drivers/media/dvb-frontends/mn88472.c c->delivery_system == SYS_DVBC_ANNEX_A) { c 167 drivers/media/dvb-frontends/mn88472.c c->cnr.stat[0].svalue = stmp; c 168 drivers/media/dvb-frontends/mn88472.c c->cnr.stat[0].scale = FE_SCALE_DECIBEL; c 170 drivers/media/dvb-frontends/mn88472.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 184 drivers/media/dvb-frontends/mn88472.c c->block_error.stat[0].scale = FE_SCALE_COUNTER; c 185 drivers/media/dvb-frontends/mn88472.c c->block_error.stat[0].uvalue += utmp1; c 186 drivers/media/dvb-frontends/mn88472.c c->block_count.stat[0].scale = FE_SCALE_COUNTER; c 187 drivers/media/dvb-frontends/mn88472.c c->block_count.stat[0].uvalue += utmp2; c 189 drivers/media/dvb-frontends/mn88472.c c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 190 drivers/media/dvb-frontends/mn88472.c c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 203 drivers/media/dvb-frontends/mn88472.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 213 drivers/media/dvb-frontends/mn88472.c c->delivery_system, c->modulation, c->frequency, c 214 drivers/media/dvb-frontends/mn88472.c 
c->bandwidth_hz, c->symbol_rate, c->inversion, c->stream_id); c 221 drivers/media/dvb-frontends/mn88472.c switch (c->delivery_system) { c 248 drivers/media/dvb-frontends/mn88472.c switch (c->delivery_system) { c 251 drivers/media/dvb-frontends/mn88472.c switch (c->bandwidth_hz) { c 349 drivers/media/dvb-frontends/mn88472.c switch (c->delivery_system) { c 372 drivers/media/dvb-frontends/mn88472.c (c->stream_id == NO_STREAM_ID_FILTER) ? 0 : c 373 drivers/media/dvb-frontends/mn88472.c c->stream_id ); c 580 drivers/media/dvb-frontends/mn88472.c struct dtv_frontend_properties *c; c 665 drivers/media/dvb-frontends/mn88472.c c = &dev->fe.dtv_property_cache; c 666 drivers/media/dvb-frontends/mn88472.c c->strength.len = 1; c 667 drivers/media/dvb-frontends/mn88472.c c->cnr.len = 1; c 668 drivers/media/dvb-frontends/mn88472.c c->block_error.len = 1; c 669 drivers/media/dvb-frontends/mn88472.c c->block_count.len = 1; c 21 drivers/media/dvb-frontends/mn88473.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 30 drivers/media/dvb-frontends/mn88473.c c->delivery_system, c->modulation, c->frequency, c 31 drivers/media/dvb-frontends/mn88473.c c->bandwidth_hz, c->symbol_rate, c->inversion, c->stream_id); c 38 drivers/media/dvb-frontends/mn88473.c switch (c->delivery_system) { c 59 drivers/media/dvb-frontends/mn88473.c switch (c->delivery_system) { c 62 drivers/media/dvb-frontends/mn88473.c switch (c->bandwidth_hz) { c 142 drivers/media/dvb-frontends/mn88473.c switch (c->delivery_system) { c 218 drivers/media/dvb-frontends/mn88473.c if (c->delivery_system == SYS_DVBT2) { c 220 drivers/media/dvb-frontends/mn88473.c (c->stream_id == NO_STREAM_ID_FILTER) ? 0 : c 221 drivers/media/dvb-frontends/mn88473.c c->stream_id ); c 241 drivers/media/dvb-frontends/mn88473.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 252 drivers/media/dvb-frontends/mn88473.c switch (c->delivery_system) { c 324 drivers/media/dvb-frontends/mn88473.c c->strength.stat[0].scale = FE_SCALE_RELATIVE; c 325 drivers/media/dvb-frontends/mn88473.c c->strength.stat[0].uvalue = utmp1; c 327 drivers/media/dvb-frontends/mn88473.c c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 331 drivers/media/dvb-frontends/mn88473.c if (*status & FE_HAS_VITERBI && c->delivery_system == SYS_DVBT) { c 348 drivers/media/dvb-frontends/mn88473.c c->cnr.stat[0].svalue = stmp; c 349 drivers/media/dvb-frontends/mn88473.c c->cnr.stat[0].scale = FE_SCALE_DECIBEL; c 351 drivers/media/dvb-frontends/mn88473.c c->delivery_system == SYS_DVBT2) { c 382 drivers/media/dvb-frontends/mn88473.c c->cnr.stat[0].svalue = stmp; c 383 drivers/media/dvb-frontends/mn88473.c c->cnr.stat[0].scale = FE_SCALE_DECIBEL; c 385 drivers/media/dvb-frontends/mn88473.c c->delivery_system == SYS_DVBC_ANNEX_A) { c 404 drivers/media/dvb-frontends/mn88473.c c->cnr.stat[0].svalue = stmp; c 405 drivers/media/dvb-frontends/mn88473.c c->cnr.stat[0].scale = FE_SCALE_DECIBEL; c 407 drivers/media/dvb-frontends/mn88473.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 411 drivers/media/dvb-frontends/mn88473.c if (*status & FE_HAS_LOCK && (c->delivery_system == SYS_DVBT || c 412 drivers/media/dvb-frontends/mn88473.c c->delivery_system == SYS_DVBC_ANNEX_A)) { c 424 drivers/media/dvb-frontends/mn88473.c c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c 425 drivers/media/dvb-frontends/mn88473.c c->post_bit_error.stat[0].uvalue += utmp1; c 426 drivers/media/dvb-frontends/mn88473.c c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; c 427 drivers/media/dvb-frontends/mn88473.c 
c->post_bit_count.stat[0].uvalue += utmp2; c 429 drivers/media/dvb-frontends/mn88473.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 430 drivers/media/dvb-frontends/mn88473.c c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 444 drivers/media/dvb-frontends/mn88473.c c->block_error.stat[0].scale = FE_SCALE_COUNTER; c 445 drivers/media/dvb-frontends/mn88473.c c->block_error.stat[0].uvalue += utmp1; c 446 drivers/media/dvb-frontends/mn88473.c c->block_count.stat[0].scale = FE_SCALE_COUNTER; c 447 drivers/media/dvb-frontends/mn88473.c c->block_count.stat[0].uvalue += utmp2; c 449 drivers/media/dvb-frontends/mn88473.c c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 450 drivers/media/dvb-frontends/mn88473.c c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 463 drivers/media/dvb-frontends/mn88473.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 531 drivers/media/dvb-frontends/mn88473.c c->strength.len = 1; c 532 drivers/media/dvb-frontends/mn88473.c c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 533 drivers/media/dvb-frontends/mn88473.c c->cnr.len = 1; c 534 drivers/media/dvb-frontends/mn88473.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 535 drivers/media/dvb-frontends/mn88473.c c->post_bit_error.len = 1; c 536 drivers/media/dvb-frontends/mn88473.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 537 drivers/media/dvb-frontends/mn88473.c c->post_bit_count.len = 1; c 538 drivers/media/dvb-frontends/mn88473.c c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 539 drivers/media/dvb-frontends/mn88473.c c->block_error.len = 1; c 540 drivers/media/dvb-frontends/mn88473.c c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 541 drivers/media/dvb-frontends/mn88473.c c->block_count.len = 1; c 542 drivers/media/dvb-frontends/mn88473.c c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 337 drivers/media/dvb-frontends/mt312.c struct dvb_diseqc_master_cmd *c) c 343 drivers/media/dvb-frontends/mt312.c if ((c->msg_len == 0) || (c->msg_len > sizeof(c->msg))) c 350 drivers/media/dvb-frontends/mt312.c ret = mt312_write(state, (0x80 | DISEQC_INSTR), c->msg, c->msg_len); c 355 drivers/media/dvb-frontends/mt312.c (diseqc_mode & 0x40) | ((c->msg_len - 1) << 3) c 364 drivers/media/dvb-frontends/mt312.c if (c->msg[0] & 0x02) { c 374 drivers/media/dvb-frontends/mt312.c const enum fe_sec_mini_cmd c) c 382 drivers/media/dvb-frontends/mt312.c if (c > SEC_MINI_B) c 390 drivers/media/dvb-frontends/mt312.c (diseqc_mode & 0x40) | mini_tab[c]); c 125 drivers/media/dvb-frontends/nxt200x.c static u16 nxt200x_crc(u16 crc, u8 c) c 128 drivers/media/dvb-frontends/nxt200x.c u16 input = (u16) c & 0xFF; c 443 drivers/media/dvb-frontends/or51132.c static u32 calculate_snr(u32 mse, u32 c) c 449 drivers/media/dvb-frontends/or51132.c if (mse > c) { c 455 drivers/media/dvb-frontends/or51132.c return 10*(c - mse); c 462 drivers/media/dvb-frontends/or51132.c u32 c, usK = 0; c 487 drivers/media/dvb-frontends/or51132.c c = 150204167; c 490 drivers/media/dvb-frontends/or51132.c c = 150290396; c 501 drivers/media/dvb-frontends/or51132.c state->snr = calculate_snr(noise, c) - usK; c 266 drivers/media/dvb-frontends/or51211.c static u32 calculate_snr(u32 mse, u32 c) c 272 drivers/media/dvb-frontends/or51211.c if (mse > c) { c 278 drivers/media/dvb-frontends/or51211.c return 10*(c - mse); c 51 drivers/media/dvb-frontends/rtl2830.c struct dtv_frontend_properties *c = &dev->fe.dtv_property_cache; c 120 drivers/media/dvb-frontends/rtl2830.c c->strength.len 
= 1; c 121 drivers/media/dvb-frontends/rtl2830.c c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 122 drivers/media/dvb-frontends/rtl2830.c c->cnr.len = 1; c 123 drivers/media/dvb-frontends/rtl2830.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 124 drivers/media/dvb-frontends/rtl2830.c c->post_bit_error.len = 1; c 125 drivers/media/dvb-frontends/rtl2830.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 126 drivers/media/dvb-frontends/rtl2830.c c->post_bit_count.len = 1; c 127 drivers/media/dvb-frontends/rtl2830.c c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 162 drivers/media/dvb-frontends/rtl2830.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 192 drivers/media/dvb-frontends/rtl2830.c c->frequency, c->bandwidth_hz, c->inversion); c 198 drivers/media/dvb-frontends/rtl2830.c switch (c->bandwidth_hz) { c 210 drivers/media/dvb-frontends/rtl2830.c c->bandwidth_hz); c 269 drivers/media/dvb-frontends/rtl2830.c struct dtv_frontend_properties *c) c 291 drivers/media/dvb-frontends/rtl2830.c c->modulation = QPSK; c 294 drivers/media/dvb-frontends/rtl2830.c c->modulation = QAM_16; c 297 drivers/media/dvb-frontends/rtl2830.c c->modulation = QAM_64; c 303 drivers/media/dvb-frontends/rtl2830.c c->transmission_mode = TRANSMISSION_MODE_2K; c 306 drivers/media/dvb-frontends/rtl2830.c c->transmission_mode = TRANSMISSION_MODE_8K; c 311 drivers/media/dvb-frontends/rtl2830.c c->guard_interval = GUARD_INTERVAL_1_32; c 314 drivers/media/dvb-frontends/rtl2830.c c->guard_interval = GUARD_INTERVAL_1_16; c 317 drivers/media/dvb-frontends/rtl2830.c c->guard_interval = GUARD_INTERVAL_1_8; c 320 drivers/media/dvb-frontends/rtl2830.c c->guard_interval = GUARD_INTERVAL_1_4; c 326 drivers/media/dvb-frontends/rtl2830.c c->hierarchy = HIERARCHY_NONE; c 329 drivers/media/dvb-frontends/rtl2830.c c->hierarchy = HIERARCHY_1; c 332 drivers/media/dvb-frontends/rtl2830.c c->hierarchy = HIERARCHY_2; c 335 drivers/media/dvb-frontends/rtl2830.c c->hierarchy = HIERARCHY_4; c 341 drivers/media/dvb-frontends/rtl2830.c c->code_rate_HP = FEC_1_2; c 344 drivers/media/dvb-frontends/rtl2830.c c->code_rate_HP = FEC_2_3; c 347 drivers/media/dvb-frontends/rtl2830.c c->code_rate_HP = FEC_3_4; c 350 drivers/media/dvb-frontends/rtl2830.c c->code_rate_HP = FEC_5_6; c 353 drivers/media/dvb-frontends/rtl2830.c c->code_rate_HP = FEC_7_8; c 359 drivers/media/dvb-frontends/rtl2830.c c->code_rate_LP = FEC_1_2; c 362 drivers/media/dvb-frontends/rtl2830.c c->code_rate_LP = FEC_2_3; c 365 drivers/media/dvb-frontends/rtl2830.c c->code_rate_LP = FEC_3_4; c 368 drivers/media/dvb-frontends/rtl2830.c c->code_rate_LP = FEC_5_6; c 371 drivers/media/dvb-frontends/rtl2830.c c->code_rate_LP = FEC_7_8; c 385 drivers/media/dvb-frontends/rtl2830.c struct dtv_frontend_properties *c = &dev->fe.dtv_property_cache; c 423 drivers/media/dvb-frontends/rtl2830.c c->strength.stat[0].scale = FE_SCALE_RELATIVE; c 424 drivers/media/dvb-frontends/rtl2830.c c->strength.stat[0].uvalue = utmp; c 426 drivers/media/dvb-frontends/rtl2830.c c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 465 drivers/media/dvb-frontends/rtl2830.c c->cnr.stat[0].scale = FE_SCALE_DECIBEL; c 466 drivers/media/dvb-frontends/rtl2830.c c->cnr.stat[0].svalue = stmp; c 468 drivers/media/dvb-frontends/rtl2830.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 483 drivers/media/dvb-frontends/rtl2830.c c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c 484 drivers/media/dvb-frontends/rtl2830.c c->post_bit_error.stat[0].uvalue = dev->post_bit_error; c 485 
drivers/media/dvb-frontends/rtl2830.c c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; c 486 drivers/media/dvb-frontends/rtl2830.c c->post_bit_count.stat[0].uvalue = dev->post_bit_count; c 488 drivers/media/dvb-frontends/rtl2830.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 489 drivers/media/dvb-frontends/rtl2830.c c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 501 drivers/media/dvb-frontends/rtl2830.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 503 drivers/media/dvb-frontends/rtl2830.c if (c->cnr.stat[0].scale == FE_SCALE_DECIBEL) c 504 drivers/media/dvb-frontends/rtl2830.c *snr = div_s64(c->cnr.stat[0].svalue, 100); c 531 drivers/media/dvb-frontends/rtl2830.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 533 drivers/media/dvb-frontends/rtl2830.c if (c->strength.stat[0].scale == FE_SCALE_RELATIVE) c 534 drivers/media/dvb-frontends/rtl2830.c *strength = c->strength.stat[0].uvalue; c 249 drivers/media/dvb-frontends/rtl2832.c struct dtv_frontend_properties *c = &dev->fe.dtv_property_cache; c 353 drivers/media/dvb-frontends/rtl2832.c c->strength.len = 1; c 354 drivers/media/dvb-frontends/rtl2832.c c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 355 drivers/media/dvb-frontends/rtl2832.c c->cnr.len = 1; c 356 drivers/media/dvb-frontends/rtl2832.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 357 drivers/media/dvb-frontends/rtl2832.c c->post_bit_error.len = 1; c 358 drivers/media/dvb-frontends/rtl2832.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 359 drivers/media/dvb-frontends/rtl2832.c c->post_bit_count.len = 1; c 360 drivers/media/dvb-frontends/rtl2832.c c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 407 drivers/media/dvb-frontends/rtl2832.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 438 drivers/media/dvb-frontends/rtl2832.c c->frequency, c->bandwidth_hz, c->inversion); c 457 drivers/media/dvb-frontends/rtl2832.c switch (c->bandwidth_hz) { c 472 drivers/media/dvb-frontends/rtl2832.c c->bandwidth_hz); c 525 drivers/media/dvb-frontends/rtl2832.c struct dtv_frontend_properties *c) c 547 drivers/media/dvb-frontends/rtl2832.c c->modulation = QPSK; c 550 drivers/media/dvb-frontends/rtl2832.c c->modulation = QAM_16; c 553 drivers/media/dvb-frontends/rtl2832.c c->modulation = QAM_64; c 559 drivers/media/dvb-frontends/rtl2832.c c->transmission_mode = TRANSMISSION_MODE_2K; c 562 drivers/media/dvb-frontends/rtl2832.c c->transmission_mode = TRANSMISSION_MODE_8K; c 567 drivers/media/dvb-frontends/rtl2832.c c->guard_interval = GUARD_INTERVAL_1_32; c 570 drivers/media/dvb-frontends/rtl2832.c c->guard_interval = GUARD_INTERVAL_1_16; c 573 drivers/media/dvb-frontends/rtl2832.c c->guard_interval = GUARD_INTERVAL_1_8; c 576 drivers/media/dvb-frontends/rtl2832.c c->guard_interval = GUARD_INTERVAL_1_4; c 582 drivers/media/dvb-frontends/rtl2832.c c->hierarchy = HIERARCHY_NONE; c 585 drivers/media/dvb-frontends/rtl2832.c c->hierarchy = HIERARCHY_1; c 588 drivers/media/dvb-frontends/rtl2832.c c->hierarchy = HIERARCHY_2; c 591 drivers/media/dvb-frontends/rtl2832.c c->hierarchy = HIERARCHY_4; c 597 drivers/media/dvb-frontends/rtl2832.c c->code_rate_HP = FEC_1_2; c 600 drivers/media/dvb-frontends/rtl2832.c c->code_rate_HP = FEC_2_3; c 603 drivers/media/dvb-frontends/rtl2832.c c->code_rate_HP = FEC_3_4; c 606 drivers/media/dvb-frontends/rtl2832.c c->code_rate_HP = FEC_5_6; c 609 drivers/media/dvb-frontends/rtl2832.c c->code_rate_HP = FEC_7_8; c 615 drivers/media/dvb-frontends/rtl2832.c 
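The rtl2830 and rtl2832 entries above (and several later drivers in this listing) keep only the DVBv5 statistics current and answer the legacy read_snr()/read_signal_strength() ioctls straight from the property cache; that bridge, condensed into a sketch:

#include <linux/math64.h>
#include <media/dvb_frontend.h>

static int example_read_snr(struct dvb_frontend *fe, u16 *snr)
{
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;

	*snr = 0;
	if (c->cnr.stat[0].scale == FE_SCALE_DECIBEL)
		*snr = div_s64(c->cnr.stat[0].svalue, 100);
	return 0;
}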
c->code_rate_LP = FEC_1_2; c 618 drivers/media/dvb-frontends/rtl2832.c c->code_rate_LP = FEC_2_3; c 621 drivers/media/dvb-frontends/rtl2832.c c->code_rate_LP = FEC_3_4; c 624 drivers/media/dvb-frontends/rtl2832.c c->code_rate_LP = FEC_5_6; c 627 drivers/media/dvb-frontends/rtl2832.c c->code_rate_LP = FEC_7_8; c 641 drivers/media/dvb-frontends/rtl2832.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 679 drivers/media/dvb-frontends/rtl2832.c c->strength.stat[0].scale = FE_SCALE_RELATIVE; c 680 drivers/media/dvb-frontends/rtl2832.c c->strength.stat[0].uvalue = u16tmp; c 682 drivers/media/dvb-frontends/rtl2832.c c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 721 drivers/media/dvb-frontends/rtl2832.c c->cnr.stat[0].scale = FE_SCALE_DECIBEL; c 722 drivers/media/dvb-frontends/rtl2832.c c->cnr.stat[0].svalue = tmp; c 724 drivers/media/dvb-frontends/rtl2832.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 739 drivers/media/dvb-frontends/rtl2832.c c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c 740 drivers/media/dvb-frontends/rtl2832.c c->post_bit_error.stat[0].uvalue = dev->post_bit_error; c 741 drivers/media/dvb-frontends/rtl2832.c c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; c 742 drivers/media/dvb-frontends/rtl2832.c c->post_bit_count.stat[0].uvalue = dev->post_bit_count; c 744 drivers/media/dvb-frontends/rtl2832.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 745 drivers/media/dvb-frontends/rtl2832.c c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 756 drivers/media/dvb-frontends/rtl2832.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 759 drivers/media/dvb-frontends/rtl2832.c if (c->cnr.stat[0].scale == FE_SCALE_DECIBEL) c 760 drivers/media/dvb-frontends/rtl2832.c *snr = div_s64(c->cnr.stat[0].svalue, 100); c 776 drivers/media/dvb-frontends/rtl2832_sdr.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 793 drivers/media/dvb-frontends/rtl2832_sdr.c c->bandwidth_hz = dev->f_adc; c 796 drivers/media/dvb-frontends/rtl2832_sdr.c c->bandwidth_hz = v4l2_ctrl_g_ctrl(bandwidth); c 799 drivers/media/dvb-frontends/rtl2832_sdr.c c->frequency = dev->f_tuner; c 800 drivers/media/dvb-frontends/rtl2832_sdr.c c->delivery_system = SYS_DVBT; c 803 drivers/media/dvb-frontends/rtl2832_sdr.c c->frequency, c->bandwidth_hz); c 1251 drivers/media/dvb-frontends/rtl2832_sdr.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 1274 drivers/media/dvb-frontends/rtl2832_sdr.c c->bandwidth_hz = dev->bandwidth->val; c 528 drivers/media/dvb-frontends/si2165.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 639 drivers/media/dvb-frontends/si2165.c c = &state->fe.dtv_property_cache; c 640 drivers/media/dvb-frontends/si2165.c c->cnr.len = 1; c 641 drivers/media/dvb-frontends/si2165.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 642 drivers/media/dvb-frontends/si2165.c c->post_bit_error.len = 1; c 643 drivers/media/dvb-frontends/si2165.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 644 drivers/media/dvb-frontends/si2165.c c->post_bit_count.len = 1; c 645 drivers/media/dvb-frontends/si2165.c c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 674 drivers/media/dvb-frontends/si2165.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 675 drivers/media/dvb-frontends/si2165.c u32 delsys = c->delivery_system; c 737 drivers/media/dvb-frontends/si2165.c c->cnr.stat[0].scale = FE_SCALE_DECIBEL; c 738 drivers/media/dvb-frontends/si2165.c c->cnr.stat[0].svalue = u32tmp; c 740 
drivers/media/dvb-frontends/si2165.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 744 drivers/media/dvb-frontends/si2165.c if (c->post_bit_error.stat[0].scale == FE_SCALE_NOT_AVAILABLE) { c 751 drivers/media/dvb-frontends/si2165.c c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c 752 drivers/media/dvb-frontends/si2165.c c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; c 753 drivers/media/dvb-frontends/si2165.c c->post_bit_error.stat[0].uvalue = 0; c 754 drivers/media/dvb-frontends/si2165.c c->post_bit_count.stat[0].uvalue = 0; c 775 drivers/media/dvb-frontends/si2165.c c->post_bit_error.stat[0].uvalue += c 777 drivers/media/dvb-frontends/si2165.c c->post_bit_count.stat[0].uvalue += c 792 drivers/media/dvb-frontends/si2165.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 793 drivers/media/dvb-frontends/si2165.c c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 801 drivers/media/dvb-frontends/si2165.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 803 drivers/media/dvb-frontends/si2165.c if (c->cnr.stat[0].scale == FE_SCALE_DECIBEL) c 804 drivers/media/dvb-frontends/si2165.c *snr = div_s64(c->cnr.stat[0].svalue, 100); c 813 drivers/media/dvb-frontends/si2165.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 815 drivers/media/dvb-frontends/si2165.c if (c->post_bit_error.stat[0].scale != FE_SCALE_COUNTER) { c 820 drivers/media/dvb-frontends/si2165.c *ber = c->post_bit_error.stat[0].uvalue - state->ber_prev; c 821 drivers/media/dvb-frontends/si2165.c state->ber_prev = c->post_bit_error.stat[0].uvalue; c 118 drivers/media/dvb-frontends/si2168.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 130 drivers/media/dvb-frontends/si2168.c switch (c->delivery_system) { c 162 drivers/media/dvb-frontends/si2168.c c->cnr.len = 1; c 163 drivers/media/dvb-frontends/si2168.c c->cnr.stat[0].scale = FE_SCALE_DECIBEL; c 164 drivers/media/dvb-frontends/si2168.c c->cnr.stat[0].svalue = cmd.args[3] * 1000 / 4; c 166 drivers/media/dvb-frontends/si2168.c c->cnr.len = 1; c 167 drivers/media/dvb-frontends/si2168.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 195 drivers/media/dvb-frontends/si2168.c c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c 196 drivers/media/dvb-frontends/si2168.c c->post_bit_error.stat[0].uvalue += utmp1; c 197 drivers/media/dvb-frontends/si2168.c c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; c 198 drivers/media/dvb-frontends/si2168.c c->post_bit_count.stat[0].uvalue += utmp2; c 200 drivers/media/dvb-frontends/si2168.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 201 drivers/media/dvb-frontends/si2168.c c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 218 drivers/media/dvb-frontends/si2168.c c->block_error.stat[0].scale = FE_SCALE_COUNTER; c 219 drivers/media/dvb-frontends/si2168.c c->block_error.stat[0].uvalue += utmp1; c 221 drivers/media/dvb-frontends/si2168.c c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 234 drivers/media/dvb-frontends/si2168.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 241 drivers/media/dvb-frontends/si2168.c c->delivery_system, c->modulation, c->frequency, c 242 drivers/media/dvb-frontends/si2168.c c->bandwidth_hz, c->symbol_rate, c->inversion, c 243 drivers/media/dvb-frontends/si2168.c c->stream_id); c 250 drivers/media/dvb-frontends/si2168.c switch (c->delivery_system) { c 265 drivers/media/dvb-frontends/si2168.c if (c->bandwidth_hz == 0) { c 268 drivers/media/dvb-frontends/si2168.c } else if (c->bandwidth_hz <= 2000000) 
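si2165's legacy read_ber(), indexed above, reports the growth of the accumulating post_bit_error counter since the previous call; a sketch, holding the previous value in a local static here rather than in the driver's state struct:

#include <media/dvb_frontend.h>

static int example_read_ber(struct dvb_frontend *fe, u32 *ber)
{
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
	static u64 ber_prev;	/* the real driver keeps this in its state */

	if (c->post_bit_error.stat[0].scale != FE_SCALE_COUNTER) {
		*ber = 0;
		return 0;
	}
	*ber = c->post_bit_error.stat[0].uvalue - ber_prev;
	ber_prev = c->post_bit_error.stat[0].uvalue;
	return 0;
}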
c 270 drivers/media/dvb-frontends/si2168.c else if (c->bandwidth_hz <= 5000000) c 272 drivers/media/dvb-frontends/si2168.c else if (c->bandwidth_hz <= 6000000) c 274 drivers/media/dvb-frontends/si2168.c else if (c->bandwidth_hz <= 7000000) c 276 drivers/media/dvb-frontends/si2168.c else if (c->bandwidth_hz <= 8000000) c 278 drivers/media/dvb-frontends/si2168.c else if (c->bandwidth_hz <= 9000000) c 280 drivers/media/dvb-frontends/si2168.c else if (c->bandwidth_hz <= 10000000) c 298 drivers/media/dvb-frontends/si2168.c if (c->delivery_system == SYS_DVBT) c 300 drivers/media/dvb-frontends/si2168.c else if (c->delivery_system == SYS_DVBC_ANNEX_A) c 302 drivers/media/dvb-frontends/si2168.c else if (c->delivery_system == SYS_DVBT2) c 308 drivers/media/dvb-frontends/si2168.c if (c->delivery_system == SYS_DVBT2) { c 311 drivers/media/dvb-frontends/si2168.c cmd.args[1] = c->stream_id & 0xff; c 312 drivers/media/dvb-frontends/si2168.c cmd.args[2] = c->stream_id == NO_STREAM_ID_FILTER ? 0 : 1; c 354 drivers/media/dvb-frontends/si2168.c if (c->delivery_system == SYS_DVBC_ANNEX_A) { c 356 drivers/media/dvb-frontends/si2168.c cmd.args[4] = ((c->symbol_rate / 1000) >> 0) & 0xff; c 357 drivers/media/dvb-frontends/si2168.c cmd.args[5] = ((c->symbol_rate / 1000) >> 8) & 0xff; c 395 drivers/media/dvb-frontends/si2168.c dev->delivery_system = c->delivery_system; c 412 drivers/media/dvb-frontends/si2168.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 532 drivers/media/dvb-frontends/si2168.c c->cnr.len = 1; c 533 drivers/media/dvb-frontends/si2168.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 534 drivers/media/dvb-frontends/si2168.c c->post_bit_error.len = 1; c 535 drivers/media/dvb-frontends/si2168.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 536 drivers/media/dvb-frontends/si2168.c c->post_bit_count.len = 1; c 537 drivers/media/dvb-frontends/si2168.c c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 538 drivers/media/dvb-frontends/si2168.c c->block_error.len = 1; c 539 drivers/media/dvb-frontends/si2168.c c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 694 drivers/media/dvb-frontends/si21xx.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 734 drivers/media/dvb-frontends/si21xx.c if (c->delivery_system != SYS_DVBS) { c 736 drivers/media/dvb-frontends/si21xx.c __func__, c->delivery_system); c 749 drivers/media/dvb-frontends/si21xx.c rf_freq = 10 * c->frequency ; c 750 drivers/media/dvb-frontends/si21xx.c data_rate = c->symbol_rate / 100; c 835 drivers/media/dvb-frontends/si21xx.c si21xx_setacquire(fe, c->symbol_rate, c->fec_inner); c 168 drivers/media/dvb-frontends/sp887x.c int c = BLOCKSIZE; c 171 drivers/media/dvb-frontends/sp887x.c if (c > FW_SIZE - i) c 172 drivers/media/dvb-frontends/sp887x.c c = FW_SIZE - i; c 180 drivers/media/dvb-frontends/sp887x.c memcpy(&buf[2], mem + i, c); c 182 drivers/media/dvb-frontends/sp887x.c if ((err = i2c_writebytes (state, buf, c+2)) < 0) { c 47 drivers/media/dvb-frontends/stb0899_priv.h #define MAKEWORD32(a, b, c, d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d)) c 504 drivers/media/dvb-frontends/stb6100.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 506 drivers/media/dvb-frontends/stb6100.c if (c->frequency > 0) c 507 drivers/media/dvb-frontends/stb6100.c stb6100_set_frequency(fe, c->frequency); c 509 drivers/media/dvb-frontends/stb6100.c if (c->bandwidth_hz > 0) c 510 drivers/media/dvb-frontends/stb6100.c stb6100_set_bandwidth(fe, c->bandwidth_hz); c 33 
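The sp887x entries above push firmware to the chip in BLOCKSIZE-sized I2C bursts and clamp the final chunk to the bytes that remain; the loop condensed into a sketch (state, buf, mem and i2c_writebytes() are the driver's own, the loop header and the two header bytes ahead of the payload are assumed here):

for (i = 0; i < FW_SIZE; i += BLOCKSIZE) {
	int c = BLOCKSIZE;

	if (c > FW_SIZE - i)
		c = FW_SIZE - i;
	memcpy(&buf[2], mem + i, c);		/* payload after the 2-byte header */
	err = i2c_writebytes(state, buf, c + 2);
	if (err < 0)
		return err;
}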
drivers/media/dvb-frontends/stb6100_cfg.h struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 34 drivers/media/dvb-frontends/stb6100_cfg.h u32 bw = c->bandwidth_hz; c 37 drivers/media/dvb-frontends/stb6100_cfg.h c->frequency = frequency; c 38 drivers/media/dvb-frontends/stb6100_cfg.h c->bandwidth_hz = 0; /* Don't adjust the bandwidth */ c 42 drivers/media/dvb-frontends/stb6100_cfg.h c->bandwidth_hz = bw; c 71 drivers/media/dvb-frontends/stb6100_cfg.h struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 72 drivers/media/dvb-frontends/stb6100_cfg.h u32 freq = c->frequency; c 75 drivers/media/dvb-frontends/stb6100_cfg.h c->bandwidth_hz = bandwidth; c 76 drivers/media/dvb-frontends/stb6100_cfg.h c->frequency = 0; /* Don't adjust the frequency */ c 80 drivers/media/dvb-frontends/stb6100_cfg.h c->frequency = freq; c 38 drivers/media/dvb-frontends/stb6100_proc.h struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 39 drivers/media/dvb-frontends/stb6100_proc.h u32 bw = c->bandwidth_hz; c 42 drivers/media/dvb-frontends/stb6100_proc.h c->frequency = frequency; c 43 drivers/media/dvb-frontends/stb6100_proc.h c->bandwidth_hz = 0; /* Don't adjust the bandwidth */ c 50 drivers/media/dvb-frontends/stb6100_proc.h c->bandwidth_hz = bw; c 91 drivers/media/dvb-frontends/stb6100_proc.h struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 92 drivers/media/dvb-frontends/stb6100_proc.h u32 freq = c->frequency; c 95 drivers/media/dvb-frontends/stb6100_proc.h c->bandwidth_hz = bandwidth; c 96 drivers/media/dvb-frontends/stb6100_proc.h c->frequency = 0; /* Don't adjust the frequency */ c 103 drivers/media/dvb-frontends/stb6100_proc.h c->frequency = freq; c 441 drivers/media/dvb-frontends/stv0288.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 449 drivers/media/dvb-frontends/stv0288.c if (c->delivery_system != SYS_DVBS) { c 451 drivers/media/dvb-frontends/stv0288.c __func__, c->delivery_system); c 466 drivers/media/dvb-frontends/stv0288.c stv0288_set_symbolrate(fe, c->symbol_rate); c 493 drivers/media/dvb-frontends/stv0288.c state->tuner_frequency = c->frequency; c 495 drivers/media/dvb-frontends/stv0288.c state->symbol_rate = c->symbol_rate; c 1567 drivers/media/dvb-frontends/stv0900_core.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 1576 drivers/media/dvb-frontends/stv0900_core.c if (!(INRANGE(100000, c->symbol_rate, 70000000))) c 1582 drivers/media/dvb-frontends/stv0900_core.c stv0900_set_mis(intp, demod, c->stream_id); c 1586 drivers/media/dvb-frontends/stv0900_core.c p_search.frequency = c->frequency; c 1587 drivers/media/dvb-frontends/stv0900_core.c p_search.symbol_rate = c->symbol_rate; c 1594 drivers/media/dvb-frontends/stv0900_core.c if (c->delivery_system == SYS_DVBS) c 67 drivers/media/dvb-frontends/stv0910.c static inline u32 muldiv32(u32 a, u32 b, u32 c) c 72 drivers/media/dvb-frontends/stv0910.c do_div(tmp64, c); c 338 drivers/media/dvb-frontends/stv6110.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 339 drivers/media/dvb-frontends/stv6110.c u32 bandwidth = carrier_width(c->symbol_rate, c->rolloff); c 341 drivers/media/dvb-frontends/stv6110.c stv6110_set_frequency(fe, c->frequency); c 294 drivers/media/dvb-frontends/stv6111.c static inline u32 muldiv32(u32 a, u32 b, u32 c) c 299 drivers/media/dvb-frontends/stv6111.c do_div(tmp64, c); c 92 drivers/media/dvb-frontends/tc90522.c static struct tc90522_state *cfg_to_state(struct tc90522_config *c) c 94 drivers/media/dvb-frontends/tc90522.c return container_of(c, 
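stv0910 and stv6111 both carry the small muldiv32() helper indexed above; it widens to 64 bits before dividing so the product a*b cannot overflow. Reconstructed as a sketch from the indexed lines:

#include <asm/div64.h>

static inline u32 muldiv32(u32 a, u32 b, u32 c)
{
	u64 tmp64 = (u64)a * b;		/* widen first to avoid 32-bit overflow */

	do_div(tmp64, c);		/* 64-by-32 divide; quotient left in tmp64 */
	return (u32)tmp64;
}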
struct tc90522_state, cfg); c 196 drivers/media/dvb-frontends/tc90522.c struct dtv_frontend_properties *c) c 206 drivers/media/dvb-frontends/tc90522.c c->delivery_system = SYS_ISDBS; c 207 drivers/media/dvb-frontends/tc90522.c c->symbol_rate = 28860000; c 214 drivers/media/dvb-frontends/tc90522.c c->stream_id = val[0] << 8 | val[1]; c 218 drivers/media/dvb-frontends/tc90522.c c->modulation = (v == 7) ? PSK_8 : QPSK; c 219 drivers/media/dvb-frontends/tc90522.c c->fec_inner = fec_conv_sat[v]; c 220 drivers/media/dvb-frontends/tc90522.c c->layer[0].fec = c->fec_inner; c 221 drivers/media/dvb-frontends/tc90522.c c->layer[0].modulation = c->modulation; c 222 drivers/media/dvb-frontends/tc90522.c c->layer[0].segment_count = val[3] & 0x3f; /* slots */ c 226 drivers/media/dvb-frontends/tc90522.c c->layer[1].fec = fec_conv_sat[v]; c 228 drivers/media/dvb-frontends/tc90522.c c->layer[1].segment_count = 0; c 230 drivers/media/dvb-frontends/tc90522.c c->layer[1].segment_count = val[4] & 0x3f; /* slots */ c 235 drivers/media/dvb-frontends/tc90522.c c->layer[1].modulation = QPSK; c 241 drivers/media/dvb-frontends/tc90522.c stats = &c->strength; c 250 drivers/media/dvb-frontends/tc90522.c stats = &c->cnr; c 282 drivers/media/dvb-frontends/tc90522.c stats = &c->post_bit_error; c 296 drivers/media/dvb-frontends/tc90522.c stats = &c->post_bit_count; c 331 drivers/media/dvb-frontends/tc90522.c struct dtv_frontend_properties *c) c 341 drivers/media/dvb-frontends/tc90522.c c->delivery_system = SYS_ISDBT; c 342 drivers/media/dvb-frontends/tc90522.c c->bandwidth_hz = 6000000; c 347 drivers/media/dvb-frontends/tc90522.c c->transmission_mode = tm_conv[mode]; c 348 drivers/media/dvb-frontends/tc90522.c c->guard_interval = (val[0] & 0x30) >> 4; c 356 drivers/media/dvb-frontends/tc90522.c c->isdbt_partial_reception = val[0] & 0x01; c 357 drivers/media/dvb-frontends/tc90522.c c->isdbt_sb_mode = (val[0] & 0xc0) == 0x40; c 362 drivers/media/dvb-frontends/tc90522.c c->layer[0].segment_count = 0; c 365 drivers/media/dvb-frontends/tc90522.c c->layer[0].segment_count = v; c 366 drivers/media/dvb-frontends/tc90522.c c->layer[0].fec = fec_conv_ter[(val[1] & 0x1c) >> 2]; c 367 drivers/media/dvb-frontends/tc90522.c c->layer[0].modulation = mod_conv[(val[1] & 0xe0) >> 5]; c 369 drivers/media/dvb-frontends/tc90522.c c->layer[0].interleaving = v; c 375 drivers/media/dvb-frontends/tc90522.c c->layer[1].segment_count = 0; c 378 drivers/media/dvb-frontends/tc90522.c c->layer[1].segment_count = v; c 379 drivers/media/dvb-frontends/tc90522.c c->layer[1].fec = fec_conv_ter[(val[3] & 0xe0) >> 5]; c 380 drivers/media/dvb-frontends/tc90522.c c->layer[1].modulation = mod_conv[(val[2] & 0x07)]; c 381 drivers/media/dvb-frontends/tc90522.c c->layer[1].interleaving = (val[3] & 0x1c) >> 2; c 387 drivers/media/dvb-frontends/tc90522.c c->layer[2].segment_count = 0; c 390 drivers/media/dvb-frontends/tc90522.c c->layer[2].segment_count = v; c 391 drivers/media/dvb-frontends/tc90522.c c->layer[2].fec = fec_conv_ter[(val[4] & 0x07)]; c 392 drivers/media/dvb-frontends/tc90522.c c->layer[2].modulation = mod_conv[(val[4] & 0x38) >> 3]; c 393 drivers/media/dvb-frontends/tc90522.c c->layer[2].interleaving = (val[5] & 0xe0) >> 5; c 399 drivers/media/dvb-frontends/tc90522.c stats = &c->strength; c 408 drivers/media/dvb-frontends/tc90522.c stats = &c->cnr; c 442 drivers/media/dvb-frontends/tc90522.c stats = &c->post_bit_error; c 456 drivers/media/dvb-frontends/tc90522.c stats = &c->post_bit_count; c 220 drivers/media/dvb-frontends/tda10021.c struct 
dtv_frontend_properties *c = &fe->dtv_property_cache; c 221 drivers/media/dvb-frontends/tda10021.c u32 delsys = c->delivery_system; c 222 drivers/media/dvb-frontends/tda10021.c unsigned qam = c->modulation; c 266 drivers/media/dvb-frontends/tda10021.c if (c->inversion != INVERSION_ON && c->inversion != INVERSION_OFF) c 276 drivers/media/dvb-frontends/tda10021.c tda10021_set_symbolrate(state, c->symbol_rate); c 293 drivers/media/dvb-frontends/tda10021.c tda10021_setup_reg0(state, qam_params[qam].conf, c->inversion); c 293 drivers/media/dvb-frontends/tda10023.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 294 drivers/media/dvb-frontends/tda10023.c u32 delsys = c->delivery_system; c 295 drivers/media/dvb-frontends/tda10023.c unsigned qam = c->modulation; c 343 drivers/media/dvb-frontends/tda10023.c tda10023_set_symbolrate(state, c->symbol_rate); c 350 drivers/media/dvb-frontends/tda10023.c tda10023_writereg(state, 0x04, (c->inversion ? 0x12 : 0x32)); c 351 drivers/media/dvb-frontends/tda10023.c tda10023_writebit(state, 0x04, 0x60, (c->inversion ? 0 : 0x20)); c 394 drivers/media/dvb-frontends/tda10023.c u8 a,b,c; c 397 drivers/media/dvb-frontends/tda10023.c c=tda10023_readreg(state, 0x16)&0xf; c 400 drivers/media/dvb-frontends/tda10023.c *ber = a | (b<<8)| (c<<16); c 432 drivers/media/dvb-frontends/tda10023.c u8 a,b,c,d; c 435 drivers/media/dvb-frontends/tda10023.c c= tda10023_readreg (state, 0x76); c 437 drivers/media/dvb-frontends/tda10023.c *ucblocks = a | (b<<8)|(c<<16)|(d<<24); c 367 drivers/media/dvb-frontends/tda10071.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 408 drivers/media/dvb-frontends/tda10071.c c->strength.stat[0].scale = FE_SCALE_DECIBEL; c 409 drivers/media/dvb-frontends/tda10071.c c->strength.stat[0].svalue = (int) (uitmp - 256) * 1000; c 411 drivers/media/dvb-frontends/tda10071.c c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 421 drivers/media/dvb-frontends/tda10071.c c->cnr.stat[0].scale = FE_SCALE_DECIBEL; c 422 drivers/media/dvb-frontends/tda10071.c c->cnr.stat[0].svalue = (buf[0] << 8 | buf[1] << 0) * 100; c 424 drivers/media/dvb-frontends/tda10071.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 477 drivers/media/dvb-frontends/tda10071.c c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c 478 drivers/media/dvb-frontends/tda10071.c c->post_bit_error.stat[0].uvalue = dev->post_bit_error; c 480 drivers/media/dvb-frontends/tda10071.c c->block_error.stat[0].scale = FE_SCALE_COUNTER; c 481 drivers/media/dvb-frontends/tda10071.c c->block_error.stat[0].uvalue = dev->block_error; c 485 drivers/media/dvb-frontends/tda10071.c c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c 486 drivers/media/dvb-frontends/tda10071.c c->post_bit_error.stat[0].uvalue = dev->post_bit_error; c 487 drivers/media/dvb-frontends/tda10071.c c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 490 drivers/media/dvb-frontends/tda10071.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 491 drivers/media/dvb-frontends/tda10071.c c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 502 drivers/media/dvb-frontends/tda10071.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 504 drivers/media/dvb-frontends/tda10071.c if (c->cnr.stat[0].scale == FE_SCALE_DECIBEL) c 505 drivers/media/dvb-frontends/tda10071.c *snr = div_s64(c->cnr.stat[0].svalue, 100); c 513 drivers/media/dvb-frontends/tda10071.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 516 drivers/media/dvb-frontends/tda10071.c if (c->strength.stat[0].scale 
== FE_SCALE_DECIBEL) { c 517 drivers/media/dvb-frontends/tda10071.c uitmp = div_s64(c->strength.stat[0].svalue, 1000) + 256; c 537 drivers/media/dvb-frontends/tda10071.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 539 drivers/media/dvb-frontends/tda10071.c if (c->block_error.stat[0].scale == FE_SCALE_COUNTER) c 540 drivers/media/dvb-frontends/tda10071.c *ucblocks = c->block_error.stat[0].uvalue; c 551 drivers/media/dvb-frontends/tda10071.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 558 drivers/media/dvb-frontends/tda10071.c c->delivery_system, c->modulation, c->frequency, c->symbol_rate, c 559 drivers/media/dvb-frontends/tda10071.c c->inversion, c->pilot, c->rolloff); c 568 drivers/media/dvb-frontends/tda10071.c switch (c->inversion) { c 586 drivers/media/dvb-frontends/tda10071.c switch (c->delivery_system) { c 593 drivers/media/dvb-frontends/tda10071.c modulation = c->modulation; c 595 drivers/media/dvb-frontends/tda10071.c switch (c->rolloff) { c 612 drivers/media/dvb-frontends/tda10071.c switch (c->pilot) { c 635 drivers/media/dvb-frontends/tda10071.c if (c->delivery_system == TDA10071_MODCOD[i].delivery_system && c 637 drivers/media/dvb-frontends/tda10071.c c->fec_inner == TDA10071_MODCOD[i].fec) { c 650 drivers/media/dvb-frontends/tda10071.c if (c->symbol_rate <= 5000000) c 666 drivers/media/dvb-frontends/tda10071.c cmd.args[3] = (c->frequency >> 16) & 0xff; c 667 drivers/media/dvb-frontends/tda10071.c cmd.args[4] = (c->frequency >> 8) & 0xff; c 668 drivers/media/dvb-frontends/tda10071.c cmd.args[5] = (c->frequency >> 0) & 0xff; c 669 drivers/media/dvb-frontends/tda10071.c cmd.args[6] = ((c->symbol_rate / 1000) >> 8) & 0xff; c 670 drivers/media/dvb-frontends/tda10071.c cmd.args[7] = ((c->symbol_rate / 1000) >> 0) & 0xff; c 683 drivers/media/dvb-frontends/tda10071.c dev->delivery_system = c->delivery_system; c 692 drivers/media/dvb-frontends/tda10071.c struct dtv_frontend_properties *c) c 711 drivers/media/dvb-frontends/tda10071.c c->modulation = TDA10071_MODCOD[i].modulation; c 712 drivers/media/dvb-frontends/tda10071.c c->fec_inner = TDA10071_MODCOD[i].fec; c 713 drivers/media/dvb-frontends/tda10071.c c->delivery_system = TDA10071_MODCOD[i].delivery_system; c 719 drivers/media/dvb-frontends/tda10071.c c->inversion = INVERSION_ON; c 722 drivers/media/dvb-frontends/tda10071.c c->inversion = INVERSION_OFF; c 728 drivers/media/dvb-frontends/tda10071.c c->pilot = PILOT_OFF; c 731 drivers/media/dvb-frontends/tda10071.c c->pilot = PILOT_ON; c 735 drivers/media/dvb-frontends/tda10071.c c->frequency = (buf[2] << 16) | (buf[3] << 8) | (buf[4] << 0); c 741 drivers/media/dvb-frontends/tda10071.c c->symbol_rate = ((buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0)) * 1000; c 753 drivers/media/dvb-frontends/tda10071.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 1020 drivers/media/dvb-frontends/tda10071.c c->strength.len = 1; c 1021 drivers/media/dvb-frontends/tda10071.c c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1022 drivers/media/dvb-frontends/tda10071.c c->cnr.len = 1; c 1023 drivers/media/dvb-frontends/tda10071.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1024 drivers/media/dvb-frontends/tda10071.c c->post_bit_error.len = 1; c 1025 drivers/media/dvb-frontends/tda10071.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 1026 drivers/media/dvb-frontends/tda10071.c c->block_error.len = 1; c 1027 drivers/media/dvb-frontends/tda10071.c c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 181 
drivers/media/dvb-frontends/tda665x.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 183 drivers/media/dvb-frontends/tda665x.c tda665x_set_frequency(fe, c->frequency); c 85 drivers/media/dvb-frontends/tda8261.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 97 drivers/media/dvb-frontends/tda8261.c frequency = c->frequency; c 29 drivers/media/dvb-frontends/tda8261_cfg.h struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 39 drivers/media/dvb-frontends/tda8261_cfg.h pr_debug("%s: Frequency=%d\n", __func__, c->frequency); c 80 drivers/media/dvb-frontends/ts2020.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 143 drivers/media/dvb-frontends/ts2020.c c->strength.len = 1; c 144 drivers/media/dvb-frontends/ts2020.c c->strength.stat[0].scale = FE_SCALE_DECIBEL; c 145 drivers/media/dvb-frontends/ts2020.c c->strength.stat[0].uvalue = 0; c 190 drivers/media/dvb-frontends/ts2020.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 198 drivers/media/dvb-frontends/ts2020.c unsigned int frequency_khz = c->frequency; c 222 drivers/media/dvb-frontends/ts2020.c priv->frequency_khz, priv->frequency_khz - c->frequency, c 273 drivers/media/dvb-frontends/ts2020.c f3db = (c->bandwidth_hz / 1000 / 2) + 2000; c 432 drivers/media/dvb-frontends/ts2020.c struct dtv_frontend_properties *c = &priv->fe->dtv_property_cache; c 437 drivers/media/dvb-frontends/ts2020.c ret = ts2020_get_tuner_gain(priv->fe, &c->strength.stat[0].svalue); c 441 drivers/media/dvb-frontends/ts2020.c c->strength.stat[0].scale = FE_SCALE_DECIBEL; c 456 drivers/media/dvb-frontends/ts2020.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 464 drivers/media/dvb-frontends/ts2020.c if (c->strength.stat[0].scale == FE_SCALE_NOT_AVAILABLE) { c 469 drivers/media/dvb-frontends/ts2020.c gain = c->strength.stat[0].svalue; c 59 drivers/media/dvb-frontends/tua6100.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 75 drivers/media/dvb-frontends/tua6100.c if (c->frequency < 2000000) c 81 drivers/media/dvb-frontends/tua6100.c if (c->frequency < 1630000) c 88 drivers/media/dvb-frontends/tua6100.c if (c->frequency >= 1525000) c 94 drivers/media/dvb-frontends/tua6100.c if (c->frequency < 1455000) c 96 drivers/media/dvb-frontends/tua6100.c else if (c->frequency < 1630000) c 105 drivers/media/dvb-frontends/tua6100.c prediv = (c->frequency * _R_VAL) / (_ri / 1000); c 41 drivers/media/dvb-frontends/zd1301_demod.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 47 drivers/media/dvb-frontends/zd1301_demod.c c->frequency, c->bandwidth_hz); c 69 drivers/media/dvb-frontends/zd1301_demod.c switch (c->bandwidth_hz) { c 270 drivers/media/dvb-frontends/zl10036.c int c) c 288 drivers/media/dvb-frontends/zl10036.c buf[1] = _RDIV_REG | ((c << 5) & 0x60); c 290 drivers/media/dvb-frontends/zl10036.c deb_info("%s: c=%u rfg=%u ba=%u bg=%u\n", __func__, c, rfg, ba, bg); c 302 drivers/media/dvb-frontends/zl10036.c u8 c; c 327 drivers/media/dvb-frontends/zl10036.c c = 0; c 329 drivers/media/dvb-frontends/zl10036.c c = 1; c 331 drivers/media/dvb-frontends/zl10036.c c = 2; c 338 drivers/media/dvb-frontends/zl10036.c ret = zl10036_set_gain_params(state, c); c 178 drivers/media/dvb-frontends/zl10039.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 188 drivers/media/dvb-frontends/zl10039.c c->frequency, c->symbol_rate); c 192 drivers/media/dvb-frontends/zl10039.c div = (c->frequency * 1000) / 126387; c 193 drivers/media/dvb-frontends/zl10039.c fbw = 
(c->symbol_rate * 27) / 32000; c 168 drivers/media/dvb-frontends/zl10353.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 174 drivers/media/dvb-frontends/zl10353.c state->frequency = c->frequency; c 184 drivers/media/dvb-frontends/zl10353.c if (c->transmission_mode != TRANSMISSION_MODE_AUTO) c 186 drivers/media/dvb-frontends/zl10353.c if (c->guard_interval != GUARD_INTERVAL_AUTO) c 190 drivers/media/dvb-frontends/zl10353.c switch (c->bandwidth_hz) { c 203 drivers/media/dvb-frontends/zl10353.c c->bandwidth_hz = 8000000; c 211 drivers/media/dvb-frontends/zl10353.c zl10353_calc_nominal_rate(fe, c->bandwidth_hz, &nominal_rate); c 214 drivers/media/dvb-frontends/zl10353.c state->bandwidth = c->bandwidth_hz; c 221 drivers/media/dvb-frontends/zl10353.c switch (c->code_rate_HP) { c 241 drivers/media/dvb-frontends/zl10353.c switch (c->code_rate_LP) { c 258 drivers/media/dvb-frontends/zl10353.c if (c->hierarchy == HIERARCHY_AUTO || c 259 drivers/media/dvb-frontends/zl10353.c c->hierarchy == HIERARCHY_NONE) c 266 drivers/media/dvb-frontends/zl10353.c switch (c->modulation) { c 280 drivers/media/dvb-frontends/zl10353.c switch (c->transmission_mode) { c 291 drivers/media/dvb-frontends/zl10353.c switch (c->guard_interval) { c 308 drivers/media/dvb-frontends/zl10353.c switch (c->hierarchy) { c 360 drivers/media/dvb-frontends/zl10353.c struct dtv_frontend_properties *c) c 386 drivers/media/dvb-frontends/zl10353.c c->code_rate_HP = tps_fec_to_api[(tps >> 7) & 7]; c 387 drivers/media/dvb-frontends/zl10353.c c->code_rate_LP = tps_fec_to_api[(tps >> 4) & 7]; c 391 drivers/media/dvb-frontends/zl10353.c c->modulation = QPSK; c 394 drivers/media/dvb-frontends/zl10353.c c->modulation = QAM_16; c 397 drivers/media/dvb-frontends/zl10353.c c->modulation = QAM_64; c 400 drivers/media/dvb-frontends/zl10353.c c->modulation = QAM_AUTO; c 404 drivers/media/dvb-frontends/zl10353.c c->transmission_mode = (tps & 0x01) ? 
TRANSMISSION_MODE_8K : c 409 drivers/media/dvb-frontends/zl10353.c c->guard_interval = GUARD_INTERVAL_1_32; c 412 drivers/media/dvb-frontends/zl10353.c c->guard_interval = GUARD_INTERVAL_1_16; c 415 drivers/media/dvb-frontends/zl10353.c c->guard_interval = GUARD_INTERVAL_1_8; c 418 drivers/media/dvb-frontends/zl10353.c c->guard_interval = GUARD_INTERVAL_1_4; c 421 drivers/media/dvb-frontends/zl10353.c c->guard_interval = GUARD_INTERVAL_AUTO; c 427 drivers/media/dvb-frontends/zl10353.c c->hierarchy = HIERARCHY_NONE; c 430 drivers/media/dvb-frontends/zl10353.c c->hierarchy = HIERARCHY_1; c 433 drivers/media/dvb-frontends/zl10353.c c->hierarchy = HIERARCHY_2; c 436 drivers/media/dvb-frontends/zl10353.c c->hierarchy = HIERARCHY_4; c 439 drivers/media/dvb-frontends/zl10353.c c->hierarchy = HIERARCHY_AUTO; c 443 drivers/media/dvb-frontends/zl10353.c c->frequency = state->frequency; c 444 drivers/media/dvb-frontends/zl10353.c c->bandwidth_hz = state->bandwidth; c 445 drivers/media/dvb-frontends/zl10353.c c->inversion = INVERSION_AUTO; c 90 drivers/media/firewire/firedtv-avc.c static inline void clear_operands(struct avc_command_frame *c, int from, int to) c 92 drivers/media/firewire/firedtv-avc.c memset(&c->operand[from], 0, to - from + 1); c 95 drivers/media/firewire/firedtv-avc.c static void pad_operands(struct avc_command_frame *c, int from) c 100 drivers/media/firewire/firedtv-avc.c clear_operands(c, from, to); c 336 drivers/media/firewire/firedtv-avc.c struct avc_command_frame *c = (void *)fdtv->avc_data; c 338 drivers/media/firewire/firedtv-avc.c c->opcode = AVC_OPCODE_VENDOR; c 340 drivers/media/firewire/firedtv-avc.c c->operand[0] = SFE_VENDOR_DE_COMPANYID_0; c 341 drivers/media/firewire/firedtv-avc.c c->operand[1] = SFE_VENDOR_DE_COMPANYID_1; c 342 drivers/media/firewire/firedtv-avc.c c->operand[2] = SFE_VENDOR_DE_COMPANYID_2; c 344 drivers/media/firewire/firedtv-avc.c c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK2; c 346 drivers/media/firewire/firedtv-avc.c c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK; c 348 drivers/media/firewire/firedtv-avc.c c->operand[4] = (p->frequency >> 24) & 0xff; c 349 drivers/media/firewire/firedtv-avc.c c->operand[5] = (p->frequency >> 16) & 0xff; c 350 drivers/media/firewire/firedtv-avc.c c->operand[6] = (p->frequency >> 8) & 0xff; c 351 drivers/media/firewire/firedtv-avc.c c->operand[7] = p->frequency & 0xff; c 353 drivers/media/firewire/firedtv-avc.c c->operand[8] = ((p->symbol_rate / 1000) >> 8) & 0xff; c 354 drivers/media/firewire/firedtv-avc.c c->operand[9] = (p->symbol_rate / 1000) & 0xff; c 357 drivers/media/firewire/firedtv-avc.c case FEC_1_2: c->operand[10] = 0x1; break; c 358 drivers/media/firewire/firedtv-avc.c case FEC_2_3: c->operand[10] = 0x2; break; c 359 drivers/media/firewire/firedtv-avc.c case FEC_3_4: c->operand[10] = 0x3; break; c 360 drivers/media/firewire/firedtv-avc.c case FEC_5_6: c->operand[10] = 0x4; break; c 361 drivers/media/firewire/firedtv-avc.c case FEC_7_8: c->operand[10] = 0x5; break; c 365 drivers/media/firewire/firedtv-avc.c default: c->operand[10] = 0x0; c 369 drivers/media/firewire/firedtv-avc.c c->operand[11] = 0xff; c 371 drivers/media/firewire/firedtv-avc.c c->operand[11] = 0; c 373 drivers/media/firewire/firedtv-avc.c c->operand[11] = 1; c 376 drivers/media/firewire/firedtv-avc.c c->operand[12] = 0xff; c 378 drivers/media/firewire/firedtv-avc.c c->operand[12] = 1; c 380 drivers/media/firewire/firedtv-avc.c c->operand[12] = 0; c 385 drivers/media/firewire/firedtv-avc.c case QAM_16: c->operand[13] = 0x1; break; c 386 
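The firedtv AVC tune commands indexed above serialize the tuning parameters big-endian into the frame's operand[] byte array (struct avc_command_frame is the driver's internal frame layout); the QPSK frequency and symbol-rate packing, condensed into a sketch:

static void example_pack_qpsk_tuning(struct avc_command_frame *c,
				     u32 frequency, u32 symbol_rate)
{
	c->operand[4] = (frequency >> 24) & 0xff;
	c->operand[5] = (frequency >> 16) & 0xff;
	c->operand[6] = (frequency >> 8) & 0xff;
	c->operand[7] = frequency & 0xff;

	/* symbol rate goes out in ksym/s, high byte first */
	c->operand[8] = ((symbol_rate / 1000) >> 8) & 0xff;
	c->operand[9] = (symbol_rate / 1000) & 0xff;
}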
drivers/media/firewire/firedtv-avc.c case QPSK: c->operand[13] = 0x2; break; c 387 drivers/media/firewire/firedtv-avc.c case PSK_8: c->operand[13] = 0x3; break; c 388 drivers/media/firewire/firedtv-avc.c default: c->operand[13] = 0x2; break; c 391 drivers/media/firewire/firedtv-avc.c case ROLLOFF_35: c->operand[14] = 0x2; break; c 392 drivers/media/firewire/firedtv-avc.c case ROLLOFF_20: c->operand[14] = 0x0; break; c 393 drivers/media/firewire/firedtv-avc.c case ROLLOFF_25: c->operand[14] = 0x1; break; c 395 drivers/media/firewire/firedtv-avc.c default: c->operand[14] = 0x2; break; c 399 drivers/media/firewire/firedtv-avc.c case PILOT_AUTO: c->operand[15] = 0x0; break; c 400 drivers/media/firewire/firedtv-avc.c case PILOT_OFF: c->operand[15] = 0x0; break; c 401 drivers/media/firewire/firedtv-avc.c case PILOT_ON: c->operand[15] = 0x1; break; c 404 drivers/media/firewire/firedtv-avc.c c->operand[13] = 0x1; /* auto modulation */ c 405 drivers/media/firewire/firedtv-avc.c c->operand[14] = 0xff; /* disable rolloff */ c 406 drivers/media/firewire/firedtv-avc.c c->operand[15] = 0xff; /* disable pilot */ c 417 drivers/media/firewire/firedtv-avc.c struct avc_command_frame *c = (void *)fdtv->avc_data; c 419 drivers/media/firewire/firedtv-avc.c c->opcode = AVC_OPCODE_DSD; c 421 drivers/media/firewire/firedtv-avc.c c->operand[0] = 0; /* source plug */ c 422 drivers/media/firewire/firedtv-avc.c c->operand[1] = 0xd2; /* subfunction replace */ c 423 drivers/media/firewire/firedtv-avc.c c->operand[2] = 0x20; /* system id = DVB */ c 424 drivers/media/firewire/firedtv-avc.c c->operand[3] = 0x00; /* antenna number */ c 425 drivers/media/firewire/firedtv-avc.c c->operand[4] = 0x11; /* system_specific_multiplex selection_length */ c 428 drivers/media/firewire/firedtv-avc.c c->operand[5] = 0 << 7 /* reserved */ c 438 drivers/media/firewire/firedtv-avc.c c->operand[6] = 0 << 7 /* NetworkID */ c 441 drivers/media/firewire/firedtv-avc.c c->operand[7] = 0x00; c 442 drivers/media/firewire/firedtv-avc.c c->operand[8] = 0x00; c 443 drivers/media/firewire/firedtv-avc.c c->operand[9] = 0x00; c 444 drivers/media/firewire/firedtv-avc.c c->operand[10] = 0x00; c 446 drivers/media/firewire/firedtv-avc.c c->operand[11] = (((p->frequency / 4000) >> 16) & 0xff) | (2 << 6); c 447 drivers/media/firewire/firedtv-avc.c c->operand[12] = ((p->frequency / 4000) >> 8) & 0xff; c 448 drivers/media/firewire/firedtv-avc.c c->operand[13] = (p->frequency / 4000) & 0xff; c 449 drivers/media/firewire/firedtv-avc.c c->operand[14] = ((p->symbol_rate / 1000) >> 12) & 0xff; c 450 drivers/media/firewire/firedtv-avc.c c->operand[15] = ((p->symbol_rate / 1000) >> 4) & 0xff; c 451 drivers/media/firewire/firedtv-avc.c c->operand[16] = ((p->symbol_rate / 1000) << 4) & 0xf0; c 452 drivers/media/firewire/firedtv-avc.c c->operand[17] = 0x00; c 455 drivers/media/firewire/firedtv-avc.c case FEC_1_2: c->operand[18] = 0x1; break; c 456 drivers/media/firewire/firedtv-avc.c case FEC_2_3: c->operand[18] = 0x2; break; c 457 drivers/media/firewire/firedtv-avc.c case FEC_3_4: c->operand[18] = 0x3; break; c 458 drivers/media/firewire/firedtv-avc.c case FEC_5_6: c->operand[18] = 0x4; break; c 459 drivers/media/firewire/firedtv-avc.c case FEC_7_8: c->operand[18] = 0x5; break; c 460 drivers/media/firewire/firedtv-avc.c case FEC_8_9: c->operand[18] = 0x6; break; c 461 drivers/media/firewire/firedtv-avc.c case FEC_4_5: c->operand[18] = 0x8; break; c 463 drivers/media/firewire/firedtv-avc.c default: c->operand[18] = 0x0; c 467 drivers/media/firewire/firedtv-avc.c case 
QAM_16: c->operand[19] = 0x08; break; c 468 drivers/media/firewire/firedtv-avc.c case QAM_32: c->operand[19] = 0x10; break; c 469 drivers/media/firewire/firedtv-avc.c case QAM_64: c->operand[19] = 0x18; break; c 470 drivers/media/firewire/firedtv-avc.c case QAM_128: c->operand[19] = 0x20; break; c 471 drivers/media/firewire/firedtv-avc.c case QAM_256: c->operand[19] = 0x28; break; c 473 drivers/media/firewire/firedtv-avc.c default: c->operand[19] = 0x00; c 476 drivers/media/firewire/firedtv-avc.c c->operand[20] = 0x00; c 477 drivers/media/firewire/firedtv-avc.c c->operand[21] = 0x00; c 479 drivers/media/firewire/firedtv-avc.c return 22 + add_pid_filter(fdtv, &c->operand[22]); c 485 drivers/media/firewire/firedtv-avc.c struct avc_command_frame *c = (void *)fdtv->avc_data; c 487 drivers/media/firewire/firedtv-avc.c c->opcode = AVC_OPCODE_DSD; c 489 drivers/media/firewire/firedtv-avc.c c->operand[0] = 0; /* source plug */ c 490 drivers/media/firewire/firedtv-avc.c c->operand[1] = 0xd2; /* subfunction replace */ c 491 drivers/media/firewire/firedtv-avc.c c->operand[2] = 0x20; /* system id = DVB */ c 492 drivers/media/firewire/firedtv-avc.c c->operand[3] = 0x00; /* antenna number */ c 493 drivers/media/firewire/firedtv-avc.c c->operand[4] = 0x0c; /* system_specific_multiplex selection_length */ c 496 drivers/media/firewire/firedtv-avc.c c->operand[5] = c 507 drivers/media/firewire/firedtv-avc.c c->operand[6] = c 513 drivers/media/firewire/firedtv-avc.c c->operand[7] = 0x0; c 514 drivers/media/firewire/firedtv-avc.c c->operand[8] = (p->frequency / 10) >> 24; c 515 drivers/media/firewire/firedtv-avc.c c->operand[9] = ((p->frequency / 10) >> 16) & 0xff; c 516 drivers/media/firewire/firedtv-avc.c c->operand[10] = ((p->frequency / 10) >> 8) & 0xff; c 517 drivers/media/firewire/firedtv-avc.c c->operand[11] = (p->frequency / 10) & 0xff; c 520 drivers/media/firewire/firedtv-avc.c case 7000000: c->operand[12] = 0x20; break; c 524 drivers/media/firewire/firedtv-avc.c default: c->operand[12] = 0x00; c 528 drivers/media/firewire/firedtv-avc.c case QAM_16: c->operand[13] = 1 << 6; break; c 529 drivers/media/firewire/firedtv-avc.c case QAM_64: c->operand[13] = 2 << 6; break; c 531 drivers/media/firewire/firedtv-avc.c default: c->operand[13] = 0x00; c 535 drivers/media/firewire/firedtv-avc.c case HIERARCHY_1: c->operand[13] |= 1 << 3; break; c 536 drivers/media/firewire/firedtv-avc.c case HIERARCHY_2: c->operand[13] |= 2 << 3; break; c 537 drivers/media/firewire/firedtv-avc.c case HIERARCHY_4: c->operand[13] |= 3 << 3; break; c 544 drivers/media/firewire/firedtv-avc.c case FEC_2_3: c->operand[13] |= 1; break; c 545 drivers/media/firewire/firedtv-avc.c case FEC_3_4: c->operand[13] |= 2; break; c 546 drivers/media/firewire/firedtv-avc.c case FEC_5_6: c->operand[13] |= 3; break; c 547 drivers/media/firewire/firedtv-avc.c case FEC_7_8: c->operand[13] |= 4; break; c 553 drivers/media/firewire/firedtv-avc.c case FEC_2_3: c->operand[14] = 1 << 5; break; c 554 drivers/media/firewire/firedtv-avc.c case FEC_3_4: c->operand[14] = 2 << 5; break; c 555 drivers/media/firewire/firedtv-avc.c case FEC_5_6: c->operand[14] = 3 << 5; break; c 556 drivers/media/firewire/firedtv-avc.c case FEC_7_8: c->operand[14] = 4 << 5; break; c 558 drivers/media/firewire/firedtv-avc.c default: c->operand[14] = 0x00; break; c 562 drivers/media/firewire/firedtv-avc.c case GUARD_INTERVAL_1_16: c->operand[14] |= 1 << 3; break; c 563 drivers/media/firewire/firedtv-avc.c case GUARD_INTERVAL_1_8: c->operand[14] |= 2 << 3; break; c 564 
drivers/media/firewire/firedtv-avc.c case GUARD_INTERVAL_1_4: c->operand[14] |= 3 << 3; break; c 571 drivers/media/firewire/firedtv-avc.c case TRANSMISSION_MODE_8K: c->operand[14] |= 1 << 1; break; c 577 drivers/media/firewire/firedtv-avc.c c->operand[15] = 0x00; /* network_ID[0] */ c 578 drivers/media/firewire/firedtv-avc.c c->operand[16] = 0x00; /* network_ID[1] */ c 580 drivers/media/firewire/firedtv-avc.c return 17 + add_pid_filter(fdtv, &c->operand[17]); c 586 drivers/media/firewire/firedtv-avc.c struct avc_command_frame *c = (void *)fdtv->avc_data; c 591 drivers/media/firewire/firedtv-avc.c c->ctype = AVC_CTYPE_CONTROL; c 592 drivers/media/firewire/firedtv-avc.c c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit; c 602 drivers/media/firewire/firedtv-avc.c pad_operands(c, pos); c 625 drivers/media/firewire/firedtv-avc.c struct avc_command_frame *c = (void *)fdtv->avc_data; c 633 drivers/media/firewire/firedtv-avc.c c->ctype = AVC_CTYPE_CONTROL; c 634 drivers/media/firewire/firedtv-avc.c c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit; c 635 drivers/media/firewire/firedtv-avc.c c->opcode = AVC_OPCODE_DSD; c 637 drivers/media/firewire/firedtv-avc.c c->operand[0] = 0; /* source plug */ c 638 drivers/media/firewire/firedtv-avc.c c->operand[1] = 0xd2; /* subfunction replace */ c 639 drivers/media/firewire/firedtv-avc.c c->operand[2] = 0x20; /* system id = DVB */ c 640 drivers/media/firewire/firedtv-avc.c c->operand[3] = 0x00; /* antenna number */ c 641 drivers/media/firewire/firedtv-avc.c c->operand[4] = 0x00; /* system_specific_multiplex selection_length */ c 642 drivers/media/firewire/firedtv-avc.c c->operand[5] = pidc; /* Nr_of_dsd_sel_specs */ c 647 drivers/media/firewire/firedtv-avc.c c->operand[pos++] = 0x13; /* flowfunction relay */ c 648 drivers/media/firewire/firedtv-avc.c c->operand[pos++] = 0x80; /* dsd_sel_spec_valid_flags -> PID */ c 649 drivers/media/firewire/firedtv-avc.c c->operand[pos++] = (pid[k] >> 8) & 0x1f; c 650 drivers/media/firewire/firedtv-avc.c c->operand[pos++] = pid[k] & 0xff; c 651 drivers/media/firewire/firedtv-avc.c c->operand[pos++] = 0x00; /* tableID */ c 652 drivers/media/firewire/firedtv-avc.c c->operand[pos++] = 0x00; /* filter_length */ c 654 drivers/media/firewire/firedtv-avc.c pad_operands(c, pos); c 671 drivers/media/firewire/firedtv-avc.c struct avc_command_frame *c = (void *)fdtv->avc_data; c 676 drivers/media/firewire/firedtv-avc.c c->ctype = AVC_CTYPE_CONTROL; c 677 drivers/media/firewire/firedtv-avc.c c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit; c 678 drivers/media/firewire/firedtv-avc.c c->opcode = AVC_OPCODE_DSIT; c 682 drivers/media/firewire/firedtv-avc.c c->operand[0] = 0; /* source plug */ c 683 drivers/media/firewire/firedtv-avc.c c->operand[1] = 0xd2; /* subfunction replace */ c 684 drivers/media/firewire/firedtv-avc.c c->operand[2] = 0xff; /* status */ c 685 drivers/media/firewire/firedtv-avc.c c->operand[3] = 0x20; /* system id = DVB */ c 686 drivers/media/firewire/firedtv-avc.c c->operand[4] = 0x00; /* antenna number */ c 687 drivers/media/firewire/firedtv-avc.c c->operand[5] = 0x0; /* system_specific_search_flags */ c 688 drivers/media/firewire/firedtv-avc.c c->operand[6] = sl; /* system_specific_multiplex selection_length */ c 694 drivers/media/firewire/firedtv-avc.c clear_operands(c, 7, 24); c 711 drivers/media/firewire/firedtv-avc.c struct avc_command_frame *c = (void *)fdtv->avc_data; c 717 drivers/media/firewire/firedtv-avc.c c->ctype = AVC_CTYPE_CONTROL; c 718 drivers/media/firewire/firedtv-avc.c c->subunit = 
AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit; c 719 drivers/media/firewire/firedtv-avc.c c->opcode = AVC_OPCODE_READ_DESCRIPTOR; c 721 drivers/media/firewire/firedtv-avc.c c->operand[0] = DESCRIPTOR_SUBUNIT_IDENTIFIER; c 722 drivers/media/firewire/firedtv-avc.c c->operand[1] = 0xff; c 723 drivers/media/firewire/firedtv-avc.c c->operand[2] = 0x00; c 724 drivers/media/firewire/firedtv-avc.c c->operand[3] = 0x00; /* length highbyte */ c 725 drivers/media/firewire/firedtv-avc.c c->operand[4] = 0x08; /* length lowbyte */ c 726 drivers/media/firewire/firedtv-avc.c c->operand[5] = 0x00; /* offset highbyte */ c 727 drivers/media/firewire/firedtv-avc.c c->operand[6] = 0x0d; /* offset lowbyte */ c 728 drivers/media/firewire/firedtv-avc.c clear_operands(c, 7, 8); /* padding */ c 751 drivers/media/firewire/firedtv-avc.c struct avc_command_frame *c = (void *)fdtv->avc_data; c 757 drivers/media/firewire/firedtv-avc.c c->ctype = AVC_CTYPE_CONTROL; c 758 drivers/media/firewire/firedtv-avc.c c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit; c 759 drivers/media/firewire/firedtv-avc.c c->opcode = AVC_OPCODE_READ_DESCRIPTOR; c 761 drivers/media/firewire/firedtv-avc.c c->operand[0] = DESCRIPTOR_TUNER_STATUS; c 762 drivers/media/firewire/firedtv-avc.c c->operand[1] = 0xff; /* read_result_status */ c 768 drivers/media/firewire/firedtv-avc.c clear_operands(c, 2, 31); c 832 drivers/media/firewire/firedtv-avc.c struct avc_command_frame *c = (void *)fdtv->avc_data; c 838 drivers/media/firewire/firedtv-avc.c c->ctype = AVC_CTYPE_CONTROL; c 839 drivers/media/firewire/firedtv-avc.c c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit; c 840 drivers/media/firewire/firedtv-avc.c c->opcode = AVC_OPCODE_VENDOR; c 842 drivers/media/firewire/firedtv-avc.c c->operand[0] = SFE_VENDOR_DE_COMPANYID_0; c 843 drivers/media/firewire/firedtv-avc.c c->operand[1] = SFE_VENDOR_DE_COMPANYID_1; c 844 drivers/media/firewire/firedtv-avc.c c->operand[2] = SFE_VENDOR_DE_COMPANYID_2; c 845 drivers/media/firewire/firedtv-avc.c c->operand[3] = SFE_VENDOR_OPCODE_LNB_CONTROL; c 846 drivers/media/firewire/firedtv-avc.c c->operand[4] = voltage; c 847 drivers/media/firewire/firedtv-avc.c c->operand[5] = nrdiseq; c 851 drivers/media/firewire/firedtv-avc.c c->operand[pos++] = diseqcmd[j].msg_len; c 854 drivers/media/firewire/firedtv-avc.c c->operand[pos++] = diseqcmd[j].msg[k]; c 856 drivers/media/firewire/firedtv-avc.c c->operand[pos++] = burst; c 857 drivers/media/firewire/firedtv-avc.c c->operand[pos++] = conttone; c 858 drivers/media/firewire/firedtv-avc.c pad_operands(c, pos); c 877 drivers/media/firewire/firedtv-avc.c struct avc_command_frame *c = (void *)fdtv->avc_data; c 882 drivers/media/firewire/firedtv-avc.c c->ctype = AVC_CTYPE_NOTIFY; c 883 drivers/media/firewire/firedtv-avc.c c->subunit = AVC_SUBUNIT_TYPE_UNIT | 7; c 884 drivers/media/firewire/firedtv-avc.c c->opcode = AVC_OPCODE_VENDOR; c 886 drivers/media/firewire/firedtv-avc.c c->operand[0] = SFE_VENDOR_DE_COMPANYID_0; c 887 drivers/media/firewire/firedtv-avc.c c->operand[1] = SFE_VENDOR_DE_COMPANYID_1; c 888 drivers/media/firewire/firedtv-avc.c c->operand[2] = SFE_VENDOR_DE_COMPANYID_2; c 889 drivers/media/firewire/firedtv-avc.c c->operand[3] = SFE_VENDOR_OPCODE_REGISTER_REMOTE_CONTROL; c 890 drivers/media/firewire/firedtv-avc.c c->operand[4] = 0; /* padding */ c 914 drivers/media/firewire/firedtv-avc.c struct avc_command_frame *c = (void *)fdtv->avc_data; c 919 drivers/media/firewire/firedtv-avc.c c->ctype = AVC_CTYPE_CONTROL; c 920 drivers/media/firewire/firedtv-avc.c c->subunit = 
AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit; c 921 drivers/media/firewire/firedtv-avc.c c->opcode = AVC_OPCODE_VENDOR; c 923 drivers/media/firewire/firedtv-avc.c c->operand[0] = SFE_VENDOR_DE_COMPANYID_0; c 924 drivers/media/firewire/firedtv-avc.c c->operand[1] = SFE_VENDOR_DE_COMPANYID_1; c 925 drivers/media/firewire/firedtv-avc.c c->operand[2] = SFE_VENDOR_DE_COMPANYID_2; c 926 drivers/media/firewire/firedtv-avc.c c->operand[3] = SFE_VENDOR_OPCODE_HOST2CA; c 927 drivers/media/firewire/firedtv-avc.c c->operand[4] = 0; /* slot */ c 928 drivers/media/firewire/firedtv-avc.c c->operand[5] = SFE_VENDOR_TAG_CA_APPLICATION_INFO; /* ca tag */ c 929 drivers/media/firewire/firedtv-avc.c clear_operands(c, 6, 8); c 970 drivers/media/firewire/firedtv-avc.c struct avc_command_frame *c = (void *)fdtv->avc_data; c 976 drivers/media/firewire/firedtv-avc.c c->ctype = AVC_CTYPE_STATUS; c 977 drivers/media/firewire/firedtv-avc.c c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit; c 978 drivers/media/firewire/firedtv-avc.c c->opcode = AVC_OPCODE_VENDOR; c 980 drivers/media/firewire/firedtv-avc.c c->operand[0] = SFE_VENDOR_DE_COMPANYID_0; c 981 drivers/media/firewire/firedtv-avc.c c->operand[1] = SFE_VENDOR_DE_COMPANYID_1; c 982 drivers/media/firewire/firedtv-avc.c c->operand[2] = SFE_VENDOR_DE_COMPANYID_2; c 983 drivers/media/firewire/firedtv-avc.c c->operand[3] = SFE_VENDOR_OPCODE_CA2HOST; c 984 drivers/media/firewire/firedtv-avc.c c->operand[4] = 0; /* slot */ c 985 drivers/media/firewire/firedtv-avc.c c->operand[5] = SFE_VENDOR_TAG_CA_APPLICATION_INFO; /* ca tag */ c 986 drivers/media/firewire/firedtv-avc.c clear_operands(c, 6, LAST_OPERAND); c 1012 drivers/media/firewire/firedtv-avc.c struct avc_command_frame *c = (void *)fdtv->avc_data; c 1018 drivers/media/firewire/firedtv-avc.c c->ctype = AVC_CTYPE_STATUS; c 1019 drivers/media/firewire/firedtv-avc.c c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit; c 1020 drivers/media/firewire/firedtv-avc.c c->opcode = AVC_OPCODE_VENDOR; c 1022 drivers/media/firewire/firedtv-avc.c c->operand[0] = SFE_VENDOR_DE_COMPANYID_0; c 1023 drivers/media/firewire/firedtv-avc.c c->operand[1] = SFE_VENDOR_DE_COMPANYID_1; c 1024 drivers/media/firewire/firedtv-avc.c c->operand[2] = SFE_VENDOR_DE_COMPANYID_2; c 1025 drivers/media/firewire/firedtv-avc.c c->operand[3] = SFE_VENDOR_OPCODE_CA2HOST; c 1026 drivers/media/firewire/firedtv-avc.c c->operand[4] = 0; /* slot */ c 1027 drivers/media/firewire/firedtv-avc.c c->operand[5] = SFE_VENDOR_TAG_CA_APPLICATION_INFO; /* ca tag */ c 1028 drivers/media/firewire/firedtv-avc.c clear_operands(c, 6, LAST_OPERAND); c 1062 drivers/media/firewire/firedtv-avc.c struct avc_command_frame *c = (void *)fdtv->avc_data; c 1067 drivers/media/firewire/firedtv-avc.c c->ctype = AVC_CTYPE_CONTROL; c 1068 drivers/media/firewire/firedtv-avc.c c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit; c 1069 drivers/media/firewire/firedtv-avc.c c->opcode = AVC_OPCODE_VENDOR; c 1071 drivers/media/firewire/firedtv-avc.c c->operand[0] = SFE_VENDOR_DE_COMPANYID_0; c 1072 drivers/media/firewire/firedtv-avc.c c->operand[1] = SFE_VENDOR_DE_COMPANYID_1; c 1073 drivers/media/firewire/firedtv-avc.c c->operand[2] = SFE_VENDOR_DE_COMPANYID_2; c 1074 drivers/media/firewire/firedtv-avc.c c->operand[3] = SFE_VENDOR_OPCODE_HOST2CA; c 1075 drivers/media/firewire/firedtv-avc.c c->operand[4] = 0; /* slot */ c 1076 drivers/media/firewire/firedtv-avc.c c->operand[5] = SFE_VENDOR_TAG_CA_RESET; /* ca tag */ c 1077 drivers/media/firewire/firedtv-avc.c c->operand[6] = 0; /* more/last */ c 1078 
drivers/media/firewire/firedtv-avc.c c->operand[7] = 1; /* length */ c 1079 drivers/media/firewire/firedtv-avc.c c->operand[8] = 0; /* force hardware reset */ c 1093 drivers/media/firewire/firedtv-avc.c struct avc_command_frame *c = (void *)fdtv->avc_data; c 1109 drivers/media/firewire/firedtv-avc.c c->ctype = AVC_CTYPE_CONTROL; c 1110 drivers/media/firewire/firedtv-avc.c c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit; c 1111 drivers/media/firewire/firedtv-avc.c c->opcode = AVC_OPCODE_VENDOR; c 1124 drivers/media/firewire/firedtv-avc.c c->operand[0] = SFE_VENDOR_DE_COMPANYID_0; c 1125 drivers/media/firewire/firedtv-avc.c c->operand[1] = SFE_VENDOR_DE_COMPANYID_1; c 1126 drivers/media/firewire/firedtv-avc.c c->operand[2] = SFE_VENDOR_DE_COMPANYID_2; c 1127 drivers/media/firewire/firedtv-avc.c c->operand[3] = SFE_VENDOR_OPCODE_HOST2CA; c 1128 drivers/media/firewire/firedtv-avc.c c->operand[4] = 0; /* slot */ c 1129 drivers/media/firewire/firedtv-avc.c c->operand[5] = SFE_VENDOR_TAG_CA_PMT; /* ca tag */ c 1130 drivers/media/firewire/firedtv-avc.c c->operand[6] = 0; /* more/last */ c 1132 drivers/media/firewire/firedtv-avc.c c->operand[10] = list_management; c 1133 drivers/media/firewire/firedtv-avc.c c->operand[11] = 0x01; /* pmt_cmd=OK_descramble */ c 1137 drivers/media/firewire/firedtv-avc.c c->operand[12] = 0x02; /* Table id=2 */ c 1138 drivers/media/firewire/firedtv-avc.c c->operand[13] = 0x80; /* Section syntax + length */ c 1140 drivers/media/firewire/firedtv-avc.c c->operand[15] = msg[1]; /* Program number */ c 1141 drivers/media/firewire/firedtv-avc.c c->operand[16] = msg[2]; c 1142 drivers/media/firewire/firedtv-avc.c c->operand[17] = msg[3]; /* Version number and current/next */ c 1143 drivers/media/firewire/firedtv-avc.c c->operand[18] = 0x00; /* Section number=0 */ c 1144 drivers/media/firewire/firedtv-avc.c c->operand[19] = 0x00; /* Last section number=0 */ c 1145 drivers/media/firewire/firedtv-avc.c c->operand[20] = 0x1f; /* PCR_PID=1FFF */ c 1146 drivers/media/firewire/firedtv-avc.c c->operand[21] = 0xff; c 1147 drivers/media/firewire/firedtv-avc.c c->operand[22] = (program_info_length >> 8); /* Program info length */ c 1148 drivers/media/firewire/firedtv-avc.c c->operand[23] = (program_info_length & 0xff); c 1158 drivers/media/firewire/firedtv-avc.c if (program_info_length > sizeof(c->operand) - 4 - write_pos) { c 1163 drivers/media/firewire/firedtv-avc.c memcpy(&c->operand[write_pos], &msg[read_pos], c 1169 drivers/media/firewire/firedtv-avc.c c->operand[write_pos++] = msg[read_pos++]; c 1170 drivers/media/firewire/firedtv-avc.c c->operand[write_pos++] = msg[read_pos++]; c 1171 drivers/media/firewire/firedtv-avc.c c->operand[write_pos++] = msg[read_pos++]; c 1177 drivers/media/firewire/firedtv-avc.c c->operand[write_pos++] = es_info_length >> 8; c 1178 drivers/media/firewire/firedtv-avc.c c->operand[write_pos++] = es_info_length & 0xff; c 1185 drivers/media/firewire/firedtv-avc.c if (es_info_length > sizeof(c->operand) - 4 - c 1191 drivers/media/firewire/firedtv-avc.c memcpy(&c->operand[write_pos], &msg[read_pos], c 1199 drivers/media/firewire/firedtv-avc.c c->operand[7] = 0x82; c 1200 drivers/media/firewire/firedtv-avc.c c->operand[8] = (write_pos - 10) >> 8; c 1201 drivers/media/firewire/firedtv-avc.c c->operand[9] = (write_pos - 10) & 0xff; c 1202 drivers/media/firewire/firedtv-avc.c c->operand[14] = write_pos - 15; c 1204 drivers/media/firewire/firedtv-avc.c crc32_csum = crc32_be(0, &c->operand[10], c->operand[12] - 1); c 1205 drivers/media/firewire/firedtv-avc.c 
c->operand[write_pos - 4] = (crc32_csum >> 24) & 0xff; c 1206 drivers/media/firewire/firedtv-avc.c c->operand[write_pos - 3] = (crc32_csum >> 16) & 0xff; c 1207 drivers/media/firewire/firedtv-avc.c c->operand[write_pos - 2] = (crc32_csum >> 8) & 0xff; c 1208 drivers/media/firewire/firedtv-avc.c c->operand[write_pos - 1] = (crc32_csum >> 0) & 0xff; c 1209 drivers/media/firewire/firedtv-avc.c pad_operands(c, write_pos); c 1229 drivers/media/firewire/firedtv-avc.c struct avc_command_frame *c = (void *)fdtv->avc_data; c 1235 drivers/media/firewire/firedtv-avc.c c->ctype = AVC_CTYPE_STATUS; c 1236 drivers/media/firewire/firedtv-avc.c c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit; c 1237 drivers/media/firewire/firedtv-avc.c c->opcode = AVC_OPCODE_VENDOR; c 1239 drivers/media/firewire/firedtv-avc.c c->operand[0] = SFE_VENDOR_DE_COMPANYID_0; c 1240 drivers/media/firewire/firedtv-avc.c c->operand[1] = SFE_VENDOR_DE_COMPANYID_1; c 1241 drivers/media/firewire/firedtv-avc.c c->operand[2] = SFE_VENDOR_DE_COMPANYID_2; c 1242 drivers/media/firewire/firedtv-avc.c c->operand[3] = SFE_VENDOR_OPCODE_CA2HOST; c 1243 drivers/media/firewire/firedtv-avc.c c->operand[4] = 0; /* slot */ c 1244 drivers/media/firewire/firedtv-avc.c c->operand[5] = SFE_VENDOR_TAG_CA_DATE_TIME; /* ca tag */ c 1245 drivers/media/firewire/firedtv-avc.c clear_operands(c, 6, LAST_OPERAND); c 1263 drivers/media/firewire/firedtv-avc.c struct avc_command_frame *c = (void *)fdtv->avc_data; c 1268 drivers/media/firewire/firedtv-avc.c c->ctype = AVC_CTYPE_STATUS; c 1269 drivers/media/firewire/firedtv-avc.c c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit; c 1270 drivers/media/firewire/firedtv-avc.c c->opcode = AVC_OPCODE_VENDOR; c 1272 drivers/media/firewire/firedtv-avc.c c->operand[0] = SFE_VENDOR_DE_COMPANYID_0; c 1273 drivers/media/firewire/firedtv-avc.c c->operand[1] = SFE_VENDOR_DE_COMPANYID_1; c 1274 drivers/media/firewire/firedtv-avc.c c->operand[2] = SFE_VENDOR_DE_COMPANYID_2; c 1275 drivers/media/firewire/firedtv-avc.c c->operand[3] = SFE_VENDOR_OPCODE_HOST2CA; c 1276 drivers/media/firewire/firedtv-avc.c c->operand[4] = 0; /* slot */ c 1277 drivers/media/firewire/firedtv-avc.c c->operand[5] = SFE_VENDOR_TAG_CA_ENTER_MENU; c 1278 drivers/media/firewire/firedtv-avc.c clear_operands(c, 6, 8); c 1292 drivers/media/firewire/firedtv-avc.c struct avc_command_frame *c = (void *)fdtv->avc_data; c 1298 drivers/media/firewire/firedtv-avc.c c->ctype = AVC_CTYPE_STATUS; c 1299 drivers/media/firewire/firedtv-avc.c c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit; c 1300 drivers/media/firewire/firedtv-avc.c c->opcode = AVC_OPCODE_VENDOR; c 1302 drivers/media/firewire/firedtv-avc.c c->operand[0] = SFE_VENDOR_DE_COMPANYID_0; c 1303 drivers/media/firewire/firedtv-avc.c c->operand[1] = SFE_VENDOR_DE_COMPANYID_1; c 1304 drivers/media/firewire/firedtv-avc.c c->operand[2] = SFE_VENDOR_DE_COMPANYID_2; c 1305 drivers/media/firewire/firedtv-avc.c c->operand[3] = SFE_VENDOR_OPCODE_CA2HOST; c 1306 drivers/media/firewire/firedtv-avc.c c->operand[4] = 0; /* slot */ c 1307 drivers/media/firewire/firedtv-avc.c c->operand[5] = SFE_VENDOR_TAG_CA_MMI; c 1308 drivers/media/firewire/firedtv-avc.c clear_operands(c, 6, LAST_OPERAND); c 52 drivers/media/firewire/firedtv-dvb.c int pidc, c, ret; c 75 drivers/media/firewire/firedtv-dvb.c c = alloc_channel(fdtv); c 85 drivers/media/firewire/firedtv-dvb.c c = alloc_channel(fdtv); c 88 drivers/media/firewire/firedtv-dvb.c if (c > 15) { c 94 drivers/media/firewire/firedtv-dvb.c dvbdmxfeed->priv = 
(typeof(dvbdmxfeed->priv))(unsigned long)c; c 95 drivers/media/firewire/firedtv-dvb.c fdtv->channel_pid[c] = dvbdmxfeed->pid; c 101 drivers/media/firewire/firedtv-dvb.c dealloc_channel(fdtv, c); c 108 drivers/media/firewire/firedtv-dvb.c dealloc_channel(fdtv, c); c 123 drivers/media/firewire/firedtv-dvb.c int pidc, c, ret; c 147 drivers/media/firewire/firedtv-dvb.c c = (unsigned long)dvbdmxfeed->priv; c 148 drivers/media/firewire/firedtv-dvb.c dealloc_channel(fdtv, c); c 36 drivers/media/i2c/ad5820.c #define RAMP_US_TO_CODE(c) fls(((c) + ((c)>>1)) / 50) c 35 drivers/media/i2c/adp1653.c #define TIMEOUT_CODE_TO_US(c) (TIMEOUT_MAX - (c) * TIMEOUT_STEP) c 1307 drivers/media/i2c/adv7511-v4l2.c u8 c = HDMI_COLORIMETRY_NONE; c 1364 drivers/media/i2c/adv7511-v4l2.c c = HDMI_COLORIMETRY_EXTENDED; c 1369 drivers/media/i2c/adv7511-v4l2.c c = y ? HDMI_COLORIMETRY_ITU_601 : HDMI_COLORIMETRY_NONE; c 1371 drivers/media/i2c/adv7511-v4l2.c c = HDMI_COLORIMETRY_EXTENDED; c 1376 drivers/media/i2c/adv7511-v4l2.c c = y ? HDMI_COLORIMETRY_ITU_709 : HDMI_COLORIMETRY_NONE; c 1378 drivers/media/i2c/adv7511-v4l2.c c = HDMI_COLORIMETRY_EXTENDED; c 1383 drivers/media/i2c/adv7511-v4l2.c c = y ? HDMI_COLORIMETRY_EXTENDED : HDMI_COLORIMETRY_NONE; c 1388 drivers/media/i2c/adv7511-v4l2.c c = HDMI_COLORIMETRY_EXTENDED; c 1426 drivers/media/i2c/adv7511-v4l2.c adv7511_wr_and_or(sd, 0x56, 0x3f, c << 6); c 1860 drivers/media/i2c/adv7604.c #define _SEL(a,b,c,d,e,f) { \ c 1861 drivers/media/i2c/adv7604.c ADV76XX_OP_CH_SEL_##a, ADV76XX_OP_CH_SEL_##b, ADV76XX_OP_CH_SEL_##c, \ c 1747 drivers/media/i2c/adv7842.c const struct adv7842_sdp_csc_coeff *c) c 1750 drivers/media/i2c/adv7842.c sdp_io_write_and_or(sd, 0xe0, 0xbf, c->manual ? 0x00 : 0x40); c 1752 drivers/media/i2c/adv7842.c if (!c->manual) c 1756 drivers/media/i2c/adv7842.c sdp_io_write_and_or(sd, 0xe0, 0x7f, c->scaling == 2 ? 
0x80 : 0x00); c 1759 drivers/media/i2c/adv7842.c sdp_io_write_and_or(sd, 0xe0, 0xe0, c->A1 >> 8); c 1760 drivers/media/i2c/adv7842.c sdp_io_write(sd, 0xe1, c->A1); c 1761 drivers/media/i2c/adv7842.c sdp_io_write_and_or(sd, 0xe2, 0xe0, c->A2 >> 8); c 1762 drivers/media/i2c/adv7842.c sdp_io_write(sd, 0xe3, c->A2); c 1763 drivers/media/i2c/adv7842.c sdp_io_write_and_or(sd, 0xe4, 0xe0, c->A3 >> 8); c 1764 drivers/media/i2c/adv7842.c sdp_io_write(sd, 0xe5, c->A3); c 1767 drivers/media/i2c/adv7842.c sdp_io_write_and_or(sd, 0xe6, 0x80, c->A4 >> 8); c 1768 drivers/media/i2c/adv7842.c sdp_io_write(sd, 0xe7, c->A4); c 1771 drivers/media/i2c/adv7842.c sdp_io_write_and_or(sd, 0xe8, 0xe0, c->B1 >> 8); c 1772 drivers/media/i2c/adv7842.c sdp_io_write(sd, 0xe9, c->B1); c 1773 drivers/media/i2c/adv7842.c sdp_io_write_and_or(sd, 0xea, 0xe0, c->B2 >> 8); c 1774 drivers/media/i2c/adv7842.c sdp_io_write(sd, 0xeb, c->B2); c 1775 drivers/media/i2c/adv7842.c sdp_io_write_and_or(sd, 0xec, 0xe0, c->B3 >> 8); c 1776 drivers/media/i2c/adv7842.c sdp_io_write(sd, 0xed, c->B3); c 1779 drivers/media/i2c/adv7842.c sdp_io_write_and_or(sd, 0xee, 0x80, c->B4 >> 8); c 1780 drivers/media/i2c/adv7842.c sdp_io_write(sd, 0xef, c->B4); c 1783 drivers/media/i2c/adv7842.c sdp_io_write_and_or(sd, 0xf0, 0xe0, c->C1 >> 8); c 1784 drivers/media/i2c/adv7842.c sdp_io_write(sd, 0xf1, c->C1); c 1785 drivers/media/i2c/adv7842.c sdp_io_write_and_or(sd, 0xf2, 0xe0, c->C2 >> 8); c 1786 drivers/media/i2c/adv7842.c sdp_io_write(sd, 0xf3, c->C2); c 1787 drivers/media/i2c/adv7842.c sdp_io_write_and_or(sd, 0xf4, 0xe0, c->C3 >> 8); c 1788 drivers/media/i2c/adv7842.c sdp_io_write(sd, 0xf5, c->C3); c 1791 drivers/media/i2c/adv7842.c sdp_io_write_and_or(sd, 0xf6, 0x80, c->C4 >> 8); c 1792 drivers/media/i2c/adv7842.c sdp_io_write(sd, 0xf7, c->C4); c 2042 drivers/media/i2c/adv7842.c #define _SEL(a, b, c, d, e, f) { \ c 2043 drivers/media/i2c/adv7842.c ADV7842_OP_CH_SEL_##a, ADV7842_OP_CH_SEL_##b, ADV7842_OP_CH_SEL_##c, \ c 604 drivers/media/i2c/cx25840/cx25840-core.c cx25840_loadfw(state->c); c 2639 drivers/media/i2c/cx25840/cx25840-core.c struct i2c_client *c = v4l2_get_subdevdata(sd); c 2645 drivers/media/i2c/cx25840/cx25840-core.c irq_stat = cx25840_read(c, CX23885_PIN_CTRL_IRQ_REG); c 2646 drivers/media/i2c/cx25840/cx25840-core.c v4l_dbg(2, cx25840_debug, c, "AV Core IRQ status (entry): %s %s %s\n", c 2652 drivers/media/i2c/cx25840/cx25840-core.c ir_stat = cx25840_read(c, CX25840_IR_STATS_REG); c 2653 drivers/media/i2c/cx25840/cx25840-core.c ir_en = cx25840_read(c, CX25840_IR_IRQEN_REG); c 2654 drivers/media/i2c/cx25840/cx25840-core.c v4l_dbg(2, cx25840_debug, c, c 2666 drivers/media/i2c/cx25840/cx25840-core.c aud_stat = cx25840_read(c, CX25840_AUD_INT_STAT_REG); c 2667 drivers/media/i2c/cx25840/cx25840-core.c aud_en = cx25840_read(c, CX25840_AUD_INT_CTRL_REG); c 2668 drivers/media/i2c/cx25840/cx25840-core.c v4l_dbg(2, cx25840_debug, c, c 2671 drivers/media/i2c/cx25840/cx25840-core.c aud_mc_stat = cx25840_read4(c, CX23885_AUD_MC_INT_MASK_REG); c 2672 drivers/media/i2c/cx25840/cx25840-core.c v4l_dbg(2, cx25840_debug, c, c 2678 drivers/media/i2c/cx25840/cx25840-core.c cx25840_write(c, CX25840_AUD_INT_STAT_REG, aud_stat); c 2683 drivers/media/i2c/cx25840/cx25840-core.c vid_stat = cx25840_read4(c, CX25840_VID_INT_STAT_REG); c 2684 drivers/media/i2c/cx25840/cx25840-core.c v4l_dbg(2, cx25840_debug, c, c 2690 drivers/media/i2c/cx25840/cx25840-core.c cx25840_write4(c, CX25840_VID_INT_STAT_REG, vid_stat); c 2695 drivers/media/i2c/cx25840/cx25840-core.c 
irq_stat = cx25840_read(c, CX23885_PIN_CTRL_IRQ_REG); c 2696 drivers/media/i2c/cx25840/cx25840-core.c v4l_dbg(2, cx25840_debug, c, "AV Core IRQ status (exit): %s %s %s\n", c 5952 drivers/media/i2c/cx25840/cx25840-core.c state->c = client; c 72 drivers/media/i2c/cx25840/cx25840-core.h struct i2c_client *c; c 99 drivers/media/i2c/cx25840/cx25840-ir.c struct i2c_client *c; c 330 drivers/media/i2c/cx25840/cx25840-ir.c static inline void control_tx_irq_watermark(struct i2c_client *c, c 333 drivers/media/i2c/cx25840/cx25840-ir.c cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_TIC, level); c 336 drivers/media/i2c/cx25840/cx25840-ir.c static inline void control_rx_irq_watermark(struct i2c_client *c, c 339 drivers/media/i2c/cx25840/cx25840-ir.c cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_RIC, level); c 342 drivers/media/i2c/cx25840/cx25840-ir.c static inline void control_tx_enable(struct i2c_client *c, bool enable) c 344 drivers/media/i2c/cx25840/cx25840-ir.c cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~(CNTRL_TXE | CNTRL_TFE), c 348 drivers/media/i2c/cx25840/cx25840-ir.c static inline void control_rx_enable(struct i2c_client *c, bool enable) c 350 drivers/media/i2c/cx25840/cx25840-ir.c cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~(CNTRL_RXE | CNTRL_RFE), c 354 drivers/media/i2c/cx25840/cx25840-ir.c static inline void control_tx_modulation_enable(struct i2c_client *c, c 357 drivers/media/i2c/cx25840/cx25840-ir.c cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_MOD, c 361 drivers/media/i2c/cx25840/cx25840-ir.c static inline void control_rx_demodulation_enable(struct i2c_client *c, c 364 drivers/media/i2c/cx25840/cx25840-ir.c cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_DMD, c 368 drivers/media/i2c/cx25840/cx25840-ir.c static inline void control_rx_s_edge_detection(struct i2c_client *c, c 371 drivers/media/i2c/cx25840/cx25840-ir.c cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_EDG_BOTH, c 375 drivers/media/i2c/cx25840/cx25840-ir.c static void control_rx_s_carrier_window(struct i2c_client *c, c 398 drivers/media/i2c/cx25840/cx25840-ir.c cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_WIN, v); c 401 drivers/media/i2c/cx25840/cx25840-ir.c static inline void control_tx_polarity_invert(struct i2c_client *c, c 404 drivers/media/i2c/cx25840/cx25840-ir.c cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_CPL, c 411 drivers/media/i2c/cx25840/cx25840-ir.c static unsigned int txclk_tx_s_carrier(struct i2c_client *c, c 416 drivers/media/i2c/cx25840/cx25840-ir.c cx25840_write4(c, CX25840_IR_TXCLK_REG, *divider); c 420 drivers/media/i2c/cx25840/cx25840-ir.c static unsigned int rxclk_rx_s_carrier(struct i2c_client *c, c 425 drivers/media/i2c/cx25840/cx25840-ir.c cx25840_write4(c, CX25840_IR_RXCLK_REG, *divider); c 429 drivers/media/i2c/cx25840/cx25840-ir.c static u32 txclk_tx_s_max_pulse_width(struct i2c_client *c, u32 ns, c 438 drivers/media/i2c/cx25840/cx25840-ir.c cx25840_write4(c, CX25840_IR_TXCLK_REG, *divider); c 442 drivers/media/i2c/cx25840/cx25840-ir.c static u32 rxclk_rx_s_max_pulse_width(struct i2c_client *c, u32 ns, c 451 drivers/media/i2c/cx25840/cx25840-ir.c cx25840_write4(c, CX25840_IR_RXCLK_REG, *divider); c 458 drivers/media/i2c/cx25840/cx25840-ir.c static unsigned int cduty_tx_s_duty_cycle(struct i2c_client *c, c 467 drivers/media/i2c/cx25840/cx25840-ir.c cx25840_write4(c, CX25840_IR_CDUTY_REG, n); c 474 drivers/media/i2c/cx25840/cx25840-ir.c static u32 filter_rx_s_min_width(struct i2c_client *c, u32 min_width_ns) c 477 drivers/media/i2c/cx25840/cx25840-ir.c cx25840_write4(c, CX25840_IR_FILTR_REG, 
count); c 491 drivers/media/i2c/cx25840/cx25840-ir.c cx25840_and_or4(state->c, CX25840_IR_IRQEN_REG, c 502 drivers/media/i2c/cx25840/cx25840-ir.c cx25840_and_or4(state->c, CX25840_IR_IRQEN_REG, ~IRQEN_TSE, mask); c 512 drivers/media/i2c/cx25840/cx25840-ir.c struct i2c_client *c = NULL; c 525 drivers/media/i2c/cx25840/cx25840-ir.c c = ir_state->c; c 531 drivers/media/i2c/cx25840/cx25840-ir.c cntrl = cx25840_read4(c, CX25840_IR_CNTRL_REG); c 532 drivers/media/i2c/cx25840/cx25840-ir.c irqen = cx25840_read4(c, CX25840_IR_IRQEN_REG); c 535 drivers/media/i2c/cx25840/cx25840-ir.c stats = cx25840_read4(c, CX25840_IR_STATS_REG); c 592 drivers/media/i2c/cx25840/cx25840-ir.c v = cx25840_read4(c, CX25840_IR_FIFO_REG); c 633 drivers/media/i2c/cx25840/cx25840-ir.c cx25840_write4(c, CX25840_IR_CNTRL_REG, cntrl & ~v); c 634 drivers/media/i2c/cx25840/cx25840-ir.c cx25840_write4(c, CX25840_IR_CNTRL_REG, cntrl); c 724 drivers/media/i2c/cx25840/cx25840-ir.c struct i2c_client *c; c 729 drivers/media/i2c/cx25840/cx25840-ir.c c = ir_state->c; c 734 drivers/media/i2c/cx25840/cx25840-ir.c control_rx_enable(c, false); c 735 drivers/media/i2c/cx25840/cx25840-ir.c control_rx_demodulation_enable(c, false); c 736 drivers/media/i2c/cx25840/cx25840-ir.c control_rx_s_edge_detection(c, CNTRL_EDG_NONE); c 737 drivers/media/i2c/cx25840/cx25840-ir.c filter_rx_s_min_width(c, 0); c 738 drivers/media/i2c/cx25840/cx25840-ir.c cx25840_write4(c, CX25840_IR_RXCLK_REG, RXCLK_RCD); c 750 drivers/media/i2c/cx25840/cx25840-ir.c struct i2c_client *c; c 763 drivers/media/i2c/cx25840/cx25840-ir.c c = ir_state->c; c 778 drivers/media/i2c/cx25840/cx25840-ir.c control_rx_enable(c, false); c 780 drivers/media/i2c/cx25840/cx25840-ir.c control_rx_demodulation_enable(c, p->modulation); c 784 drivers/media/i2c/cx25840/cx25840-ir.c p->carrier_freq = rxclk_rx_s_carrier(c, p->carrier_freq, c 792 drivers/media/i2c/cx25840/cx25840-ir.c control_rx_s_carrier_window(c, p->carrier_freq, c 802 drivers/media/i2c/cx25840/cx25840-ir.c rxclk_rx_s_max_pulse_width(c, p->max_pulse_width, c 809 drivers/media/i2c/cx25840/cx25840-ir.c filter_rx_s_min_width(c, p->noise_filter_min_width); c 816 drivers/media/i2c/cx25840/cx25840-ir.c control_rx_irq_watermark(c, RX_FIFO_HALF_FULL); c 818 drivers/media/i2c/cx25840/cx25840-ir.c control_rx_s_edge_detection(c, CNTRL_EDG_BOTH); c 833 drivers/media/i2c/cx25840/cx25840-ir.c control_rx_enable(c, p->enable); c 914 drivers/media/i2c/cx25840/cx25840-ir.c struct i2c_client *c; c 919 drivers/media/i2c/cx25840/cx25840-ir.c c = ir_state->c; c 924 drivers/media/i2c/cx25840/cx25840-ir.c control_tx_enable(c, false); c 925 drivers/media/i2c/cx25840/cx25840-ir.c control_tx_modulation_enable(c, false); c 926 drivers/media/i2c/cx25840/cx25840-ir.c cx25840_write4(c, CX25840_IR_TXCLK_REG, TXCLK_TCD); c 938 drivers/media/i2c/cx25840/cx25840-ir.c struct i2c_client *c; c 951 drivers/media/i2c/cx25840/cx25840-ir.c c = ir_state->c; c 965 drivers/media/i2c/cx25840/cx25840-ir.c control_tx_enable(c, false); c 967 drivers/media/i2c/cx25840/cx25840-ir.c control_tx_modulation_enable(c, p->modulation); c 971 drivers/media/i2c/cx25840/cx25840-ir.c p->carrier_freq = txclk_tx_s_carrier(c, p->carrier_freq, c 975 drivers/media/i2c/cx25840/cx25840-ir.c p->duty_cycle = cduty_tx_s_duty_cycle(c, p->duty_cycle); c 982 drivers/media/i2c/cx25840/cx25840-ir.c txclk_tx_s_max_pulse_width(c, p->max_pulse_width, c 992 drivers/media/i2c/cx25840/cx25840-ir.c control_tx_irq_watermark(c, TX_FIFO_HALF_EMPTY); c 994 drivers/media/i2c/cx25840/cx25840-ir.c 
control_tx_polarity_invert(c, p->invert_carrier_sense); c 1011 drivers/media/i2c/cx25840/cx25840-ir.c control_tx_enable(c, p->enable); c 1025 drivers/media/i2c/cx25840/cx25840-ir.c struct i2c_client *c = state->c; c 1034 drivers/media/i2c/cx25840/cx25840-ir.c cntrl = cx25840_read4(c, CX25840_IR_CNTRL_REG); c 1035 drivers/media/i2c/cx25840/cx25840-ir.c txclk = cx25840_read4(c, CX25840_IR_TXCLK_REG) & TXCLK_TCD; c 1036 drivers/media/i2c/cx25840/cx25840-ir.c rxclk = cx25840_read4(c, CX25840_IR_RXCLK_REG) & RXCLK_RCD; c 1037 drivers/media/i2c/cx25840/cx25840-ir.c cduty = cx25840_read4(c, CX25840_IR_CDUTY_REG) & CDUTY_CDC; c 1038 drivers/media/i2c/cx25840/cx25840-ir.c stats = cx25840_read4(c, CX25840_IR_STATS_REG); c 1039 drivers/media/i2c/cx25840/cx25840-ir.c irqen = cx25840_read4(c, CX25840_IR_IRQEN_REG); c 1042 drivers/media/i2c/cx25840/cx25840-ir.c filtr = cx25840_read4(c, CX25840_IR_FILTR_REG) & FILTR_LPF; c 1216 drivers/media/i2c/cx25840/cx25840-ir.c ir_state = devm_kzalloc(&state->c->dev, sizeof(*ir_state), GFP_KERNEL); c 1225 drivers/media/i2c/cx25840/cx25840-ir.c ir_state->c = state->c; c 1230 drivers/media/i2c/cx25840/cx25840-ir.c cx25840_write4(ir_state->c, CX25840_IR_IRQEN_REG, IRQEN_MSK); c 1232 drivers/media/i2c/cx25840/cx25840-ir.c cx25840_write4(ir_state->c, CX25840_IR_IRQEN_REG, 0); c 13 drivers/media/i2c/cx25840/cx25840-vbi.c static int odd_parity(u8 c) c 15 drivers/media/i2c/cx25840/cx25840-vbi.c c ^= (c >> 4); c 16 drivers/media/i2c/cx25840/cx25840-vbi.c c ^= (c >> 2); c 17 drivers/media/i2c/cx25840/cx25840-vbi.c c ^= (c >> 1); c 19 drivers/media/i2c/cx25840/cx25840-vbi.c return c & 1; c 59 drivers/media/i2c/cx25840/cx25840-vbi.c u8 c, err = 0; c 64 drivers/media/i2c/cx25840/cx25840-vbi.c c = (biphase_tbl[p[i + 1]] & 0xf) | c 66 drivers/media/i2c/cx25840/cx25840-vbi.c dst[i / 2] = c; c 66 drivers/media/i2c/ir-kbd-i2c.c if (size != i2c_master_recv(ir->c, buf, size)) c 151 drivers/media/i2c/ir-kbd-i2c.c ret = i2c_master_send(ir->c, buf, 1); c 165 drivers/media/i2c/ir-kbd-i2c.c rc = i2c_master_recv(ir->c, &b, 1); c 186 drivers/media/i2c/ir-kbd-i2c.c rc = i2c_master_recv(ir->c, buf, 4); c 214 drivers/media/i2c/ir-kbd-i2c.c rc = i2c_master_recv(ir->c, &b, 1); c 245 drivers/media/i2c/ir-kbd-i2c.c struct i2c_msg msg[] = { { .addr = ir->c->addr, .flags = 0, c 247 drivers/media/i2c/ir-kbd-i2c.c { .addr = ir->c->addr, .flags = I2C_M_RD, c 250 drivers/media/i2c/ir-kbd-i2c.c if (2 != i2c_transfer(ir->c->adapter, msg, 2)) { c 260 drivers/media/i2c/ir-kbd-i2c.c if (2 != i2c_transfer(ir->c->adapter, msg, 2)) { c 277 drivers/media/i2c/ir-kbd-i2c.c if (ir->c->addr == 0x41) /* AVerMedia EM78P153 */ c 436 drivers/media/i2c/ir-kbd-i2c.c dev_err(&ir->c->dev, "failed to retrieve firmware version: %d\n", c 441 drivers/media/i2c/ir-kbd-i2c.c dev_info(&ir->c->dev, "Zilog/Hauppauge IR blaster firmware version %d.%d.%d\n", c 453 drivers/media/i2c/ir-kbd-i2c.c u8 c, last = 0xff; c 456 drivers/media/i2c/ir-kbd-i2c.c c = *src++; c 457 drivers/media/i2c/ir-kbd-i2c.c if ((c & 0xf0) == last) { c 458 drivers/media/i2c/ir-kbd-i2c.c *dst++ = 0x70 | (c & 0xf); c 460 drivers/media/i2c/ir-kbd-i2c.c *dst++ = c; c 461 drivers/media/i2c/ir-kbd-i2c.c last = c & 0xf0; c 500 drivers/media/i2c/ir-kbd-i2c.c int rep, i, l, p = 0, s, c = 0; c 510 drivers/media/i2c/ir-kbd-i2c.c if (c >= ARRAY_SIZE(codes) - 1) { c 533 drivers/media/i2c/ir-kbd-i2c.c codes[c++] = (p << 4) | s; c 553 drivers/media/i2c/ir-kbd-i2c.c codes[c++] = (p << 4) | s; c 555 drivers/media/i2c/ir-kbd-i2c.c dev_dbg(&rcdev->dev, "generated %d codes\n", c); c 
564 drivers/media/i2c/ir-kbd-i2c.c for (rep = c / 3; rep >= 1; rep--) { c 565 drivers/media/i2c/ir-kbd-i2c.c if (!memcmp(&codes[c - rep * 3], &codes[c - rep * 2], rep) && c 566 drivers/media/i2c/ir-kbd-i2c.c !cmp_no_trail(&codes[c - rep], &codes[c - rep * 2], rep)) { c 574 drivers/media/i2c/ir-kbd-i2c.c int leading = c - rep * 3; c 586 drivers/media/i2c/ir-kbd-i2c.c c = leading + 1 + rep; c 587 drivers/media/i2c/ir-kbd-i2c.c code_block->codes[c++] = 0xc0; c 589 drivers/media/i2c/ir-kbd-i2c.c if (c >= ARRAY_SIZE(code_block->codes) - 3) { c 596 drivers/media/i2c/ir-kbd-i2c.c copy_codes(code_block->codes + 1, codes, c); c 597 drivers/media/i2c/ir-kbd-i2c.c c++; c 598 drivers/media/i2c/ir-kbd-i2c.c code_block->codes[c++] = 0xc4; c 601 drivers/media/i2c/ir-kbd-i2c.c while (c < ARRAY_SIZE(code_block->codes)) c 602 drivers/media/i2c/ir-kbd-i2c.c code_block->codes[c++] = 0x83; c 744 drivers/media/i2c/ir-kbd-i2c.c ir->c = client; c 65 drivers/media/i2c/mt9t112.c #define mt9t112_reg_mask_set(ret, client, a, b, c) \ c 66 drivers/media/i2c/mt9t112.c ECHECKER(ret, __mt9t112_reg_mask_set(client, a, b, c)) c 67 drivers/media/i2c/mt9t112.c #define mt9t112_mcu_mask_set(ret, client, a, b, c) \ c 68 drivers/media/i2c/mt9t112.c ECHECKER(ret, __mt9t112_mcu_mask_set(client, a, b, c)) c 72 drivers/media/i2c/mt9v011.c struct i2c_client *c = v4l2_get_subdevdata(sd); c 76 drivers/media/i2c/mt9v011.c rc = i2c_master_send(c, &addr, 1); c 83 drivers/media/i2c/mt9v011.c rc = i2c_master_recv(c, (char *)&buffer, 2); c 98 drivers/media/i2c/mt9v011.c struct i2c_client *c = v4l2_get_subdevdata(sd); c 108 drivers/media/i2c/mt9v011.c rc = i2c_master_send(c, buffer, 3); c 481 drivers/media/i2c/mt9v011.c static int mt9v011_probe(struct i2c_client *c, c 492 drivers/media/i2c/mt9v011.c if (!i2c_check_functionality(c->adapter, c 496 drivers/media/i2c/mt9v011.c core = devm_kzalloc(&c->dev, sizeof(struct mt9v011), GFP_KERNEL); c 501 drivers/media/i2c/mt9v011.c v4l2_i2c_subdev_init(sd, c, &mt9v011_ops); c 550 drivers/media/i2c/mt9v011.c if (c->dev.platform_data) { c 551 drivers/media/i2c/mt9v011.c struct mt9v011_platform_data *pdata = c->dev.platform_data; c 558 drivers/media/i2c/mt9v011.c v4l_info(c, "chip found @ 0x%02x (%s - chip version 0x%04x)\n", c 559 drivers/media/i2c/mt9v011.c c->addr << 1, c->adapter->name, version); c 564 drivers/media/i2c/mt9v011.c static int mt9v011_remove(struct i2c_client *c) c 566 drivers/media/i2c/mt9v011.c struct v4l2_subdev *sd = i2c_get_clientdata(c); c 571 drivers/media/i2c/mt9v011.c c->addr << 1); c 220 drivers/media/i2c/mt9v111.c static int __mt9v111_read(struct i2c_client *c, u8 reg, u16 *val) c 226 drivers/media/i2c/mt9v111.c msg[0].addr = c->addr; c 231 drivers/media/i2c/mt9v111.c msg[1].addr = c->addr; c 236 drivers/media/i2c/mt9v111.c ret = i2c_transfer(c->adapter, msg, 2); c 238 drivers/media/i2c/mt9v111.c dev_err(&c->dev, "i2c read transfer error: %d\n", ret); c 244 drivers/media/i2c/mt9v111.c dev_dbg(&c->dev, "%s: %x=%x\n", __func__, reg, *val); c 249 drivers/media/i2c/mt9v111.c static int __mt9v111_write(struct i2c_client *c, u8 reg, u16 val) c 259 drivers/media/i2c/mt9v111.c msg.addr = c->addr; c 264 drivers/media/i2c/mt9v111.c dev_dbg(&c->dev, "%s: %x = %x%x\n", __func__, reg, buf[1], buf[2]); c 266 drivers/media/i2c/mt9v111.c ret = i2c_transfer(c->adapter, &msg, 1); c 268 drivers/media/i2c/mt9v111.c dev_err(&c->dev, "i2c write transfer error: %d\n", ret); c 275 drivers/media/i2c/mt9v111.c static int __mt9v111_addr_space_select(struct i2c_client *c, u16 addr_space) c 277 
drivers/media/i2c/mt9v111.c struct v4l2_subdev *sd = i2c_get_clientdata(c); c 285 drivers/media/i2c/mt9v111.c ret = __mt9v111_write(c, MT9V111_R01_ADDR_SPACE, addr_space); c 290 drivers/media/i2c/mt9v111.c ret = __mt9v111_read(c, MT9V111_R01_ADDR_SPACE, &val); c 302 drivers/media/i2c/mt9v111.c static int mt9v111_read(struct i2c_client *c, u8 addr_space, u8 reg, u16 *val) c 307 drivers/media/i2c/mt9v111.c ret = __mt9v111_addr_space_select(c, addr_space); c 311 drivers/media/i2c/mt9v111.c ret = __mt9v111_read(c, reg, val); c 318 drivers/media/i2c/mt9v111.c static int mt9v111_write(struct i2c_client *c, u8 addr_space, u8 reg, u16 val) c 323 drivers/media/i2c/mt9v111.c ret = __mt9v111_addr_space_select(c, addr_space); c 327 drivers/media/i2c/mt9v111.c ret = __mt9v111_write(c, reg, val); c 334 drivers/media/i2c/mt9v111.c static int mt9v111_update(struct i2c_client *c, u8 addr_space, u8 reg, c 341 drivers/media/i2c/mt9v111.c ret = __mt9v111_addr_space_select(c, addr_space); c 346 drivers/media/i2c/mt9v111.c ret = __mt9v111_read(c, reg, &current_val); c 352 drivers/media/i2c/mt9v111.c ret = __mt9v111_write(c, reg, current_val); c 412 drivers/media/i2c/mt9v111.c struct i2c_client *c = mt9v111->client; c 417 drivers/media/i2c/mt9v111.c ret = mt9v111_update(c, MT9V111_R01_CORE, c 424 drivers/media/i2c/mt9v111.c ret = mt9v111_update(c, MT9V111_R01_CORE, c 431 drivers/media/i2c/mt9v111.c ret = mt9v111_update(c, MT9V111_R01_IFP, c 438 drivers/media/i2c/mt9v111.c ret = mt9v111_update(c, MT9V111_R01_IFP, c 536 drivers/media/i2c/mt9v111.c struct i2c_client *c = mt9v111->client; c 549 drivers/media/i2c/mt9v111.c mt9v111_update(c, MT9V111_R01_CORE, c 552 drivers/media/i2c/mt9v111.c mt9v111_update(c, MT9V111_R01_CORE, c 581 drivers/media/i2c/mt9v111.c ret = mt9v111_update(c, MT9V111_R01_IFP, MT9V111_IFP_R3A_OUTFMT_CTRL2, c 596 drivers/media/i2c/mt9v111.c ret = mt9v111_write(c, MT9V111_R01_IFP, MT9V111_IFP_RA5_HPAN, c 601 drivers/media/i2c/mt9v111.c ret = mt9v111_write(c, MT9V111_R01_IFP, MT9V111_IFP_RA8_VPAN, c 606 drivers/media/i2c/mt9v111.c ret = mt9v111_write(c, MT9V111_R01_IFP, MT9V111_IFP_RA6_HZOOM, c 612 drivers/media/i2c/mt9v111.c ret = mt9v111_write(c, MT9V111_R01_IFP, MT9V111_IFP_RA9_VZOOM, c 618 drivers/media/i2c/mt9v111.c ret = mt9v111_write(c, MT9V111_R01_IFP, MT9V111_IFP_RA7_HOUT, c 624 drivers/media/i2c/mt9v111.c ret = mt9v111_write(c, MT9V111_R01_IFP, MT9V111_IFP_RAA_VOUT, c 640 drivers/media/i2c/mt9v111.c return mt9v111_write(c, MT9V111_R01_CORE, MT9V111_CORE_R09_PIXEL_INT, c 1548 drivers/media/i2c/s5c73m3/s5c73m3-core.c struct i2c_client *c = state->i2c_client; c 1556 drivers/media/i2c/s5c73m3/s5c73m3-core.c ret = devm_gpio_request_one(&c->dev, g[i].gpio, flags, c 1559 drivers/media/i2c/s5c73m3/s5c73m3-core.c v4l2_err(c, "failed to request gpio %s\n", c 170 drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c struct s5c73m3_ctrls *c = &state->ctrls; c 176 drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c if (c->focus_auto->val) c 213 drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c struct s5c73m3_ctrls *c = &state->ctrls; c 216 drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c if (c->af_distance->is_new) { c 217 drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c u16 mode = (c->af_distance->val == V4L2_AUTO_FOCUS_RANGE_MACRO) c 224 drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c if (!ret || (c->focus_auto->is_new && c->focus_auto->val) || c 225 drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c c->af_start->is_new) c 227 drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c else if ((c->focus_auto->is_new && !c->focus_auto->val) || c 228
drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c c->af_stop->is_new) c 375 drivers/media/i2c/s5k4ecgx.c static int s5k4ecgx_set_input_window(struct i2c_client *c, c 380 drivers/media/i2c/s5k4ecgx.c ret = s5k4ecgx_write(c, REG_G_PREV_IN_WIDTH, r->width); c 382 drivers/media/i2c/s5k4ecgx.c ret = s5k4ecgx_write(c, REG_G_PREV_IN_HEIGHT, r->height); c 384 drivers/media/i2c/s5k4ecgx.c ret = s5k4ecgx_write(c, REG_G_PREV_IN_XOFFS, r->left); c 386 drivers/media/i2c/s5k4ecgx.c ret = s5k4ecgx_write(c, REG_G_PREV_IN_YOFFS, r->top); c 388 drivers/media/i2c/s5k4ecgx.c ret = s5k4ecgx_write(c, REG_G_CAP_IN_WIDTH, r->width); c 390 drivers/media/i2c/s5k4ecgx.c ret = s5k4ecgx_write(c, REG_G_CAP_IN_HEIGHT, r->height); c 392 drivers/media/i2c/s5k4ecgx.c ret = s5k4ecgx_write(c, REG_G_CAP_IN_XOFFS, r->left); c 394 drivers/media/i2c/s5k4ecgx.c ret = s5k4ecgx_write(c, REG_G_CAP_IN_YOFFS, r->top); c 400 drivers/media/i2c/s5k4ecgx.c static int s5k4ecgx_set_zoom_window(struct i2c_client *c, c 405 drivers/media/i2c/s5k4ecgx.c ret = s5k4ecgx_write(c, REG_G_PREVZOOM_IN_WIDTH, r->width); c 407 drivers/media/i2c/s5k4ecgx.c ret = s5k4ecgx_write(c, REG_G_PREVZOOM_IN_HEIGHT, r->height); c 409 drivers/media/i2c/s5k4ecgx.c ret = s5k4ecgx_write(c, REG_G_PREVZOOM_IN_XOFFS, r->left); c 411 drivers/media/i2c/s5k4ecgx.c ret = s5k4ecgx_write(c, REG_G_PREVZOOM_IN_YOFFS, r->top); c 413 drivers/media/i2c/s5k4ecgx.c ret = s5k4ecgx_write(c, REG_G_CAPZOOM_IN_WIDTH, r->width); c 415 drivers/media/i2c/s5k4ecgx.c ret = s5k4ecgx_write(c, REG_G_CAPZOOM_IN_HEIGHT, r->height); c 417 drivers/media/i2c/s5k4ecgx.c ret = s5k4ecgx_write(c, REG_G_CAPZOOM_IN_XOFFS, r->left); c 419 drivers/media/i2c/s5k4ecgx.c ret = s5k4ecgx_write(c, REG_G_CAPZOOM_IN_YOFFS, r->top); c 421 drivers/media/i2c/s5k5baf.c struct i2c_client *c = v4l2_get_subdevdata(&state->sd); c 425 drivers/media/i2c/s5k5baf.c { .addr = c->addr, .flags = 0, c 427 drivers/media/i2c/s5k5baf.c { .addr = c->addr, .flags = I2C_M_RD, c 436 drivers/media/i2c/s5k5baf.c ret = i2c_transfer(c->adapter, msg, 2); c 439 drivers/media/i2c/s5k5baf.c v4l2_dbg(3, debug, c, "i2c_read: 0x%04x : 0x%04x\n", addr, res); c 442 drivers/media/i2c/s5k5baf.c v4l2_err(c, "i2c_read: error during transfer (%d)\n", ret); c 451 drivers/media/i2c/s5k5baf.c struct i2c_client *c = v4l2_get_subdevdata(&state->sd); c 457 drivers/media/i2c/s5k5baf.c ret = i2c_master_send(c, buf, 4); c 458 drivers/media/i2c/s5k5baf.c v4l2_dbg(3, debug, c, "i2c_write: 0x%04x : 0x%04x\n", addr, val); c 461 drivers/media/i2c/s5k5baf.c v4l2_err(c, "i2c_write: error during transfer (%d)\n", ret); c 481 drivers/media/i2c/s5k5baf.c struct i2c_client *c = v4l2_get_subdevdata(&state->sd); c 488 drivers/media/i2c/s5k5baf.c v4l2_dbg(3, debug, c, "i2c_write_seq(count=%d): %*ph\n", count, c 501 drivers/media/i2c/s5k5baf.c ret = i2c_master_send(c, (char *)buf, i); c 503 drivers/media/i2c/s5k5baf.c v4l2_err(c, "i2c_write_seq: error during transfer (%d)\n", ret); c 741 drivers/media/i2c/s5k5baf.c int i, c = -1; c 748 drivers/media/i2c/s5k5baf.c if (c < 0) c 749 drivers/media/i2c/s5k5baf.c c = i; c 751 drivers/media/i2c/s5k5baf.c return (c < 0) ? 
0 : c; c 1028 drivers/media/i2c/s5k5baf.c struct i2c_client *c = v4l2_get_subdevdata(&state->sd); c 1032 drivers/media/i2c/s5k5baf.c ret = request_firmware(&fw, S5K5BAF_FW_FILENAME, &c->dev); c 1034 drivers/media/i2c/s5k5baf.c dev_warn(&c->dev, "firmware file (%s) not loaded\n", c 1039 drivers/media/i2c/s5k5baf.c ret = s5k5baf_fw_parse(&c->dev, &state->fw, fw->size / 2, c 1797 drivers/media/i2c/s5k5baf.c struct i2c_client *c = v4l2_get_subdevdata(&state->sd); c 1805 drivers/media/i2c/s5k5baf.c ret = devm_gpio_request_one(&c->dev, g[i].gpio, flags, name[i]); c 1807 drivers/media/i2c/s5k5baf.c v4l2_err(c, "failed to request gpio %s\n", name[i]); c 1890 drivers/media/i2c/s5k5baf.c struct i2c_client *c) c 1900 drivers/media/i2c/s5k5baf.c i2c_adapter_id(c->adapter), c->addr); c 1912 drivers/media/i2c/s5k5baf.c v4l2_i2c_subdev_init(sd, c, &s5k5baf_subdev_ops); c 1914 drivers/media/i2c/s5k5baf.c i2c_adapter_id(c->adapter), c->addr); c 1929 drivers/media/i2c/s5k5baf.c dev_err(&c->dev, "cannot init media entity %s\n", sd->name); c 1935 drivers/media/i2c/s5k5baf.c struct i2c_client *c = v4l2_get_subdevdata(&state->sd); c 1942 drivers/media/i2c/s5k5baf.c ret = devm_regulator_bulk_get(&c->dev, S5K5BAF_NUM_SUPPLIES, c 1945 drivers/media/i2c/s5k5baf.c v4l2_err(c, "failed to get regulators\n"); c 1949 drivers/media/i2c/s5k5baf.c static int s5k5baf_probe(struct i2c_client *c) c 1954 drivers/media/i2c/s5k5baf.c state = devm_kzalloc(&c->dev, sizeof(*state), GFP_KERNEL); c 1963 drivers/media/i2c/s5k5baf.c ret = s5k5baf_parse_device_node(state, &c->dev); c 1967 drivers/media/i2c/s5k5baf.c ret = s5k5baf_configure_subdevs(state, c); c 2015 drivers/media/i2c/s5k5baf.c static int s5k5baf_remove(struct i2c_client *c) c 2017 drivers/media/i2c/s5k5baf.c struct v4l2_subdev *sd = i2c_get_clientdata(c); c 365 drivers/media/i2c/s5k6aa.c static int s5k6aa_write(struct i2c_client *c, u16 addr, u16 val) c 367 drivers/media/i2c/s5k6aa.c int ret = s5k6aa_i2c_write(c, REG_CMDWR_ADDRL, addr); c 370 drivers/media/i2c/s5k6aa.c return s5k6aa_i2c_write(c, REG_CMDBUF0_ADDR, val); c 427 drivers/media/i2c/s5k6aa.c struct i2c_client *c = v4l2_get_subdevdata(&s5k6aa->sd); c 441 drivers/media/i2c/s5k6aa.c ret = s5k6aa_write(c, REG_I_INCLK_FREQ_H, fmclk >> 16); c 443 drivers/media/i2c/s5k6aa.c ret = s5k6aa_write(c, REG_I_INCLK_FREQ_L, fmclk & 0xFFFF); c 445 drivers/media/i2c/s5k6aa.c ret = s5k6aa_write(c, REG_I_USE_NPVI_CLOCKS, 1); c 448 drivers/media/i2c/s5k6aa.c ret = s5k6aa_write(c, REG_I_OPCLK_4KHZ(0), s5k6aa->clk_fop); c 450 drivers/media/i2c/s5k6aa.c ret = s5k6aa_write(c, REG_I_MIN_OUTRATE_4KHZ(0), c 453 drivers/media/i2c/s5k6aa.c ret = s5k6aa_write(c, REG_I_MAX_OUTRATE_4KHZ(0), c 456 drivers/media/i2c/s5k6aa.c ret = s5k6aa_write(c, REG_I_INIT_PARAMS_UPDATED, 1); c 458 drivers/media/i2c/s5k6aa.c ret = s5k6aa_read(c, REG_I_ERROR_INFO, &status); c 478 drivers/media/i2c/s5k6aa.c struct i2c_client *c = v4l2_get_subdevdata(&s5k6aa->sd); c 482 drivers/media/i2c/s5k6aa.c int ret = s5k6aa_read(c, REG_DBG_AUTOALG_EN, &reg); c 485 drivers/media/i2c/s5k6aa.c ret = s5k6aa_write(c, REG_SF_RGAIN, ctrls->gain_red->val); c 487 drivers/media/i2c/s5k6aa.c ret = s5k6aa_write(c, REG_SF_RGAIN_CHG, 1); c 491 drivers/media/i2c/s5k6aa.c ret = s5k6aa_write(c, REG_SF_GGAIN, ctrls->gain_green->val); c 493 drivers/media/i2c/s5k6aa.c ret = s5k6aa_write(c, REG_SF_GGAIN_CHG, 1); c 497 drivers/media/i2c/s5k6aa.c ret = s5k6aa_write(c, REG_SF_BGAIN, ctrls->gain_blue->val); c 499 drivers/media/i2c/s5k6aa.c ret = s5k6aa_write(c, REG_SF_BGAIN_CHG, 1); c 503
drivers/media/i2c/s5k6aa.c ret = s5k6aa_write(c, REG_DBG_AUTOALG_EN, reg); c 533 drivers/media/i2c/s5k6aa.c struct i2c_client *c = v4l2_get_subdevdata(&s5k6aa->sd); c 537 drivers/media/i2c/s5k6aa.c int ret = s5k6aa_read(c, REG_DBG_AUTOALG_EN, &auto_alg); c 541 drivers/media/i2c/s5k6aa.c v4l2_dbg(1, debug, c, "man_exp: %d, auto_exp: %d, a_alg: 0x%x\n", c 547 drivers/media/i2c/s5k6aa.c ret = s5k6aa_set_user_exposure(c, exp_time); c 550 drivers/media/i2c/s5k6aa.c ret = s5k6aa_set_user_gain(c, s5k6aa->ctrls.gain->val); c 556 drivers/media/i2c/s5k6aa.c return s5k6aa_write(c, REG_DBG_AUTOALG_EN, auto_alg); c 648 drivers/media/i2c/s5k6aa.c struct i2c_client *c = v4l2_get_subdevdata(&s5k6aa->sd); c 652 drivers/media/i2c/s5k6aa.c ret = s5k6aa_write(c, REG_G_PREVZOOM_IN_WIDTH, r->width); c 654 drivers/media/i2c/s5k6aa.c ret = s5k6aa_write(c, REG_G_PREVZOOM_IN_HEIGHT, r->height); c 656 drivers/media/i2c/s5k6aa.c ret = s5k6aa_write(c, REG_G_PREVZOOM_IN_XOFFS, r->left); c 658 drivers/media/i2c/s5k6aa.c ret = s5k6aa_write(c, REG_G_PREVZOOM_IN_YOFFS, r->top); c 660 drivers/media/i2c/s5k6aa.c ret = s5k6aa_write(c, REG_G_INPUTS_CHANGE_REQ, 1); c 667 drivers/media/i2c/saa7115.c static int saa711x_odd_parity(u8 c) c 669 drivers/media/i2c/saa7115.c c ^= (c >> 4); c 670 drivers/media/i2c/saa7115.c c ^= (c >> 2); c 671 drivers/media/i2c/saa7115.c c ^= (c >> 1); c 673 drivers/media/i2c/saa7115.c return c & 1; c 713 drivers/media/i2c/saa7115.c u8 c, err = 0; c 717 drivers/media/i2c/saa7115.c c = (biphase_tbl[p[i + 1]] & 0xf) | ((biphase_tbl[p[i]] & 0xf) << 4); c 718 drivers/media/i2c/saa7115.c dst[i / 2] = c; c 649 drivers/media/i2c/tda1997x.c struct v4l2_hdmi_colorimetry *c = &state->colorimetry; c 663 drivers/media/i2c/tda1997x.c v4l2_quantization_names[c->quantization], c 670 drivers/media/i2c/tda1997x.c if (c->colorspace == V4L2_COLORSPACE_SRGB) { c 671 drivers/media/i2c/tda1997x.c if (c->quantization == V4L2_QUANTIZATION_LIM_RANGE) c 674 drivers/media/i2c/tda1997x.c if (c->colorspace == V4L2_COLORSPACE_REC709) c 676 drivers/media/i2c/tda1997x.c else if (c->colorspace == V4L2_COLORSPACE_SMPTE170M) c 685 drivers/media/i2c/tda1997x.c if ((c->colorspace == V4L2_COLORSPACE_SRGB) && c 686 drivers/media/i2c/tda1997x.c (c->quantization == V4L2_QUANTIZATION_FULL_RANGE)) { c 691 drivers/media/i2c/tda1997x.c } else if ((c->colorspace == V4L2_COLORSPACE_SRGB) && c 692 drivers/media/i2c/tda1997x.c (c->quantization == V4L2_QUANTIZATION_LIM_RANGE)) { c 1220 drivers/media/i2c/tda1997x.c struct v4l2_hdmi_colorimetry *c = &state->colorimetry; c 1226 drivers/media/i2c/tda1997x.c if (c->ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT) { c 1229 drivers/media/i2c/tda1997x.c c->quantization = V4L2_QUANTIZATION_FULL_RANGE; c 1232 drivers/media/i2c/tda1997x.c c->quantization = V4L2_QUANTIZATION_LIM_RANGE; c 1238 drivers/media/i2c/tda1997x.c state->avi_infoframe.colorspace, c->colorspace, c 1240 drivers/media/i2c/tda1997x.c v4l2_quantization_names[c->quantization], c 157 drivers/media/i2c/tvaudio.c struct i2c_client *c = v4l2_get_subdevdata(sd); c 165 drivers/media/i2c/tvaudio.c rc = i2c_master_send(c, buffer, 1); c 185 drivers/media/i2c/tvaudio.c rc = i2c_master_send(c, buffer, 2); c 222 drivers/media/i2c/tvaudio.c struct i2c_client *c = v4l2_get_subdevdata(sd); c 226 drivers/media/i2c/tvaudio.c rc = i2c_master_recv(c, &buffer, 1); c 240 drivers/media/i2c/tvaudio.c struct i2c_client *c = v4l2_get_subdevdata(sd); c 246 drivers/media/i2c/tvaudio.c .addr = c->addr, c 251 drivers/media/i2c/tvaudio.c .addr = c->addr, c 260 
drivers/media/i2c/tvaudio.c rc = i2c_transfer(c->adapter, msgs, 2); c 275 drivers/media/i2c/tvaudio.c struct i2c_client *c = v4l2_get_subdevdata(sd); c 302 drivers/media/i2c/tvaudio.c rc = i2c_master_send(c, cmd->bytes, cmd->count); c 1526 drivers/media/i2c/tvp5150.c struct i2c_client *c = v4l2_get_subdevdata(sd); c 1536 drivers/media/i2c/tvp5150.c dev_err(&c->dev, "reading ID registers failed: %d\n", res); c 1544 drivers/media/i2c/tvp5150.c core->dev_id, regs[2], regs[3], c->addr << 1, c 1545 drivers/media/i2c/tvp5150.c c->adapter->name); c 1564 drivers/media/i2c/tvp5150.c static int tvp5150_init(struct i2c_client *c) c 1569 drivers/media/i2c/tvp5150.c pdn_gpio = devm_gpiod_get_optional(&c->dev, "pdn", GPIOD_OUT_HIGH); c 1579 drivers/media/i2c/tvp5150.c reset_gpio = devm_gpiod_get_optional(&c->dev, "reset", GPIOD_OUT_HIGH); c 1698 drivers/media/i2c/tvp5150.c static int tvp5150_probe(struct i2c_client *c) c 1702 drivers/media/i2c/tvp5150.c struct device_node *np = c->dev.of_node; c 1707 drivers/media/i2c/tvp5150.c if (!i2c_check_functionality(c->adapter, c 1711 drivers/media/i2c/tvp5150.c res = tvp5150_init(c); c 1715 drivers/media/i2c/tvp5150.c core = devm_kzalloc(&c->dev, sizeof(*core), GFP_KERNEL); c 1719 drivers/media/i2c/tvp5150.c map = devm_regmap_init_i2c(c, &tvp5150_config); c 1737 drivers/media/i2c/tvp5150.c v4l2_i2c_subdev_init(sd, c, &tvp5150_ops); c 1789 drivers/media/i2c/tvp5150.c core->irq = c->irq; c 1791 drivers/media/i2c/tvp5150.c if (c->irq) { c 1792 drivers/media/i2c/tvp5150.c res = devm_request_threaded_irq(&c->dev, c->irq, NULL, c 1812 drivers/media/i2c/tvp5150.c static int tvp5150_remove(struct i2c_client *c) c 1814 drivers/media/i2c/tvp5150.c struct v4l2_subdev *sd = i2c_get_clientdata(c); c 1819 drivers/media/i2c/tvp5150.c c->addr << 1); c 444 drivers/media/i2c/tvp7002.c struct i2c_client *c = v4l2_get_subdevdata(sd); c 449 drivers/media/i2c/tvp7002.c error = i2c_smbus_read_byte_data(c, addr); c 490 drivers/media/i2c/tvp7002.c struct i2c_client *c; c 494 drivers/media/i2c/tvp7002.c c = v4l2_get_subdevdata(sd); c 497 drivers/media/i2c/tvp7002.c error = i2c_smbus_write_byte_data(c, addr, value); c 933 drivers/media/i2c/tvp7002.c static int tvp7002_probe(struct i2c_client *c) c 935 drivers/media/i2c/tvp7002.c struct tvp7002_config *pdata = tvp7002_get_pdata(c); c 945 drivers/media/i2c/tvp7002.c dev_err(&c->dev, "No platform data\n"); c 950 drivers/media/i2c/tvp7002.c if (!i2c_check_functionality(c->adapter, c 954 drivers/media/i2c/tvp7002.c device = devm_kzalloc(&c->dev, sizeof(struct tvp7002), GFP_KERNEL); c 964 drivers/media/i2c/tvp7002.c v4l2_i2c_subdev_init(sd, c, &tvp7002_ops); c 965 drivers/media/i2c/tvp7002.c v4l_info(c, "tvp7002 found @ 0x%02x (%s)\n", c 966 drivers/media/i2c/tvp7002.c c->addr, c->adapter->name); c 1042 drivers/media/i2c/tvp7002.c static int tvp7002_remove(struct i2c_client *c) c 1044 drivers/media/i2c/tvp7002.c struct v4l2_subdev *sd = i2c_get_clientdata(c); c 1048 drivers/media/i2c/tvp7002.c "on address 0x%x\n", c->addr); c 89 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.left = 0; c 90 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.top = 0; c 91 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.width = -win->left; c 92 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.height = win->height; c 97 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.left = swidth - win->left; c 98 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.top = 0; c 99 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.width = win->width - clips[n].c.left; c 100 
drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.height = win->height; c 105 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.left = 0; c 106 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.top = 0; c 107 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.width = win->width; c 108 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.height = -win->top; c 113 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.left = 0; c 114 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.top = sheight - win->top; c 115 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.width = win->width; c 116 drivers/media/pci/bt8xx/btcx-risc.c clips[n].c.height = win->height - clips[n].c.top; c 141 drivers/media/pci/bt8xx/btcx-risc.c nx = (clips[i].c.left-dx) & ~mask; c 142 drivers/media/pci/bt8xx/btcx-risc.c nw = (clips[i].c.width) & ~mask; c 143 drivers/media/pci/bt8xx/btcx-risc.c if (nx + nw < clips[i].c.left-dx + clips[i].c.width) c 145 drivers/media/pci/bt8xx/btcx-risc.c clips[i].c.left = nx; c 146 drivers/media/pci/bt8xx/btcx-risc.c clips[i].c.width = nw; c 148 drivers/media/pci/bt8xx/btcx-risc.c clips[i].c.width, clips[i].c.height, c 149 drivers/media/pci/bt8xx/btcx-risc.c clips[i].c.left, clips[i].c.top); c 163 drivers/media/pci/bt8xx/btcx-risc.c if (clips[j].c.left > clips[j+1].c.left) { c 186 drivers/media/pci/bt8xx/btcx-risc.c if (clips[clip].c.left + clips[clip].c.width <= 0) c 188 drivers/media/pci/bt8xx/btcx-risc.c if (clips[clip].c.left > (signed)width) c 192 drivers/media/pci/bt8xx/btcx-risc.c if (line > clips[clip].c.top+clips[clip].c.height-1) c 194 drivers/media/pci/bt8xx/btcx-risc.c if (line < clips[clip].c.top) { c 195 drivers/media/pci/bt8xx/btcx-risc.c if (maxline > clips[clip].c.top-1) c 196 drivers/media/pci/bt8xx/btcx-risc.c maxline = clips[clip].c.top-1; c 199 drivers/media/pci/bt8xx/btcx-risc.c if (maxline > clips[clip].c.top+clips[clip].c.height-1) c 200 drivers/media/pci/bt8xx/btcx-risc.c maxline = clips[clip].c.top+clips[clip].c.height-1; c 203 drivers/media/pci/bt8xx/btcx-risc.c if (0 == skip || clips[clip].c.left > skips[skip-1].end) { c 205 drivers/media/pci/bt8xx/btcx-risc.c skips[skip].start = clips[clip].c.left; c 208 drivers/media/pci/bt8xx/btcx-risc.c skips[skip].end = clips[clip].c.left + clips[clip].c.width; c 214 drivers/media/pci/bt8xx/btcx-risc.c end = clips[clip].c.left + clips[clip].c.width; c 2899 drivers/media/pci/bt8xx/bttv-cards.c btv->cardid = btv->c.pci->subsystem_device << 16; c 2900 drivers/media/pci/bt8xx/bttv-cards.c btv->cardid |= btv->c.pci->subsystem_vendor; c 2911 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr, cards[type].name, cards[type].cardnr, c 2914 drivers/media/pci/bt8xx/bttv-cards.c btv->c.type = cards[type].cardnr; c 2918 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr, btv->cardid & 0xffff, c 2925 drivers/media/pci/bt8xx/bttv-cards.c if (card[btv->c.nr] < bttv_num_tvcards) c 2926 drivers/media/pci/bt8xx/bttv-cards.c btv->c.type=card[btv->c.nr]; c 2930 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr, bttv_tvcards[btv->c.type].name, btv->c.type, c 2931 drivers/media/pci/bt8xx/bttv-cards.c card[btv->c.nr] < bttv_num_tvcards c 2941 drivers/media/pci/bt8xx/bttv-cards.c bttv_tvcards[btv->c.type].gpiomux[i] = audiomux[i]; c 2947 drivers/media/pci/bt8xx/bttv-cards.c bttv_tvcards[btv->c.type].gpiomux[i] = audioall; c 2950 drivers/media/pci/bt8xx/bttv-cards.c bttv_tvcards[btv->c.type].gpiomask = (UNSET != gpiomask) ? gpiomask : gpiobits; c 2952 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr, bttv_tvcards[btv->c.type].gpiomask); c 2955 drivers/media/pci/bt8xx/bttv-cards.c i ? 
"," : "", bttv_tvcards[btv->c.type].gpiomux[i]); c 2977 drivers/media/pci/bt8xx/bttv-cards.c btv->c.type = type; c 2979 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr, bttv_tvcards[btv->c.type].name, btv->c.type); c 3023 drivers/media/pci/bt8xx/bttv-cards.c pr_info("%d: FlyVideo_gpio: unknown tuner type\n", btv->c.nr); c 3041 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr, has_radio ? "yes" : "no", c 3044 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr, is_lr90 ? "yes" : "no", c 3090 drivers/media/pci/bt8xx/bttv-cards.c if (btv->c.type == BTTV_BOARD_MIRO) c 3091 drivers/media/pci/bt8xx/bttv-cards.c btv->c.type = BTTV_BOARD_MIROPRO; c 3092 drivers/media/pci/bt8xx/bttv-cards.c if (btv->c.type == BTTV_BOARD_PINNACLE) c 3093 drivers/media/pci/bt8xx/bttv-cards.c btv->c.type = BTTV_BOARD_PINNACLEPRO; c 3096 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr, id+1, btv->tuner_type, c 3140 drivers/media/pci/bt8xx/bttv-cards.c btv->c.type = BTTV_BOARD_PINNACLEPRO; c 3142 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr, id, info, btv->has_radio ? "yes" : "no"); c 3284 drivers/media/pci/bt8xx/bttv-cards.c pr_debug("%d: BT878A ARESET\n", btv->c.nr); c 3293 drivers/media/pci/bt8xx/bttv-cards.c switch (btv->c.type) { c 3321 drivers/media/pci/bt8xx/bttv-cards.c if (!bttv_tvcards[btv->c.type].has_dvb) c 3330 drivers/media/pci/bt8xx/bttv-cards.c if (BTTV_BOARD_UNKNOWN == btv->c.type) { c 3335 drivers/media/pci/bt8xx/bttv-cards.c switch (btv->c.type) { c 3391 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr); c 3441 drivers/media/pci/bt8xx/bttv-cards.c if (PLL_28 == bttv_tvcards[btv->c.type].pll) { c 3445 drivers/media/pci/bt8xx/bttv-cards.c if (PLL_35 == bttv_tvcards[btv->c.type].pll) { c 3449 drivers/media/pci/bt8xx/bttv-cards.c if (PLL_14 == bttv_tvcards[btv->c.type].pll) { c 3454 drivers/media/pci/bt8xx/bttv-cards.c switch (pll[btv->c.nr]) { c 3483 drivers/media/pci/bt8xx/bttv-cards.c if (UNSET != bttv_tvcards[btv->c.type].tuner_type) c 3485 drivers/media/pci/bt8xx/bttv-cards.c btv->tuner_type = bttv_tvcards[btv->c.type].tuner_type; c 3486 drivers/media/pci/bt8xx/bttv-cards.c if (UNSET != tuner[btv->c.nr]) c 3487 drivers/media/pci/bt8xx/bttv-cards.c btv->tuner_type = tuner[btv->c.nr]; c 3490 drivers/media/pci/bt8xx/bttv-cards.c pr_info("%d: tuner absent\n", btv->c.nr); c 3492 drivers/media/pci/bt8xx/bttv-cards.c pr_warn("%d: tuner type unset\n", btv->c.nr); c 3494 drivers/media/pci/bt8xx/bttv-cards.c pr_info("%d: tuner type=%d\n", btv->c.nr, btv->tuner_type); c 3497 drivers/media/pci/bt8xx/bttv-cards.c pr_warn("%d: the autoload option is obsolete\n", btv->c.nr); c 3499 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr); c 3505 drivers/media/pci/bt8xx/bttv-cards.c btv->dig = bttv_tvcards[btv->c.type].has_dig_in ? c 3506 drivers/media/pci/bt8xx/bttv-cards.c bttv_tvcards[btv->c.type].video_inputs - 1 : UNSET; c 3507 drivers/media/pci/bt8xx/bttv-cards.c btv->svhs = bttv_tvcards[btv->c.type].svhs == NO_SVHS ? 
c 3508 drivers/media/pci/bt8xx/bttv-cards.c UNSET : bttv_tvcards[btv->c.type].svhs; c 3509 drivers/media/pci/bt8xx/bttv-cards.c if (svhs[btv->c.nr] != UNSET) c 3510 drivers/media/pci/bt8xx/bttv-cards.c btv->svhs = svhs[btv->c.nr]; c 3511 drivers/media/pci/bt8xx/bttv-cards.c if (remote[btv->c.nr] != UNSET) c 3512 drivers/media/pci/bt8xx/bttv-cards.c btv->has_remote = remote[btv->c.nr]; c 3514 drivers/media/pci/bt8xx/bttv-cards.c if (bttv_tvcards[btv->c.type].has_radio) c 3516 drivers/media/pci/bt8xx/bttv-cards.c if (bttv_tvcards[btv->c.type].has_remote) c 3518 drivers/media/pci/bt8xx/bttv-cards.c if (!bttv_tvcards[btv->c.type].no_gpioirq) c 3520 drivers/media/pci/bt8xx/bttv-cards.c if (bttv_tvcards[btv->c.type].volume_gpio) c 3521 drivers/media/pci/bt8xx/bttv-cards.c btv->volume_gpio = bttv_tvcards[btv->c.type].volume_gpio; c 3522 drivers/media/pci/bt8xx/bttv-cards.c if (bttv_tvcards[btv->c.type].audio_mode_gpio) c 3523 drivers/media/pci/bt8xx/bttv-cards.c btv->audio_mode_gpio = bttv_tvcards[btv->c.type].audio_mode_gpio; c 3528 drivers/media/pci/bt8xx/bttv-cards.c if (btv->has_saa6588 || saa6588[btv->c.nr]) { c 3537 drivers/media/pci/bt8xx/bttv-cards.c sd = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, c 3538 drivers/media/pci/bt8xx/bttv-cards.c &btv->c.i2c_adap, "saa6588", 0, addrs); c 3547 drivers/media/pci/bt8xx/bttv-cards.c switch (audiodev[btv->c.nr]) { c 3562 drivers/media/pci/bt8xx/bttv-cards.c btv->sd_msp34xx = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, c 3563 drivers/media/pci/bt8xx/bttv-cards.c &btv->c.i2c_adap, "msp3400", 0, addrs); c 3576 drivers/media/pci/bt8xx/bttv-cards.c if (v4l2_i2c_new_subdev(&btv->c.v4l2_dev, c 3577 drivers/media/pci/bt8xx/bttv-cards.c &btv->c.i2c_adap, "tda7432", 0, addrs)) c 3584 drivers/media/pci/bt8xx/bttv-cards.c btv->sd_tvaudio = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, c 3585 drivers/media/pci/bt8xx/bttv-cards.c &btv->c.i2c_adap, "tvaudio", 0, tvaudio_addrs()); c 3592 drivers/media/pci/bt8xx/bttv-cards.c pr_warn("%d: unknown audiodev value!\n", btv->c.nr); c 3602 drivers/media/pci/bt8xx/bttv-cards.c if (!bttv_tvcards[btv->c.type].no_msp34xx) { c 3603 drivers/media/pci/bt8xx/bttv-cards.c btv->sd_msp34xx = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, c 3604 drivers/media/pci/bt8xx/bttv-cards.c &btv->c.i2c_adap, "msp3400", c 3606 drivers/media/pci/bt8xx/bttv-cards.c } else if (bttv_tvcards[btv->c.type].msp34xx_alt) { c 3607 drivers/media/pci/bt8xx/bttv-cards.c btv->sd_msp34xx = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, c 3608 drivers/media/pci/bt8xx/bttv-cards.c &btv->c.i2c_adap, "msp3400", c 3617 drivers/media/pci/bt8xx/bttv-cards.c btv->sd_tvaudio = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, c 3618 drivers/media/pci/bt8xx/bttv-cards.c &btv->c.i2c_adap, "tvaudio", 0, tvaudio_addrs()); c 3622 drivers/media/pci/bt8xx/bttv-cards.c v4l2_i2c_new_subdev(&btv->c.v4l2_dev, c 3623 drivers/media/pci/bt8xx/bttv-cards.c &btv->c.i2c_adap, "tvaudio", 0, tvaudio_addrs()); c 3627 drivers/media/pci/bt8xx/bttv-cards.c if (!bttv_tvcards[btv->c.type].no_tda7432) { c 3633 drivers/media/pci/bt8xx/bttv-cards.c btv->sd_tda7432 = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, c 3634 drivers/media/pci/bt8xx/bttv-cards.c &btv->c.i2c_adap, "tda7432", 0, addrs); c 3642 drivers/media/pci/bt8xx/bttv-cards.c pr_warn("%d: audio absent, no audio device found!\n", btv->c.nr); c 3651 drivers/media/pci/bt8xx/bttv-cards.c if (ADDR_UNSET != bttv_tvcards[btv->c.type].tuner_addr) c 3652 drivers/media/pci/bt8xx/bttv-cards.c addr = bttv_tvcards[btv->c.type].tuner_addr; c 3659 drivers/media/pci/bt8xx/bttv-cards.c 
v4l2_i2c_new_subdev(&btv->c.v4l2_dev, c 3660 drivers/media/pci/bt8xx/bttv-cards.c &btv->c.i2c_adap, "tuner", c 3662 drivers/media/pci/bt8xx/bttv-cards.c v4l2_i2c_new_subdev(&btv->c.v4l2_dev, c 3663 drivers/media/pci/bt8xx/bttv-cards.c &btv->c.i2c_adap, "tuner", c 3665 drivers/media/pci/bt8xx/bttv-cards.c v4l2_i2c_new_subdev(&btv->c.v4l2_dev, c 3666 drivers/media/pci/bt8xx/bttv-cards.c &btv->c.i2c_adap, "tuner", c 3696 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr, &eeprom_data[0x1e]); c 3700 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr, &eeprom_data[0x1e]); c 3704 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr, &eeprom_data[0x1e]); c 3707 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr, &eeprom_data[0x1e]); c 3720 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr, tv.model); c 3728 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr, c 3729 drivers/media/pci/bt8xx/bttv-cards.c bttv_tvcards[btv->c.type].name, c 3731 drivers/media/pci/bt8xx/bttv-cards.c btv->c.type = BTTV_BOARD_HAUPPAUGE_IMPACTVCB; c 3814 drivers/media/pci/bt8xx/bttv-cards.c pr_info("%d: detected TEA575x radio\n", btv->c.nr); c 3841 drivers/media/pci/bt8xx/bttv-cards.c pr_info("%d: Terratec Active Radio Upgrade found\n", btv->c.nr); c 3906 drivers/media/pci/bt8xx/bttv-cards.c rc = request_firmware(&fw_entry, "hcwamc.rbf", &btv->c.pci->dev); c 3908 drivers/media/pci/bt8xx/bttv-cards.c pr_warn("%d: no altera firmware [via hotplug]\n", btv->c.nr); c 3913 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr, (rc < 0) ? "failed" : "ok"); c 3928 drivers/media/pci/bt8xx/bttv-cards.c if (btv->c.type == BTTV_BOARD_UNKNOWN) { c 4010 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr, type); c 4017 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr, cardid, c 4020 drivers/media/pci/bt8xx/bttv-cards.c if (cardid<0 || btv->c.type == cardid) c 4024 drivers/media/pci/bt8xx/bttv-cards.c if (card[btv->c.nr] < bttv_num_tvcards) { c 4026 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr); c 4029 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr, btv->c.type, cardid); c 4030 drivers/media/pci/bt8xx/bttv-cards.c btv->c.type = cardid; c 4073 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr, eeprom_data[0x41], eeprom_data[0x42]); c 4102 drivers/media/pci/bt8xx/bttv-cards.c gpio_bits(bttv_tvcards[btv->c.type].gpiomask, gpiobits); c 4128 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr, pin); c 4240 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr); c 4254 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr, dataRead); c 4267 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr, dataRead); c 4272 drivers/media/pci/bt8xx/bttv-cards.c pr_info("%d: Adlink RTV-24 initialisation complete\n", btv->c.nr); c 4303 drivers/media/pci/bt8xx/bttv-cards.c if ((PCI_SLOT(btv->c.pci->devfn) & ~3) != 0xC) { c 4308 drivers/media/pci/bt8xx/bttv-cards.c if (PCI_SLOT(btv->c.pci->devfn) != 0xD) c 4503 drivers/media/pci/bt8xx/bttv-cards.c mctlr = master[btv->c.nr]; c 4507 drivers/media/pci/bt8xx/bttv-cards.c yaddr = (btv->c.nr - mctlr->c.nr + 1) & 3; /* the '&' is for safety */ c 4543 drivers/media/pci/bt8xx/bttv-cards.c if ((btv->c.nr<1) || (btv->c.nr>BTTV_MAX-3)) c 4545 drivers/media/pci/bt8xx/bttv-cards.c master[btv->c.nr-1] = btv; c 4546 drivers/media/pci/bt8xx/bttv-cards.c master[btv->c.nr] = btv; c 4547 drivers/media/pci/bt8xx/bttv-cards.c master[btv->c.nr+1] = btv; c 4548 drivers/media/pci/bt8xx/bttv-cards.c master[btv->c.nr+2] = btv; c 4588 drivers/media/pci/bt8xx/bttv-cards.c dprintk("%d : picolo_tetra_muxsel => input = %d\n", btv->c.nr, input); c 4634 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr, input, 
matrix, key); c 4691 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr, rc); c 4699 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr, rc); c 4739 drivers/media/pci/bt8xx/bttv-cards.c pr_debug("%d: setting input channel to:%d\n", btv->c.nr, (int)mux); c 4818 drivers/media/pci/bt8xx/bttv-cards.c mctlr = master[btv->c.nr]; c 4823 drivers/media/pci/bt8xx/bttv-cards.c yaddr = (btv->c.nr - mctlr->c.nr) & 3; c 4853 drivers/media/pci/bt8xx/bttv-cards.c if (btv->c.nr > BTTV_MAX-4) c 4859 drivers/media/pci/bt8xx/bttv-cards.c master[btv->c.nr] = btv; c 4860 drivers/media/pci/bt8xx/bttv-cards.c master[btv->c.nr+1] = btv; c 4861 drivers/media/pci/bt8xx/bttv-cards.c master[btv->c.nr+2] = btv; c 4862 drivers/media/pci/bt8xx/bttv-cards.c master[btv->c.nr+3] = btv; c 4921 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr); c 4923 drivers/media/pci/bt8xx/bttv-cards.c pr_info("%d: enabling VSFX\n", btv->c.nr); c 4926 drivers/media/pci/bt8xx/bttv-cards.c btv->c.nr, latency); c 4935 drivers/media/pci/bt8xx/bttv-cards.c pci_read_config_byte(btv->c.pci, BT878_DEVCTRL, &command); c 4940 drivers/media/pci/bt8xx/bttv-cards.c pci_write_config_byte(btv->c.pci, BT878_DEVCTRL, command); c 4943 drivers/media/pci/bt8xx/bttv-cards.c pci_write_config_byte(btv->c.pci, PCI_LATENCY_TIMER, latency); c 178 drivers/media/pci/bt8xx/bttv-driver.c return sprintf(buf, "%d\n", btv ? btv->c.type : UNSET); c 803 drivers/media/pci/bt8xx/bttv-driver.c dprintk("%d: PLL: no change required\n", btv->c.nr); c 813 drivers/media/pci/bt8xx/bttv-driver.c btv->c.nr, btv->pll.pll_ifreq); c 822 drivers/media/pci/bt8xx/bttv-driver.c btv->c.nr, c 855 drivers/media/pci/bt8xx/bttv-driver.c btv->c.nr,table_idx); c 940 drivers/media/pci/bt8xx/bttv-driver.c if (input >= bttv_tvcards[btv->c.type].video_inputs) c 944 drivers/media/pci/bt8xx/bttv-driver.c mask2 = bttv_tvcards[btv->c.type].gpiomask2; c 957 drivers/media/pci/bt8xx/bttv-driver.c dprintk("%d: video mux: input=%d mux=%d\n", btv->c.nr, input, mux); c 960 drivers/media/pci/bt8xx/bttv-driver.c if(bttv_tvcards[btv->c.type].muxsel_hook) c 961 drivers/media/pci/bt8xx/bttv-driver.c bttv_tvcards[btv->c.type].muxsel_hook (btv, input); c 975 drivers/media/pci/bt8xx/bttv-driver.c gpio_inout(bttv_tvcards[btv->c.type].gpiomask, c 976 drivers/media/pci/bt8xx/bttv-driver.c bttv_tvcards[btv->c.type].gpiomask); c 984 drivers/media/pci/bt8xx/bttv-driver.c gpio_val = bttv_tvcards[btv->c.type].gpiomute; c 986 drivers/media/pci/bt8xx/bttv-driver.c gpio_val = bttv_tvcards[btv->c.type].gpiomux[input]; c 988 drivers/media/pci/bt8xx/bttv-driver.c switch (btv->c.type) { c 995 drivers/media/pci/bt8xx/bttv-driver.c gpio_bits(bttv_tvcards[btv->c.type].gpiomask, gpio_val); c 1068 drivers/media/pci/bt8xx/bttv-driver.c if (btv->c.type == BTTV_BOARD_VOODOOTV_200) c 1086 drivers/media/pci/bt8xx/bttv-driver.c bttv_crop_calc_limits(struct bttv_crop *c) c 1094 drivers/media/pci/bt8xx/bttv-driver.c c->min_scaled_width = 48; c 1095 drivers/media/pci/bt8xx/bttv-driver.c c->min_scaled_height = 32; c 1097 drivers/media/pci/bt8xx/bttv-driver.c c->min_scaled_width = c 1098 drivers/media/pci/bt8xx/bttv-driver.c (max_t(unsigned int, 48, c->rect.width >> 4) + 3) & ~3; c 1099 drivers/media/pci/bt8xx/bttv-driver.c c->min_scaled_height = c 1100 drivers/media/pci/bt8xx/bttv-driver.c max_t(unsigned int, 32, c->rect.height >> 4); c 1103 drivers/media/pci/bt8xx/bttv-driver.c c->max_scaled_width = c->rect.width & ~3; c 1104 drivers/media/pci/bt8xx/bttv-driver.c c->max_scaled_height = c->rect.height; c 1108 drivers/media/pci/bt8xx/bttv-driver.c 
bttv_crop_reset(struct bttv_crop *c, unsigned int norm) c 1110 drivers/media/pci/bt8xx/bttv-driver.c c->rect = bttv_tvnorms[norm].cropcap.defrect; c 1111 drivers/media/pci/bt8xx/bttv-driver.c bttv_crop_calc_limits(c); c 1147 drivers/media/pci/bt8xx/bttv-driver.c switch (btv->c.type) { c 1189 drivers/media/pci/bt8xx/bttv-driver.c if (bttv_tvcards[btv->c.type].no_video) { c 1208 drivers/media/pci/bt8xx/bttv-driver.c if (bttv_tvcards[btv->c.type].no_video) { c 1241 drivers/media/pci/bt8xx/bttv-driver.c pr_info("%d: reset, reinitialize\n", btv->c.nr); c 1252 drivers/media/pci/bt8xx/bttv-driver.c static int bttv_s_ctrl(struct v4l2_ctrl *c) c 1254 drivers/media/pci/bt8xx/bttv-driver.c struct bttv *btv = container_of(c->handler, struct bttv, ctrl_handler); c 1257 drivers/media/pci/bt8xx/bttv-driver.c switch (c->id) { c 1259 drivers/media/pci/bt8xx/bttv-driver.c bt848_bright(btv, c->val); c 1262 drivers/media/pci/bt8xx/bttv-driver.c bt848_hue(btv, c->val); c 1265 drivers/media/pci/bt8xx/bttv-driver.c bt848_contrast(btv, c->val); c 1268 drivers/media/pci/bt8xx/bttv-driver.c bt848_sat(btv, c->val); c 1271 drivers/media/pci/bt8xx/bttv-driver.c if (c->val) { c 1280 drivers/media/pci/bt8xx/bttv-driver.c audio_mute(btv, c->val); c 1281 drivers/media/pci/bt8xx/bttv-driver.c btv->mute = c->val; c 1284 drivers/media/pci/bt8xx/bttv-driver.c btv->volume_gpio(btv, c->val); c 1288 drivers/media/pci/bt8xx/bttv-driver.c val = c->val ? BT848_SCLOOP_CAGC : 0; c 1293 drivers/media/pci/bt8xx/bttv-driver.c btv->opt_combfilter = c->val; c 1296 drivers/media/pci/bt8xx/bttv-driver.c if (c->val) { c 1305 drivers/media/pci/bt8xx/bttv-driver.c btv->opt_automute = c->val; c 1309 drivers/media/pci/bt8xx/bttv-driver.c (c->val ? BT848_ADC_CRUSH : 0), c 1313 drivers/media/pci/bt8xx/bttv-driver.c btv->opt_vcr_hack = c->val; c 1316 drivers/media/pci/bt8xx/bttv-driver.c btwrite(c->val, BT848_WC_UP); c 1319 drivers/media/pci/bt8xx/bttv-driver.c btwrite(c->val, BT848_WC_DOWN); c 1322 drivers/media/pci/bt8xx/bttv-driver.c btv->opt_uv_ratio = c->val; c 1326 drivers/media/pci/bt8xx/bttv-driver.c btaor((c->val << 7), ~BT848_OFORM_RANGE, BT848_OFORM); c 1329 drivers/media/pci/bt8xx/bttv-driver.c btaor((c->val << 5), ~BT848_OFORM_CORE32, BT848_OFORM); c 1460 drivers/media/pci/bt8xx/bttv-driver.c btv->c.nr, outbits, data & outbits, data & ~outbits, comment); c 1536 drivers/media/pci/bt8xx/bttv-driver.c struct bttv_crop c; c 1561 drivers/media/pci/bt8xx/bttv-driver.c c.rect = bttv_tvnorms[norm].cropcap.defrect; c 1564 drivers/media/pci/bt8xx/bttv-driver.c c = btv->crop[!!fh->do_crop]; c 1566 drivers/media/pci/bt8xx/bttv-driver.c if (width < c.min_scaled_width || c 1567 drivers/media/pci/bt8xx/bttv-driver.c width > c.max_scaled_width || c 1568 drivers/media/pci/bt8xx/bttv-driver.c height < c.min_scaled_height) c 1577 drivers/media/pci/bt8xx/bttv-driver.c if (height * 2 > c.max_scaled_height) c 1582 drivers/media/pci/bt8xx/bttv-driver.c if (height > c.max_scaled_height) c 1596 drivers/media/pci/bt8xx/bttv-driver.c buf->crop.top != c.rect.top || c 1597 drivers/media/pci/bt8xx/bttv-driver.c buf->crop.left != c.rect.left || c 1598 drivers/media/pci/bt8xx/bttv-driver.c buf->crop.width != c.rect.width || c 1599 drivers/media/pci/bt8xx/bttv-driver.c buf->crop.height != c.rect.height) { c 1605 drivers/media/pci/bt8xx/bttv-driver.c buf->crop = c.rect; c 1736 drivers/media/pci/bt8xx/bttv-driver.c if (i->index >= bttv_tvcards[btv->c.type].video_inputs) c 1779 drivers/media/pci/bt8xx/bttv-driver.c if (i >= bttv_tvcards[btv->c.type].video_inputs) c 1861 
drivers/media/pci/bt8xx/bttv-driver.c v4l2_ctrl_handler_log_status(vdev->ctrl_handler, btv->c.v4l2_dev.name); c 1898 drivers/media/pci/bt8xx/bttv-driver.c bttv_crop_adjust (struct bttv_crop * c, c 1908 drivers/media/pci/bt8xx/bttv-driver.c if (width < c->min_scaled_width) { c 1910 drivers/media/pci/bt8xx/bttv-driver.c c->rect.width = width * 16; c 1911 drivers/media/pci/bt8xx/bttv-driver.c } else if (width > c->max_scaled_width) { c 1913 drivers/media/pci/bt8xx/bttv-driver.c c->rect.width = width; c 1917 drivers/media/pci/bt8xx/bttv-driver.c if (c->rect.left > max_left) c 1918 drivers/media/pci/bt8xx/bttv-driver.c c->rect.left = max_left; c 1921 drivers/media/pci/bt8xx/bttv-driver.c if (height < c->min_scaled_height) { c 1923 drivers/media/pci/bt8xx/bttv-driver.c c->rect.height = height * 16; c 1924 drivers/media/pci/bt8xx/bttv-driver.c } else if (frame_height > c->max_scaled_height) { c 1927 drivers/media/pci/bt8xx/bttv-driver.c c->rect.height = (frame_height + 1) & ~1; c 1929 drivers/media/pci/bt8xx/bttv-driver.c max_top = b->top + b->height - c->rect.height; c 1930 drivers/media/pci/bt8xx/bttv-driver.c if (c->rect.top > max_top) c 1931 drivers/media/pci/bt8xx/bttv-driver.c c->rect.top = max_top; c 1934 drivers/media/pci/bt8xx/bttv-driver.c bttv_crop_calc_limits(c); c 1956 drivers/media/pci/bt8xx/bttv-driver.c struct bttv_crop *c; c 1972 drivers/media/pci/bt8xx/bttv-driver.c c = &btv->crop[!!fh->do_crop]; c 1998 drivers/media/pci/bt8xx/bttv-driver.c if (btv->vbi_end > c->rect.top) c 2001 drivers/media/pci/bt8xx/bttv-driver.c min_width = c->min_scaled_width; c 2002 drivers/media/pci/bt8xx/bttv-driver.c min_height = c->min_scaled_height; c 2003 drivers/media/pci/bt8xx/bttv-driver.c max_width = c->max_scaled_width; c 2004 drivers/media/pci/bt8xx/bttv-driver.c max_height = c->max_scaled_height; c 2024 drivers/media/pci/bt8xx/bttv-driver.c bttv_crop_adjust(c, b, *width, *height, field); c 2026 drivers/media/pci/bt8xx/bttv-driver.c if (btv->vbi_end > c->rect.top) { c 2028 drivers/media/pci/bt8xx/bttv-driver.c c->rect.top = btv->vbi_end; c 2447 drivers/media/pci/bt8xx/bttv-driver.c "PCI:%s", pci_name(btv->c.pci)); c 2548 drivers/media/pci/bt8xx/bttv-driver.c dprintk("%d: overlay: !setup_ok\n", btv->c.nr); c 2793 drivers/media/pci/bt8xx/bttv-driver.c struct bttv_crop c; c 2827 drivers/media/pci/bt8xx/bttv-driver.c c.rect.left = clamp_t(s32, sel->r.left, b_left, b_right - 48); c 2828 drivers/media/pci/bt8xx/bttv-driver.c c.rect.left = min(c.rect.left, (__s32) MAX_HDELAY); c 2830 drivers/media/pci/bt8xx/bttv-driver.c c.rect.width = clamp_t(s32, sel->r.width, c 2831 drivers/media/pci/bt8xx/bttv-driver.c 48, b_right - c.rect.left); c 2833 drivers/media/pci/bt8xx/bttv-driver.c c.rect.top = clamp_t(s32, sel->r.top, b_top, b_bottom - 32); c 2835 drivers/media/pci/bt8xx/bttv-driver.c c.rect.top = (c.rect.top + 1) & ~1; c 2837 drivers/media/pci/bt8xx/bttv-driver.c c.rect.height = clamp_t(s32, sel->r.height, c 2838 drivers/media/pci/bt8xx/bttv-driver.c 32, b_bottom - c.rect.top); c 2839 drivers/media/pci/bt8xx/bttv-driver.c c.rect.height = (c.rect.height + 1) & ~1; c 2841 drivers/media/pci/bt8xx/bttv-driver.c bttv_crop_calc_limits(&c); c 2843 drivers/media/pci/bt8xx/bttv-driver.c sel->r = c.rect; c 2845 drivers/media/pci/bt8xx/bttv-driver.c btv->crop[1] = c; c 2849 drivers/media/pci/bt8xx/bttv-driver.c if (fh->width < c.min_scaled_width) { c 2850 drivers/media/pci/bt8xx/bttv-driver.c fh->width = c.min_scaled_width; c 2851 drivers/media/pci/bt8xx/bttv-driver.c btv->init.width = c.min_scaled_width; c 2852 
drivers/media/pci/bt8xx/bttv-driver.c } else if (fh->width > c.max_scaled_width) { c 2853 drivers/media/pci/bt8xx/bttv-driver.c fh->width = c.max_scaled_width; c 2854 drivers/media/pci/bt8xx/bttv-driver.c btv->init.width = c.max_scaled_width; c 2857 drivers/media/pci/bt8xx/bttv-driver.c if (fh->height < c.min_scaled_height) { c 2858 drivers/media/pci/bt8xx/bttv-driver.c fh->height = c.min_scaled_height; c 2859 drivers/media/pci/bt8xx/bttv-driver.c btv->init.height = c.min_scaled_height; c 2860 drivers/media/pci/bt8xx/bttv-driver.c } else if (fh->height > c.max_scaled_height) { c 2861 drivers/media/pci/bt8xx/bttv-driver.c fh->height = c.max_scaled_height; c 2862 drivers/media/pci/bt8xx/bttv-driver.c btv->init.height = c.max_scaled_height; c 2877 drivers/media/pci/bt8xx/bttv-driver.c fh->btv->c.nr, (int)count, v4l2_type_names[fh->type]); c 2977 drivers/media/pci/bt8xx/bttv-driver.c btv->c.nr, v4l2_type_names[type]); c 2993 drivers/media/pci/bt8xx/bttv-driver.c &btv->c.pci->dev, &btv->s_lock, c 2999 drivers/media/pci/bt8xx/bttv-driver.c &btv->c.pci->dev, &btv->s_lock, c 3081 drivers/media/pci/bt8xx/bttv-driver.c fh->btv->c.nr, v4l2_type_names[fh->type], c 3159 drivers/media/pci/bt8xx/bttv-driver.c dprintk("%d: open called (radio)\n", btv->c.nr); c 3371 drivers/media/pci/bt8xx/bttv-driver.c btv->c.v4l2_dev.name, risc->cpu, (unsigned long)risc->dma); c 3374 drivers/media/pci/bt8xx/bttv-driver.c btv->c.v4l2_dev.name, c 3379 drivers/media/pci/bt8xx/bttv-driver.c btv->c.v4l2_dev.name, c 3445 drivers/media/pci/bt8xx/bttv-driver.c btv->c.nr, c 3453 drivers/media/pci/bt8xx/bttv-driver.c btv->c.nr); c 3457 drivers/media/pci/bt8xx/bttv-driver.c btv->c.nr); c 3459 drivers/media/pci/bt8xx/bttv-driver.c btv->c.nr); c 3541 drivers/media/pci/bt8xx/bttv-driver.c btv->c.nr, set->top, set->bottom, c 3556 drivers/media/pci/bt8xx/bttv-driver.c btv->c.nr, wakeup->top); c 3566 drivers/media/pci/bt8xx/bttv-driver.c btv->c.nr, wakeup->top); c 3575 drivers/media/pci/bt8xx/bttv-driver.c btv->c.nr, wakeup->bottom); c 3607 drivers/media/pci/bt8xx/bttv-driver.c btv->c.nr, btv->framedrop, btv->irq_me, btv->irq_total, c 3774 drivers/media/pci/bt8xx/bttv-driver.c btv->c.nr, count, btv->field_count, c 3819 drivers/media/pci/bt8xx/bttv-driver.c btv->c.nr, c 3830 drivers/media/pci/bt8xx/bttv-driver.c btv->c.nr, btread(BT848_RISC_COUNT)); c 3842 drivers/media/pci/bt8xx/bttv-driver.c btv->c.nr); c 3845 drivers/media/pci/bt8xx/bttv-driver.c btv->c.nr); c 3872 drivers/media/pci/bt8xx/bttv-driver.c vfd->v4l2_dev = &btv->c.v4l2_dev; c 3877 drivers/media/pci/bt8xx/bttv-driver.c type_name, bttv_tvcards[btv->c.type].name); c 3909 drivers/media/pci/bt8xx/bttv-driver.c video_nr[btv->c.nr]) < 0) c 3912 drivers/media/pci/bt8xx/bttv-driver.c btv->c.nr, video_device_node_name(&btv->video_dev)); c 3915 drivers/media/pci/bt8xx/bttv-driver.c pr_err("%d: device_create_file 'card' failed\n", btv->c.nr); c 3927 drivers/media/pci/bt8xx/bttv-driver.c vbi_nr[btv->c.nr]) < 0) c 3930 drivers/media/pci/bt8xx/bttv-driver.c btv->c.nr, video_device_node_name(&btv->vbi_dev)); c 3944 drivers/media/pci/bt8xx/bttv-driver.c radio_nr[btv->c.nr]) < 0) c 3947 drivers/media/pci/bt8xx/bttv-driver.c btv->c.nr, video_device_node_name(&btv->radio_dev)); c 3991 drivers/media/pci/bt8xx/bttv-driver.c btv->c.nr = bttv_num; c 3992 drivers/media/pci/bt8xx/bttv-driver.c snprintf(btv->c.v4l2_dev.name, sizeof(btv->c.v4l2_dev.name), c 3993 drivers/media/pci/bt8xx/bttv-driver.c "bttv%d", btv->c.nr); c 4000 drivers/media/pci/bt8xx/bttv-driver.c INIT_LIST_HEAD(&btv->c.subs); c 4009 
drivers/media/pci/bt8xx/bttv-driver.c btv->has_radio=radio[btv->c.nr]; c 4012 drivers/media/pci/bt8xx/bttv-driver.c btv->c.pci = dev; c 4015 drivers/media/pci/bt8xx/bttv-driver.c pr_warn("%d: Can't enable device\n", btv->c.nr); c 4019 drivers/media/pci/bt8xx/bttv-driver.c pr_warn("%d: No suitable DMA available\n", btv->c.nr); c 4024 drivers/media/pci/bt8xx/bttv-driver.c btv->c.v4l2_dev.name)) { c 4026 drivers/media/pci/bt8xx/bttv-driver.c btv->c.nr, c 4033 drivers/media/pci/bt8xx/bttv-driver.c result = v4l2_device_register(&dev->dev, &btv->c.v4l2_dev); c 4035 drivers/media/pci/bt8xx/bttv-driver.c pr_warn("%d: v4l2_device_register() failed\n", btv->c.nr); c 4040 drivers/media/pci/bt8xx/bttv-driver.c btv->c.v4l2_dev.ctrl_handler = hdl; c 4047 drivers/media/pci/bt8xx/bttv-driver.c btv->c.pci->irq, lat, c 4053 drivers/media/pci/bt8xx/bttv-driver.c pr_err("%d: ioremap() failed\n", btv->c.nr); c 4063 drivers/media/pci/bt8xx/bttv-driver.c result = request_irq(btv->c.pci->irq, bttv_irq, c 4064 drivers/media/pci/bt8xx/bttv-driver.c IRQF_SHARED, btv->c.v4l2_dev.name, (void *)btv); c 4067 drivers/media/pci/bt8xx/bttv-driver.c bttv_num, btv->c.pci->irq); c 4160 drivers/media/pci/bt8xx/bttv-driver.c if (!bttv_tvcards[btv->c.type].no_video) c 4170 drivers/media/pci/bt8xx/bttv-driver.c if (!bttv_tvcards[btv->c.type].no_video) { c 4186 drivers/media/pci/bt8xx/bttv-driver.c if (bttv_tvcards[btv->c.type].has_dvb) { c 4187 drivers/media/pci/bt8xx/bttv-driver.c bttv_sub_add_device(&btv->c, "dvb"); c 4201 drivers/media/pci/bt8xx/bttv-driver.c free_irq(btv->c.pci->irq,btv); c 4206 drivers/media/pci/bt8xx/bttv-driver.c v4l2_device_unregister(&btv->c.v4l2_dev); c 4211 drivers/media/pci/bt8xx/bttv-driver.c release_mem_region(pci_resource_start(btv->c.pci,0), c 4212 drivers/media/pci/bt8xx/bttv-driver.c pci_resource_len(btv->c.pci,0)); c 4213 drivers/media/pci/bt8xx/bttv-driver.c pci_disable_device(btv->c.pci); c 4223 drivers/media/pci/bt8xx/bttv-driver.c pr_info("%d: unloading\n", btv->c.nr); c 4225 drivers/media/pci/bt8xx/bttv-driver.c if (bttv_tvcards[btv->c.type].has_dvb) c 4239 drivers/media/pci/bt8xx/bttv-driver.c bttv_sub_del_devices(&btv->c); c 4250 drivers/media/pci/bt8xx/bttv-driver.c btcx_riscmem_free(btv->c.pci,&btv->main); c 4253 drivers/media/pci/bt8xx/bttv-driver.c free_irq(btv->c.pci->irq,btv); c 4255 drivers/media/pci/bt8xx/bttv-driver.c release_mem_region(pci_resource_start(btv->c.pci,0), c 4256 drivers/media/pci/bt8xx/bttv-driver.c pci_resource_len(btv->c.pci,0)); c 4257 drivers/media/pci/bt8xx/bttv-driver.c pci_disable_device(btv->c.pci); c 4259 drivers/media/pci/bt8xx/bttv-driver.c v4l2_device_unregister(&btv->c.v4l2_dev); c 4260 drivers/media/pci/bt8xx/bttv-driver.c bttvs[btv->c.nr] = NULL; c 4274 drivers/media/pci/bt8xx/bttv-driver.c dprintk("%d: suspend %d\n", btv->c.nr, state.event); c 4310 drivers/media/pci/bt8xx/bttv-driver.c dprintk("%d: resume\n", btv->c.nr); c 4316 drivers/media/pci/bt8xx/bttv-driver.c pr_warn("%d: Can't enable device\n", btv->c.nr); c 4324 drivers/media/pci/bt8xx/bttv-driver.c pr_warn("%d: Can't enable device\n", btv->c.nr); c 131 drivers/media/pci/bt8xx/bttv-gpio.c struct bttv *btv = container_of(core, struct bttv, c); c 145 drivers/media/pci/bt8xx/bttv-gpio.c struct bttv *btv = container_of(core, struct bttv, c); c 154 drivers/media/pci/bt8xx/bttv-gpio.c struct bttv *btv = container_of(core, struct bttv, c); c 161 drivers/media/pci/bt8xx/bttv-gpio.c struct bttv *btv = container_of(core, struct bttv, c); c 267 drivers/media/pci/bt8xx/bttv-i2c.c btv->c.nr, probe_for, 
addr); c 275 drivers/media/pci/bt8xx/bttv-i2c.c btv->c.nr, addr); c 320 drivers/media/pci/bt8xx/bttv-i2c.c static void do_i2c_scan(char *name, struct i2c_client *c) c 326 drivers/media/pci/bt8xx/bttv-i2c.c c->addr = i; c 327 drivers/media/pci/bt8xx/bttv-i2c.c rc = i2c_master_recv(c,&buf,0); c 344 drivers/media/pci/bt8xx/bttv-i2c.c strscpy(btv->c.i2c_adap.name, "bt878", c 345 drivers/media/pci/bt8xx/bttv-i2c.c sizeof(btv->c.i2c_adap.name)); c 346 drivers/media/pci/bt8xx/bttv-i2c.c btv->c.i2c_adap.algo = &bttv_algo; c 353 drivers/media/pci/bt8xx/bttv-i2c.c strscpy(btv->c.i2c_adap.name, "bttv", c 354 drivers/media/pci/bt8xx/bttv-i2c.c sizeof(btv->c.i2c_adap.name)); c 358 drivers/media/pci/bt8xx/bttv-i2c.c btv->c.i2c_adap.algo_data = &btv->i2c_algo; c 360 drivers/media/pci/bt8xx/bttv-i2c.c btv->c.i2c_adap.owner = THIS_MODULE; c 362 drivers/media/pci/bt8xx/bttv-i2c.c btv->c.i2c_adap.dev.parent = &btv->c.pci->dev; c 363 drivers/media/pci/bt8xx/bttv-i2c.c snprintf(btv->c.i2c_adap.name, sizeof(btv->c.i2c_adap.name), c 364 drivers/media/pci/bt8xx/bttv-i2c.c "bt%d #%d [%s]", btv->id, btv->c.nr, c 367 drivers/media/pci/bt8xx/bttv-i2c.c i2c_set_adapdata(&btv->c.i2c_adap, &btv->c.v4l2_dev); c 368 drivers/media/pci/bt8xx/bttv-i2c.c btv->i2c_client.adapter = &btv->c.i2c_adap; c 372 drivers/media/pci/bt8xx/bttv-i2c.c btv->i2c_rc = i2c_add_adapter(&btv->c.i2c_adap); c 376 drivers/media/pci/bt8xx/bttv-i2c.c btv->i2c_rc = i2c_bit_add_bus(&btv->c.i2c_adap); c 379 drivers/media/pci/bt8xx/bttv-i2c.c do_i2c_scan(btv->c.v4l2_dev.name, &btv->i2c_client); c 387 drivers/media/pci/bt8xx/bttv-i2c.c i2c_del_adapter(&btv->c.i2c_adap); c 41 drivers/media/pci/bt8xx/bttv-if.c return bttvs[card]->c.pci; c 46 drivers/media/pci/bt8xx/bttv-input.c gpio = bttv_gpio_read(&btv->c); c 67 drivers/media/pci/bt8xx/bttv-input.c if (btv->c.type == BTTV_BOARD_WINFAST2000) c 81 drivers/media/pci/bt8xx/bttv-input.c gpio = bttv_gpio_read(&btv->c); c 132 drivers/media/pci/bt8xx/bttv-input.c if (btv->c.type == BTTV_BOARD_ENLTV_FM_2) c 244 drivers/media/pci/bt8xx/bttv-input.c gpio = bttv_gpio_read(&btv->c); c 283 drivers/media/pci/bt8xx/bttv-input.c bttv_gpio_write(&btv->c, gpio & ~(1 << 4)); c 284 drivers/media/pci/bt8xx/bttv-input.c bttv_gpio_write(&btv->c, gpio | (1 << 4)); c 314 drivers/media/pci/bt8xx/bttv-input.c gpio = bttv_gpio_read(&btv->c); c 315 drivers/media/pci/bt8xx/bttv-input.c bttv_gpio_write(&btv->c, gpio & ~(1 << 4)); c 330 drivers/media/pci/bt8xx/bttv-input.c rc = i2c_master_recv(ir->c, &b, 1); c 378 drivers/media/pci/bt8xx/bttv-input.c switch (btv->c.type) { c 389 drivers/media/pci/bt8xx/bttv-input.c i2c_dev = i2c_new_device(&btv->c.i2c_adap, &info); c 399 drivers/media/pci/bt8xx/bttv-input.c i2c_dev = i2c_new_probed_device(&btv->c.i2c_adap, &info, addr_list, NULL); c 425 drivers/media/pci/bt8xx/bttv-input.c switch (btv->c.type) { c 504 drivers/media/pci/bt8xx/bttv-input.c ir->last_gpio = ir_extract_bits(bttv_gpio_read(&btv->c), c 510 drivers/media/pci/bt8xx/bttv-input.c dprintk("Ooops: IR config error [card=%d]\n", btv->c.type); c 518 drivers/media/pci/bt8xx/bttv-input.c bttv_gpio_inout(&btv->c, (1 << 4), 1 << 4); c 519 drivers/media/pci/bt8xx/bttv-input.c gpio = bttv_gpio_read(&btv->c); c 520 drivers/media/pci/bt8xx/bttv-input.c bttv_gpio_write(&btv->c, gpio & ~(1 << 4)); c 521 drivers/media/pci/bt8xx/bttv-input.c bttv_gpio_write(&btv->c, gpio | (1 << 4)); c 524 drivers/media/pci/bt8xx/bttv-input.c bttv_gpio_inout(&btv->c, ir->mask_keycode | ir->mask_keydown, 0); c 532 drivers/media/pci/bt8xx/bttv-input.c btv->c.type); c 
534 drivers/media/pci/bt8xx/bttv-input.c pci_name(btv->c.pci)); c 540 drivers/media/pci/bt8xx/bttv-input.c if (btv->c.pci->subsystem_vendor) { c 541 drivers/media/pci/bt8xx/bttv-input.c rc->input_id.vendor = btv->c.pci->subsystem_vendor; c 542 drivers/media/pci/bt8xx/bttv-input.c rc->input_id.product = btv->c.pci->subsystem_device; c 544 drivers/media/pci/bt8xx/bttv-input.c rc->input_id.vendor = btv->c.pci->vendor; c 545 drivers/media/pci/bt8xx/bttv-input.c rc->input_id.product = btv->c.pci->device; c 547 drivers/media/pci/bt8xx/bttv-input.c rc->dev.parent = &btv->c.pci->dev; c 54 drivers/media/pci/bt8xx/bttv-risc.c if ((rc = btcx_riscmem_alloc(btv->c.pci,risc,instructions)) < 0) c 137 drivers/media/pci/bt8xx/bttv-risc.c if ((rc = btcx_riscmem_alloc(btv->c.pci,risc,instructions*4*5)) < 0) c 256 drivers/media/pci/bt8xx/bttv-risc.c if ((rc = btcx_riscmem_alloc(btv->c.pci,risc,dwords*4)) < 0) { c 469 drivers/media/pci/bt8xx/bttv-risc.c btv->c.nr,capctl,btv->loop_irq, c 509 drivers/media/pci/bt8xx/bttv-risc.c if ((rc = btcx_riscmem_alloc(btv->c.pci,&btv->main,PAGE_SIZE)) < 0) c 512 drivers/media/pci/bt8xx/bttv-risc.c btv->c.nr, (unsigned long long)btv->main.dma); c 551 drivers/media/pci/bt8xx/bttv-risc.c d2printk("%d: risc=%p slot[%d]=NULL\n", btv->c.nr, risc, slot); c 555 drivers/media/pci/bt8xx/bttv-risc.c btv->c.nr, risc, slot, c 579 drivers/media/pci/bt8xx/bttv-risc.c btcx_riscmem_free(btv->c.pci,&buf->bottom); c 580 drivers/media/pci/bt8xx/bttv-risc.c btcx_riscmem_free(btv->c.pci,&buf->top); c 703 drivers/media/pci/bt8xx/bttv-risc.c btv->c.nr, v4l2_field_names[buf->vb.field], c 864 drivers/media/pci/bt8xx/bttv-risc.c btv->c.nr, v4l2_field_names[buf->vb.field], c 61 drivers/media/pci/bt8xx/bttv-vbi.c pr_debug("%d: " fmt, btv->c.nr, ##__VA_ARGS__); \ c 356 drivers/media/pci/bt8xx/bttv.h #define gpio_inout(mask,bits) bttv_gpio_inout(&btv->c, mask, bits) c 357 drivers/media/pci/bt8xx/bttv.h #define gpio_read() bttv_gpio_read(&btv->c) c 358 drivers/media/pci/bt8xx/bttv.h #define gpio_write(value) bttv_gpio_write(&btv->c, value) c 359 drivers/media/pci/bt8xx/bttv.h #define gpio_bits(mask,bits) bttv_gpio_bits(&btv->c, mask, bits) c 366 drivers/media/pci/bt8xx/bttv.h v4l2_device_call_all(&btv->c.v4l2_dev, 0, o, f, ##args) c 369 drivers/media/pci/bt8xx/bttv.h v4l2_device_call_until_err(&btv->c.v4l2_dev, 0, o, f, ##args) c 356 drivers/media/pci/bt8xx/bttvp.h struct bttv_core c; c 500 drivers/media/pci/bt8xx/bttvp.h return container_of(v4l2_dev, struct bttv, c.v4l2_dev); c 511 drivers/media/pci/bt8xx/bttvp.h return (bttv_tvcards[btv->c.type].muxsel >> (input * 2)) & 3; c 144 drivers/media/pci/bt8xx/dvb-bt8xx.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 152 drivers/media/pci/bt8xx/dvb-bt8xx.c div = (((c->frequency + 83333) * 3) / 500000) + IF_FREQUENCYx6; c 154 drivers/media/pci/bt8xx/dvb-bt8xx.c if (c->frequency < 542000000) c 156 drivers/media/pci/bt8xx/dvb-bt8xx.c else if (c->frequency < 771000000) c 161 drivers/media/pci/bt8xx/dvb-bt8xx.c if (c->frequency == 0) c 163 drivers/media/pci/bt8xx/dvb-bt8xx.c else if (c->frequency < 443250000) c 188 drivers/media/pci/bt8xx/dvb-bt8xx.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 189 drivers/media/pci/bt8xx/dvb-bt8xx.c u32 freq = c->frequency; c 263 drivers/media/pci/bt8xx/dvb-bt8xx.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 270 drivers/media/pci/bt8xx/dvb-bt8xx.c div = (36000000 + c->frequency + 83333) / 166666; c 273 drivers/media/pci/bt8xx/dvb-bt8xx.c if (c->frequency < 175000000) c 275 
drivers/media/pci/bt8xx/dvb-bt8xx.c else if (c->frequency < 390000000) c 277 drivers/media/pci/bt8xx/dvb-bt8xx.c else if (c->frequency < 470000000) c 279 drivers/media/pci/bt8xx/dvb-bt8xx.c else if (c->frequency < 750000000) c 284 drivers/media/pci/bt8xx/dvb-bt8xx.c if (c->frequency < 175000000) c 286 drivers/media/pci/bt8xx/dvb-bt8xx.c else if (c->frequency < 470000000) c 339 drivers/media/pci/bt8xx/dvb-bt8xx.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 346 drivers/media/pci/bt8xx/dvb-bt8xx.c div = (((c->frequency + 83333) * 3) / 500000) + IF_FREQUENCYx6; c 348 drivers/media/pci/bt8xx/dvb-bt8xx.c if (c->frequency < 150000000) c 350 drivers/media/pci/bt8xx/dvb-bt8xx.c else if (c->frequency < 173000000) c 352 drivers/media/pci/bt8xx/dvb-bt8xx.c else if (c->frequency < 250000000) c 354 drivers/media/pci/bt8xx/dvb-bt8xx.c else if (c->frequency < 400000000) c 356 drivers/media/pci/bt8xx/dvb-bt8xx.c else if (c->frequency < 420000000) c 358 drivers/media/pci/bt8xx/dvb-bt8xx.c else if (c->frequency < 470000000) c 360 drivers/media/pci/bt8xx/dvb-bt8xx.c else if (c->frequency < 600000000) c 362 drivers/media/pci/bt8xx/dvb-bt8xx.c else if (c->frequency < 730000000) c 367 drivers/media/pci/bt8xx/dvb-bt8xx.c if (c->frequency < 150000000) c 369 drivers/media/pci/bt8xx/dvb-bt8xx.c else if (c->frequency < 173000000) c 371 drivers/media/pci/bt8xx/dvb-bt8xx.c else if (c->frequency < 250000000) c 373 drivers/media/pci/bt8xx/dvb-bt8xx.c else if (c->frequency < 400000000) c 375 drivers/media/pci/bt8xx/dvb-bt8xx.c else if (c->frequency < 420000000) c 377 drivers/media/pci/bt8xx/dvb-bt8xx.c else if (c->frequency < 470000000) c 455 drivers/media/pci/bt8xx/dvb-bt8xx.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 461 drivers/media/pci/bt8xx/dvb-bt8xx.c div = (c->frequency + 36166667) / 166667; c 466 drivers/media/pci/bt8xx/dvb-bt8xx.c if ((c->frequency >= 47000000) && (c->frequency < 153000000)) c 468 drivers/media/pci/bt8xx/dvb-bt8xx.c else if ((c->frequency >= 153000000) && (c->frequency < 430000000)) c 470 drivers/media/pci/bt8xx/dvb-bt8xx.c else if ((c->frequency >= 430000000) && (c->frequency < 824000000)) c 472 drivers/media/pci/bt8xx/dvb-bt8xx.c else if ((c->frequency >= 824000000) && (c->frequency < 863000000)) c 509 drivers/media/pci/bt8xx/dvb-bt8xx.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 514 drivers/media/pci/bt8xx/dvb-bt8xx.c div = (((c->frequency + 83333) * 3) / 500000) + IF_FREQUENCYx6; c 521 drivers/media/pci/bt8xx/dvb-bt8xx.c dprintk("frequency %u, div %u\n", c->frequency, div); c 523 drivers/media/pci/bt8xx/dvb-bt8xx.c if (c->frequency < 470000000) c 525 drivers/media/pci/bt8xx/dvb-bt8xx.c else if (c->frequency > 823000000) c 530 drivers/media/pci/bt8xx/dvb-bt8xx.c if (c->bandwidth_hz == 8000000) c 124 drivers/media/pci/cobalt/cobalt-driver.h #define COBALT_CVI(cobalt, c) \ c 125 drivers/media/pci/cobalt/cobalt-driver.h (cobalt->bar1 + COBALT_VID_BASE + (c) * COBALT_VID_SIZE) c 126 drivers/media/pci/cobalt/cobalt-driver.h #define COBALT_CVI_VMR(cobalt, c) \ c 127 drivers/media/pci/cobalt/cobalt-driver.h (cobalt->bar1 + COBALT_VID_BASE + (c) * COBALT_VID_SIZE + 0x100) c 128 drivers/media/pci/cobalt/cobalt-driver.h #define COBALT_CVI_EVCNT(cobalt, c) \ c 129 drivers/media/pci/cobalt/cobalt-driver.h (cobalt->bar1 + COBALT_VID_BASE + (c) * COBALT_VID_SIZE + 0x200) c 130 drivers/media/pci/cobalt/cobalt-driver.h #define COBALT_CVI_FREEWHEEL(cobalt, c) \ c 131 drivers/media/pci/cobalt/cobalt-driver.h (cobalt->bar1 + COBALT_VID_BASE + (c) * 
COBALT_VID_SIZE + 0x300) c 132 drivers/media/pci/cobalt/cobalt-driver.h #define COBALT_CVI_CLK_LOSS(cobalt, c) \ c 133 drivers/media/pci/cobalt/cobalt-driver.h (cobalt->bar1 + COBALT_VID_BASE + (c) * COBALT_VID_SIZE + 0x400) c 134 drivers/media/pci/cobalt/cobalt-driver.h #define COBALT_CVI_PACKER(cobalt, c) \ c 135 drivers/media/pci/cobalt/cobalt-driver.h (cobalt->bar1 + COBALT_VID_BASE + (c) * COBALT_VID_SIZE + 0x500) c 48 drivers/media/pci/cobalt/cobalt-omnitek.c #define PCI(c) (BASE + 0x40 + ((c) * 0x40)) c 49 drivers/media/pci/cobalt/cobalt-omnitek.c #define SIZE(c) (BASE + 0x58 + ((c) * 0x40)) c 50 drivers/media/pci/cobalt/cobalt-omnitek.c #define DESCRIPTOR(c) (BASE + 0x50 + ((c) * 0x40)) c 51 drivers/media/pci/cobalt/cobalt-omnitek.c #define CS_REG(c) (BASE + 0x60 + ((c) * 0x40)) c 52 drivers/media/pci/cobalt/cobalt-omnitek.c #define BYTES_TRANSFERRED(c) (BASE + 0x64 + ((c) * 0x40)) c 59 drivers/media/pci/cx18/cx18-av-vbi.c static int odd_parity(u8 c) c 61 drivers/media/pci/cx18/cx18-av-vbi.c c ^= (c >> 4); c 62 drivers/media/pci/cx18/cx18-av-vbi.c c ^= (c >> 2); c 63 drivers/media/pci/cx18/cx18-av-vbi.c c ^= (c >> 1); c 65 drivers/media/pci/cx18/cx18-av-vbi.c return c & 1; c 105 drivers/media/pci/cx18/cx18-av-vbi.c u8 c, err = 0; c 110 drivers/media/pci/cx18/cx18-av-vbi.c c = (biphase_tbl[p[i + 1]] & 0xf) | c 112 drivers/media/pci/cx18/cx18-av-vbi.c dst[i / 2] = c; c 313 drivers/media/pci/cx18/cx18-driver.c struct i2c_client *c; c 318 drivers/media/pci/cx18/cx18-driver.c c = kzalloc(sizeof(*c), GFP_KERNEL); c 319 drivers/media/pci/cx18/cx18-driver.c if (!c) c 322 drivers/media/pci/cx18/cx18-driver.c strscpy(c->name, "cx18 tveeprom tmp", sizeof(c->name)); c 323 drivers/media/pci/cx18/cx18-driver.c c->adapter = &cx->i2c_adap[0]; c 324 drivers/media/pci/cx18/cx18-driver.c c->addr = 0xa0 >> 1; c 326 drivers/media/pci/cx18/cx18-driver.c if (tveeprom_read(c, eedata, sizeof(eedata))) c 349 drivers/media/pci/cx18/cx18-driver.c kfree(c); c 283 drivers/media/pci/cx23885/cx23885-i2c.c static void do_i2c_scan(char *name, struct i2c_client *c) c 289 drivers/media/pci/cx23885/cx23885-i2c.c c->addr = i; c 290 drivers/media/pci/cx23885/cx23885-i2c.c rc = i2c_master_recv(c, &buf, 0); c 856 drivers/media/pci/cx88/cx88-dvb.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 866 drivers/media/pci/cx88/cx88-dvb.c div = c->frequency / 125; c 873 drivers/media/pci/cx88/cx88-dvb.c if (c->frequency < 1500000) c 108 drivers/media/pci/cx88/cx88-i2c.c static void do_i2c_scan(const char *name, struct i2c_client *c) c 114 drivers/media/pci/cx88/cx88-i2c.c c->addr = i; c 115 drivers/media/pci/cx88/cx88-i2c.c rc = i2c_master_recv(c, &buf, 0); c 560 drivers/media/pci/cx88/cx88-input.c flags = i2c_smbus_read_byte_data(ir->c, 0x10); c 570 drivers/media/pci/cx88/cx88-input.c code = i2c_smbus_read_byte_data(ir->c, 0x00); c 60 drivers/media/pci/ddbridge/ddbridge-max.c u32 c, v = 0, tag = DDB_LINK_TAG(link); c 64 drivers/media/pci/ddbridge/ddbridge-max.c for (c = 0; c < 10; c++) { c 70 drivers/media/pci/ddbridge/ddbridge-max.c if (c == 10) c 143 drivers/media/pci/ivtv/ivtv-i2c.c i2c_master_send(ir->c, keybuf, 1); c 145 drivers/media/pci/ivtv/ivtv-i2c.c if (i2c_master_recv(ir->c, keybuf, sizeof(keybuf)) != sizeof(keybuf)) { c 74 drivers/media/pci/ivtv/ivtv-vbi.c static int odd_parity(u8 c) c 76 drivers/media/pci/ivtv/ivtv-vbi.c c ^= (c >> 4); c 77 drivers/media/pci/ivtv/ivtv-vbi.c c ^= (c >> 2); c 78 drivers/media/pci/ivtv/ivtv-vbi.c c ^= (c >> 1); c 80 drivers/media/pci/ivtv/ivtv-vbi.c return c & 1; c 165 
drivers/media/pci/saa7134/saa7134-dvb.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 176 drivers/media/pci/saa7134/saa7134-dvb.c f.frequency = c->frequency / 1000 * 16 / 1000; c 269 drivers/media/pci/saa7134/saa7134-dvb.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 280 drivers/media/pci/saa7134/saa7134-dvb.c tuner_frequency = c->frequency + 36166000; c 305 drivers/media/pci/saa7134/saa7134-dvb.c if (c->frequency < 49000000) c 307 drivers/media/pci/saa7134/saa7134-dvb.c else if (c->frequency < 161000000) c 309 drivers/media/pci/saa7134/saa7134-dvb.c else if (c->frequency < 444000000) c 311 drivers/media/pci/saa7134/saa7134-dvb.c else if (c->frequency < 861000000) c 317 drivers/media/pci/saa7134/saa7134-dvb.c switch (c->bandwidth_hz) { c 337 drivers/media/pci/saa7134/saa7134-dvb.c tuner_frequency = (((c->frequency / 1000) * 6) + 217496) / 1000; c 416 drivers/media/pci/saa7134/saa7134-i2c.c static void do_i2c_scan(struct i2c_client *c) c 422 drivers/media/pci/saa7134/saa7134-i2c.c c->addr = i; c 423 drivers/media/pci/saa7134/saa7134-i2c.c rc = i2c_master_recv(c,&buf,0); c 113 drivers/media/pci/saa7134/saa7134-input.c struct saa7134_dev *dev = ir->c->adapter->algo_data; c 133 drivers/media/pci/saa7134/saa7134-input.c while (1 != i2c_master_send(ir->c, &b, 1)) { c 146 drivers/media/pci/saa7134/saa7134-input.c rc = i2c_master_recv(ir->c, &b, 1); c 168 drivers/media/pci/saa7134/saa7134-input.c struct saa7134_dev *dev = ir->c->adapter->algo_data; c 189 drivers/media/pci/saa7134/saa7134-input.c rc = i2c_master_recv(ir->c, &b, 1); c 220 drivers/media/pci/saa7134/saa7134-input.c struct saa7134_dev *dev = ir->c->adapter->algo_data; c 241 drivers/media/pci/saa7134/saa7134-input.c rc = i2c_master_recv(ir->c, &b, 1); c 270 drivers/media/pci/saa7134/saa7134-input.c rc = i2c_master_recv(ir->c, &b, 1); c 299 drivers/media/pci/saa7134/saa7134-input.c struct saa7134_dev *dev = ir->c->adapter->algo_data; c 310 drivers/media/pci/saa7134/saa7134-input.c ir->c->addr = 0x5a >> 1; c 312 drivers/media/pci/saa7134/saa7134-input.c rc = i2c_master_recv(ir->c, data, 12); c 341 drivers/media/pci/saa7134/saa7134-input.c rc = i2c_master_recv(ir->c, b, 4); c 635 drivers/media/pci/saa7134/saa7134-video.c col[cols].position = clip_range(clips[i].c.left); c 638 drivers/media/pci/saa7134/saa7134-video.c col[cols].position = clip_range(clips[i].c.left+clips[i].c.width); c 641 drivers/media/pci/saa7134/saa7134-video.c row[rows].position = clip_range(clips[i].c.top / div); c 644 drivers/media/pci/saa7134/saa7134-video.c row[rows].position = clip_range((clips[i].c.top + clips[i].c.height) c 1287 drivers/media/pci/saa7134/saa7134-video.c if (copy_to_user(&f->fmt.win.clips[i].c, &dev->clips[i].c, c 1648 drivers/media/pci/saa7134/saa7134-video.c struct v4l2_rect *c = &dev->crop_current; c 1662 drivers/media/pci/saa7134/saa7134-video.c *c = sel->r; c 1663 drivers/media/pci/saa7134/saa7134-video.c if (c->top < b->top) c 1664 drivers/media/pci/saa7134/saa7134-video.c c->top = b->top; c 1665 drivers/media/pci/saa7134/saa7134-video.c if (c->top > b->top + b->height) c 1666 drivers/media/pci/saa7134/saa7134-video.c c->top = b->top + b->height; c 1667 drivers/media/pci/saa7134/saa7134-video.c if (c->height > b->top - c->top + b->height) c 1668 drivers/media/pci/saa7134/saa7134-video.c c->height = b->top - c->top + b->height; c 1670 drivers/media/pci/saa7134/saa7134-video.c if (c->left < b->left) c 1671 drivers/media/pci/saa7134/saa7134-video.c c->left = b->left; c 1672 
drivers/media/pci/saa7134/saa7134-video.c if (c->left > b->left + b->width) c 1673 drivers/media/pci/saa7134/saa7134-video.c c->left = b->left + b->width; c 1674 drivers/media/pci/saa7134/saa7134-video.c if (c->width > b->left - c->left + b->width) c 1675 drivers/media/pci/saa7134/saa7134-video.c c->width = b->left - c->left + b->width; c 1676 drivers/media/pci/saa7134/saa7134-video.c sel->r = *c; c 223 drivers/media/pci/saa7164/saa7164-buffer.c struct list_head *c, *n; c 250 drivers/media/pci/saa7164/saa7164-buffer.c list_for_each_safe(c, n, &port->dmaqueue.list) { c 251 drivers/media/pci/saa7164/saa7164-buffer.c buf = list_entry(c, struct saa7164_buffer, list); c 252 drivers/media/pci/saa7164/saa7164-core.c struct list_head *c, *n; c 257 drivers/media/pci/saa7164/saa7164-core.c list_for_each_safe(c, n, &port->dmaqueue.list) { c 259 drivers/media/pci/saa7164/saa7164-core.c buf = list_entry(c, struct saa7164_buffer, list); c 575 drivers/media/pci/saa7164/saa7164-core.c struct list_head *c, *n; c 591 drivers/media/pci/saa7164/saa7164-core.c list_for_each_safe(c, n, &port->dmaqueue.list) { c 592 drivers/media/pci/saa7164/saa7164-core.c buf = list_entry(c, struct saa7164_buffer, list); c 1054 drivers/media/pci/saa7164/saa7164-core.c int i, c; c 1078 drivers/media/pci/saa7164/saa7164-core.c c = 0; c 1082 drivers/media/pci/saa7164/saa7164-core.c if (c == 0) c 1087 drivers/media/pci/saa7164/saa7164-core.c if (++c == 16) { c 1089 drivers/media/pci/saa7164/saa7164-core.c c = 0; c 1093 drivers/media/pci/saa7164/saa7164-core.c c = 0; c 1097 drivers/media/pci/saa7164/saa7164-core.c if (c == 0) c 1102 drivers/media/pci/saa7164/saa7164-core.c if (++c == 16) { c 1104 drivers/media/pci/saa7164/saa7164-core.c c = 0; c 477 drivers/media/pci/saa7164/saa7164-dvb.c struct list_head *c, *n; c 487 drivers/media/pci/saa7164/saa7164-dvb.c list_for_each_safe(c, n, &port->dmaqueue.list) { c 488 drivers/media/pci/saa7164/saa7164-dvb.c b = list_entry(c, struct saa7164_buffer, list); c 489 drivers/media/pci/saa7164/saa7164-dvb.c list_del(c); c 61 drivers/media/pci/saa7164/saa7164-encoder.c struct list_head *c, *n, *p, *q, *l, *v; c 70 drivers/media/pci/saa7164/saa7164-encoder.c list_for_each_safe(c, n, &port->dmaqueue.list) { c 71 drivers/media/pci/saa7164/saa7164-encoder.c buf = list_entry(c, struct saa7164_buffer, list); c 72 drivers/media/pci/saa7164/saa7164-encoder.c list_del(c); c 592 drivers/media/pci/saa7164/saa7164-encoder.c struct list_head *c, *n; c 608 drivers/media/pci/saa7164/saa7164-encoder.c list_for_each_safe(c, n, &port->dmaqueue.list) { c 609 drivers/media/pci/saa7164/saa7164-encoder.c buf = list_entry(c, struct saa7164_buffer, list); c 614 drivers/media/pci/saa7164/saa7164-encoder.c list_for_each_safe(c, n, &port->list_buf_used.list) { c 615 drivers/media/pci/saa7164/saa7164-encoder.c ubuf = list_entry(c, struct saa7164_user_buffer, list); c 30 drivers/media/pci/saa7164/saa7164-vbi.c struct list_head *c, *n, *p, *q, *l, *v; c 39 drivers/media/pci/saa7164/saa7164-vbi.c list_for_each_safe(c, n, &port->dmaqueue.list) { c 40 drivers/media/pci/saa7164/saa7164-vbi.c buf = list_entry(c, struct saa7164_buffer, list); c 41 drivers/media/pci/saa7164/saa7164-vbi.c list_del(c); c 276 drivers/media/pci/saa7164/saa7164-vbi.c struct list_head *c, *n; c 292 drivers/media/pci/saa7164/saa7164-vbi.c list_for_each_safe(c, n, &port->dmaqueue.list) { c 293 drivers/media/pci/saa7164/saa7164-vbi.c buf = list_entry(c, struct saa7164_buffer, list); c 298 drivers/media/pci/saa7164/saa7164-vbi.c list_for_each_safe(c, n, 
&port->list_buf_used.list) { c 299 drivers/media/pci/saa7164/saa7164-vbi.c ubuf = list_entry(c, struct saa7164_user_buffer, list); c 585 drivers/media/pci/ttpci/av7110_av.c int c = 0; c 590 drivers/media/pci/ttpci/av7110_av.c while (c < length - 3 && !found) { c 591 drivers/media/pci/ttpci/av7110_av.c if (buf[c] == 0x00 && buf[c + 1] == 0x00 && c 592 drivers/media/pci/ttpci/av7110_av.c buf[c + 2] == 0x01) { c 593 drivers/media/pci/ttpci/av7110_av.c switch ( buf[c + 3] ) { c 609 drivers/media/pci/ttpci/av7110_av.c c++; c 613 drivers/media/pci/ttpci/av7110_av.c c++; c 615 drivers/media/pci/ttpci/av7110_av.c if (c == length - 3 && !found) { c 628 drivers/media/pci/ttpci/av7110_av.c return c; c 633 drivers/media/pci/ttpci/av7110_av.c int c, c2, l, add; c 636 drivers/media/pci/ttpci/av7110_av.c c = 0; c 642 drivers/media/pci/ttpci/av7110_av.c if (buf[c] == 0x00 && buf[c + 1] == 0x01) { c 644 drivers/media/pci/ttpci/av7110_av.c c += 2; c 648 drivers/media/pci/ttpci/av7110_av.c if (buf[c] == 0x01) { c 650 drivers/media/pci/ttpci/av7110_av.c c++; c 657 drivers/media/pci/ttpci/av7110_av.c switch (buf[c]) { c 672 drivers/media/pci/ttpci/av7110_av.c p->pes[3] = buf[c]; c 674 drivers/media/pci/ttpci/av7110_av.c memcpy(p->pes + p->pos, buf + c, (TS_SIZE - 4) - p->pos); c 675 drivers/media/pci/ttpci/av7110_av.c c += (TS_SIZE - 4) - p->pos; c 681 drivers/media/pci/ttpci/av7110_av.c c = 0; c 689 drivers/media/pci/ttpci/av7110_av.c c2 = find_pes_header(buf + c, length - c, &p->frags); c 691 drivers/media/pci/ttpci/av7110_av.c l = c2+c; c 695 drivers/media/pci/ttpci/av7110_av.c c += l; c 702 drivers/media/pci/ttpci/av7110_av.c while (c < length) { c 703 drivers/media/pci/ttpci/av7110_av.c c2 = find_pes_header(buf + c + add, length - c - add, &p->frags); c 705 drivers/media/pci/ttpci/av7110_av.c c2 += c + add; c 706 drivers/media/pci/ttpci/av7110_av.c if (c2 > c){ c 707 drivers/media/pci/ttpci/av7110_av.c p_to_t(buf + c, c2 - c, pid, &p->counter, p->feed); c 708 drivers/media/pci/ttpci/av7110_av.c c = c2; c 714 drivers/media/pci/ttpci/av7110_av.c l = length - c; c 717 drivers/media/pci/ttpci/av7110_av.c p_to_t(buf + c, l, pid, &p->counter, p->feed); c 718 drivers/media/pci/ttpci/av7110_av.c memcpy(p->pes, buf + c + l, rest); c 720 drivers/media/pci/ttpci/av7110_av.c c = length; c 729 drivers/media/pci/ttpci/av7110_av.c int c = 0; c 742 drivers/media/pci/ttpci/av7110_av.c c += 4; c 746 drivers/media/pci/ttpci/av7110_av.c c++; c 749 drivers/media/pci/ttpci/av7110_av.c c++; c 753 drivers/media/pci/ttpci/av7110_av.c c++; c 757 drivers/media/pci/ttpci/av7110_av.c return c; c 766 drivers/media/pci/ttpci/av7110_av.c long c = 0; c 790 drivers/media/pci/ttpci/av7110_av.c while (c < length) { c 792 drivers/media/pci/ttpci/av7110_av.c if (length - c >= (TS_SIZE - 4)){ c 795 drivers/media/pci/ttpci/av7110_av.c memcpy(obuf + l, buf + c, TS_SIZE - l); c 796 drivers/media/pci/ttpci/av7110_av.c c += TS_SIZE - l; c 799 drivers/media/pci/ttpci/av7110_av.c obuf, length - c); c 800 drivers/media/pci/ttpci/av7110_av.c memcpy(obuf + l, buf + c, TS_SIZE - l); c 801 drivers/media/pci/ttpci/av7110_av.c c = length; c 959 drivers/media/pci/ttpci/av7110_av.c unsigned char c; c 969 drivers/media/pci/ttpci/av7110_av.c if (get_user(c, buf)) c 971 drivers/media/pci/ttpci/av7110_av.c if (c == 0x47 && count % TS_SIZE == 0) c 1001 drivers/media/pci/ttpci/av7110_av.c unsigned char c; c 1010 drivers/media/pci/ttpci/av7110_av.c if (get_user(c, buf)) c 1012 drivers/media/pci/ttpci/av7110_av.c if (c == 0x47 && count % TS_SIZE == 0) c 1040 
drivers/media/pci/ttpci/av7110_av.c unsigned char c; c 1041 drivers/media/pci/ttpci/av7110_av.c if (get_user(c, buf + i)) c 1044 drivers/media/pci/ttpci/av7110_av.c progressive = c & 0x08; c 1047 drivers/media/pci/ttpci/av7110_av.c if (c == 0x00) { c 1052 drivers/media/pci/ttpci/av7110_av.c case 2: if (c == 0x01) c 1055 drivers/media/pci/ttpci/av7110_av.c case 3: if (c == 0xb5) c 1058 drivers/media/pci/ttpci/av7110_av.c case 4: if ((c & 0xf0) == 0x10) c 860 drivers/media/pci/ttpci/av7110_hw.c u8 c; c 894 drivers/media/pci/ttpci/av7110_hw.c c = ((u8 *)av7110->bmpbuf)[1024 + i * delta + delta - 1]; c 896 drivers/media/pci/ttpci/av7110_hw.c c |= (((u8 *)av7110->bmpbuf)[1024 + i * delta + d] c 898 drivers/media/pci/ttpci/av7110_hw.c ((u8 *)av7110->bmpbuf)[1024 + i] = c; c 149 drivers/media/pci/ttpci/av7110_ipack.c int c = 0; c 151 drivers/media/pci/ttpci/av7110_ipack.c while (c < count && (p->mpeg == 0 || c 158 drivers/media/pci/ttpci/av7110_ipack.c if (buf[c] == 0x00) c 162 drivers/media/pci/ttpci/av7110_ipack.c c++; c 165 drivers/media/pci/ttpci/av7110_ipack.c if (buf[c] == 0x01) c 167 drivers/media/pci/ttpci/av7110_ipack.c else if (buf[c] == 0) c 171 drivers/media/pci/ttpci/av7110_ipack.c c++; c 175 drivers/media/pci/ttpci/av7110_ipack.c switch (buf[c]) { c 190 drivers/media/pci/ttpci/av7110_ipack.c p->cid = buf[c]; c 191 drivers/media/pci/ttpci/av7110_ipack.c c++; c 200 drivers/media/pci/ttpci/av7110_ipack.c if (count-c > 1) { c 201 drivers/media/pci/ttpci/av7110_ipack.c p->plen[0] = buf[c]; c 202 drivers/media/pci/ttpci/av7110_ipack.c c++; c 203 drivers/media/pci/ttpci/av7110_ipack.c p->plen[1] = buf[c]; c 204 drivers/media/pci/ttpci/av7110_ipack.c c++; c 208 drivers/media/pci/ttpci/av7110_ipack.c p->plen[0] = buf[c]; c 214 drivers/media/pci/ttpci/av7110_ipack.c p->plen[1] = buf[c]; c 215 drivers/media/pci/ttpci/av7110_ipack.c c++; c 221 drivers/media/pci/ttpci/av7110_ipack.c p->flag1 = buf[c]; c 222 drivers/media/pci/ttpci/av7110_ipack.c c++; c 237 drivers/media/pci/ttpci/av7110_ipack.c p->flag2 = buf[c]; c 238 drivers/media/pci/ttpci/av7110_ipack.c c++; c 245 drivers/media/pci/ttpci/av7110_ipack.c p->hlength = buf[c]; c 246 drivers/media/pci/ttpci/av7110_ipack.c c++; c 253 drivers/media/pci/ttpci/av7110_ipack.c if (c == count) c 276 drivers/media/pci/ttpci/av7110_ipack.c while (c < count && p->found < 14) { c 277 drivers/media/pci/ttpci/av7110_ipack.c p->pts[p->found - 9] = buf[c]; c 278 drivers/media/pci/ttpci/av7110_ipack.c write_ipack(p, buf + c, 1); c 279 drivers/media/pci/ttpci/av7110_ipack.c c++; c 282 drivers/media/pci/ttpci/av7110_ipack.c if (c == count) c 293 drivers/media/pci/ttpci/av7110_ipack.c while (!p->which && c < count && c 295 drivers/media/pci/ttpci/av7110_ipack.c p->check = buf[c]; c 296 drivers/media/pci/ttpci/av7110_ipack.c write_ipack(p, buf + c, 1); c 297 drivers/media/pci/ttpci/av7110_ipack.c c++; c 302 drivers/media/pci/ttpci/av7110_ipack.c if (c == count) c 306 drivers/media/pci/ttpci/av7110_ipack.c p->check = buf[c]; c 307 drivers/media/pci/ttpci/av7110_ipack.c write_ipack(p, buf + c, 1); c 308 drivers/media/pci/ttpci/av7110_ipack.c c++; c 313 drivers/media/pci/ttpci/av7110_ipack.c if (c == count) c 315 drivers/media/pci/ttpci/av7110_ipack.c p->check = buf[c]; c 316 drivers/media/pci/ttpci/av7110_ipack.c write_ipack(p, buf + c, 1); c 317 drivers/media/pci/ttpci/av7110_ipack.c c++; c 321 drivers/media/pci/ttpci/av7110_ipack.c if (c == count) c 326 drivers/media/pci/ttpci/av7110_ipack.c p->check = buf[c]; c 327 drivers/media/pci/ttpci/av7110_ipack.c 
write_ipack(p, buf + c, 1); c 328 drivers/media/pci/ttpci/av7110_ipack.c c++; c 332 drivers/media/pci/ttpci/av7110_ipack.c if (c == count) c 342 drivers/media/pci/ttpci/av7110_ipack.c if (c == count) c 346 drivers/media/pci/ttpci/av7110_ipack.c while (c < count && p->which < 7) { c 347 drivers/media/pci/ttpci/av7110_ipack.c p->pts[p->which - 2] = buf[c]; c 348 drivers/media/pci/ttpci/av7110_ipack.c write_ipack(p, buf + c, 1); c 349 drivers/media/pci/ttpci/av7110_ipack.c c++; c 354 drivers/media/pci/ttpci/av7110_ipack.c if (c == count) c 357 drivers/media/pci/ttpci/av7110_ipack.c while (c < count && p->which < 12) { c 359 drivers/media/pci/ttpci/av7110_ipack.c p->pts[p->which - 2] = buf[c]; c 360 drivers/media/pci/ttpci/av7110_ipack.c write_ipack(p, buf + c, 1); c 361 drivers/media/pci/ttpci/av7110_ipack.c c++; c 366 drivers/media/pci/ttpci/av7110_ipack.c if (c == count) c 374 drivers/media/pci/ttpci/av7110_ipack.c while (c < count && p->found < p->plength + 6) { c 375 drivers/media/pci/ttpci/av7110_ipack.c l = count - c; c 378 drivers/media/pci/ttpci/av7110_ipack.c write_ipack(p, buf + c, l); c 380 drivers/media/pci/ttpci/av7110_ipack.c c += l; c 387 drivers/media/pci/ttpci/av7110_ipack.c if (p->found + count - c < p->plength + 6) { c 388 drivers/media/pci/ttpci/av7110_ipack.c p->found += count - c; c 389 drivers/media/pci/ttpci/av7110_ipack.c c = count; c 391 drivers/media/pci/ttpci/av7110_ipack.c c += p->plength + 6 - p->found; c 399 drivers/media/pci/ttpci/av7110_ipack.c if (c < count) c 400 drivers/media/pci/ttpci/av7110_ipack.c av7110_ipack_instant_repack(buf + c, count - c, p); c 490 drivers/media/pci/ttpci/budget-av.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 496 drivers/media/pci/ttpci/budget-av.c if ((c->frequency < 950000) || (c->frequency > 2150000)) c 499 drivers/media/pci/ttpci/budget-av.c div = (c->frequency + (125 - 1)) / 125; /* round correctly */ c 505 drivers/media/pci/ttpci/budget-av.c if (c->symbol_rate < 4000000) c 508 drivers/media/pci/ttpci/budget-av.c if (c->frequency < 1250000) c 510 drivers/media/pci/ttpci/budget-av.c else if (c->frequency < 1550000) c 512 drivers/media/pci/ttpci/budget-av.c else if (c->frequency < 2050000) c 514 drivers/media/pci/ttpci/budget-av.c else if (c->frequency < 2150000) c 605 drivers/media/pci/ttpci/budget-av.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 614 drivers/media/pci/ttpci/budget-av.c u32 div = (c->frequency + CU1216_IF + TUNER_MUL / 2) / TUNER_MUL; c 619 drivers/media/pci/ttpci/budget-av.c buf[3] = (c->frequency < 150000000 ? 0x01 : c 620 drivers/media/pci/ttpci/budget-av.c c->frequency < 445000000 ? 
0x02 : 0x04); c 686 drivers/media/pci/ttpci/budget-av.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 695 drivers/media/pci/ttpci/budget-av.c tuner_frequency = c->frequency + 36166000; c 720 drivers/media/pci/ttpci/budget-av.c if (c->frequency < 49000000) c 722 drivers/media/pci/ttpci/budget-av.c else if (c->frequency < 161000000) c 724 drivers/media/pci/ttpci/budget-av.c else if (c->frequency < 444000000) c 726 drivers/media/pci/ttpci/budget-av.c else if (c->frequency < 861000000) c 732 drivers/media/pci/ttpci/budget-av.c switch (c->bandwidth_hz) { c 751 drivers/media/pci/ttpci/budget-av.c tuner_frequency = (((c->frequency / 1000) * 6) + 217496) / 1000; c 194 drivers/media/pci/ttpci/budget.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 199 drivers/media/pci/ttpci/budget.c u32 div = (c->frequency + 479500) / 125; c 201 drivers/media/pci/ttpci/budget.c if (c->frequency > 2000000) c 203 drivers/media/pci/ttpci/budget.c else if (c->frequency > 1800000) c 205 drivers/media/pci/ttpci/budget.c else if (c->frequency > 1600000) c 207 drivers/media/pci/ttpci/budget.c else if (c->frequency > 1200000) c 209 drivers/media/pci/ttpci/budget.c else if (c->frequency >= 1100000) c 236 drivers/media/pci/ttpci/budget.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 242 drivers/media/pci/ttpci/budget.c div = (c->frequency + 35937500 + 31250) / 62500; c 247 drivers/media/pci/ttpci/budget.c data[3] = (c->frequency < 174000000 ? 0x88 : c->frequency < 470000000 ? 0x84 : 0x81); c 264 drivers/media/pci/ttpci/budget.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 277 drivers/media/pci/ttpci/budget.c div = (36125000 + c->frequency) / 166666; c 281 drivers/media/pci/ttpci/budget.c if (c->frequency < 175000000) c 283 drivers/media/pci/ttpci/budget.c else if (c->frequency < 390000000) c 285 drivers/media/pci/ttpci/budget.c else if (c->frequency < 470000000) c 287 drivers/media/pci/ttpci/budget.c else if (c->frequency < 750000000) c 292 drivers/media/pci/ttpci/budget.c if (c->frequency < 175000000) c 294 drivers/media/pci/ttpci/budget.c else if (c->frequency < 470000000) c 322 drivers/media/pci/ttpci/budget.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 328 drivers/media/pci/ttpci/budget.c div = c->frequency / 125; c 346 drivers/media/pci/ttpci/budget.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 352 drivers/media/pci/ttpci/budget.c div = c->frequency / 1000; c 25 drivers/media/pci/ttpci/dvb_filter.c int c = 0; c 29 drivers/media/pci/ttpci/dvb_filter.c while ( !found && c < count){ c 30 drivers/media/pci/ttpci/dvb_filter.c u8 *b = mbuf+c; c 35 drivers/media/pci/ttpci/dvb_filter.c c++; c 43 drivers/media/pci/ttpci/dvb_filter.c ai->off = c; c 44 drivers/media/pci/ttpci/dvb_filter.c if (c+5 >= count) return -1; c 47 drivers/media/pci/ttpci/dvb_filter.c headr = mbuf+c+2; c 234 drivers/media/platform/atmel/atmel-isc-base.c unsigned int c; c 236 drivers/media/platform/atmel/atmel-isc-base.c for (c = ISC_HIS_CFG_MODE_GR; c <= ISC_HIS_CFG_MODE_B; c++) { c 238 drivers/media/platform/atmel/atmel-isc-base.c isc->ctrls.gain[c] = 1 << 9; c 244 drivers/media/platform/atmel/atmel-isc-base.c isc->ctrls.offset[c] = ISC_WB_O_ZERO_VAL; c 1659 drivers/media/platform/atmel/atmel-isc-base.c u32 c, offset[4]; c 1678 drivers/media/platform/atmel/atmel-isc-base.c for (c = ISC_HIS_CFG_MODE_GR; c <= ISC_HIS_CFG_MODE_B; c++) { c 1684 drivers/media/platform/atmel/atmel-isc-base.c offset[c] = ctrls->hist_minmax[c][HIST_MIN_INDEX]; c 1694 
drivers/media/platform/atmel/atmel-isc-base.c ctrls->offset[c] = (offset[c] - 1) << 3; c 1697 drivers/media/platform/atmel/atmel-isc-base.c if (!ctrls->offset[c]) c 1698 drivers/media/platform/atmel/atmel-isc-base.c ctrls->offset[c] = ISC_WB_O_ZERO_VAL; c 1708 drivers/media/platform/atmel/atmel-isc-base.c s_gain[c] = (HIST_ENTRIES << 9) / c 1709 drivers/media/platform/atmel/atmel-isc-base.c (ctrls->hist_minmax[c][HIST_MAX_INDEX] - c 1710 drivers/media/platform/atmel/atmel-isc-base.c ctrls->hist_minmax[c][HIST_MIN_INDEX] + 1); c 1718 drivers/media/platform/atmel/atmel-isc-base.c if (hist_count[c]) c 1719 drivers/media/platform/atmel/atmel-isc-base.c gw_gain[c] = div_u64(avg << 9, hist_count[c]); c 1721 drivers/media/platform/atmel/atmel-isc-base.c gw_gain[c] = 1 << 9; c 1724 drivers/media/platform/atmel/atmel-isc-base.c ctrls->gain[c] = s_gain[c] * gw_gain[c]; c 1725 drivers/media/platform/atmel/atmel-isc-base.c ctrls->gain[c] >>= 9; c 540 drivers/media/platform/davinci/vpbe_display.c struct v4l2_rect *c) c 544 drivers/media/platform/davinci/vpbe_display.c if ((c->width == 0) || c 545 drivers/media/platform/davinci/vpbe_display.c ((c->width + c->left) > vpbe_dev->current_timings.xres)) c 546 drivers/media/platform/davinci/vpbe_display.c c->width = vpbe_dev->current_timings.xres - c->left; c 548 drivers/media/platform/davinci/vpbe_display.c if ((c->height == 0) || ((c->height + c->top) > c 550 drivers/media/platform/davinci/vpbe_display.c c->height = vpbe_dev->current_timings.yres - c->top; c 554 drivers/media/platform/davinci/vpbe_display.c c->height &= (~0x01); c 66 drivers/media/platform/omap3isp/isphist.c int c; c 85 drivers/media/platform/omap3isp/isphist.c for (c = 0; c < OMAP3ISP_HIST_MAX_REGIONS; c++) { c 86 drivers/media/platform/omap3isp/isphist.c if (c < conf->num_regions) { c 87 drivers/media/platform/omap3isp/isphist.c reg_hor[c] = (conf->region[c].h_start << c 89 drivers/media/platform/omap3isp/isphist.c | (conf->region[c].h_end << c 91 drivers/media/platform/omap3isp/isphist.c reg_ver[c] = (conf->region[c].v_start << c 93 drivers/media/platform/omap3isp/isphist.c | (conf->region[c].v_end << c 96 drivers/media/platform/omap3isp/isphist.c reg_hor[c] = 0; c 97 drivers/media/platform/omap3isp/isphist.c reg_ver[c] = 0; c 307 drivers/media/platform/omap3isp/isphist.c int c; c 320 drivers/media/platform/omap3isp/isphist.c for (c = 0; c < user_cfg->num_regions; c++) { c 321 drivers/media/platform/omap3isp/isphist.c if (user_cfg->region[c].h_start & ~ISPHIST_REG_START_END_MASK) c 323 drivers/media/platform/omap3isp/isphist.c if (user_cfg->region[c].h_end & ~ISPHIST_REG_START_END_MASK) c 325 drivers/media/platform/omap3isp/isphist.c if (user_cfg->region[c].v_start & ~ISPHIST_REG_START_END_MASK) c 327 drivers/media/platform/omap3isp/isphist.c if (user_cfg->region[c].v_end & ~ISPHIST_REG_START_END_MASK) c 329 drivers/media/platform/omap3isp/isphist.c if (user_cfg->region[c].h_start > user_cfg->region[c].h_end) c 331 drivers/media/platform/omap3isp/isphist.c if (user_cfg->region[c].v_start > user_cfg->region[c].v_end) c 364 drivers/media/platform/omap3isp/isphist.c int c; c 375 drivers/media/platform/omap3isp/isphist.c for (c = 0; c < OMAP3ISP_HIST_MAX_WG; c++) { c 376 drivers/media/platform/omap3isp/isphist.c if (c == 3 && user_cfg->cfa == OMAP3ISP_HIST_CFA_FOVEONX3) c 378 drivers/media/platform/omap3isp/isphist.c else if (cur_cfg->wg[c] != user_cfg->wg[c]) c 386 drivers/media/platform/omap3isp/isphist.c for (c = 0; c < user_cfg->num_regions; c++) { c 387 
drivers/media/platform/omap3isp/isphist.c if (cur_cfg->region[c].h_start != user_cfg->region[c].h_start) c 389 drivers/media/platform/omap3isp/isphist.c if (cur_cfg->region[c].h_end != user_cfg->region[c].h_end) c 391 drivers/media/platform/omap3isp/isphist.c if (cur_cfg->region[c].v_start != user_cfg->region[c].v_start) c 393 drivers/media/platform/omap3isp/isphist.c if (cur_cfg->region[c].v_end != user_cfg->region[c].v_end) c 88 drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c struct csiphy_lanes_cfg *c = &cfg->csi2->lane_cfg; c 93 drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c settle_cnt = csiphy_settle_cnt_calc(pixel_clock, bpp, c->num_data, c 108 drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c for (i = 0; i <= c->num_data; i++) { c 109 drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c if (i == c->num_data) c 110 drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c l = c->clk.pos; c 112 drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c l = c->data[i].pos; c 128 drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c struct csiphy_lanes_cfg *c = &cfg->csi2->lane_cfg; c 132 drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c for (i = 0; i <= c->num_data; i++) { c 133 drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c if (i == c->num_data) c 134 drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c l = c->clk.pos; c 136 drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c l = c->data[i].pos; c 85 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c int c = i + 22; c 90 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(c)); c 140 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c struct csiphy_lanes_cfg *c = &cfg->csi2->lane_cfg; c 145 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c settle_cnt = csiphy_settle_cnt_calc(pixel_clock, bpp, c->num_data, c 148 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c val = BIT(c->clk.pos); c 149 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c for (i = 0; i < c->num_data; i++) c 150 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c val |= BIT(c->data[i].pos * 2); c 157 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c for (i = 0; i <= c->num_data; i++) { c 158 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c if (i == c->num_data) c 161 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c l = c->data[i].pos * 2; c 85 drivers/media/platform/qcom/camss/camss-ispif.c #define ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0_CID_c_PLAIN(c) \ c 365 drivers/media/platform/qcom/venus/core.h unsigned int c; c 367 drivers/media/platform/qcom/venus/core.h for (c = 0; c < core->codecs_count; c++) { c 368 drivers/media/platform/qcom/venus/core.h if (core->caps[c].codec == codec && c 369 drivers/media/platform/qcom/venus/core.h core->caps[c].domain == domain) c 370 drivers/media/platform/qcom/venus/core.h return &core->caps[c]; c 485 drivers/media/platform/rcar_jpu.c static struct jpu_ctx *ctrl_to_ctx(struct v4l2_ctrl *c) c 487 drivers/media/platform/rcar_jpu.c return container_of(c->handler, struct jpu_ctx, ctrl_handler); c 620 drivers/media/platform/rcar_jpu.c int c; c 624 drivers/media/platform/rcar_jpu.c c = get_byte(&jpeg_buffer); c 625 drivers/media/platform/rcar_jpu.c while (c == 0xff || c == 0); c 627 drivers/media/platform/rcar_jpu.c if (!soi && c == SOI) { c 630 drivers/media/platform/rcar_jpu.c } else if (soi != (c != SOI)) c 633 drivers/media/platform/rcar_jpu.c switch (c) { c 93 
drivers/media/platform/s5p-g2d/g2d-hw.c void g2d_set_cmd(struct g2d_dev *d, u32 c) c 95 drivers/media/platform/s5p-g2d/g2d-hw.c w(c, BITBLT_COMMAND_REG); c 84 drivers/media/platform/s5p-g2d/g2d.h void g2d_set_cmd(struct g2d_dev *d, u32 c); c 577 drivers/media/platform/s5p-jpeg/jpeg-core.c static inline struct s5p_jpeg_ctx *ctrl_to_ctx(struct v4l2_ctrl *c) c 579 drivers/media/platform/s5p-jpeg/jpeg-core.c return container_of(c->handler, struct s5p_jpeg_ctx, ctrl_handler); c 770 drivers/media/platform/s5p-jpeg/jpeg-core.c int c, x, components; c 789 drivers/media/platform/s5p-jpeg/jpeg-core.c c = get_byte(&jpeg_buffer); c 790 drivers/media/platform/s5p-jpeg/jpeg-core.c if (c == -1) c 795 drivers/media/platform/s5p-jpeg/jpeg-core.c exynos4_jpeg_select_dec_h_tbl(jpeg->regs, c, c 807 drivers/media/platform/s5p-jpeg/jpeg-core.c int c, i, n, j; c 819 drivers/media/platform/s5p-jpeg/jpeg-core.c c = get_byte(&jpeg_buffer); c 820 drivers/media/platform/s5p-jpeg/jpeg-core.c if (c == -1) c 822 drivers/media/platform/s5p-jpeg/jpeg-core.c id = c & 0xf; c 823 drivers/media/platform/s5p-jpeg/jpeg-core.c class = (c >> 4) & 0xf; c 826 drivers/media/platform/s5p-jpeg/jpeg-core.c c = get_byte(&jpeg_buffer); c 827 drivers/media/platform/s5p-jpeg/jpeg-core.c if (c == -1) c 829 drivers/media/platform/s5p-jpeg/jpeg-core.c word |= c << ((i % 4) * 8); c 836 drivers/media/platform/s5p-jpeg/jpeg-core.c n += c; c 840 drivers/media/platform/s5p-jpeg/jpeg-core.c c = get_byte(&jpeg_buffer); c 841 drivers/media/platform/s5p-jpeg/jpeg-core.c if (c == -1) c 843 drivers/media/platform/s5p-jpeg/jpeg-core.c word |= c << ((i % 4) * 8); c 865 drivers/media/platform/s5p-jpeg/jpeg-core.c int c, x, components; c 880 drivers/media/platform/s5p-jpeg/jpeg-core.c c = get_byte(&jpeg_buffer); c 881 drivers/media/platform/s5p-jpeg/jpeg-core.c if (c == -1) c 887 drivers/media/platform/s5p-jpeg/jpeg-core.c exynos4_jpeg_select_dec_q_tbl(jpeg->regs, c, x); c 897 drivers/media/platform/s5p-jpeg/jpeg-core.c int c, i, j; c 909 drivers/media/platform/s5p-jpeg/jpeg-core.c c = get_byte(&jpeg_buffer); c 910 drivers/media/platform/s5p-jpeg/jpeg-core.c if (c == -1) c 912 drivers/media/platform/s5p-jpeg/jpeg-core.c id = c & 0xf; c 914 drivers/media/platform/s5p-jpeg/jpeg-core.c if ((c >> 4) & 0xf) c 917 drivers/media/platform/s5p-jpeg/jpeg-core.c c = get_byte(&jpeg_buffer); c 918 drivers/media/platform/s5p-jpeg/jpeg-core.c if (c == -1) c 920 drivers/media/platform/s5p-jpeg/jpeg-core.c word |= c << ((i % 4) * 8); c 1114 drivers/media/platform/s5p-jpeg/jpeg-core.c int c, components = 0, notfound, n_dht = 0, n_dqt = 0; c 1128 drivers/media/platform/s5p-jpeg/jpeg-core.c c = get_byte(&jpeg_buffer); c 1129 drivers/media/platform/s5p-jpeg/jpeg-core.c if (c == -1) c 1131 drivers/media/platform/s5p-jpeg/jpeg-core.c if (c != 0xff) c 1134 drivers/media/platform/s5p-jpeg/jpeg-core.c c = get_byte(&jpeg_buffer); c 1135 drivers/media/platform/s5p-jpeg/jpeg-core.c while (c == 0xff); c 1136 drivers/media/platform/s5p-jpeg/jpeg-core.c if (c == -1) c 1138 drivers/media/platform/s5p-jpeg/jpeg-core.c if (c == 0) c 1141 drivers/media/platform/s5p-jpeg/jpeg-core.c switch (c) { c 265 drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c void exynos4_jpeg_select_dec_q_tbl(void __iomem *base, char c, char x) c 271 drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c reg |= EXYNOS4_Q_TBL_COMP(c, x); c 275 drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c void exynos4_jpeg_select_dec_h_tbl(void __iomem *base, char c, char x) c 281 drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c reg |= 
EXYNOS4_HUFF_TBL_COMP(c, x); c 33 drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.h void exynos4_jpeg_select_dec_q_tbl(void __iomem *base, char c, char x); c 34 drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.h void exynos4_jpeg_select_dec_h_tbl(void __iomem *base, char c, char x); c 322 drivers/media/platform/s5p-jpeg/jpeg-regs.h #define EXYNOS4_Q_TBL_COMP(c, n) ((n) << (((c) - 1) << 1)) c 339 drivers/media/platform/s5p-jpeg/jpeg-regs.h #define EXYNOS4_HUFF_TBL_COMP(c, n) ((n) << ((((c) - 1) << 1) + 6)) c 536 drivers/media/platform/s5p-mfc/s5p_mfc_common.h #define call_cop(c, op, args...) \ c 537 drivers/media/platform/s5p-mfc/s5p_mfc_common.h (((c)->c_ops->op) ? \ c 538 drivers/media/platform/s5p-mfc/s5p_mfc_common.h ((c)->c_ops->op(args)) : 0) c 658 drivers/media/platform/sti/bdisp/bdisp-hw.c static int bdisp_hw_get_op_cfg(struct bdisp_ctx *ctx, struct bdisp_op_cfg *c) c 669 drivers/media/platform/sti/bdisp/bdisp-hw.c c->wide = src->width > MAX_SRC_WIDTH; c 671 drivers/media/platform/sti/bdisp/bdisp-hw.c c->hflip = ctx->hflip; c 672 drivers/media/platform/sti/bdisp/bdisp-hw.c c->vflip = ctx->vflip; c 674 drivers/media/platform/sti/bdisp/bdisp-hw.c c->src_interlaced = (src->field == V4L2_FIELD_INTERLACED); c 676 drivers/media/platform/sti/bdisp/bdisp-hw.c c->src_nbp = src->fmt->nb_planes; c 677 drivers/media/platform/sti/bdisp/bdisp-hw.c c->src_yuv = (src->fmt->pixelformat == V4L2_PIX_FMT_NV12) || c 679 drivers/media/platform/sti/bdisp/bdisp-hw.c c->src_420 = c->src_yuv; c 681 drivers/media/platform/sti/bdisp/bdisp-hw.c c->dst_nbp = dst->fmt->nb_planes; c 682 drivers/media/platform/sti/bdisp/bdisp-hw.c c->dst_yuv = (dst->fmt->pixelformat == V4L2_PIX_FMT_NV12) || c 684 drivers/media/platform/sti/bdisp/bdisp-hw.c c->dst_420 = c->dst_yuv; c 686 drivers/media/platform/sti/bdisp/bdisp-hw.c c->cconv = (c->src_yuv != c->dst_yuv); c 688 drivers/media/platform/sti/bdisp/bdisp-hw.c if (bdisp_hw_get_hv_inc(ctx, &c->h_inc, &c->v_inc)) { c 694 drivers/media/platform/sti/bdisp/bdisp-hw.c if (c->src_interlaced) c 695 drivers/media/platform/sti/bdisp/bdisp-hw.c c->v_inc /= 2; c 697 drivers/media/platform/sti/bdisp/bdisp-hw.c if ((c->h_inc != (1 << 10)) || (c->v_inc != (1 << 10))) c 698 drivers/media/platform/sti/bdisp/bdisp-hw.c c->scale = true; c 700 drivers/media/platform/sti/bdisp/bdisp-hw.c c->scale = false; c 30 drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c static inline const char *dvb_card_str(unsigned int c) c 32 drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c switch (c) { c 20 drivers/media/platform/sti/hva/hva.h #define ctx_to_dev(c) (c->hva_dev->dev) c 22 drivers/media/platform/sti/hva/hva.h #define ctx_to_hdev(c) (c->hva_dev) c 1018 drivers/media/platform/stm32/stm32-dcmi.c struct v4l2_rect c = dcmi->crop; c 1029 drivers/media/platform/stm32/stm32-dcmi.c v4l2_rect_map_inside(&c, &max_rect); c 1030 drivers/media/platform/stm32/stm32-dcmi.c c.top = clamp_t(s32, c.top, 0, pix->height - c.height); c 1031 drivers/media/platform/stm32/stm32-dcmi.c c.left = clamp_t(s32, c.left, 0, pix->width - c.width); c 1032 drivers/media/platform/stm32/stm32-dcmi.c dcmi->crop = c; c 73 drivers/media/platform/vivid/vivid-kthread-cap.c struct v4l2_rect *r = &dev->clips_out[i].c; c 578 drivers/media/platform/vivid/vivid-kthread-cap.c struct v4l2_rect *r = &dev->clips_cap[i].c; c 1122 drivers/media/platform/vivid/vivid-vid-cap.c struct v4l2_rect *r = &dev->try_clips_cap[i].c; c 1134 drivers/media/platform/vivid/vivid-vid-cap.c struct v4l2_rect *r1 = &dev->try_clips_cap[i].c; c 1137 
drivers/media/platform/vivid/vivid-vid-cap.c struct v4l2_rect *r2 = &dev->try_clips_cap[j].c; c 898 drivers/media/platform/vivid/vivid-vid-out.c struct v4l2_rect *r = &dev->try_clips_out[i].c; c 910 drivers/media/platform/vivid/vivid-vid-out.c struct v4l2_rect *r1 = &dev->try_clips_out[i].c; c 913 drivers/media/platform/vivid/vivid-vid-out.c struct v4l2_rect *r2 = &dev->try_clips_out[j].c; c 483 drivers/media/radio/radio-tea5777.c static int tea575x_s_ctrl(struct v4l2_ctrl *c) c 486 drivers/media/radio/radio-tea5777.c container_of(c->handler, struct radio_tea5777, ctrl_handler); c 488 drivers/media/radio/radio-tea5777.c switch (c->id) { c 490 drivers/media/radio/radio-tea5777.c if (c->val) c 1114 drivers/media/radio/si4713/si4713.c int c; c 1139 drivers/media/radio/si4713/si4713.c for (c = 1; !ret && c < ctrl->ncontrols; c++) { c 1140 drivers/media/radio/si4713/si4713.c ctrl = ctrl->cluster[c]; c 575 drivers/media/tuners/e4000.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 577 drivers/media/tuners/e4000.c dev->f_frequency = c->frequency; c 578 drivers/media/tuners/e4000.c dev->f_bandwidth = c->bandwidth_hz; c 320 drivers/media/tuners/fc2580.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 322 drivers/media/tuners/fc2580.c dev->f_frequency = c->frequency; c 323 drivers/media/tuners/fc2580.c dev->f_bandwidth = c->bandwidth_hz; c 210 drivers/media/tuners/it913x.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 218 drivers/media/tuners/it913x.c dev->role, c->frequency, c->bandwidth_hz); c 225 drivers/media/tuners/it913x.c if (c->frequency <= 74000000) { c 228 drivers/media/tuners/it913x.c } else if (c->frequency <= 111000000) { c 231 drivers/media/tuners/it913x.c } else if (c->frequency <= 148000000) { c 234 drivers/media/tuners/it913x.c } else if (c->frequency <= 222000000) { c 237 drivers/media/tuners/it913x.c } else if (c->frequency <= 296000000) { c 240 drivers/media/tuners/it913x.c } else if (c->frequency <= 445000000) { c 243 drivers/media/tuners/it913x.c } else if (c->frequency <= dev->fn_min) { c 246 drivers/media/tuners/it913x.c } else if (c->frequency <= 950000000) { c 273 drivers/media/tuners/it913x.c t_cal_freq = (c->frequency / 1000) * n_div * dev->fdiv; c 286 drivers/media/tuners/it913x.c if (c->frequency <= 440000000) { c 289 drivers/media/tuners/it913x.c } else if (c->frequency <= 484000000) { c 292 drivers/media/tuners/it913x.c } else if (c->frequency <= 533000000) { c 295 drivers/media/tuners/it913x.c } else if (c->frequency <= 587000000) { c 298 drivers/media/tuners/it913x.c } else if (c->frequency <= 645000000) { c 301 drivers/media/tuners/it913x.c } else if (c->frequency <= 710000000) { c 304 drivers/media/tuners/it913x.c } else if (c->frequency <= 782000000) { c 307 drivers/media/tuners/it913x.c } else if (c->frequency <= 860000000) { c 310 drivers/media/tuners/it913x.c } else if (c->frequency <= 1492000000) { c 313 drivers/media/tuners/it913x.c } else if (c->frequency <= 1685000000) { c 326 drivers/media/tuners/it913x.c if (c->bandwidth_hz <= 5000000) c 328 drivers/media/tuners/it913x.c else if (c->bandwidth_hz <= 6000000) c 330 drivers/media/tuners/it913x.c else if (c->bandwidth_hz <= 7000000) c 27 drivers/media/tuners/m88rs6000t.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 40 drivers/media/tuners/m88rs6000t.c if (c->symbol_rate > 45010000) { c 51 drivers/media/tuners/m88rs6000t.c if (c->delivery_system == SYS_DVBS) c 388 drivers/media/tuners/m88rs6000t.c struct dtv_frontend_properties *c = 
&fe->dtv_property_cache; c 395 drivers/media/tuners/m88rs6000t.c c->frequency, c->symbol_rate); c 397 drivers/media/tuners/m88rs6000t.c if (c->symbol_rate < 5000000) c 402 drivers/media/tuners/m88rs6000t.c realFreq = c->frequency + lpf_offset_KHz; c 408 drivers/media/tuners/m88rs6000t.c ret = m88rs6000t_set_bb(dev, c->symbol_rate / 1000, lpf_offset_KHz); c 252 drivers/media/tuners/max2165.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 255 drivers/media/tuners/max2165.c switch (c->bandwidth_hz) { c 258 drivers/media/tuners/max2165.c priv->frequency = c->frequency; c 262 drivers/media/tuners/max2165.c c->bandwidth_hz); c 266 drivers/media/tuners/max2165.c dprintk("%s() frequency=%d\n", __func__, c->frequency); c 270 drivers/media/tuners/max2165.c max2165_set_bandwidth(priv, c->bandwidth_hz); c 204 drivers/media/tuners/mc44s803.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 208 drivers/media/tuners/mc44s803.c priv->frequency = c->frequency; c 213 drivers/media/tuners/mc44s803.c n1 = (c->frequency + MC44S803_IF1 + 500000) / 1000000; c 216 drivers/media/tuners/mc44s803.c freq = freq - c->frequency; c 190 drivers/media/tuners/mt2060.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 211 drivers/media/tuners/mt2060.c freq = c->frequency / 1000; /* Hz -> kHz */ c 688 drivers/media/tuners/mt2063.c const u32 c = d - pAS_Info->f_out_bw; c 708 drivers/media/tuners/mt2063.c gc_Scale = max((u32) gcd(lo_gcd, c), f_Scale); c 732 drivers/media/tuners/mt2063.c ((c + hgcs) / gc_Scale)) / ((f_LO2 + hgcs) / gc_Scale); c 740 drivers/media/tuners/mt2063.c *fp = ((f_Spur - (s32) c) / (mc - n)) + 1; c 763 drivers/media/tuners/mt2063.c ((c + hgcs) / gc_Scale)) / ((f_LO2 + hgcs) / gc_Scale); c 772 drivers/media/tuners/mt2063.c *fm = (-(f_Spur + (s32) c) / (ma - n)) + 1; c 2097 drivers/media/tuners/mt2063.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 2114 drivers/media/tuners/mt2063.c if (c->bandwidth_hz == 0) c 2116 drivers/media/tuners/mt2063.c if (c->bandwidth_hz <= 6000000) c 2118 drivers/media/tuners/mt2063.c else if (c->bandwidth_hz <= 7000000) c 2123 drivers/media/tuners/mt2063.c switch (c->delivery_system) { c 2148 drivers/media/tuners/mt2063.c c->frequency, ch_bw, pict2chanb_vsb); c 2150 drivers/media/tuners/mt2063.c status = MT2063_Tune(state, (c->frequency + (pict2chanb_vsb + (ch_bw / 2)))); c 2155 drivers/media/tuners/mt2063.c state->frequency = c->frequency; c 83 drivers/media/tuners/mt2131.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 95 drivers/media/tuners/mt2131.c freq = c->frequency / 1000; /* Hz -> kHz */ c 118 drivers/media/tuners/mt2266.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 134 drivers/media/tuners/mt2266.c priv->frequency = c->frequency; c 140 drivers/media/tuners/mt2266.c switch (c->bandwidth_hz) { c 155 drivers/media/tuners/mt2266.c priv->bandwidth = c->bandwidth_hz; c 30 drivers/media/tuners/mxl301rf.c static struct mxl301rf_state *cfg_to_state(struct mxl301rf_config *c) c 32 drivers/media/tuners/mxl301rf.c return container_of(c, struct mxl301rf_state, cfg); c 3959 drivers/media/tuners/mxl5005s.c struct mxl5005s_config *c = state->config; c 3966 drivers/media/tuners/mxl5005s.c c->mod_mode, c 3967 drivers/media/tuners/mxl5005s.c c->if_mode, c 3969 drivers/media/tuners/mxl5005s.c c->if_freq, c 3970 drivers/media/tuners/mxl5005s.c c->xtal_freq, c 3971 drivers/media/tuners/mxl5005s.c c->agc_mode, c 3972 drivers/media/tuners/mxl5005s.c c->top, c 3973 drivers/media/tuners/mxl5005s.c 
c->output_load, c 3974 drivers/media/tuners/mxl5005s.c c->clock_out, c 3975 drivers/media/tuners/mxl5005s.c c->div_out, c 3976 drivers/media/tuners/mxl5005s.c c->cap_select, c 3977 drivers/media/tuners/mxl5005s.c c->rssi_enable, c 3979 drivers/media/tuners/mxl5005s.c c->tracking_filter); c 3987 drivers/media/tuners/mxl5005s.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 3988 drivers/media/tuners/mxl5005s.c u32 delsys = c->delivery_system; c 3989 drivers/media/tuners/mxl5005s.c u32 bw = c->bandwidth_hz; c 4032 drivers/media/tuners/mxl5005s.c dprintk(1, "%s() freq=%d\n", __func__, c->frequency); c 4033 drivers/media/tuners/mxl5005s.c ret = mxl5005s_SetRfFreqHz(fe, c->frequency); c 601 drivers/media/tuners/mxl5007t.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 602 drivers/media/tuners/mxl5007t.c u32 delsys = c->delivery_system; c 607 drivers/media/tuners/mxl5007t.c u32 freq = c->frequency; c 621 drivers/media/tuners/mxl5007t.c switch (c->bandwidth_hz) { c 654 drivers/media/tuners/mxl5007t.c state->bandwidth = c->bandwidth_hz; c 59 drivers/media/tuners/qm1d1c0042.c static struct qm1d1c0042_state *cfg_to_state(struct qm1d1c0042_config *c) c 61 drivers/media/tuners/qm1d1c0042.c return container_of(c, struct qm1d1c0042_state, cfg); c 46 drivers/media/tuners/qt1010.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 106 drivers/media/tuners/qt1010.c freq = c->frequency; c 280 drivers/media/tuners/qt1010.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 361 drivers/media/tuners/qt1010.c if (!c->frequency) c 362 drivers/media/tuners/qt1010.c c->frequency = 545000000; /* Sigmatek DVB-110 545000000 */ c 2209 drivers/media/tuners/r820t.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 2214 drivers/media/tuners/r820t.c __func__, c->delivery_system, c->frequency, c->bandwidth_hz); c 2220 drivers/media/tuners/r820t.c bw = (c->bandwidth_hz + 500000) / 1000000; c 2224 drivers/media/tuners/r820t.c rc = generic_set_freq(fe, c->frequency, bw, c 2225 drivers/media/tuners/r820t.c V4L2_TUNER_DIGITAL_TV, 0, c->delivery_system); c 73 drivers/media/tuners/si2157.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 230 drivers/media/tuners/si2157.c c->strength.len = 1; c 231 drivers/media/tuners/si2157.c c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 276 drivers/media/tuners/si2157.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 284 drivers/media/tuners/si2157.c c->delivery_system, c->frequency, c->bandwidth_hz); c 291 drivers/media/tuners/si2157.c if (c->bandwidth_hz <= 6000000) c 293 drivers/media/tuners/si2157.c else if (c->bandwidth_hz <= 7000000) c 295 drivers/media/tuners/si2157.c else if (c->bandwidth_hz <= 8000000) c 300 drivers/media/tuners/si2157.c switch (c->delivery_system) { c 358 drivers/media/tuners/si2157.c cmd.args[4] = (c->frequency >> 0) & 0xff; c 359 drivers/media/tuners/si2157.c cmd.args[5] = (c->frequency >> 8) & 0xff; c 360 drivers/media/tuners/si2157.c cmd.args[6] = (c->frequency >> 16) & 0xff; c 361 drivers/media/tuners/si2157.c cmd.args[7] = (c->frequency >> 24) & 0xff; c 401 drivers/media/tuners/si2157.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 414 drivers/media/tuners/si2157.c c->strength.stat[0].scale = FE_SCALE_DECIBEL; c 415 drivers/media/tuners/si2157.c c->strength.stat[0].svalue = (s8) cmd.args[3] * 1000; c 420 drivers/media/tuners/si2157.c c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 22 drivers/media/tuners/tda18212.c struct 
dtv_frontend_properties *c = &fe->dtv_property_cache; c 52 drivers/media/tuners/tda18212.c c->delivery_system, c->frequency, c 53 drivers/media/tuners/tda18212.c c->bandwidth_hz); c 58 drivers/media/tuners/tda18212.c switch (c->delivery_system) { c 68 drivers/media/tuners/tda18212.c switch (c->bandwidth_hz) { c 87 drivers/media/tuners/tda18212.c switch (c->bandwidth_hz) { c 131 drivers/media/tuners/tda18212.c buf[4] = ((c->frequency / 1000) >> 16) & 0xff; c 132 drivers/media/tuners/tda18212.c buf[5] = ((c->frequency / 1000) >> 8) & 0xff; c 133 drivers/media/tuners/tda18212.c buf[6] = ((c->frequency / 1000) >> 0) & 0xff; c 114 drivers/media/tuners/tda18218.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 115 drivers/media/tuners/tda18218.c u32 bw = c->bandwidth_hz; c 152 drivers/media/tuners/tda18218.c LO_Frac = c->frequency + priv->if_frequency; c 253 drivers/media/tuners/tda18250.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 263 drivers/media/tuners/tda18250.c utmp = ((c->frequency < 100000000) && c 264 drivers/media/tuners/tda18250.c ((c->delivery_system == SYS_DVBC_ANNEX_A) || c 265 drivers/media/tuners/tda18250.c (c->delivery_system == SYS_DVBC_ANNEX_C)) && c 266 drivers/media/tuners/tda18250.c (c->bandwidth_hz == 6000000)) ? 0x80 : 0x00; c 272 drivers/media/tuners/tda18250.c switch (c->delivery_system) { c 279 drivers/media/tuners/tda18250.c switch (c->bandwidth_hz) { c 281 drivers/media/tuners/tda18250.c utmp = (c->frequency < 800000000) ? 6 : 4; c 284 drivers/media/tuners/tda18250.c utmp = (c->frequency < 100000000) ? 2 : 3; c 295 drivers/media/tuners/tda18250.c switch (c->delivery_system) { c 299 drivers/media/tuners/tda18250.c utmp = (c->frequency < 320000000) ? 20 : 16; c 300 drivers/media/tuners/tda18250.c utmp2 = (c->frequency < 320000000) ? 22 : 18; c 303 drivers/media/tuners/tda18250.c switch (c->bandwidth_hz) { c 305 drivers/media/tuners/tda18250.c if (c->frequency < 600000000) { c 308 drivers/media/tuners/tda18250.c } else if (c->frequency < 800000000) { c 317 drivers/media/tuners/tda18250.c utmp = (c->frequency < 320000000) ? 16 : 18; c 318 drivers/media/tuners/tda18250.c utmp2 = (c->frequency < 320000000) ? 18 : 20; c 333 drivers/media/tuners/tda18250.c switch (c->delivery_system) { c 348 drivers/media/tuners/tda18250.c (c->frequency > 800000000) ? 0x40 : 0x20); c 353 drivers/media/tuners/tda18250.c switch (c->delivery_system) { c 357 drivers/media/tuners/tda18250.c utmp = (c->frequency < 320000000) ? 5 : 7; c 358 drivers/media/tuners/tda18250.c utmp2 = (c->frequency < 320000000) ? 10 : 12; c 370 drivers/media/tuners/tda18250.c switch (c->delivery_system) { c 374 drivers/media/tuners/tda18250.c if (c->bandwidth_hz == 8000000) c 377 drivers/media/tuners/tda18250.c utmp = (c->frequency < 320000000) ? 0x04 : 0x02; c 380 drivers/media/tuners/tda18250.c if (c->bandwidth_hz == 6000000) c 381 drivers/media/tuners/tda18250.c utmp = ((c->frequency > 172544000) && c 382 drivers/media/tuners/tda18250.c (c->frequency < 320000000)) ? 0x04 : 0x02; c 384 drivers/media/tuners/tda18250.c utmp = ((c->frequency > 320000000) && c 385 drivers/media/tuners/tda18250.c (c->frequency < 600000000)) ? 0x02 : 0x04; c 392 drivers/media/tuners/tda18250.c switch (c->delivery_system) { c 399 drivers/media/tuners/tda18250.c utmp = (c->frequency < 600000000) ? 
0 : 3; c 407 drivers/media/tuners/tda18250.c switch (c->delivery_system) { c 411 drivers/media/tuners/tda18250.c if (c->bandwidth_hz == 8000000) c 433 drivers/media/tuners/tda18250.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 447 drivers/media/tuners/tda18250.c fvco = lopd * scale * ((c->frequency / 1000) + dev->if_frequency); c 497 drivers/media/tuners/tda18250.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 534 drivers/media/tuners/tda18250.c c->delivery_system, c->frequency, c->bandwidth_hz); c 537 drivers/media/tuners/tda18250.c switch (c->delivery_system) { c 544 drivers/media/tuners/tda18250.c if (c->bandwidth_hz == 0) { c 547 drivers/media/tuners/tda18250.c } else if (c->bandwidth_hz <= 6000000) { c 550 drivers/media/tuners/tda18250.c } else if (c->bandwidth_hz <= 7000000) { c 553 drivers/media/tuners/tda18250.c } else if (c->bandwidth_hz <= 8000000) { c 563 drivers/media/tuners/tda18250.c if (c->bandwidth_hz == 0) { c 566 drivers/media/tuners/tda18250.c } else if (c->bandwidth_hz <= 6000000) { c 569 drivers/media/tuners/tda18250.c } else if (c->bandwidth_hz <= 8000000) { c 580 drivers/media/tuners/tda18250.c c->delivery_system); c 616 drivers/media/tuners/tda18250.c buf[0] = ((c->frequency / 1000) >> 16) & 0xff; c 617 drivers/media/tuners/tda18250.c buf[1] = ((c->frequency / 1000) >> 8) & 0xff; c 618 drivers/media/tuners/tda18250.c buf[2] = ((c->frequency / 1000) >> 0) & 0xff; c 922 drivers/media/tuners/tda18271-fe.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 923 drivers/media/tuners/tda18271-fe.c u32 delsys = c->delivery_system; c 924 drivers/media/tuners/tda18271-fe.c u32 bw = c->bandwidth_hz; c 925 drivers/media/tuners/tda18271-fe.c u32 freq = c->frequency; c 144 drivers/media/tuners/tda827x.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 155 drivers/media/tuners/tda827x.c if (c->bandwidth_hz == 0) { c 157 drivers/media/tuners/tda827x.c } else if (c->bandwidth_hz <= 6000000) { c 159 drivers/media/tuners/tda827x.c } else if (c->bandwidth_hz <= 7000000) { c 164 drivers/media/tuners/tda827x.c tuner_freq = c->frequency; c 208 drivers/media/tuners/tda827x.c priv->frequency = c->frequency; c 209 drivers/media/tuners/tda827x.c priv->bandwidth = c->bandwidth_hz; c 503 drivers/media/tuners/tda827x.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 519 drivers/media/tuners/tda827x.c if (c->bandwidth_hz == 0) { c 521 drivers/media/tuners/tda827x.c } else if (c->bandwidth_hz <= 6000000) { c 523 drivers/media/tuners/tda827x.c } else if (c->bandwidth_hz <= 7000000) { c 528 drivers/media/tuners/tda827x.c tuner_freq = c->frequency; c 530 drivers/media/tuners/tda827x.c switch (c->delivery_system) { c 636 drivers/media/tuners/tda827x.c priv->frequency = c->frequency; c 637 drivers/media/tuners/tda827x.c priv->bandwidth = c->bandwidth_hz; c 52 drivers/media/tuners/tda9887.c unsigned char c; c 143 drivers/media/tuners/tda9887.c .c = ( cDeemphasisON | c 154 drivers/media/tuners/tda9887.c .c = ( cDeemphasisON | c 165 drivers/media/tuners/tda9887.c .c = ( cDeemphasisON | c 176 drivers/media/tuners/tda9887.c .c = ( cDeemphasisON | c 187 drivers/media/tuners/tda9887.c .c = ( cTopDefault), c 195 drivers/media/tuners/tda9887.c .c = ( cTopDefault), c 205 drivers/media/tuners/tda9887.c .c = ( cTopDefault), c 214 drivers/media/tuners/tda9887.c .c = ( cDeemphasisON | c 225 drivers/media/tuners/tda9887.c .c = ( cDeemphasisON | c 236 drivers/media/tuners/tda9887.c .c = ( cDeemphasisON | c 249 drivers/media/tuners/tda9887.c 
.c = ( cDeemphasisOFF | c 261 drivers/media/tuners/tda9887.c .c = ( cDeemphasisON | c 428 drivers/media/tuners/tda9887.c buf[2] = norm->c; c 79 drivers/media/tuners/tua9001.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 86 drivers/media/tuners/tua9001.c c->delivery_system, c->frequency, c->bandwidth_hz); c 88 drivers/media/tuners/tua9001.c switch (c->delivery_system) { c 90 drivers/media/tuners/tua9001.c switch (c->bandwidth_hz) { c 116 drivers/media/tuners/tua9001.c data[1].val = div_u64((u64) (c->frequency - 150000000) * 48, 1000000); c 890 drivers/media/tuners/tuner-simple.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 891 drivers/media/tuners/tuner-simple.c u32 delsys = c->delivery_system; c 892 drivers/media/tuners/tuner-simple.c u32 bw = c->bandwidth_hz; c 899 drivers/media/tuners/tuner-simple.c frequency = simple_dvb_configure(fe, buf+1, delsys, c->frequency, bw); c 906 drivers/media/tuners/tuner-simple.c priv->bandwidth = c->bandwidth_hz; c 913 drivers/media/tuners/tuner-simple.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 914 drivers/media/tuners/tuner-simple.c u32 delsys = c->delivery_system; c 915 drivers/media/tuners/tuner-simple.c u32 bw = c->bandwidth_hz; c 916 drivers/media/tuners/tuner-simple.c u32 freq = c->frequency; c 1190 drivers/media/tuners/tuner-xc2028.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 1191 drivers/media/tuners/tuner-xc2028.c u32 delsys = c->delivery_system; c 1192 drivers/media/tuners/tuner-xc2028.c u32 bw = c->bandwidth_hz; c 1245 drivers/media/tuners/tuner-xc2028.c if (c->frequency < 470000000) c 1252 drivers/media/tuners/tuner-xc2028.c if (c->frequency < 470000000) c 1282 drivers/media/tuners/tuner-xc2028.c return generic_set_freq(fe, c->frequency, c 1140 drivers/media/tuners/xc4000.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 1141 drivers/media/tuners/xc4000.c u32 delsys = c->delivery_system; c 1142 drivers/media/tuners/xc4000.c u32 bw = c->bandwidth_hz; c 1147 drivers/media/tuners/xc4000.c dprintk(1, "%s() frequency=%d (Hz)\n", __func__, c->frequency); c 1170 drivers/media/tuners/xc4000.c if (c->frequency < 400000000) { c 1198 drivers/media/tuners/xc4000.c priv->freq_hz = c->frequency - priv->freq_offset; c 1207 drivers/media/tuners/xc4000.c priv->bandwidth = c->bandwidth_hz; c 369 drivers/media/usb/as102/as102_usb_drv.c as102_dev->bus_adap.cmd = &as102_dev->bus_adap.token.usb.c; c 44 drivers/media/usb/as102/as102_usb_drv.h struct as10x_cmd_t c; c 462 drivers/media/usb/as102/as10x_cmd.h struct as10x_cmd_t c; c 345 drivers/media/usb/au0828/au0828-i2c.c static void do_i2c_scan(char *name, struct i2c_client *c) c 351 drivers/media/usb/au0828/au0828-i2c.c c->addr = i; c 352 drivers/media/usb/au0828/au0828-i2c.c rc = i2c_master_recv(c, &buf, 0); c 26 drivers/media/usb/cx231xx/cx231xx-input.c rc = i2c_master_recv(ir->c, &cmd, 1); c 218 drivers/media/usb/dvb-usb-v2/lmedm04.c struct dtv_frontend_properties *c; c 224 drivers/media/usb/dvb-usb-v2/lmedm04.c c = &fe->dtv_property_cache; c 226 drivers/media/usb/dvb-usb-v2/lmedm04.c c->block_count.len = 1; c 227 drivers/media/usb/dvb-usb-v2/lmedm04.c c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 228 drivers/media/usb/dvb-usb-v2/lmedm04.c c->block_error.len = 1; c 229 drivers/media/usb/dvb-usb-v2/lmedm04.c c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 230 drivers/media/usb/dvb-usb-v2/lmedm04.c c->post_bit_count.len = 1; c 231 drivers/media/usb/dvb-usb-v2/lmedm04.c c->post_bit_count.stat[0].scale = 
FE_SCALE_NOT_AVAILABLE; c 232 drivers/media/usb/dvb-usb-v2/lmedm04.c c->post_bit_error.len = 1; c 233 drivers/media/usb/dvb-usb-v2/lmedm04.c c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 236 drivers/media/usb/dvb-usb-v2/lmedm04.c c->strength.len = 1; c 237 drivers/media/usb/dvb-usb-v2/lmedm04.c c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 238 drivers/media/usb/dvb-usb-v2/lmedm04.c c->cnr.len = 1; c 239 drivers/media/usb/dvb-usb-v2/lmedm04.c c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c 258 drivers/media/usb/dvb-usb-v2/lmedm04.c c->strength.len = 1; c 259 drivers/media/usb/dvb-usb-v2/lmedm04.c c->strength.stat[0].scale = FE_SCALE_RELATIVE; c 260 drivers/media/usb/dvb-usb-v2/lmedm04.c c->strength.stat[0].uvalue = (u64)s_tmp; c 262 drivers/media/usb/dvb-usb-v2/lmedm04.c c->cnr.len = 1; c 263 drivers/media/usb/dvb-usb-v2/lmedm04.c c->cnr.stat[0].scale = FE_SCALE_RELATIVE; c 264 drivers/media/usb/dvb-usb-v2/lmedm04.c c->cnr.stat[0].uvalue = (u64)c_tmp; c 896 drivers/media/usb/dvb-usb-v2/lmedm04.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 902 drivers/media/usb/dvb-usb-v2/lmedm04.c if (c->strength.stat[0].scale == FE_SCALE_RELATIVE) c 903 drivers/media/usb/dvb-usb-v2/lmedm04.c *strength = (u16)c->strength.stat[0].uvalue; c 912 drivers/media/usb/dvb-usb-v2/lmedm04.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 918 drivers/media/usb/dvb-usb-v2/lmedm04.c if (c->cnr.stat[0].scale == FE_SCALE_RELATIVE) c 919 drivers/media/usb/dvb-usb-v2/lmedm04.c *snr = (u16)c->cnr.stat[0].uvalue; c 264 drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 265 drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c u32 delsys = c->delivery_system; c 281 drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c switch (c->bandwidth_hz) { c 300 drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c ret = mxl1x1sf_tune_rf(fe, c->frequency, bw); c 304 drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c state->frequency = c->frequency; c 305 drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c state->bandwidth = c->bandwidth_hz; c 93 drivers/media/usb/dvb-usb/pctv452e.c u8 c; /* transaction counter, wraps around... */ c 116 drivers/media/usb/dvb-usb/pctv452e.c id = state->c++; c 419 drivers/media/usb/dvb-usb/pctv452e.c id = state->c++; c 535 drivers/media/usb/dvb-usb/pctv452e.c b0[1] = state->c++; c 544 drivers/media/usb/dvb-usb/pctv452e.c b0[1] = state->c++; c 571 drivers/media/usb/dvb-usb/pctv452e.c id = state->c++; c 730 drivers/media/usb/em28xx/em28xx-dvb.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 738 drivers/media/usb/em28xx/em28xx-dvb.c if (c->lna == 1) c 752 drivers/media/usb/em28xx/em28xx-dvb.c KBUILD_MODNAME, c->lna); c 759 drivers/media/usb/em28xx/em28xx-dvb.c struct dtv_frontend_properties *c = &fe->dtv_property_cache; c 764 drivers/media/usb/em28xx/em28xx-dvb.c if (c->lna == 1) c 616 drivers/media/usb/em28xx/em28xx-i2c.c unsigned char c; c 620 drivers/media/usb/em28xx/em28xx-i2c.c c = (char)len; c 623 drivers/media/usb/em28xx/em28xx-i2c.c c = *buf++; c 625 drivers/media/usb/em28xx/em28xx-i2c.c l = (l << 8) | c; c 421 drivers/media/usb/go7007/go7007-fw.c CODE_GEN(c, buf + 6); c 445 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, frame == PFRAME ? 0x2 : 0x3, 13); c 446 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0xffff, 16); c 447 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, go->format == V4L2_PIX_FMT_MPEG2 ? 
0x7 : 0x4, 4); c 449 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, go->format == V4L2_PIX_FMT_MPEG2 ? 0x7 : 0x4, 4); c 451 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0, 4); /* Is this supposed to be here?? */ c 452 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0, 3); /* What is this?? */ c 454 drivers/media/usb/go7007/go7007-fw.c j = 8 - (CODE_LENGTH(c) % 8); c 456 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0, j); c 459 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x1, 24); c 460 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0xb5, 8); c 461 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x844, 12); c 462 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, frame == PFRAME ? 0xff : 0x44, 8); c 464 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, pict_struct, 4); c 466 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x000, 11); c 468 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x200, 11); c 470 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x3, 4); c 471 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x20c, 11); c 474 drivers/media/usb/go7007/go7007-fw.c j = 8 - (CODE_LENGTH(c) % 8); c 476 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0, j); c 480 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 1, 24); c 481 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, i + 1, 8); c 482 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x2, 6); c 483 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x1, 1); c 484 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, mb_code, mb_len); c 486 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x1, 2); c 487 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, pict_struct == 1 ? 0x0 : 0x1, 1); c 490 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x3, 2); c 492 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, pict_struct == 1 ? 0x0 : 0x1, 1); c 494 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x3, 2); c 496 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x8, 11); c 497 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, addrinctab[j][0], addrinctab[j][1]); c 498 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, mb_code, mb_len); c 500 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x1, 2); c 501 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, pict_struct == 1 ? 0x0 : 0x1, 1); c 504 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x3, 2); c 506 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, pict_struct == 1 ? 0x0 : 0x1, 1); c 508 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x3, 2); c 511 drivers/media/usb/go7007/go7007-fw.c j = 8 - (CODE_LENGTH(c) % 8); c 513 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0, j); c 516 drivers/media/usb/go7007/go7007-fw.c i = CODE_LENGTH(c) + 4 * 8; c 527 drivers/media/usb/go7007/go7007-fw.c CODE_GEN(c, buf + 6); c 575 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, go->width, 12); c 576 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, go->height, 12); c 577 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, aspect_ratio, 4); c 578 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, picture_rate, 4); c 579 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, go->format == V4L2_PIX_FMT_MPEG2 ? 20000 : 0x3ffff, 18); c 580 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 1, 1); c 581 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, go->format == V4L2_PIX_FMT_MPEG2 ? 
112 : 20, 10); c 582 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0, 3); c 585 drivers/media/usb/go7007/go7007-fw.c i = 8 - (CODE_LENGTH(c) % 8); c 587 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0, i); c 590 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x1, 24); c 591 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0xb5, 8); c 592 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x148, 12); c 594 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x20001, 20); c 596 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0xa0001, 20); c 597 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0, 16); c 600 drivers/media/usb/go7007/go7007-fw.c i = 8 - (CODE_LENGTH(c) % 8); c 602 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0, i); c 605 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x1, 24); c 606 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0xb52, 12); c 607 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, go->standard == GO7007_STD_NTSC ? 2 : 1, 3); c 608 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x105, 9); c 609 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x505, 16); c 610 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, go->width, 14); c 611 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 1, 1); c 612 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, go->height, 14); c 615 drivers/media/usb/go7007/go7007-fw.c i = 8 - (CODE_LENGTH(c) % 8); c 617 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0, i); c 621 drivers/media/usb/go7007/go7007-fw.c i = CODE_LENGTH(c) + 4 * 8; c 721 drivers/media/usb/go7007/go7007-fw.c CODE_GEN(c, buf + 6); c 724 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, frame == PFRAME ? 0x1 : 0x2, 2); c 726 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x1, 1); c 727 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x1, 2); c 728 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0, vti_bitlen(go)); c 729 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x3, 2); c 731 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0, 1); c 732 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0xc, 11); c 734 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x4, 3); c 739 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x1, 1); c 742 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x47, 8); c 745 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x27, 7); c 748 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x5f, 8); c 757 drivers/media/usb/go7007/go7007-fw.c i = 8 - (CODE_LENGTH(c) % 8); c 758 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0, 1); c 759 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, (1 << (i - 1)) - 1, i - 1); c 761 drivers/media/usb/go7007/go7007-fw.c i = CODE_LENGTH(c) + 4 * 8; c 779 drivers/media/usb/go7007/go7007-fw.c CODE_GEN(c, buf + 2 + sizeof(head)); c 794 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x191, 17); c 795 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, aspect_ratio, 4); c 796 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x1, 4); c 797 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, fps, 16); c 798 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x3, 2); c 799 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 1001, vti_bitlen(go)); c 800 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 1, 1); c 801 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, go->width, 13); c 802 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 1, 1); c 803 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, go->height, 13); c 804 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0x2830, 14); c 807 drivers/media/usb/go7007/go7007-fw.c i = 8 
- (CODE_LENGTH(c) % 8); c 808 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, 0, 1); c 809 drivers/media/usb/go7007/go7007-fw.c CODE_ADD(c, (1 << (i - 1)) - 1, i - 1); c 811 drivers/media/usb/go7007/go7007-fw.c i = CODE_LENGTH(c) + sizeof(head) * 8; c 438 drivers/media/usb/gspca/cpia1.c u8 a, u8 b, u8 c, u8 d) c 467 drivers/media/usb/gspca/cpia1.c cmd[4] = c; c 560 drivers/media/usb/gspca/cpia1.c u8 a, u8 b, u8 c, u8 d, c 570 drivers/media/usb/gspca/cpia1.c cmd[4] = c; c 397 drivers/media/usb/gspca/gl860/gl860-mi2020.c u8 c; c 399 drivers/media/usb/gspca/gl860/gl860-mi2020.c ctrl_in(gspca_dev, 0xc0, 2, 0x0000, 0x0004, 1, &c); c 400 drivers/media/usb/gspca/gl860/gl860-mi2020.c ctrl_in(gspca_dev, 0xc0, 2, 0x0000, 0x0004, 1, &c); c 406 drivers/media/usb/gspca/gl860/gl860-mi2020.c ctrl_in(gspca_dev, 0xc0, 2, 0x7a00, 0x8030, 1, &c); c 457 drivers/media/usb/gspca/gl860/gl860-mi2020.c u8 c; c 567 drivers/media/usb/gspca/gl860/gl860-mi2020.c ctrl_in(gspca_dev, 0xc0, 2, 0x0000, 0x0000, 1, &c); c 325 drivers/media/usb/hdpvr/hdpvr-video.c uint c = 0; c 351 drivers/media/usb/hdpvr/hdpvr-video.c while (buf && ++c < 500 && c 358 drivers/media/usb/hdpvr/hdpvr-video.c "%2d: got %d bytes\n", c, actual_length); c 362 drivers/media/usb/hdpvr/hdpvr-video.c "used %d urbs to empty device buffers\n", c-1); c 353 drivers/media/usb/pwc/pwc-dec23.c const int *c = src; c 356 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[0] >> scalebits]; c 357 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[1] >> scalebits]; c 358 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[2] >> scalebits]; c 359 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[3] >> scalebits]; c 362 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[4] >> scalebits]; c 363 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[5] >> scalebits]; c 364 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[6] >> scalebits]; c 365 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[7] >> scalebits]; c 368 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[8] >> scalebits]; c 369 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[9] >> scalebits]; c 370 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[10] >> scalebits]; c 371 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[11] >> scalebits]; c 374 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[12] >> scalebits]; c 375 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[13] >> scalebits]; c 376 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[14] >> scalebits]; c 377 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[15] >> scalebits]; c 380 drivers/media/usb/pwc/pwc-dec23.c const int *c = src; c 382 drivers/media/usb/pwc/pwc-dec23.c for (i = 0; i < 4; i++, c++) c 383 drivers/media/usb/pwc/pwc-dec23.c *d++ = CLAMP((*c) >> scalebits); c 386 drivers/media/usb/pwc/pwc-dec23.c for (i = 0; i < 4; i++, c++) c 387 drivers/media/usb/pwc/pwc-dec23.c *d++ = CLAMP((*c) >> scalebits); c 390 drivers/media/usb/pwc/pwc-dec23.c for (i = 0; i < 4; i++, c++) c 391 drivers/media/usb/pwc/pwc-dec23.c *d++ = CLAMP((*c) >> scalebits); c 394 drivers/media/usb/pwc/pwc-dec23.c for (i = 0; i < 4; i++, c++) c 395 drivers/media/usb/pwc/pwc-dec23.c *d++ = CLAMP((*c) >> scalebits); c 408 drivers/media/usb/pwc/pwc-dec23.c const int *c = src; c 411 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[0] >> scalebits]; c 412 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[4] >> scalebits]; c 413 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[1] >> scalebits]; c 414 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[5] >> scalebits]; c 415 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[2] >> scalebits]; c 416 
drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[6] >> scalebits]; c 417 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[3] >> scalebits]; c 418 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[7] >> scalebits]; c 421 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[12] >> scalebits]; c 422 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[8] >> scalebits]; c 423 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[13] >> scalebits]; c 424 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[9] >> scalebits]; c 425 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[14] >> scalebits]; c 426 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[10] >> scalebits]; c 427 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[15] >> scalebits]; c 428 drivers/media/usb/pwc/pwc-dec23.c *d++ = cm[c[11] >> scalebits]; c 108 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c u8 c; /* transaction counter, wraps around... */ c 199 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c u8 id = ++ttusb->c; c 309 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c b[1] = ++ttusb->c; c 317 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c b[1] = ++ttusb->c; c 326 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c b[1] = ++ttusb->c; c 347 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c u8 b[] = { 0xaa, ++ttusb->c, 0x22, 4, chan_id, filter_type, c 359 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c u8 b[] = { 0xaa, ++ttusb->c, 0x23, 1, channel_id }; c 388 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c u8 b[] = { 0xaa, ++ttusb->c, 0x25, 1, filter_id }; c 397 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c u8 b0[] = { 0xaa, ++ttusb->c, 0x15, 1, 0 }; c 398 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c u8 b1[] = { 0xaa, ++ttusb->c, 0x15, 1, 1 }; c 399 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c u8 b2[] = { 0xaa, ++ttusb->c, 0x32, 1, 0 }; c 402 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c { 0xaa, ++ttusb->c, 0x31, 5, 0x10, 0x02, 0x01, 0x00, 0x1e }; c 404 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c { 0x55, ttusb->c, 0x31, 4, 0x10, 0x02, 0x01, 0x00, 0x1e }; c 406 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c u8 get_version[] = { 0xaa, ++ttusb->c, 0x17, 5, 0, 0, 0, 0, 0 }; c 408 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c { 0xaa, ++ttusb->c, 0x26, 28, 0, 0, 0, 0, 0 }; c 472 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c u8 b[12] = { 0xaa, ++ttusb->c, 0x18 }; c 494 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c u8 b[] = { 0xaa, ++ttusb->c, 0x16, 5, /*power: */ 1, c 532 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c u8 b[] = { 0xaa, ++ttusb->c, 0x19, 1, freq }; c 1650 drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c ttusb->c = 0; c 386 drivers/media/usb/ttusb-dec/ttusb_dec.c u8 c[COMMAND_PACKET_SIZE]; c 393 drivers/media/usb/ttusb-dec/ttusb_dec.c result = ttusb_dec_send_command(dec, 0x08, 0, NULL, &c_length, c); c 399 drivers/media/usb/ttusb-dec/ttusb_dec.c memcpy(&tmp, c, 4); c 403 drivers/media/usb/ttusb-dec/ttusb_dec.c memcpy(&tmp, &c[4], 4); c 407 drivers/media/usb/ttusb-dec/ttusb_dec.c memcpy(&tmp, &c[8], 4); c 1046 drivers/media/usb/ttusb-dec/ttusb_dec.c u8 c[COMMAND_PACKET_SIZE]; c 1061 drivers/media/usb/ttusb-dec/ttusb_dec.c &c_length, c); c 1069 drivers/media/usb/ttusb-dec/ttusb_dec.c finfo->stream_id = c[1]; c 179 drivers/media/v4l2-core/tuner-core.c static void set_tv_freq(struct i2c_client *c, unsigned int freq); c 180 drivers/media/v4l2-core/tuner-core.c static void set_radio_freq(struct i2c_client *c, unsigned int freq); c 297 drivers/media/v4l2-core/tuner-core.c static void set_type(struct i2c_client *c, 
unsigned int type, c 301 drivers/media/v4l2-core/tuner-core.c struct tuner *t = to_tuner(i2c_get_clientdata(c)); c 308 drivers/media/v4l2-core/tuner-core.c dprintk("tuner 0x%02x: Tuner type absent\n", c->addr); c 354 drivers/media/v4l2-core/tuner-core.c i2c_master_send(c, buffer, 4); c 358 drivers/media/v4l2-core/tuner-core.c i2c_master_send(c, buffer, 4); c 368 drivers/media/v4l2-core/tuner-core.c i2c_master_send(c, buffer, 4); c 489 drivers/media/v4l2-core/tuner-core.c set_radio_freq(c, t->radio_freq); c 491 drivers/media/v4l2-core/tuner-core.c set_tv_freq(c, t->tv_freq); c 495 drivers/media/v4l2-core/tuner-core.c c->adapter->name, c->dev.driver->name, c->addr << 1, type, c 524 drivers/media/v4l2-core/tuner-core.c struct i2c_client *c = v4l2_get_subdevdata(sd); c 534 drivers/media/v4l2-core/tuner-core.c (tun_setup->addr == c->addr)) { c 535 drivers/media/v4l2-core/tuner-core.c set_type(c, tun_setup->type, tun_setup->mode_mask, c 891 drivers/media/v4l2-core/tuner-core.c static void set_tv_freq(struct i2c_client *c, unsigned int freq) c 893 drivers/media/v4l2-core/tuner-core.c struct tuner *t = to_tuner(i2c_get_clientdata(c)); c 1032 drivers/media/v4l2-core/tuner-core.c static void set_radio_freq(struct i2c_client *c, unsigned int freq) c 1034 drivers/media/v4l2-core/tuner-core.c struct tuner *t = to_tuner(i2c_get_clientdata(c)); c 1324 drivers/media/v4l2-core/tuner-core.c struct i2c_client *c = to_i2c_client(dev); c 1325 drivers/media/v4l2-core/tuner-core.c struct tuner *t = to_tuner(i2c_get_clientdata(c)); c 1340 drivers/media/v4l2-core/tuner-core.c struct i2c_client *c = to_i2c_client(dev); c 1341 drivers/media/v4l2-core/tuner-core.c struct tuner *t = to_tuner(i2c_get_clientdata(c)); c 139 drivers/media/v4l2-core/v4l2-compat-ioctl32.c struct v4l2_rect c; c 185 drivers/media/v4l2-core/v4l2-compat-ioctl32.c if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c))) c 219 drivers/media/v4l2-core/v4l2-compat-ioctl32.c if (copy_in_user(&uclips->c, &kclips->c, sizeof(uclips->c))) c 1836 drivers/media/v4l2-core/v4l2-ctrls.c static int ptr_to_user(struct v4l2_ext_control *c, c 1843 drivers/media/v4l2-core/v4l2-ctrls.c return copy_to_user(c->ptr, ptr.p, c->size) ? c 1849 drivers/media/v4l2-core/v4l2-ctrls.c if (c->size < len + 1) { c 1850 drivers/media/v4l2-core/v4l2-ctrls.c c->size = ctrl->elem_size; c 1853 drivers/media/v4l2-core/v4l2-ctrls.c return copy_to_user(c->string, ptr.p_char, len + 1) ? c 1856 drivers/media/v4l2-core/v4l2-ctrls.c c->value64 = *ptr.p_s64; c 1859 drivers/media/v4l2-core/v4l2-ctrls.c c->value = *ptr.p_s32; c 1866 drivers/media/v4l2-core/v4l2-ctrls.c static int cur_to_user(struct v4l2_ext_control *c, c 1869 drivers/media/v4l2-core/v4l2-ctrls.c return ptr_to_user(c, ctrl, ctrl->p_cur); c 1873 drivers/media/v4l2-core/v4l2-ctrls.c static int new_to_user(struct v4l2_ext_control *c, c 1876 drivers/media/v4l2-core/v4l2-ctrls.c return ptr_to_user(c, ctrl, ctrl->p_new); c 1880 drivers/media/v4l2-core/v4l2-ctrls.c static int req_to_user(struct v4l2_ext_control *c, c 1883 drivers/media/v4l2-core/v4l2-ctrls.c return ptr_to_user(c, ref->ctrl, ref->p_req); c 1887 drivers/media/v4l2-core/v4l2-ctrls.c static int def_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) c 1894 drivers/media/v4l2-core/v4l2-ctrls.c return ptr_to_user(c, ctrl, ctrl->p_new); c 1898 drivers/media/v4l2-core/v4l2-ctrls.c static int user_to_ptr(struct v4l2_ext_control *c, c 1909 drivers/media/v4l2-core/v4l2-ctrls.c ret = copy_from_user(ptr.p, c->ptr, c->size) ? 
-EFAULT : 0; c 1912 drivers/media/v4l2-core/v4l2-ctrls.c for (idx = c->size / ctrl->elem_size; idx < ctrl->elems; idx++) c 1919 drivers/media/v4l2-core/v4l2-ctrls.c *ptr.p_s64 = c->value64; c 1922 drivers/media/v4l2-core/v4l2-ctrls.c size = c->size; c 1927 drivers/media/v4l2-core/v4l2-ctrls.c ret = copy_from_user(ptr.p_char, c->string, size) ? -EFAULT : 0; c 1939 drivers/media/v4l2-core/v4l2-ctrls.c *ptr.p_s32 = c->value; c 1946 drivers/media/v4l2-core/v4l2-ctrls.c static int user_to_new(struct v4l2_ext_control *c, c 1949 drivers/media/v4l2-core/v4l2-ctrls.c return user_to_ptr(c, ctrl, ctrl->p_new); c 3306 drivers/media/v4l2-core/v4l2-ctrls.c struct v4l2_ext_control *c = &cs->controls[i]; c 3309 drivers/media/v4l2-core/v4l2-ctrls.c u32 id = c->id & V4L2_CTRL_ID_MASK; c 3349 drivers/media/v4l2-core/v4l2-ctrls.c if (c->size < tot_size) { c 3355 drivers/media/v4l2-core/v4l2-ctrls.c c->size = tot_size; c 3360 drivers/media/v4l2-core/v4l2-ctrls.c id, c->size, tot_size); c 3363 drivers/media/v4l2-core/v4l2-ctrls.c c->size = tot_size; c 3456 drivers/media/v4l2-core/v4l2-ctrls.c int (*ctrl_to_user)(struct v4l2_ext_control *c, c 3590 drivers/media/v4l2-core/v4l2-ctrls.c static int get_ctrl(struct v4l2_ctrl *ctrl, struct v4l2_ext_control *c) c 3612 drivers/media/v4l2-core/v4l2-ctrls.c new_to_user(c, ctrl); c 3614 drivers/media/v4l2-core/v4l2-ctrls.c cur_to_user(c, ctrl); c 3623 drivers/media/v4l2-core/v4l2-ctrls.c struct v4l2_ext_control c; c 3628 drivers/media/v4l2-core/v4l2-ctrls.c ret = get_ctrl(ctrl, &c); c 3629 drivers/media/v4l2-core/v4l2-ctrls.c control->value = c.value; c 3636 drivers/media/v4l2-core/v4l2-ctrls.c struct v4l2_ext_control c; c 3640 drivers/media/v4l2-core/v4l2-ctrls.c c.value = 0; c 3641 drivers/media/v4l2-core/v4l2-ctrls.c get_ctrl(ctrl, &c); c 3642 drivers/media/v4l2-core/v4l2-ctrls.c return c.value; c 3648 drivers/media/v4l2-core/v4l2-ctrls.c struct v4l2_ext_control c; c 3652 drivers/media/v4l2-core/v4l2-ctrls.c c.value64 = 0; c 3653 drivers/media/v4l2-core/v4l2-ctrls.c get_ctrl(ctrl, &c); c 3654 drivers/media/v4l2-core/v4l2-ctrls.c return c.value64; c 4022 drivers/media/v4l2-core/v4l2-ctrls.c struct v4l2_ext_control *c) c 4027 drivers/media/v4l2-core/v4l2-ctrls.c user_to_new(c, ctrl); c 4030 drivers/media/v4l2-core/v4l2-ctrls.c cur_to_user(c, ctrl); c 4039 drivers/media/v4l2-core/v4l2-ctrls.c struct v4l2_ext_control c = { control->id }; c 4048 drivers/media/v4l2-core/v4l2-ctrls.c c.value = control->value; c 4049 drivers/media/v4l2-core/v4l2-ctrls.c ret = set_ctrl_lock(fh, ctrl, &c); c 4050 drivers/media/v4l2-core/v4l2-ctrls.c control->value = c.value; c 864 drivers/media/v4l2-core/v4l2-dv-timings.c struct v4l2_hdmi_colorimetry c = { c 881 drivers/media/v4l2-core/v4l2-dv-timings.c c.colorspace = V4L2_COLORSPACE_OPRGB; c 882 drivers/media/v4l2-core/v4l2-dv-timings.c c.xfer_func = V4L2_XFER_FUNC_OPRGB; c 885 drivers/media/v4l2-core/v4l2-dv-timings.c c.colorspace = V4L2_COLORSPACE_BT2020; c 886 drivers/media/v4l2-core/v4l2-dv-timings.c c.xfer_func = V4L2_XFER_FUNC_709; c 897 drivers/media/v4l2-core/v4l2-dv-timings.c c.quantization = V4L2_QUANTIZATION_LIM_RANGE; c 903 drivers/media/v4l2-core/v4l2-dv-timings.c c.quantization = V4L2_QUANTIZATION_LIM_RANGE; c 910 drivers/media/v4l2-core/v4l2-dv-timings.c c.quantization = V4L2_QUANTIZATION_LIM_RANGE; c 916 drivers/media/v4l2-core/v4l2-dv-timings.c c.colorspace = V4L2_COLORSPACE_SMPTE170M; c 917 drivers/media/v4l2-core/v4l2-dv-timings.c c.ycbcr_enc = V4L2_YCBCR_ENC_601; c 919 drivers/media/v4l2-core/v4l2-dv-timings.c c.colorspace = 
V4L2_COLORSPACE_REC709; c 920 drivers/media/v4l2-core/v4l2-dv-timings.c c.ycbcr_enc = V4L2_YCBCR_ENC_709; c 922 drivers/media/v4l2-core/v4l2-dv-timings.c c.xfer_func = V4L2_XFER_FUNC_709; c 925 drivers/media/v4l2-core/v4l2-dv-timings.c c.colorspace = V4L2_COLORSPACE_SMPTE170M; c 926 drivers/media/v4l2-core/v4l2-dv-timings.c c.ycbcr_enc = V4L2_YCBCR_ENC_601; c 927 drivers/media/v4l2-core/v4l2-dv-timings.c c.xfer_func = V4L2_XFER_FUNC_709; c 930 drivers/media/v4l2-core/v4l2-dv-timings.c c.colorspace = V4L2_COLORSPACE_REC709; c 931 drivers/media/v4l2-core/v4l2-dv-timings.c c.ycbcr_enc = V4L2_YCBCR_ENC_709; c 932 drivers/media/v4l2-core/v4l2-dv-timings.c c.xfer_func = V4L2_XFER_FUNC_709; c 937 drivers/media/v4l2-core/v4l2-dv-timings.c c.colorspace = V4L2_COLORSPACE_REC709; c 938 drivers/media/v4l2-core/v4l2-dv-timings.c c.ycbcr_enc = V4L2_YCBCR_ENC_XV709; c 939 drivers/media/v4l2-core/v4l2-dv-timings.c c.xfer_func = V4L2_XFER_FUNC_709; c 942 drivers/media/v4l2-core/v4l2-dv-timings.c c.colorspace = V4L2_COLORSPACE_REC709; c 943 drivers/media/v4l2-core/v4l2-dv-timings.c c.ycbcr_enc = V4L2_YCBCR_ENC_XV601; c 944 drivers/media/v4l2-core/v4l2-dv-timings.c c.xfer_func = V4L2_XFER_FUNC_709; c 947 drivers/media/v4l2-core/v4l2-dv-timings.c c.colorspace = V4L2_COLORSPACE_SRGB; c 948 drivers/media/v4l2-core/v4l2-dv-timings.c c.ycbcr_enc = V4L2_YCBCR_ENC_601; c 949 drivers/media/v4l2-core/v4l2-dv-timings.c c.xfer_func = V4L2_XFER_FUNC_SRGB; c 952 drivers/media/v4l2-core/v4l2-dv-timings.c c.colorspace = V4L2_COLORSPACE_OPRGB; c 953 drivers/media/v4l2-core/v4l2-dv-timings.c c.ycbcr_enc = V4L2_YCBCR_ENC_601; c 954 drivers/media/v4l2-core/v4l2-dv-timings.c c.xfer_func = V4L2_XFER_FUNC_OPRGB; c 957 drivers/media/v4l2-core/v4l2-dv-timings.c c.colorspace = V4L2_COLORSPACE_BT2020; c 958 drivers/media/v4l2-core/v4l2-dv-timings.c c.ycbcr_enc = V4L2_YCBCR_ENC_BT2020; c 959 drivers/media/v4l2-core/v4l2-dv-timings.c c.xfer_func = V4L2_XFER_FUNC_709; c 962 drivers/media/v4l2-core/v4l2-dv-timings.c c.colorspace = V4L2_COLORSPACE_BT2020; c 963 drivers/media/v4l2-core/v4l2-dv-timings.c c.ycbcr_enc = V4L2_YCBCR_ENC_BT2020_CONST_LUM; c 964 drivers/media/v4l2-core/v4l2-dv-timings.c c.xfer_func = V4L2_XFER_FUNC_709; c 967 drivers/media/v4l2-core/v4l2-dv-timings.c c.colorspace = V4L2_COLORSPACE_REC709; c 968 drivers/media/v4l2-core/v4l2-dv-timings.c c.ycbcr_enc = V4L2_YCBCR_ENC_709; c 969 drivers/media/v4l2-core/v4l2-dv-timings.c c.xfer_func = V4L2_XFER_FUNC_709; c 982 drivers/media/v4l2-core/v4l2-dv-timings.c return c; c 151 drivers/media/v4l2-core/v4l2-flash-led-class.c static int v4l2_flash_g_volatile_ctrl(struct v4l2_ctrl *c) c 153 drivers/media/v4l2-core/v4l2-flash-led-class.c struct v4l2_flash *v4l2_flash = v4l2_ctrl_to_v4l2_flash(c); c 158 drivers/media/v4l2-core/v4l2-flash-led-class.c switch (c->id) { c 161 drivers/media/v4l2-core/v4l2-flash-led-class.c return v4l2_flash_update_led_brightness(v4l2_flash, c); c 170 drivers/media/v4l2-core/v4l2-flash-led-class.c c->val = fled_cdev->brightness.val; c 176 drivers/media/v4l2-core/v4l2-flash-led-class.c c->val = is_strobing; c 180 drivers/media/v4l2-core/v4l2-flash-led-class.c return led_get_flash_fault(fled_cdev, &c->val); c 193 drivers/media/v4l2-core/v4l2-flash-led-class.c static int v4l2_flash_s_ctrl(struct v4l2_ctrl *c) c 195 drivers/media/v4l2-core/v4l2-flash-led-class.c struct v4l2_flash *v4l2_flash = v4l2_ctrl_to_v4l2_flash(c); c 202 drivers/media/v4l2-core/v4l2-flash-led-class.c switch (c->id) { c 204 drivers/media/v4l2-core/v4l2-flash-led-class.c switch (c->val) { 
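The v4l2-dv-timings.c entries above (the v4l2_hdmi_colorimetry switch around lines 864-982) all follow one shape: a local struct is given defaults, refined field by field inside a switch, and returned by value. A minimal, self-contained sketch of that shape, using hypothetical names (enum pix_encoding, struct color_desc, describe()) rather than the real V4L2 types:

/*
 * Illustrative sketch only: hypothetical types standing in for the
 * "fill a local struct in a switch and return it by value" pattern
 * seen in the v4l2-dv-timings entries above.
 */
#include <stdio.h>

enum pix_encoding { ENC_RGB, ENC_YCBCR_601, ENC_YCBCR_709 };

struct color_desc {
	int colorspace;   /* hypothetical numeric IDs, not V4L2 constants */
	int ycbcr_enc;
	int xfer_func;
};

static struct color_desc describe(enum pix_encoding enc)
{
	struct color_desc c = { 0 };   /* defaults first, then refine per case */

	switch (enc) {
	case ENC_YCBCR_601:
		c.colorspace = 170;    /* e.g. an SMPTE170M-like space */
		c.ycbcr_enc  = 601;
		c.xfer_func  = 709;
		break;
	case ENC_YCBCR_709:
		c.colorspace = 709;    /* e.g. a REC709-like space */
		c.ycbcr_enc  = 709;
		c.xfer_func  = 709;
		break;
	case ENC_RGB:
	default:
		c.colorspace = 1;      /* e.g. sRGB */
		c.xfer_func  = 1;
		break;
	}
	return c;                      /* returned by value, as in the driver code */
}

int main(void)
{
	struct color_desc d = describe(ENC_YCBCR_709);
	printf("%d %d %d\n", d.colorspace, d.ycbcr_enc, d.xfer_func);
	return 0;
}

Returning the small struct by value keeps callers free of output parameters, which is why the driver fragments can simply assign c.colorspace, c.ycbcr_enc and c.xfer_func and then return c.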
c 239 drivers/media/v4l2-core/v4l2-flash-led-class.c external_strobe = (c->val == V4L2_FLASH_STROBE_SOURCE_EXTERNAL); c 264 drivers/media/v4l2-core/v4l2-flash-led-class.c return led_set_flash_timeout(fled_cdev, c->val); c 270 drivers/media/v4l2-core/v4l2-flash-led-class.c return led_set_flash_brightness(fled_cdev, c->val); c 273 drivers/media/v4l2-core/v4l2-flash-led-class.c v4l2_flash_set_led_brightness(v4l2_flash, c); c 286 drivers/media/v4l2-core/v4l2-flash-led-class.c struct v4l2_ctrl_config *c) c 288 drivers/media/v4l2-core/v4l2-flash-led-class.c c->min = s->min; c 289 drivers/media/v4l2-core/v4l2-flash-led-class.c c->max = s->max; c 290 drivers/media/v4l2-core/v4l2-flash-led-class.c c->step = s->step; c 291 drivers/media/v4l2-core/v4l2-flash-led-class.c c->def = s->val; c 533 drivers/media/v4l2-core/v4l2-ioctl.c const struct v4l2_captureparm *c = &p->parm.capture; c 536 drivers/media/v4l2-core/v4l2-ioctl.c c->capability, c->capturemode, c 537 drivers/media/v4l2-core/v4l2-ioctl.c c->timeperframe.numerator, c->timeperframe.denominator, c 538 drivers/media/v4l2-core/v4l2-ioctl.c c->extendedmode, c->readbuffers); c 541 drivers/media/v4l2-core/v4l2-ioctl.c const struct v4l2_outputparm *c = &p->parm.output; c 544 drivers/media/v4l2-core/v4l2-ioctl.c c->capability, c->outputmode, c 545 drivers/media/v4l2-core/v4l2-ioctl.c c->timeperframe.numerator, c->timeperframe.denominator, c 546 drivers/media/v4l2-core/v4l2-ioctl.c c->extendedmode, c->writebuffers); c 625 drivers/media/v4l2-core/v4l2-ioctl.c p->c.width, p->c.height, c 626 drivers/media/v4l2-core/v4l2-ioctl.c p->c.left, p->c.top); c 822 drivers/media/v4l2-core/v4l2-ioctl.c const struct v4l2_event_ctrl *c; c 833 drivers/media/v4l2-core/v4l2-ioctl.c c = &p->u.ctrl; c 835 drivers/media/v4l2-core/v4l2-ioctl.c c->changes, c->type); c 836 drivers/media/v4l2-core/v4l2-ioctl.c if (c->type == V4L2_CTRL_TYPE_INTEGER64) c 837 drivers/media/v4l2-core/v4l2-ioctl.c pr_cont("value64=%lld, ", c->value64); c 839 drivers/media/v4l2-core/v4l2-ioctl.c pr_cont("value=%d, ", c->value); c 841 drivers/media/v4l2-core/v4l2-ioctl.c c->flags, c->minimum, c->maximum, c 842 drivers/media/v4l2-core/v4l2-ioctl.c c->step, c->default_value); c 905 drivers/media/v4l2-core/v4l2-ioctl.c static int check_ext_ctrls(struct v4l2_ext_controls *c, int allow_priv) c 910 drivers/media/v4l2-core/v4l2-ioctl.c c->reserved[0] = 0; c 911 drivers/media/v4l2-core/v4l2-ioctl.c for (i = 0; i < c->count; i++) c 912 drivers/media/v4l2-core/v4l2-ioctl.c c->controls[i].reserved2[0] = 0; c 919 drivers/media/v4l2-core/v4l2-ioctl.c if (!allow_priv && c->which == V4L2_CID_PRIVATE_BASE) c 921 drivers/media/v4l2-core/v4l2-ioctl.c if (!c->which) c 924 drivers/media/v4l2-core/v4l2-ioctl.c for (i = 0; i < c->count; i++) { c 925 drivers/media/v4l2-core/v4l2-ioctl.c if (V4L2_CTRL_ID2WHICH(c->controls[i].id) != c->which) { c 926 drivers/media/v4l2-core/v4l2-ioctl.c c->error_idx = i; c 2317 drivers/media/v4l2-core/v4l2-ioctl.c p->c = s.r; c 2328 drivers/media/v4l2-core/v4l2-ioctl.c .r = p->c, c 1005 drivers/memory/tegra/tegra124-emc.c struct clk *c = data; c 1007 drivers/memory/tegra/tegra124-emc.c *rate = clk_get_rate(c); c 1014 drivers/memory/tegra/tegra124-emc.c struct clk *c = data; c 1016 drivers/memory/tegra/tegra124-emc.c return clk_set_rate(c, rate); c 144 drivers/mfd/ab8500-debugfs.c #define REG_FMT_DEC(c) ((c)->fmt & 0x1) c 145 drivers/mfd/ab8500-debugfs.c #define REG_FMT_HEX(c) (!REG_FMT_DEC(c)) c 76 drivers/mfd/htc-pasic3.c int c; c 78 drivers/mfd/htc-pasic3.c c = pasic3_read_register(dev, 
0x28); c 79 drivers/mfd/htc-pasic3.c pasic3_write_register(dev, 0x28, c & 0x7f); c 81 drivers/mfd/htc-pasic3.c dev_dbg(dev, "DS1WM OWM_EN low (active) %02x\n", c & 0x7f); c 88 drivers/mfd/htc-pasic3.c int c; c 90 drivers/mfd/htc-pasic3.c c = pasic3_read_register(dev, 0x28); c 91 drivers/mfd/htc-pasic3.c pasic3_write_register(dev, 0x28, c | 0x80); c 93 drivers/mfd/htc-pasic3.c dev_dbg(dev, "DS1WM OWM_EN high (inactive) %02x\n", c | 0x80); c 453 drivers/mfd/menelaus.c struct i2c_client *c = the_menelaus->client; c 463 drivers/mfd/menelaus.c dev_dbg(&c->dev, "Setting voltage '%s'" c 525 drivers/mfd/menelaus.c struct i2c_client *c = the_menelaus->client; c 536 drivers/mfd/menelaus.c dev_dbg(&c->dev, "Setting VCORE FLOOR to %d mV and ROOF to %d mV\n", c 734 drivers/mfd/menelaus.c struct i2c_client *c = the_menelaus->client; c 741 drivers/mfd/menelaus.c dev_dbg(&c->dev, "regulator sleep configuration: %02x\n", val); c 315 drivers/mfd/mfd-core.c static int mfd_remove_devices_fn(struct device *dev, void *c) c 319 drivers/mfd/mfd-core.c atomic_t **usage_count = c; c 951 drivers/mfd/tps65010.c struct i2c_client *c; c 957 drivers/mfd/tps65010.c c = the_tps->client; c 961 drivers/mfd/tps65010.c i2c_smbus_read_byte_data(c, TPS_VDCDC2)); c 963 drivers/mfd/tps65010.c status = i2c_smbus_write_byte_data(c, TPS_VDCDC2, value); c 970 drivers/mfd/tps65010.c i2c_smbus_read_byte_data(c, TPS_VDCDC2)); c 19 drivers/misc/altera-stapl/altera-jtag.c #define alt_jtag_io(a, b, c)\ c 20 drivers/misc/altera-stapl/altera-jtag.c astate->config->jtag_io(astate->config->dev, a, b, c); c 415 drivers/misc/cxl/guest.c char c; c 435 drivers/misc/cxl/guest.c c = *((char *) cr); c 436 drivers/misc/cxl/guest.c *val = c; c 468 drivers/misc/habanalabs/debugfs.c char *c; c 480 drivers/misc/habanalabs/debugfs.c c = strchr(kbuf, ' '); c 481 drivers/misc/habanalabs/debugfs.c if (!c) c 483 drivers/misc/habanalabs/debugfs.c *c = '\0'; c 489 drivers/misc/habanalabs/debugfs.c if (strncmp(c+1, "0x", 2)) c 491 drivers/misc/habanalabs/debugfs.c rc = kstrtoull(c+3, 16, &dev_entry->mmu_addr); c 22 drivers/misc/hmc6352.c static int compass_command(struct i2c_client *c, u8 cmd) c 24 drivers/misc/hmc6352.c int ret = i2c_master_send(c, &cmd, 1); c 26 drivers/misc/hmc6352.c dev_warn(&c->dev, "command '%c' failed.\n", cmd); c 33 drivers/misc/hmc6352.c struct i2c_client *c = to_i2c_client(dev); c 44 drivers/misc/hmc6352.c ret = compass_command(c, map[val]); c 95 drivers/misc/hpilo.c u64 c; c 98 drivers/misc/hpilo.c c = fifo_q->fifobar[fifo_q->head & fifo_q->imask]; c 99 drivers/misc/hpilo.c if (c & ENTRY_MASK_C) { c 101 drivers/misc/hpilo.c *entry = c & ENTRY_MASK_NOSTATE; c 104 drivers/misc/hpilo.c (c | ENTRY_MASK) + 1; c 118 drivers/misc/hpilo.c u64 c; c 121 drivers/misc/hpilo.c c = fifo_q->fifobar[fifo_q->head & fifo_q->imask]; c 122 drivers/misc/hpilo.c if (c & ENTRY_MASK_C) c 88 drivers/misc/ibmasm/ibmasm.h #define to_command(c) container_of(c, struct command, kref) c 1050 drivers/misc/ibmvmc.c size_t c = count; c 1111 drivers/misc/ibmvmc.c while (c > 0) { c 1112 drivers/misc/ibmvmc.c bytes = min_t(size_t, c, vmc_buffer->size); c 1119 drivers/misc/ibmvmc.c c -= bytes; c 48 drivers/misc/lis3lv02d/lis3lv02d_i2c.c struct i2c_client *c = lis3->bus_priv; c 49 drivers/misc/lis3lv02d/lis3lv02d_i2c.c return i2c_smbus_write_byte_data(c, reg, value); c 54 drivers/misc/lis3lv02d/lis3lv02d_i2c.c struct i2c_client *c = lis3->bus_priv; c 55 drivers/misc/lis3lv02d/lis3lv02d_i2c.c *v = i2c_smbus_read_byte_data(c, reg); c 62 drivers/misc/lis3lv02d/lis3lv02d_i2c.c 
struct i2c_client *c = lis3->bus_priv; c 64 drivers/misc/lis3lv02d/lis3lv02d_i2c.c return i2c_smbus_read_i2c_block_data(c, reg, len, v); c 302 drivers/misc/mic/card/mic_x100.c struct cpuinfo_x86 *c = &cpu_data(0); c 304 drivers/misc/mic/card/mic_x100.c if (!(c->x86 == 11 && c->x86_model == 1)) { c 431 drivers/misc/ocxl/config.c static bool char_allowed(int c) c 436 drivers/misc/ocxl/config.c if ((c >= 0x30 && c <= 0x39) /* digits */ || c 437 drivers/misc/ocxl/config.c (c >= 0x41 && c <= 0x5A) /* upper case */ || c 438 drivers/misc/ocxl/config.c (c >= 0x61 && c <= 0x7A) /* lower case */ || c 439 drivers/misc/ocxl/config.c c == 0 /* NULL */ || c 440 drivers/misc/ocxl/config.c c == 0x2D /* - */ || c 441 drivers/misc/ocxl/config.c c == 0x5F /* _ */ || c 442 drivers/misc/ocxl/config.c c == 0x2C /* , */) c 670 drivers/misc/pti.c static void pti_console_write(struct console *c, const char *buf, unsigned len) c 692 drivers/misc/pti.c static struct tty_driver *pti_console_device(struct console *c, int *index) c 694 drivers/misc/pti.c *index = c->index; c 707 drivers/misc/pti.c static int pti_console_setup(struct console *c, char *opts) c 17 drivers/misc/sgi-gru/gruhandles.c #define CLKS2NSEC(c) ((c) *1000000000 / local_cpu_data->itc_freq) c 21 drivers/misc/sgi-gru/gruhandles.c #define CLKS2NSEC(c) ((c) * 1000000 / tsc_khz) c 471 drivers/misc/sgi-gru/grutables.h #define get_tfm_for_cpu(g, c) \ c 472 drivers/misc/sgi-gru/grutables.h ((struct gru_tlb_fault_map *)get_tfm((g)->gs_gru_base_vaddr, (c))) c 486 drivers/misc/sgi-gru/grutables.h #define get_gru(b, c) (&gru_base[b]->bs_grus[c]) c 393 drivers/misc/vmw_vmci/vmci_context.c struct vmci_ctx *c, *context = NULL; c 399 drivers/misc/vmw_vmci/vmci_context.c list_for_each_entry_rcu(c, &ctx_list.head, list_item) { c 400 drivers/misc/vmw_vmci/vmci_context.c if (c->cid == cid) { c 409 drivers/misc/vmw_vmci/vmci_context.c context = c; c 15 drivers/mmc/core/card.h #define mmc_card_name(c) ((c)->cid.prod_name) c 16 drivers/mmc/core/card.h #define mmc_card_id(c) (dev_name(&(c)->dev)) c 27 drivers/mmc/core/card.h #define mmc_card_present(c) ((c)->state & MMC_STATE_PRESENT) c 28 drivers/mmc/core/card.h #define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY) c 29 drivers/mmc/core/card.h #define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR) c 30 drivers/mmc/core/card.h #define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC) c 31 drivers/mmc/core/card.h #define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED)) c 32 drivers/mmc/core/card.h #define mmc_card_suspended(c) ((c)->state & MMC_STATE_SUSPENDED) c 34 drivers/mmc/core/card.h #define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT) c 35 drivers/mmc/core/card.h #define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY) c 36 drivers/mmc/core/card.h #define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR) c 37 drivers/mmc/core/card.h #define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC) c 38 drivers/mmc/core/card.h #define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED) c 39 drivers/mmc/core/card.h #define mmc_card_set_suspended(c) ((c)->state |= MMC_STATE_SUSPENDED) c 40 drivers/mmc/core/card.h #define mmc_card_clr_suspended(c) ((c)->state &= ~MMC_STATE_SUSPENDED) c 185 drivers/mmc/core/card.h static inline int mmc_card_lenient_fn0(const struct mmc_card *c) c 187 drivers/mmc/core/card.h return c->quirks & MMC_QUIRK_LENIENT_FN0; c 190 drivers/mmc/core/card.h static inline int mmc_blksz_for_byte_mode(const struct mmc_card *c) c 
192 drivers/mmc/core/card.h return c->quirks & MMC_QUIRK_BLKSZ_FOR_BYTE_MODE; c 195 drivers/mmc/core/card.h static inline int mmc_card_disable_cd(const struct mmc_card *c) c 197 drivers/mmc/core/card.h return c->quirks & MMC_QUIRK_DISABLE_CD; c 200 drivers/mmc/core/card.h static inline int mmc_card_nonstd_func_interface(const struct mmc_card *c) c 202 drivers/mmc/core/card.h return c->quirks & MMC_QUIRK_NONSTD_FUNC_IF; c 205 drivers/mmc/core/card.h static inline int mmc_card_broken_byte_mode_512(const struct mmc_card *c) c 207 drivers/mmc/core/card.h return c->quirks & MMC_QUIRK_BROKEN_BYTE_MODE_512; c 210 drivers/mmc/core/card.h static inline int mmc_card_long_read_time(const struct mmc_card *c) c 212 drivers/mmc/core/card.h return c->quirks & MMC_QUIRK_LONG_READ_TIME; c 215 drivers/mmc/core/card.h static inline int mmc_card_broken_irq_polling(const struct mmc_card *c) c 217 drivers/mmc/core/card.h return c->quirks & MMC_QUIRK_BROKEN_IRQ_POLLING; c 220 drivers/mmc/core/card.h static inline int mmc_card_broken_hpi(const struct mmc_card *c) c 222 drivers/mmc/core/card.h return c->quirks & MMC_QUIRK_BROKEN_HPI; c 182 drivers/mmc/core/sdio_uart.c unsigned char c; c 183 drivers/mmc/core/sdio_uart.c c = sdio_readb(port->func, port->regs_offset + offset, NULL); c 184 drivers/mmc/core/sdio_uart.c return c; c 371 drivers/mmc/host/au1xmmc.c chan_tab_t *c = *((chan_tab_t **)chan); c 372 drivers/mmc/host/au1xmmc.c au1x_dma_chan_t *cp = c->chan_ptr; c 1075 drivers/mmc/host/mmci.c mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c) c 1089 drivers/mmc/host/mmci.c c |= host->variant->cmdreg_stop; c 1091 drivers/mmc/host/mmci.c c |= cmd->opcode | host->variant->cmdreg_cpsm_enable; c 1094 drivers/mmc/host/mmci.c c |= host->variant->cmdreg_lrsp_crc; c 1096 drivers/mmc/host/mmci.c c |= host->variant->cmdreg_srsp_crc; c 1098 drivers/mmc/host/mmci.c c |= host->variant->cmdreg_srsp; c 1101 drivers/mmc/host/mmci.c c |= MCI_CPSM_INTERRUPT; c 1104 drivers/mmc/host/mmci.c c |= host->variant->data_cmd_enable; c 1109 drivers/mmc/host/mmci.c writel(c, base + MMCICOMMAND); c 497 drivers/mmc/host/mxcmmc.c u32 a, b, c; c 520 drivers/mmc/host/mxcmmc.c c = mxcmci_readw(host, MMC_REG_RES_FIFO); c 521 drivers/mmc/host/mxcmmc.c cmd->resp[0] = a << 24 | b << 8 | c >> 8; c 407 drivers/mmc/host/omap.c struct dma_chan *c; c 411 drivers/mmc/host/omap.c c = host->dma_tx; c 414 drivers/mmc/host/omap.c c = host->dma_rx; c 416 drivers/mmc/host/omap.c if (c) { c 418 drivers/mmc/host/omap.c dmaengine_terminate_all(c); c 422 drivers/mmc/host/omap.c dev = c->device->dev; c 984 drivers/mmc/host/omap.c struct dma_chan *c; c 1001 drivers/mmc/host/omap.c c = host->dma_tx; c 1006 drivers/mmc/host/omap.c c = host->dma_rx; c 1012 drivers/mmc/host/omap.c if (!c) c 1028 drivers/mmc/host/omap.c if (dmaengine_slave_config(c, &cfg)) c 1034 drivers/mmc/host/omap.c host->sg_len = dma_map_sg(c->device->dev, data->sg, sg_len, c 1039 drivers/mmc/host/omap.c tx = dmaengine_prep_slave_sg(c, data->sg, host->sg_len, c 1076 drivers/mmc/host/omap.c struct dma_chan *c = host->data->flags & MMC_DATA_WRITE ? 
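The drivers/mmc/core/card.h fragments above are variations on a single idiom: card state and quirk bits live in plain flag words and are exposed through one-line test/set helpers. A small sketch of that idiom under hypothetical names (struct card, CARD_PRESENT, card_present(), ...), not the real MMC definitions:

/* Hypothetical flag-accessor sketch modelled on the card.h entries above. */
#include <stdio.h>

#define CARD_PRESENT     (1u << 0)
#define CARD_READONLY    (1u << 1)
#define QUIRK_BROKEN_HPI (1u << 0)

struct card {
	unsigned int state;    /* runtime state bits */
	unsigned int quirks;   /* per-device quirk bits */
};

/* test helpers return the raw masked bit, mirroring the macro style above */
static inline int card_present(const struct card *c)    { return c->state & CARD_PRESENT; }
static inline int card_readonly(const struct card *c)   { return c->state & CARD_READONLY; }
static inline int card_broken_hpi(const struct card *c) { return c->quirks & QUIRK_BROKEN_HPI; }

/* set/clear helpers OR the bit in or mask it out */
static inline void card_set_present(struct card *c)     { c->state |= CARD_PRESENT; }
static inline void card_clr_readonly(struct card *c)    { c->state &= ~CARD_READONLY; }

int main(void)
{
	struct card c = { .state = 0, .quirks = QUIRK_BROKEN_HPI };

	card_set_present(&c);
	printf("present=%d readonly=%d broken_hpi=%d\n",
	       !!card_present(&c), !!card_readonly(&c), !!card_broken_hpi(&c));
	return 0;
}

The !! in the printf normalises the masked value to 0/1, since the helpers return the raw masked bit rather than a boolean.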
c 1079 drivers/mmc/host/omap.c dma_async_issue_pending(c); c 1399 drivers/mmc/host/omap_hsmmc.c struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data); c 1401 drivers/mmc/host/omap_hsmmc.c dma_unmap_sg(c->device->dev, data->sg, data->sg_len, c 1417 drivers/mmc/host/omap_hsmmc.c struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data); c 1420 drivers/mmc/host/omap_hsmmc.c &host->next_data, c)) c 78 drivers/mmc/host/sdhci-acpi.c static inline void *sdhci_acpi_priv(struct sdhci_acpi_host *c) c 80 drivers/mmc/host/sdhci-acpi.c return (void *)c->private; c 83 drivers/mmc/host/sdhci-acpi.c static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag) c 85 drivers/mmc/host/sdhci-acpi.c return c->slot && (c->slot->flags & flag); c 170 drivers/mmc/host/sdhci-acpi.c struct sdhci_acpi_host *c = dev_get_drvdata(dev); c 171 drivers/mmc/host/sdhci-acpi.c struct intel_host *intel_host = sdhci_acpi_priv(c); c 381 drivers/mmc/host/sdhci-acpi.c struct sdhci_acpi_host *c = platform_get_drvdata(pdev); c 382 drivers/mmc/host/sdhci-acpi.c struct intel_host *intel_host = sdhci_acpi_priv(c); c 383 drivers/mmc/host/sdhci-acpi.c struct sdhci_host *host = c->host; c 403 drivers/mmc/host/sdhci-acpi.c struct sdhci_acpi_host *c = platform_get_drvdata(pdev); c 404 drivers/mmc/host/sdhci-acpi.c struct intel_host *intel_host = sdhci_acpi_priv(c); c 407 drivers/mmc/host/sdhci-acpi.c c->host->mmc->caps &= ~MMC_CAP_UHS_SDR25; c 410 drivers/mmc/host/sdhci-acpi.c c->host->mmc->caps &= ~MMC_CAP_UHS_SDR50; c 413 drivers/mmc/host/sdhci-acpi.c c->host->mmc->caps &= ~MMC_CAP_UHS_DDR50; c 416 drivers/mmc/host/sdhci-acpi.c c->host->mmc->caps &= ~MMC_CAP_UHS_SDR104; c 479 drivers/mmc/host/sdhci-acpi.c struct sdhci_acpi_host *c = platform_get_drvdata(pdev); c 480 drivers/mmc/host/sdhci-acpi.c struct sdhci_host *host = c->host; c 481 drivers/mmc/host/sdhci-acpi.c int *irq = sdhci_acpi_priv(c); c 500 drivers/mmc/host/sdhci-acpi.c struct sdhci_acpi_host *c = platform_get_drvdata(pdev); c 501 drivers/mmc/host/sdhci-acpi.c struct sdhci_host *host = c->host; c 503 drivers/mmc/host/sdhci-acpi.c int *irq = sdhci_acpi_priv(c); c 588 drivers/mmc/host/sdhci-acpi.c struct sdhci_acpi_host *c = platform_get_drvdata(pdev); c 589 drivers/mmc/host/sdhci-acpi.c struct sdhci_host *host = c->host; c 680 drivers/mmc/host/sdhci-acpi.c struct sdhci_acpi_host *c; c 725 drivers/mmc/host/sdhci-acpi.c c = sdhci_priv(host); c 726 drivers/mmc/host/sdhci-acpi.c c->host = host; c 727 drivers/mmc/host/sdhci-acpi.c c->slot = slot; c 728 drivers/mmc/host/sdhci-acpi.c c->pdev = pdev; c 729 drivers/mmc/host/sdhci-acpi.c c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM); c 731 drivers/mmc/host/sdhci-acpi.c platform_set_drvdata(pdev, c); c 748 drivers/mmc/host/sdhci-acpi.c if (c->slot) { c 749 drivers/mmc/host/sdhci-acpi.c if (c->slot->probe_slot) { c 750 drivers/mmc/host/sdhci-acpi.c err = c->slot->probe_slot(pdev, hid, uid); c 754 drivers/mmc/host/sdhci-acpi.c if (c->slot->chip) { c 755 drivers/mmc/host/sdhci-acpi.c host->ops = c->slot->chip->ops; c 756 drivers/mmc/host/sdhci-acpi.c host->quirks |= c->slot->chip->quirks; c 757 drivers/mmc/host/sdhci-acpi.c host->quirks2 |= c->slot->chip->quirks2; c 758 drivers/mmc/host/sdhci-acpi.c host->mmc->caps |= c->slot->chip->caps; c 759 drivers/mmc/host/sdhci-acpi.c host->mmc->caps2 |= c->slot->chip->caps2; c 760 drivers/mmc/host/sdhci-acpi.c host->mmc->pm_caps |= c->slot->chip->pm_caps; c 762 drivers/mmc/host/sdhci-acpi.c host->quirks |= c->slot->quirks; c 763 drivers/mmc/host/sdhci-acpi.c 
host->quirks2 |= c->slot->quirks2; c 764 drivers/mmc/host/sdhci-acpi.c host->mmc->caps |= c->slot->caps; c 765 drivers/mmc/host/sdhci-acpi.c host->mmc->caps2 |= c->slot->caps2; c 766 drivers/mmc/host/sdhci-acpi.c host->mmc->pm_caps |= c->slot->pm_caps; c 771 drivers/mmc/host/sdhci-acpi.c if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) { c 772 drivers/mmc/host/sdhci-acpi.c bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL); c 779 drivers/mmc/host/sdhci-acpi.c c->use_runtime_pm = false; c 787 drivers/mmc/host/sdhci-acpi.c if (c->slot && c->slot->setup_host) { c 788 drivers/mmc/host/sdhci-acpi.c err = c->slot->setup_host(pdev); c 797 drivers/mmc/host/sdhci-acpi.c if (c->use_runtime_pm) { c 810 drivers/mmc/host/sdhci-acpi.c sdhci_cleanup_host(c->host); c 812 drivers/mmc/host/sdhci-acpi.c if (c->slot && c->slot->free_slot) c 813 drivers/mmc/host/sdhci-acpi.c c->slot->free_slot(pdev); c 815 drivers/mmc/host/sdhci-acpi.c sdhci_free_host(c->host); c 821 drivers/mmc/host/sdhci-acpi.c struct sdhci_acpi_host *c = platform_get_drvdata(pdev); c 825 drivers/mmc/host/sdhci-acpi.c if (c->use_runtime_pm) { c 831 drivers/mmc/host/sdhci-acpi.c if (c->slot && c->slot->remove_slot) c 832 drivers/mmc/host/sdhci-acpi.c c->slot->remove_slot(pdev); c 834 drivers/mmc/host/sdhci-acpi.c dead = (sdhci_readl(c->host, SDHCI_INT_STATUS) == ~0); c 835 drivers/mmc/host/sdhci-acpi.c sdhci_remove_host(c->host, dead); c 837 drivers/mmc/host/sdhci-acpi.c if (c->slot && c->slot->free_slot) c 838 drivers/mmc/host/sdhci-acpi.c c->slot->free_slot(pdev); c 840 drivers/mmc/host/sdhci-acpi.c sdhci_free_host(c->host); c 849 drivers/mmc/host/sdhci-acpi.c struct sdhci_acpi_host *c = dev_get_drvdata(dev); c 850 drivers/mmc/host/sdhci-acpi.c struct sdhci_host *host = c->host; c 860 drivers/mmc/host/sdhci-acpi.c struct sdhci_acpi_host *c = dev_get_drvdata(dev); c 862 drivers/mmc/host/sdhci-acpi.c sdhci_acpi_byt_setting(&c->pdev->dev); c 864 drivers/mmc/host/sdhci-acpi.c return sdhci_resume_host(c->host); c 873 drivers/mmc/host/sdhci-acpi.c struct sdhci_acpi_host *c = dev_get_drvdata(dev); c 874 drivers/mmc/host/sdhci-acpi.c struct sdhci_host *host = c->host; c 884 drivers/mmc/host/sdhci-acpi.c struct sdhci_acpi_host *c = dev_get_drvdata(dev); c 886 drivers/mmc/host/sdhci-acpi.c sdhci_acpi_byt_setting(&c->pdev->dev); c 888 drivers/mmc/host/sdhci-acpi.c return sdhci_runtime_resume_host(c->host, 0); c 57 drivers/mmc/host/sdhci.h #define SDHCI_MAKE_CMD(c, f) (((c & 0xff) << 8) | (f & 0xff)) c 58 drivers/mmc/host/sdhci.h #define SDHCI_GET_CMD(c) ((c>>8) & 0x3f) c 251 drivers/mmc/host/tmio_mmc_core.c int c = cmd->opcode; c 254 drivers/mmc/host/tmio_mmc_core.c case MMC_RSP_NONE: c |= RESP_NONE; break; c 257 drivers/mmc/host/tmio_mmc_core.c c |= RESP_R1; break; c 258 drivers/mmc/host/tmio_mmc_core.c case MMC_RSP_R1B: c |= RESP_R1B; break; c 259 drivers/mmc/host/tmio_mmc_core.c case MMC_RSP_R2: c |= RESP_R2; break; c 260 drivers/mmc/host/tmio_mmc_core.c case MMC_RSP_R3: c |= RESP_R3; break; c 274 drivers/mmc/host/tmio_mmc_core.c c |= DATA_PRESENT; c 277 drivers/mmc/host/tmio_mmc_core.c c |= TRANSFER_MULTI; c 285 drivers/mmc/host/tmio_mmc_core.c c |= NO_CMD12_ISSUE; c 288 drivers/mmc/host/tmio_mmc_core.c c |= TRANSFER_READ; c 295 drivers/mmc/host/tmio_mmc_core.c sd_ctrl_write16(host, CTL_SD_CMD, c); c 405 drivers/mmc/host/toshsd.c int c = cmd->opcode; c 424 drivers/mmc/host/toshsd.c c |= SD_CMD_RESP_TYPE_NONE; c 428 drivers/mmc/host/toshsd.c c |= SD_CMD_RESP_TYPE_EXT_R1; c 431 drivers/mmc/host/toshsd.c c |= SD_CMD_RESP_TYPE_EXT_R1B; c 434 
drivers/mmc/host/toshsd.c c |= SD_CMD_RESP_TYPE_EXT_R2; c 437 drivers/mmc/host/toshsd.c c |= SD_CMD_RESP_TYPE_EXT_R3; c 449 drivers/mmc/host/toshsd.c c |= SD_CMD_TYPE_ACMD; c 452 drivers/mmc/host/toshsd.c c |= (3 << 8); /* removed from ipaq-asic3.h for some reason */ c 455 drivers/mmc/host/toshsd.c c |= SD_CMD_DATA_PRESENT; c 460 drivers/mmc/host/toshsd.c c |= SD_CMD_MULTI_BLOCK; c 464 drivers/mmc/host/toshsd.c c |= SD_CMD_TRANSFER_READ; c 471 drivers/mmc/host/toshsd.c iowrite16(c, host->ioaddr + SD_CMD); c 220 drivers/mmc/host/vub300.c #define FUN(c) (0x000007 & (c->arg>>28)) c 221 drivers/mmc/host/vub300.c #define REG(c) (0x01FFFF & (c->arg>>9)) c 1207 drivers/mmc/host/vub300.c u8 c; c 1211 drivers/mmc/host/vub300.c c = *data++; c 1212 drivers/mmc/host/vub300.c } while (size-- && c); /* skip comment */ c 1984 drivers/mmc/host/vub300.c u64 c = kHzClock; c 1986 drivers/mmc/host/vub300.c buf[i] = c; c 1987 drivers/mmc/host/vub300.c c >>= 8; c 309 drivers/mtd/inftlmount.c static int memcmpb(void *a, int c, int n) c 313 drivers/mtd/inftlmount.c if (c != ((unsigned char *)a)[i]) c 600 drivers/mtd/maps/vmu-flash.c int c, error; c 608 drivers/mtd/maps/vmu-flash.c c = hweight_long(test_flash_data); c 610 drivers/mtd/maps/vmu-flash.c basic_flash_data = be32_to_cpu(mdev->devinfo.function_data[c - 1]); c 52 drivers/mtd/nand/onenand/omap2.c struct omap2_onenand *c = dev_id; c 54 drivers/mtd/nand/onenand/omap2.c complete(&c->irq_done); c 59 drivers/mtd/nand/onenand/omap2.c static inline unsigned short read_reg(struct omap2_onenand *c, int reg) c 61 drivers/mtd/nand/onenand/omap2.c return readw(c->onenand.base + reg); c 64 drivers/mtd/nand/onenand/omap2.c static inline void write_reg(struct omap2_onenand *c, unsigned short value, c 67 drivers/mtd/nand/onenand/omap2.c writew(value, c->onenand.base + reg); c 70 drivers/mtd/nand/onenand/omap2.c static int omap2_onenand_set_cfg(struct omap2_onenand *c, c 106 drivers/mtd/nand/onenand/omap2.c write_reg(c, reg, ONENAND_REG_SYS_CFG1); c 144 drivers/mtd/nand/onenand/omap2.c struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd); c 170 drivers/mtd/nand/onenand/omap2.c intr = read_reg(c, ONENAND_REG_INTERRUPT); c 174 drivers/mtd/nand/onenand/omap2.c ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS); c 188 drivers/mtd/nand/onenand/omap2.c syscfg = read_reg(c, ONENAND_REG_SYS_CFG1); c 191 drivers/mtd/nand/onenand/omap2.c write_reg(c, syscfg, ONENAND_REG_SYS_CFG1); c 193 drivers/mtd/nand/onenand/omap2.c syscfg = read_reg(c, ONENAND_REG_SYS_CFG1); c 196 drivers/mtd/nand/onenand/omap2.c reinit_completion(&c->irq_done); c 197 drivers/mtd/nand/onenand/omap2.c result = gpiod_get_value(c->int_gpiod); c 199 drivers/mtd/nand/onenand/omap2.c ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS); c 200 drivers/mtd/nand/onenand/omap2.c intr = read_reg(c, ONENAND_REG_INTERRUPT); c 206 drivers/mtd/nand/onenand/omap2.c if (!wait_for_completion_io_timeout(&c->irq_done, c 209 drivers/mtd/nand/onenand/omap2.c ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS); c 219 drivers/mtd/nand/onenand/omap2.c intr = read_reg(c, c 224 drivers/mtd/nand/onenand/omap2.c intr = read_reg(c, ONENAND_REG_INTERRUPT); c 233 drivers/mtd/nand/onenand/omap2.c syscfg = read_reg(c, ONENAND_REG_SYS_CFG1); c 235 drivers/mtd/nand/onenand/omap2.c write_reg(c, syscfg, ONENAND_REG_SYS_CFG1); c 240 drivers/mtd/nand/onenand/omap2.c intr = read_reg(c, ONENAND_REG_INTERRUPT); c 245 drivers/mtd/nand/onenand/omap2.c ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS); c 263 drivers/mtd/nand/onenand/omap2.c intr = read_reg(c, 
ONENAND_REG_INTERRUPT); c 264 drivers/mtd/nand/onenand/omap2.c ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS); c 267 drivers/mtd/nand/onenand/omap2.c int ecc = read_reg(c, ONENAND_REG_ECC_STATUS); c 272 drivers/mtd/nand/onenand/omap2.c addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1); c 273 drivers/mtd/nand/onenand/omap2.c addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8); c 324 drivers/mtd/nand/onenand/omap2.c static inline int omap2_onenand_dma_transfer(struct omap2_onenand *c, c 331 drivers/mtd/nand/onenand/omap2.c tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count, c 334 drivers/mtd/nand/onenand/omap2.c dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n"); c 338 drivers/mtd/nand/onenand/omap2.c reinit_completion(&c->dma_done); c 341 drivers/mtd/nand/onenand/omap2.c tx->callback_param = &c->dma_done; c 345 drivers/mtd/nand/onenand/omap2.c dev_err(&c->pdev->dev, "Failed to do DMA tx_submit\n"); c 349 drivers/mtd/nand/onenand/omap2.c dma_async_issue_pending(c->dma_chan); c 351 drivers/mtd/nand/onenand/omap2.c if (!wait_for_completion_io_timeout(&c->dma_done, c 353 drivers/mtd/nand/onenand/omap2.c dmaengine_terminate_sync(c->dma_chan); c 364 drivers/mtd/nand/onenand/omap2.c struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd); c 366 drivers/mtd/nand/onenand/omap2.c struct device *dev = &c->pdev->dev; c 389 drivers/mtd/nand/onenand/omap2.c dma_src = c->phys_base + bram_offset; c 396 drivers/mtd/nand/onenand/omap2.c err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count); c 412 drivers/mtd/nand/onenand/omap2.c struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd); c 414 drivers/mtd/nand/onenand/omap2.c struct device *dev = &c->pdev->dev; c 430 drivers/mtd/nand/onenand/omap2.c dma_dst = c->phys_base + bram_offset; c 436 drivers/mtd/nand/onenand/omap2.c err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count); c 450 drivers/mtd/nand/onenand/omap2.c struct omap2_onenand *c = dev_get_drvdata(&pdev->dev); c 456 drivers/mtd/nand/onenand/omap2.c memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE); c 465 drivers/mtd/nand/onenand/omap2.c struct omap2_onenand *c; c 482 drivers/mtd/nand/onenand/omap2.c c = devm_kzalloc(dev, sizeof(struct omap2_onenand), GFP_KERNEL); c 483 drivers/mtd/nand/onenand/omap2.c if (!c) c 486 drivers/mtd/nand/onenand/omap2.c init_completion(&c->irq_done); c 487 drivers/mtd/nand/onenand/omap2.c init_completion(&c->dma_done); c 488 drivers/mtd/nand/onenand/omap2.c c->gpmc_cs = val; c 489 drivers/mtd/nand/onenand/omap2.c c->phys_base = res->start; c 491 drivers/mtd/nand/onenand/omap2.c c->onenand.base = devm_ioremap_resource(dev, res); c 492 drivers/mtd/nand/onenand/omap2.c if (IS_ERR(c->onenand.base)) c 493 drivers/mtd/nand/onenand/omap2.c return PTR_ERR(c->onenand.base); c 495 drivers/mtd/nand/onenand/omap2.c c->int_gpiod = devm_gpiod_get_optional(dev, "int", GPIOD_IN); c 496 drivers/mtd/nand/onenand/omap2.c if (IS_ERR(c->int_gpiod)) { c 497 drivers/mtd/nand/onenand/omap2.c r = PTR_ERR(c->int_gpiod); c 504 drivers/mtd/nand/onenand/omap2.c if (c->int_gpiod) { c 505 drivers/mtd/nand/onenand/omap2.c r = devm_request_irq(dev, gpiod_to_irq(c->int_gpiod), c 507 drivers/mtd/nand/onenand/omap2.c IRQF_TRIGGER_RISING, "onenand", c); c 511 drivers/mtd/nand/onenand/omap2.c c->onenand.wait = omap2_onenand_wait; c 517 drivers/mtd/nand/onenand/omap2.c c->dma_chan = dma_request_channel(mask, NULL, NULL); c 518 drivers/mtd/nand/onenand/omap2.c if (c->dma_chan) { c 519 drivers/mtd/nand/onenand/omap2.c c->onenand.read_bufferram = 
omap2_onenand_read_bufferram; c 520 drivers/mtd/nand/onenand/omap2.c c->onenand.write_bufferram = omap2_onenand_write_bufferram; c 523 drivers/mtd/nand/onenand/omap2.c c->pdev = pdev; c 524 drivers/mtd/nand/onenand/omap2.c c->mtd.priv = &c->onenand; c 525 drivers/mtd/nand/onenand/omap2.c c->mtd.dev.parent = dev; c 526 drivers/mtd/nand/onenand/omap2.c mtd_set_of_node(&c->mtd, dev->of_node); c 529 drivers/mtd/nand/onenand/omap2.c c->gpmc_cs, c->phys_base, c->onenand.base, c 530 drivers/mtd/nand/onenand/omap2.c c->dma_chan ? "DMA" : "PIO"); c 532 drivers/mtd/nand/onenand/omap2.c if ((r = onenand_scan(&c->mtd, 1)) < 0) c 535 drivers/mtd/nand/onenand/omap2.c freq = omap2_onenand_get_freq(c->onenand.version_id); c 555 drivers/mtd/nand/onenand/omap2.c r = gpmc_omap_onenand_set_timings(dev, c->gpmc_cs, c 560 drivers/mtd/nand/onenand/omap2.c r = omap2_onenand_set_cfg(c, info.sync_read, info.sync_write, c 569 drivers/mtd/nand/onenand/omap2.c r = mtd_device_register(&c->mtd, NULL, 0); c 573 drivers/mtd/nand/onenand/omap2.c platform_set_drvdata(pdev, c); c 578 drivers/mtd/nand/onenand/omap2.c onenand_release(&c->mtd); c 580 drivers/mtd/nand/onenand/omap2.c if (c->dma_chan) c 581 drivers/mtd/nand/onenand/omap2.c dma_release_channel(c->dma_chan); c 588 drivers/mtd/nand/onenand/omap2.c struct omap2_onenand *c = dev_get_drvdata(&pdev->dev); c 590 drivers/mtd/nand/onenand/omap2.c onenand_release(&c->mtd); c 591 drivers/mtd/nand/onenand/omap2.c if (c->dma_chan) c 592 drivers/mtd/nand/onenand/omap2.c dma_release_channel(c->dma_chan); c 598 drivers/mtd/nand/raw/atmel/pmecc.c s16 a, b, c; c 605 drivers/mtd/nand/raw/atmel/pmecc.c c = index_of[smu[ro * num + k]]; c 606 drivers/mtd/nand/raw/atmel/pmecc.c tmp = a + (cw_len - b) + c; c 628 drivers/mtd/nand/raw/atmel/pmecc.c s16 a, b, c; c 632 drivers/mtd/nand/raw/atmel/pmecc.c c = index_of[b]; c 633 drivers/mtd/nand/raw/atmel/pmecc.c tmp = a + c; c 281 drivers/mtd/nand/raw/cafe_nand.c int c; c 284 drivers/mtd/nand/raw/cafe_nand.c for (c = 500000; c != 0; c--) { c 289 drivers/mtd/nand/raw/cafe_nand.c if (!(c % 100000)) c 295 drivers/mtd/nand/raw/cafe_nand.c command, 500000-c, irqs, cafe_readl(cafe, NAND_IRQ)); c 557 drivers/mtd/nand/raw/cafe_nand.c u8 c; c 560 drivers/mtd/nand/raw/cafe_nand.c c = 0; c 563 drivers/mtd/nand/raw/cafe_nand.c c ^= b; c 570 drivers/mtd/nand/raw/cafe_nand.c return c; c 88 drivers/mtd/nand/raw/lpc32xx_slc.c #define SLCTAC_CLOCKS(c, n, s) (min_t(u32, DIV_ROUND_UP(c, n) - 1, 0xF) << s) c 93 drivers/mtd/nand/raw/lpc32xx_slc.c #define SLCTAC_WWIDTH(c, n) (SLCTAC_CLOCKS(c, n, 24)) c 95 drivers/mtd/nand/raw/lpc32xx_slc.c #define SLCTAC_WHOLD(c, n) (SLCTAC_CLOCKS(c, n, 20)) c 97 drivers/mtd/nand/raw/lpc32xx_slc.c #define SLCTAC_WSETUP(c, n) (SLCTAC_CLOCKS(c, n, 16)) c 101 drivers/mtd/nand/raw/lpc32xx_slc.c #define SLCTAC_RWIDTH(c, n) (SLCTAC_CLOCKS(c, n, 8)) c 103 drivers/mtd/nand/raw/lpc32xx_slc.c #define SLCTAC_RHOLD(c, n) (SLCTAC_CLOCKS(c, n, 4)) c 105 drivers/mtd/nand/raw/lpc32xx_slc.c #define SLCTAC_RSETUP(c, n) (SLCTAC_CLOCKS(c, n, 0)) c 450 drivers/mtd/nand/raw/mpc5121_nfc.c uint c = prv->column; c 454 drivers/mtd/nand/raw/mpc5121_nfc.c if (prv->spareonly || c >= mtd->writesize) { c 456 drivers/mtd/nand/raw/mpc5121_nfc.c if (c >= mtd->writesize) c 457 drivers/mtd/nand/raw/mpc5121_nfc.c c -= mtd->writesize; c 460 drivers/mtd/nand/raw/mpc5121_nfc.c mpc5121_nfc_copy_spare(mtd, c, buf, len, wr); c 468 drivers/mtd/nand/raw/mpc5121_nfc.c l = min((uint)len, mtd->writesize - c); c 472 drivers/mtd/nand/raw/mpc5121_nfc.c memcpy_toio(prv->regs + 
NFC_MAIN_AREA(0) + c, buf, l); c 474 drivers/mtd/nand/raw/mpc5121_nfc.c memcpy_fromio(buf, prv->regs + NFC_MAIN_AREA(0) + c, l); c 624 drivers/mtd/nand/raw/mtk_nand.c static void mtk_nfc_no_bad_mark_swap(struct mtd_info *a, u8 *b, int c) c 1376 drivers/mtd/nand/raw/sunxi_nand.c #define sunxi_nand_lookup_timing(l, p, c) \ c 1377 drivers/mtd/nand/raw/sunxi_nand.c _sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c) c 270 drivers/mtd/nftlcore.c foldmark = oob.u.c.FoldMark | oob.u.c.FoldMark1; c 380 drivers/mtd/nftlcore.c oob.u.c.FoldMark = oob.u.c.FoldMark1 = cpu_to_le16(FOLD_MARK_IN_PROGRESS); c 381 drivers/mtd/nftlcore.c oob.u.c.unused = 0xffffffff; c 251 drivers/mtd/nftlmount.c static int memcmpb(void *a, int c, int n) c 255 drivers/mtd/nftlmount.c if (c != ((unsigned char *)a)[i]) c 76 drivers/mtd/tests/nandbiterrs.c unsigned char c; c 81 drivers/mtd/tests/nandbiterrs.c c = v & 0xFF; c 83 drivers/mtd/tests/nandbiterrs.c c = (c & 0x0F) << 4 | (c & 0xF0) >> 4; c 84 drivers/mtd/tests/nandbiterrs.c c = (c & 0x33) << 2 | (c & 0xCC) >> 2; c 85 drivers/mtd/tests/nandbiterrs.c c = (c & 0x55) << 1 | (c & 0xAA) >> 1; c 86 drivers/mtd/tests/nandbiterrs.c return c; c 1311 drivers/mtd/ubi/io.c uint8_t c = ((uint8_t *)buf)[i]; c 1315 drivers/mtd/ubi/io.c if (c == c1) c 335 drivers/net/appletalk/ltpc.c static int wait_timeout(struct net_device *dev, int c) c 344 drivers/net/appletalk/ltpc.c if ( c != inb_p(dev->base_addr+6) ) return 0; c 703 drivers/net/appletalk/ltpc.c lt_command c; c 704 drivers/net/appletalk/ltpc.c c.getflags.command = LT_GETFLAGS; c 705 drivers/net/appletalk/ltpc.c return do_read(dev, &c, sizeof(c.getflags),&c,0); c 710 drivers/net/appletalk/ltpc.c lt_command c; c 711 drivers/net/appletalk/ltpc.c c.setflags.command = LT_SETFLAGS; c 712 drivers/net/appletalk/ltpc.c c.setflags.flags = x; c 713 drivers/net/appletalk/ltpc.c return do_write(dev, &c, sizeof(c.setflags),&c,0); c 823 drivers/net/appletalk/ltpc.c struct lt_init c; c 834 drivers/net/appletalk/ltpc.c c.command = LT_INIT; c 835 drivers/net/appletalk/ltpc.c c.hint = sa->sat_addr.s_node; c 837 drivers/net/appletalk/ltpc.c aa->s_node = do_read(dev,&c,sizeof(c),&c,0); c 338 drivers/net/can/kvaser_pciefd.c int c; c 344 drivers/net/can/kvaser_pciefd.c c = tx_len; c 345 drivers/net/can/kvaser_pciefd.c while (c--) { c 357 drivers/net/can/kvaser_pciefd.c c = rx_len; c 358 drivers/net/can/kvaser_pciefd.c while (c-- > 0) { c 375 drivers/net/can/kvaser_pciefd.c if (c != -1) { c 62 drivers/net/can/peak_canfd/peak_pciefd_main.c #define PCIEFD_CANX_OFF(c) (((c) + 1) * 0x1000) c 122 drivers/net/can/sja1000/peak_pci.c #define PCA9553_LED(c) (1 << (c)) c 123 drivers/net/can/sja1000/peak_pci.c #define PCA9553_LED_STATE(s, c) ((s) << ((c) << 1)) c 125 drivers/net/can/sja1000/peak_pci.c #define PCA9553_LED_ON(c) PCA9553_LED_STATE(PCA9553_ON, c) c 126 drivers/net/can/sja1000/peak_pci.c #define PCA9553_LED_OFF(c) PCA9553_LED_STATE(PCA9553_OFF, c) c 127 drivers/net/can/sja1000/peak_pci.c #define PCA9553_LED_SLOW(c) PCA9553_LED_STATE(PCA9553_SLOW, c) c 128 drivers/net/can/sja1000/peak_pci.c #define PCA9553_LED_FAST(c) PCA9553_LED_STATE(PCA9553_FAST, c) c 129 drivers/net/can/sja1000/peak_pci.c #define PCA9553_LED_MASK(c) PCA9553_LED_STATE(0x03, c) c 398 drivers/net/can/sja1000/peak_pci.c int c = (priv->reg_base - card->reg_base) / PEAK_PCI_CHAN_SIZE; c 405 drivers/net/can/sja1000/peak_pci.c peak_pciec_set_leds(card, PCA9553_LED(c), PCA9553_ON); c 409 drivers/net/can/sja1000/peak_pci.c peak_pciec_set_leds(card, PCA9553_LED(c), PCA9553_SLOW); c 38 
drivers/net/can/sja1000/peak_pcmcia.c #define PCC_CHAN_OFF(c) ((c) * PCC_CHAN_SIZE) c 60 drivers/net/can/sja1000/peak_pcmcia.c #define PCC_CCR_RST_CHAN(c) (0x01 << ((c) + 2)) c 65 drivers/net/can/sja1000/peak_pcmcia.c #define PCC_LED(c) (1 << (c)) c 74 drivers/net/can/sja1000/peak_pcmcia.c #define PCC_CCR_LED_CHAN(s, c) ((s) << (((c) + 2) << 1)) c 76 drivers/net/can/sja1000/peak_pcmcia.c #define PCC_CCR_LED_ON_CHAN(c) PCC_CCR_LED_CHAN(PCC_LED_ON, c) c 77 drivers/net/can/sja1000/peak_pcmcia.c #define PCC_CCR_LED_FAST_CHAN(c) PCC_CCR_LED_CHAN(PCC_LED_FAST, c) c 78 drivers/net/can/sja1000/peak_pcmcia.c #define PCC_CCR_LED_SLOW_CHAN(c) PCC_CCR_LED_CHAN(PCC_LED_SLOW, c) c 79 drivers/net/can/sja1000/peak_pcmcia.c #define PCC_CCR_LED_OFF_CHAN(c) PCC_CCR_LED_CHAN(PCC_LED_OFF, c) c 80 drivers/net/can/sja1000/peak_pcmcia.c #define PCC_CCR_LED_MASK_CHAN(c) PCC_CCR_LED_OFF_CHAN(c) c 188 drivers/net/can/sja1000/peak_pcmcia.c int c = (priv->reg_base - card->ioport_addr) / PCC_CHAN_SIZE; c 195 drivers/net/can/sja1000/peak_pcmcia.c pcan_set_leds(card, PCC_LED(c), PCC_LED_ON); c 199 drivers/net/can/sja1000/peak_pcmcia.c pcan_set_leds(card, PCC_LED(c), PCC_LED_SLOW); c 1316 drivers/net/ethernet/8390/pcnet_cs.c static void copyin(void *dest, void __iomem *src, int c) c 1322 drivers/net/ethernet/8390/pcnet_cs.c if (c <= 0) c 1324 drivers/net/ethernet/8390/pcnet_cs.c odd = (c & 1); c >>= 1; c 1326 drivers/net/ethernet/8390/pcnet_cs.c if (c) { c 1327 drivers/net/ethernet/8390/pcnet_cs.c do { *d++ = __raw_readw(s++); } while (--c); c 1334 drivers/net/ethernet/8390/pcnet_cs.c static void copyout(void __iomem *dest, const void *src, int c) c 1340 drivers/net/ethernet/8390/pcnet_cs.c if (c <= 0) c 1342 drivers/net/ethernet/8390/pcnet_cs.c odd = (c & 1); c >>= 1; c 1344 drivers/net/ethernet/8390/pcnet_cs.c if (c) { c 1345 drivers/net/ethernet/8390/pcnet_cs.c do { __raw_writew(*s++, d++); } while (--c); c 4164 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c int c, old; c 4166 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c c = atomic_read(v); c 4168 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c if (unlikely(c + a >= u)) c 4171 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c old = atomic_cmpxchg((v), c, c + a); c 4172 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c if (likely(old == c)) c 4174 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c c = old; c 4192 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c int c, old; c 4194 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c c = atomic_read(v); c 4196 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c if (unlikely(c - a < u)) c 4199 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c old = atomic_cmpxchg((v), c, c - a); c 4200 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c if (likely(old == c)) c 4202 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c c = old; c 751 drivers/net/ethernet/chelsio/cxgb/cxgb2.c static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) c 755 drivers/net/ethernet/chelsio/cxgb/cxgb2.c adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs; c 756 drivers/net/ethernet/chelsio/cxgb/cxgb2.c adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce; c 757 drivers/net/ethernet/chelsio/cxgb/cxgb2.c adapter->params.sge.sample_interval_usecs = c->rate_sample_interval; c 762 drivers/net/ethernet/chelsio/cxgb/cxgb2.c static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) c 766 drivers/net/ethernet/chelsio/cxgb/cxgb2.c c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs; c 767 
drivers/net/ethernet/chelsio/cxgb/cxgb2.c c->rate_sample_interval = adapter->params.sge.sample_interval_usecs; c 768 drivers/net/ethernet/chelsio/cxgb/cxgb2.c c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable; c 2001 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) c 2009 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c if (c->rx_coalesce_usecs * 10 > M_NEWTIMER) c 2015 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c qsp->coalesce_usecs = c->rx_coalesce_usecs; c 2022 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) c 2028 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c c->rx_coalesce_usecs = q->coalesce_usecs; c 918 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c unsigned int i, c, left, val, offset = addr & 0xff; c 929 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c for (left = n; left; left -= c) { c 930 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c c = min(left, 4U); c 931 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c for (val = 0, i = 0; i < c; ++i) c 934 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c ret = sf1_write(adapter, c, c != left, val); c 289 drivers/net/ethernet/chelsio/cxgb3/xgmac.c int hash = 0, octet, bit, i = 0, c; c 292 drivers/net/ethernet/chelsio/cxgb3/xgmac.c for (c = addr[octet], bit = 0; bit < 8; c >>= 1, ++bit) { c 293 drivers/net/ethernet/chelsio/cxgb3/xgmac.c hash ^= (c & 1) << i; c 21 drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c static inline unsigned int ipv4_clip_hash(struct clip_tbl *c, const u32 *key) c 23 drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c unsigned int clipt_size_half = c->clipt_size / 2; c 48 drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c struct fw_clip_cmd c; c 50 drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c memset(&c, 0, sizeof(c)); c 51 drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) | c 53 drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c c.alloc_to_len16 = htonl(FW_CLIP_CMD_ALLOC_F | FW_LEN16(c)); c 54 drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr); c 55 drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8); c 56 drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false); c 63 drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c struct fw_clip_cmd c; c 65 drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c memset(&c, 0, sizeof(c)); c 66 drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) | c 68 drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c c.alloc_to_len16 = htonl(FW_CLIP_CMD_FREE_F | FW_LEN16(c)); c 69 drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr); c 70 drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8); c 71 drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false); c 1281 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c char c = '\n', s[256]; c 1297 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c &data[7], &c) < 8 || c != '\n') c 1384 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c static unsigned int xdigit2int(unsigned char c) c 1386 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c return isdigit(c) ? 
c - '0' : tolower(c) - 'a' + 10; c 55 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h static inline unsigned int hex2val(char c) c 57 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h return isdigit(c) ? c - '0' : tolower(c) - 'a' + 10; c 1096 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) c 1102 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c c->rx_coalesce_usecs = qtimer_val(adap, rq); c 1103 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN_F) ? c 1105 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev); c 1106 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c c->tx_coalesce_usecs_irq = get_dbqtimer_tick(dev); c 1107 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c c->tx_coalesce_usecs = get_dbqtimer(dev); c 3705 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) c 3721 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c memset(c, 0, sizeof(*c)); c 3722 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | c 3724 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c c->cfvalid_to_len16 = htonl(FW_LEN16(*c)); c 3725 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c); c 3729 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | c 3731 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL); c 4969 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c struct fw_caps_config_cmd c; c 4996 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c if (adap_init1(adap, &c)) c 130 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c struct fw_ptp_cmd c; c 133 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c memset(&c, 0, sizeof(c)); c 134 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | c 138 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); c 139 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.u.init.sc = FW_PTP_SC_RXTIME_STAMP; c 140 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.u.init.mode = cpu_to_be16(mode); c 142 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL); c 151 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c struct fw_ptp_cmd c; c 154 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c memset(&c, 0, sizeof(c)); c 155 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | c 159 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); c 160 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.u.init.sc = FW_PTP_SC_TX_TYPE; c 161 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.u.init.mode = cpu_to_be16(PTP_TS_NONE); c 163 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL); c 175 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c struct fw_ptp_cmd c; c 178 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c memset(&c, 0, sizeof(c)); c 179 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | c 184 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.retval_len16 = 
cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); c 185 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.u.init.sc = FW_PTP_SC_RDRX_TYPE; c 186 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.u.init.txchan = pi->tx_chan; c 187 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.u.init.absid = cpu_to_be16(receive_q->rspq.abs_id); c 189 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL); c 207 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c struct fw_ptp_cmd c; c 210 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c memset(&c, 0, sizeof(c)); c 211 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | c 215 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); c 216 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.u.ts.sc = FW_PTP_SC_ADJ_FREQ; c 217 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.u.ts.sign = (ppb < 0) ? 1 : 0; c 220 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.u.ts.ppb = cpu_to_be32(ppb); c 222 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL); c 239 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c struct fw_ptp_cmd c; c 242 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c memset(&c, 0, sizeof(c)); c 243 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | c 247 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); c 248 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.u.ts.sc = FW_PTP_SC_ADJ_FTIME; c 249 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.u.ts.sign = (delta < 0) ? 1 : 0; c 252 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.u.ts.tm = cpu_to_be64(delta); c 254 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL); c 273 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c struct fw_ptp_cmd c; c 281 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c memset(&c, 0, sizeof(c)); c 282 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | c 286 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); c 287 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.u.ts.sc = FW_PTP_SC_ADJ_TIME; c 288 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.u.ts.sign = (delta < 0) ? 
1 : 0; c 291 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.u.ts.tm = cpu_to_be64(delta); c 293 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL); c 340 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c struct fw_ptp_cmd c; c 344 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c memset(&c, 0, sizeof(c)); c 345 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | c 349 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); c 350 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.u.ts.sc = FW_PTP_SC_SET_TIME; c 353 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.u.ts.tm = cpu_to_be64(ns); c 355 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL); c 365 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c struct fw_ptp_cmd c; c 368 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c memset(&c, 0, sizeof(c)); c 369 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | c 373 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); c 374 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c c.u.scmd.sc = FW_PTP_SC_INIT_TIMER; c 376 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL); c 3016 drivers/net/ethernet/chelsio/cxgb4/sge.c __sum16 c = (__force __sum16)pkt->csum; c 3017 drivers/net/ethernet/chelsio/cxgb4/sge.c skb->csum = csum_unfold(c); c 3517 drivers/net/ethernet/chelsio/cxgb4/sge.c struct fw_iq_cmd c; c 3531 drivers/net/ethernet/chelsio/cxgb4/sge.c memset(&c, 0, sizeof(c)); c 3532 drivers/net/ethernet/chelsio/cxgb4/sge.c c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F | c 3535 drivers/net/ethernet/chelsio/cxgb4/sge.c c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F | c 3536 drivers/net/ethernet/chelsio/cxgb4/sge.c FW_LEN16(c)); c 3537 drivers/net/ethernet/chelsio/cxgb4/sge.c c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) | c 3543 drivers/net/ethernet/chelsio/cxgb4/sge.c c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) | c 3547 drivers/net/ethernet/chelsio/cxgb4/sge.c c.iqsize = htons(iq->size); c 3548 drivers/net/ethernet/chelsio/cxgb4/sge.c c.iqaddr = cpu_to_be64(iq->phys_addr); c 3550 drivers/net/ethernet/chelsio/cxgb4/sge.c c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F | c 3576 drivers/net/ethernet/chelsio/cxgb4/sge.c c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F | c 3581 drivers/net/ethernet/chelsio/cxgb4/sge.c c.iqns_to_fl0congen |= c 3593 drivers/net/ethernet/chelsio/cxgb4/sge.c c.fl0dcaen_to_fl0cidxfthresh = c 3600 drivers/net/ethernet/chelsio/cxgb4/sge.c c.fl0size = htons(flsz); c 3601 drivers/net/ethernet/chelsio/cxgb4/sge.c c.fl0addr = cpu_to_be64(fl->addr); c 3604 drivers/net/ethernet/chelsio/cxgb4/sge.c ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); c 3613 drivers/net/ethernet/chelsio/cxgb4/sge.c iq->cntxt_id = ntohs(c.iqid); c 3614 drivers/net/ethernet/chelsio/cxgb4/sge.c iq->abs_id = ntohs(c.physiqid); c 3633 drivers/net/ethernet/chelsio/cxgb4/sge.c fl->cntxt_id = ntohs(c.fl0id); c 3735 drivers/net/ethernet/chelsio/cxgb4/sge.c struct fw_eq_eth_cmd c; c 3748 drivers/net/ethernet/chelsio/cxgb4/sge.c memset(&c, 0, sizeof(c)); c 3749 drivers/net/ethernet/chelsio/cxgb4/sge.c c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F | 
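The cxgb4 entries above (clip_tbl.c, cxgb4_ptp.c, sge.c, t4_hw.c) repeat one firmware-mailbox pattern: zero a command struct, fill its big-endian opcode and length-in-16-byte-units fields, then pass it to a mailbox-write helper such as t4_wr_mbox(). A minimal sketch of that shape with hypothetical stand-ins (struct fw_cmd, mbox_write(), the field layout), not the real Chelsio firmware ABI:

/* Hypothetical "fill and send a firmware command" sketch. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <arpa/inet.h>          /* htonl()/ntohl(), standing in for cpu_to_be32() */

struct fw_cmd {
	uint32_t op_to_portid;  /* opcode plus port id, big-endian on the wire */
	uint32_t retval_len16;  /* command length in 16-byte units */
	uint64_t payload;
};

/* stand-in for t4_wr_mbox(): here it just dumps the command */
static int mbox_write(const struct fw_cmd *c, size_t len)
{
	printf("sending %zu-byte command, op_to_portid=0x%08x\n",
	       len, ntohl(c->op_to_portid));
	return 0;
}

static int send_cmd(uint32_t opcode, uint32_t portid, uint64_t payload)
{
	struct fw_cmd c;

	memset(&c, 0, sizeof(c));                        /* unused fields go out as zero */
	c.op_to_portid = htonl((opcode << 24) | portid); /* hypothetical field layout */
	c.retval_len16 = htonl(sizeof(c) / 16);          /* length in 16-byte units */
	c.payload      = payload;                        /* real code would byte-swap this too */
	return mbox_write(&c, sizeof(c));
}

int main(void)
{
	return send_cmd(0x3d /* hypothetical opcode */, 1, 0);
}

The memset() mirrors what every entry above does before filling fields, so that any field the caller does not set is transmitted as zero.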
c 3753 drivers/net/ethernet/chelsio/cxgb4/sge.c c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F | c 3754 drivers/net/ethernet/chelsio/cxgb4/sge.c FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c)); c 3762 drivers/net/ethernet/chelsio/cxgb4/sge.c c.autoequiqe_to_viid = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F | c 3765 drivers/net/ethernet/chelsio/cxgb4/sge.c c.fetchszm_to_iqid = c 3771 drivers/net/ethernet/chelsio/cxgb4/sge.c c.dcaen_to_eqsize = c 3779 drivers/net/ethernet/chelsio/cxgb4/sge.c c.eqaddr = cpu_to_be64(txq->q.phys_addr); c 3788 drivers/net/ethernet/chelsio/cxgb4/sge.c c.timeren_timerix = c 3792 drivers/net/ethernet/chelsio/cxgb4/sge.c ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); c 3804 drivers/net/ethernet/chelsio/cxgb4/sge.c init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd))); c 3820 drivers/net/ethernet/chelsio/cxgb4/sge.c struct fw_eq_ctrl_cmd c; c 3832 drivers/net/ethernet/chelsio/cxgb4/sge.c c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F | c 3836 drivers/net/ethernet/chelsio/cxgb4/sge.c c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F | c 3837 drivers/net/ethernet/chelsio/cxgb4/sge.c FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c)); c 3838 drivers/net/ethernet/chelsio/cxgb4/sge.c c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid)); c 3839 drivers/net/ethernet/chelsio/cxgb4/sge.c c.physeqid_pkd = htonl(0); c 3840 drivers/net/ethernet/chelsio/cxgb4/sge.c c.fetchszm_to_iqid = c 3844 drivers/net/ethernet/chelsio/cxgb4/sge.c c.dcaen_to_eqsize = c 3851 drivers/net/ethernet/chelsio/cxgb4/sge.c c.eqaddr = cpu_to_be64(txq->q.phys_addr); c 3853 drivers/net/ethernet/chelsio/cxgb4/sge.c ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); c 3863 drivers/net/ethernet/chelsio/cxgb4/sge.c init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid))); c 3889 drivers/net/ethernet/chelsio/cxgb4/sge.c struct fw_eq_ofld_cmd c; c 3904 drivers/net/ethernet/chelsio/cxgb4/sge.c memset(&c, 0, sizeof(c)); c 3907 drivers/net/ethernet/chelsio/cxgb4/sge.c c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F | c 3911 drivers/net/ethernet/chelsio/cxgb4/sge.c c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F | c 3912 drivers/net/ethernet/chelsio/cxgb4/sge.c FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c)); c 3913 drivers/net/ethernet/chelsio/cxgb4/sge.c c.fetchszm_to_iqid = c 3917 drivers/net/ethernet/chelsio/cxgb4/sge.c c.dcaen_to_eqsize = c 3924 drivers/net/ethernet/chelsio/cxgb4/sge.c c.eqaddr = cpu_to_be64(txq->q.phys_addr); c 3926 drivers/net/ethernet/chelsio/cxgb4/sge.c ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); c 3938 drivers/net/ethernet/chelsio/cxgb4/sge.c init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd))); c 3084 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c unsigned int i, c, left, val, offset = addr & 0xff; c 3095 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c for (left = n; left; left -= c) { c 3096 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c = min(left, 4U); c 3097 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c for (val = 0, i = 0; i < c; ++i) c 3100 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c ret = sf1_write(adapter, c, c != left, 1, val); c 3463 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c int k, int c) c 3472 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c if (k > c) { c 3482 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c), c 3483 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason, c 3546 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c uint32_t d, c, k; c 3549 
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c = be32_to_cpu(card_fw->fw_ver); c 3559 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c), c 3560 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), c 3880 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_params_cmd c; c 3882 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 3883 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_vfn = c 3888 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.retval_len16 = cpu_to_be32(FW_LEN16(c)); c 3889 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.param[0].mnem = c 3892 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.param[0].val = cpu_to_be32(op); c 3894 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL); c 4276 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_port_cmd c; c 4278 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 4279 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | c 4282 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.action_to_len16 = c 4286 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c FW_LEN16(c)); c 4288 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG); c 4290 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.u.l1cfg32.rcap32 = cpu_to_be32(FW_PORT_CAP32_ANEG); c 4291 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); c 5218 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_rss_glb_config_cmd c; c 5220 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 5221 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) | c 5223 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.retval_len16 = cpu_to_be32(FW_LEN16(c)); c 5225 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.u.manual.mode_pkd = c 5228 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.u.basicvirtual.mode_pkd = c 5230 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags); c 5233 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); c 5249 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_rss_vi_config_cmd c; c 5251 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 5252 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) | c 5255 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.retval_len16 = cpu_to_be32(FW_LEN16(c)); c 5256 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags | c 5258 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); c 5315 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_ldst_cmd c; c 5318 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 5319 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) | c 5324 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); c 5326 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.u.addrval.addr = cpu_to_be32(start_index + i); c 5327 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.u.addrval.val = rw ? 
0 : cpu_to_be32(vals[i]); c 5328 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, c 5334 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c vals[i] = be32_to_cpu(c.u.addrval.val); c 6526 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_ldst_cmd c; c 6528 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 6530 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) | c 6534 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); c 6535 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.u.addrval.addr = cpu_to_be32(addr); c 6536 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.u.addrval.val = cpu_to_be32(val); c 6538 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); c 6557 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_ldst_cmd c; c 6559 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 6561 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) | c 6564 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); c 6565 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) | c 6567 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.u.mdio.raddr = cpu_to_be16(reg); c 6569 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); c 6571 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c *valp = be16_to_cpu(c.u.mdio.rval); c 6590 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_ldst_cmd c; c 6592 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 6594 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) | c 6597 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); c 6598 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) | c 6600 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.u.mdio.raddr = cpu_to_be16(reg); c 6601 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.u.mdio.rval = cpu_to_be16(val); c 6603 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); c 6784 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_ldst_cmd c; c 6786 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 6790 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) | c 6793 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); c 6794 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F); c 6796 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); c 6856 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_hello_cmd c; c 6862 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 6863 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c INIT_CMD(c, HELLO, WRITE); c 6864 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.err_to_clearinit = cpu_to_be32( c 6880 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); c 6889 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c v = be32_to_cpu(c.err_to_clearinit); c 6980 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_bye_cmd c; c 6982 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 6983 
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c INIT_CMD(c, BYE, WRITE); c 6984 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); c 6997 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_initialize_cmd c; c 6999 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 7000 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c INIT_CMD(c, INITIALIZE, WRITE); c 7001 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); c 7014 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_reset_cmd c; c 7016 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 7017 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c INIT_CMD(c, RESET, WRITE); c 7018 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.val = cpu_to_be32(reset); c 7019 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); c 7047 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_reset_cmd c; c 7049 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 7050 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c INIT_CMD(c, RESET, WRITE); c 7051 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.val = cpu_to_be32(PIORST_F | PIORSTMODE_F); c 7052 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.halt_pkd = cpu_to_be32(FW_RESET_CMD_HALT_F); c 7053 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); c 7434 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_initialize_cmd c; c 7436 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 7437 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c INIT_CMD(c, INITIALIZE, WRITE); c 7438 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); c 7461 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_params_cmd c; c 7462 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c __be32 *p = &c.param[0].mnem; c 7467 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 7468 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) | c 7472 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.retval_len16 = cpu_to_be32(FW_LEN16(c)); c 7481 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); c 7483 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2) c 7523 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_params_cmd c; c 7524 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c __be32 *p = &c.param[0].mnem; c 7529 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 7530 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) | c 7534 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.retval_len16 = cpu_to_be32(FW_LEN16(c)); c 7541 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout); c 7592 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_pfvf_cmd c; c 7594 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 7595 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F | c 7598 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.retval_len16 = cpu_to_be32(FW_LEN16(c)); c 7599 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.niqflint_niq = cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi) | c 7601 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.type_to_neq = cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask) | c 
7604 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) | c 7607 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.r_caps_to_nethctrl = cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps) | c 7610 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); c 7635 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_vi_cmd c; c 7637 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 7638 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F | c 7641 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_ALLOC_F | FW_LEN16(c)); c 7642 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.portid_pkd = FW_VI_CMD_PORTID_V(port); c 7643 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.nmac = nmac - 1; c 7645 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); c 7650 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memcpy(mac, c.mac, sizeof(c.mac)); c 7653 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); c 7656 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); c 7659 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); c 7662 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memcpy(mac + 6, c.nmac0, sizeof(c.nmac0)); c 7666 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c *rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd)); c 7669 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c *vivld = FW_VI_CMD_VFVLD_G(be32_to_cpu(c.alloc_to_len16)); c 7672 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c *vin = FW_VI_CMD_VIN_G(be32_to_cpu(c.alloc_to_len16)); c 7674 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid)); c 7690 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_vi_cmd c; c 7692 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 7693 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | c 7698 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c)); c 7699 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid)); c 7701 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); c 7722 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_vi_rxmode_cmd c; c 7736 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 7737 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) | c 7740 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.retval_len16 = cpu_to_be32(FW_LEN16(c)); c 7741 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.mtu_to_vlanexen = c 7747 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); c 7766 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_vi_mac_cmd c; c 7770 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 7771 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | c 7776 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) | c 7779 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c p = c.u.exact; c 7783 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok); c 7806 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 
struct fw_vi_mac_cmd c; c 7807 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_vi_mac_raw *p = &c.u.raw; c 7810 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 7811 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | c 7817 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) | c 7834 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok); c 7858 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_vi_mac_cmd c; c 7859 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_vi_mac_vni *p = c.u.exact_vni; c 7863 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 7864 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | c 7869 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.freemacs_to_len16 = cpu_to_be32(val); c 7880 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok); c 7906 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_vi_mac_cmd c; c 7907 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_vi_mac_raw *p = &c.u.raw; c 7910 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 7911 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | c 7916 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.freemacs_to_len16 = cpu_to_be32(val); c 7932 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok); c 7969 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_vi_mac_cmd c; c 7978 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ? 
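(Illustrative aside, not part of the listing.) The t4_hw.c entries at lines 7969-8012 and 8054-8094 show MAC addresses being programmed in batches: each FW_VI_MAC_CMD carries at most ARRAY_SIZE(c.u.exact) exact-match slots, so the caller clamps the remaining count to that size and issues one mailbox command per chunk. A small self-contained sketch of just that chunking arithmetic, with an invented helper name:

/*
 * example_count_batches() is illustrative only; it reproduces the
 * rem / fw_naddr clamping seen in the listing, where slots_per_cmd
 * stands in for ARRAY_SIZE(c.u.exact).
 */
static unsigned int example_count_batches(unsigned int naddr,
					  unsigned int slots_per_cmd)
{
	unsigned int offset, batches = 0;

	for (offset = 0; offset < naddr; ) {
		unsigned int rem = naddr - offset;
		unsigned int fw_naddr = rem < slots_per_cmd ?
					rem : slots_per_cmd;

		/* one FW_VI_MAC_CMD would be built and sent for this chunk */
		offset += fw_naddr;
		batches++;
	}
	return batches;
}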
c 7979 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c rem : ARRAY_SIZE(c.u.exact)); c 7985 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 7986 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | c 7991 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.freemacs_to_len16 = c 7995 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) { c 8008 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); c 8012 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) { c 8054 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_vi_mac_cmd c; c 8065 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) c 8067 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c : ARRAY_SIZE(c.u.exact)); c 8073 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 8074 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | c 8079 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.freemacs_to_len16 = c 8083 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) { c 8090 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); c 8094 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) { c 8134 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_vi_mac_cmd c; c 8135 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_vi_mac_exact *p = c.u.exact; c 8142 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 8143 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | c 8146 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(1)); c 8152 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); c 8160 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c (be32_to_cpu(c.op_to_viid)); c 8192 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_vi_mac_cmd c; c 8194 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 8195 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | c 8198 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F | c 8201 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.u.hash.hashvec = cpu_to_be64(vec); c 8202 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); c 8220 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_vi_enable_cmd c; c 8222 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 8223 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | c 8226 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) | c 8229 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c FW_LEN16(c)); c 8230 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL); c 8289 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_vi_enable_cmd c; c 8291 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 8292 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | c 8295 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.ien_to_len16 
= cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c)); c 8296 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.blinkdur = cpu_to_be16(nblinks); c 8297 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); c 8319 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_iq_cmd c; c 8321 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 8322 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F | c 8325 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c)); c 8326 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype)); c 8327 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.iqid = cpu_to_be16(iqid); c 8328 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.fl0id = cpu_to_be16(fl0id); c 8329 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.fl1id = cpu_to_be16(fl1id); c 8330 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); c 8350 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_iq_cmd c; c 8352 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 8353 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F | c 8356 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(c)); c 8357 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype)); c 8358 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.iqid = cpu_to_be16(iqid); c 8359 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.fl0id = cpu_to_be16(fl0id); c 8360 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.fl1id = cpu_to_be16(fl1id); c 8361 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); c 8377 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_eq_eth_cmd c; c 8379 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 8380 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) | c 8384 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c)); c 8385 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid)); c 8386 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); c 8402 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_eq_ctrl_cmd c; c 8404 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 8405 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | c 8409 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.alloc_to_len16 = cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c)); c 8410 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.cmpliqid_eqid = cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid)); c 8411 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); c 8427 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_eq_ofld_cmd c; c 8429 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 8430 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | c 8434 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.alloc_to_len16 = cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c)); c 8435 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.eqid_pkd = 
cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid)); c 8436 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); c 10301 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c struct fw_ldst_cmd c; c 10309 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c memset(&c, 0, sizeof(c)); c 10310 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) | c 10313 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); c 10314 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c c.u.idctxt.physid = cpu_to_be32(cid); c 10316 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); c 10318 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0); c 10319 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1); c 10320 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2); c 10321 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3); c 10322 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4); c 10323 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5); c 827 drivers/net/ethernet/chelsio/cxgb4/t4_msg.h struct cpl_tx_pkt_core c; c 939 drivers/net/ethernet/chelsio/cxgb4/t4_msg.h struct cpl_tx_pkt_lso_core c; c 1658 drivers/net/ethernet/chelsio/cxgb4vf/sge.c __sum16 c = (__force __sum16)pkt->csum; c 1659 drivers/net/ethernet/chelsio/cxgb4vf/sge.c skb->csum = csum_unfold(c); c 184 drivers/net/ethernet/cisco/enic/enic_ethtool.c struct vnic_enet_config *c = &enic->config; c 187 drivers/net/ethernet/cisco/enic/enic_ethtool.c ring->rx_pending = c->rq_desc_count; c 189 drivers/net/ethernet/cisco/enic/enic_ethtool.c ring->tx_pending = c->wq_desc_count; c 196 drivers/net/ethernet/cisco/enic/enic_ethtool.c struct vnic_enet_config *c = &enic->config; c 212 drivers/net/ethernet/cisco/enic/enic_ethtool.c rx_pending = c->rq_desc_count; c 213 drivers/net/ethernet/cisco/enic/enic_ethtool.c tx_pending = c->wq_desc_count; c 230 drivers/net/ethernet/cisco/enic/enic_ethtool.c c->rq_desc_count = c 232 drivers/net/ethernet/cisco/enic/enic_ethtool.c c->wq_desc_count = c 250 drivers/net/ethernet/cisco/enic/enic_ethtool.c c->rq_desc_count = rx_pending; c 251 drivers/net/ethernet/cisco/enic/enic_ethtool.c c->wq_desc_count = tx_pending; c 44 drivers/net/ethernet/cisco/enic/enic_res.c struct vnic_enet_config *c = &enic->config; c 58 drivers/net/ethernet/cisco/enic/enic_res.c sizeof(c->m), &c->m); \ c 76 drivers/net/ethernet/cisco/enic/enic_res.c c->wq_desc_count = c 79 drivers/net/ethernet/cisco/enic/enic_res.c c->wq_desc_count)); c 80 drivers/net/ethernet/cisco/enic/enic_res.c c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */ c 82 drivers/net/ethernet/cisco/enic/enic_res.c c->rq_desc_count = c 85 drivers/net/ethernet/cisco/enic/enic_res.c c->rq_desc_count)); c 86 drivers/net/ethernet/cisco/enic/enic_res.c c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */ c 88 drivers/net/ethernet/cisco/enic/enic_res.c if (c->mtu == 0) c 89 drivers/net/ethernet/cisco/enic/enic_res.c c->mtu = 1500; c 90 drivers/net/ethernet/cisco/enic/enic_res.c c->mtu = min_t(u16, ENIC_MAX_MTU, c 92 drivers/net/ethernet/cisco/enic/enic_res.c c->mtu)); c 94 drivers/net/ethernet/cisco/enic/enic_res.c c->intr_timer_usec = min_t(u32, c->intr_timer_usec, c 99 drivers/net/ethernet/cisco/enic/enic_res.c enic->mac_addr, 
c->wq_desc_count, c->rq_desc_count, c->mtu); c 109 drivers/net/ethernet/cisco/enic/enic_res.c c->intr_mode == VENET_INTR_MODE_INTX ? "INTx" : c 110 drivers/net/ethernet/cisco/enic/enic_res.c c->intr_mode == VENET_INTR_MODE_MSI ? "MSI" : c 111 drivers/net/ethernet/cisco/enic/enic_res.c c->intr_mode == VENET_INTR_MODE_ANY ? "any" : c 113 drivers/net/ethernet/cisco/enic/enic_res.c c->intr_timer_type == VENET_INTR_TYPE_MIN ? "min" : c 114 drivers/net/ethernet/cisco/enic/enic_res.c c->intr_timer_type == VENET_INTR_TYPE_IDLE ? "idle" : c 116 drivers/net/ethernet/cisco/enic/enic_res.c c->intr_timer_usec, c 117 drivers/net/ethernet/cisco/enic/enic_res.c c->loop_tag); c 613 drivers/net/ethernet/cortina/gemini.c unsigned int c = txq->cptr; c 625 drivers/net/ethernet/cortina/gemini.c if (c == r) c 628 drivers/net/ethernet/cortina/gemini.c while (c != r) { c 629 drivers/net/ethernet/cortina/gemini.c txd = txq->ring + c; c 639 drivers/net/ethernet/cortina/gemini.c dev_kfree_skb(txq->skb[c]); c 641 drivers/net/ethernet/cortina/gemini.c c++; c 642 drivers/net/ethernet/cortina/gemini.c c &= m; c 676 drivers/net/ethernet/cortina/gemini.c txq->cptr = c; c 533 drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c struct ethtool_coalesce *c) c 543 drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c c->rx_coalesce_usecs = period; c 544 drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c c->rx_max_coalesced_frames = thresh; c 545 drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c c->use_adaptive_rx_coalesce = false; c 551 drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c struct ethtool_coalesce *c) c 560 drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c if (c->use_adaptive_rx_coalesce) c 563 drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c period = c->rx_coalesce_usecs; c 564 drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c thresh = c->rx_max_coalesced_frames; c 1055 drivers/net/ethernet/google/gve/gve_main.c const char *c = gve_version_prefix; c 1057 drivers/net/ethernet/google/gve/gve_main.c while (*c) { c 1058 drivers/net/ethernet/google/gve/gve_main.c writeb(*c, driver_version_register); c 1059 drivers/net/ethernet/google/gve/gve_main.c c++; c 1062 drivers/net/ethernet/google/gve/gve_main.c c = gve_version_str; c 1063 drivers/net/ethernet/google/gve/gve_main.c while (*c) { c 1064 drivers/net/ethernet/google/gve/gve_main.c writeb(*c, driver_version_register); c 1065 drivers/net/ethernet/google/gve/gve_main.c c++; c 933 drivers/net/ethernet/hisilicon/hns/hns_enet.c int c = ring->next_to_clean; c 939 drivers/net/ethernet/hisilicon/hns/hns_enet.c assert(c > 0 && c < ring->desc_num); c 940 drivers/net/ethernet/hisilicon/hns/hns_enet.c assert(u != c && h != c); /* must be checked before call this func */ c 942 drivers/net/ethernet/hisilicon/hns/hns_enet.c return u > c ? (h > c && h <= u) : (h > c || h <= u); c 2322 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c int c = ring->next_to_clean; c 2327 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c return u > c ? 
(h > c && h <= u) : (h > c || h <= u); c 392 drivers/net/ethernet/i825xx/82596.c static inline void MPU_PORT(struct net_device *dev, int c, volatile void *x) c 397 drivers/net/ethernet/i825xx/82596.c p->porthi = ((c) | (u32) (x)) & 0xffff; c 398 drivers/net/ethernet/i825xx/82596.c p->portlo = ((c) | (u32) (x)) >> 16; c 403 drivers/net/ethernet/i825xx/82596.c u32 v = (u32) (c) | (u32) (x); c 443 drivers/net/ethernet/i825xx/82596.c volatile struct i596_cmd *c = cmd; c 445 drivers/net/ethernet/i825xx/82596.c while (--delcnt && c->command) c 130 drivers/net/ethernet/i825xx/lasi_82596.c static void mpu_port(struct net_device *dev, int c, dma_addr_t x) c 134 drivers/net/ethernet/i825xx/lasi_82596.c u32 v = (u32) (c) | (u32) (x); c 358 drivers/net/ethernet/i825xx/lib82596.c static void mpu_port(struct net_device *dev, int c, dma_addr_t x); c 58 drivers/net/ethernet/i825xx/sni_82596.c static void mpu_port(struct net_device *dev, int c, dma_addr_t x) c 62 drivers/net/ethernet/i825xx/sni_82596.c u32 v = (u32) (c) | (u32) (x); c 1075 drivers/net/ethernet/intel/e100.c u8 *c = (u8 *)config; c 1154 drivers/net/ethernet/intel/e100.c c + 0); c 1156 drivers/net/ethernet/intel/e100.c c + 8); c 1158 drivers/net/ethernet/intel/e100.c c + 16); c 214 drivers/net/ethernet/intel/e1000e/netdev.c __le64 c; c 381 drivers/net/ethernet/intel/e1000e/netdev.c (unsigned long long)le64_to_cpu(u1->c), c 389 drivers/net/ethernet/intel/e1000e/netdev.c (unsigned long long)le64_to_cpu(u1->c), c 1585 drivers/net/ethernet/marvell/mv643xx_eth.c struct ethtool_link_ksettings c = *cmd; c 1596 drivers/net/ethernet/marvell/mv643xx_eth.c c.link_modes.advertising); c 1598 drivers/net/ethernet/marvell/mv643xx_eth.c ethtool_convert_legacy_u32_to_link_mode(c.link_modes.advertising, c 1601 drivers/net/ethernet/marvell/mv643xx_eth.c ret = phy_ethtool_ksettings_set(dev->phydev, &c); c 3980 drivers/net/ethernet/marvell/mvneta.c struct ethtool_coalesce *c) c 3987 drivers/net/ethernet/marvell/mvneta.c rxq->time_coal = c->rx_coalesce_usecs; c 3988 drivers/net/ethernet/marvell/mvneta.c rxq->pkts_coal = c->rx_max_coalesced_frames; c 3995 drivers/net/ethernet/marvell/mvneta.c txq->done_pkts_coal = c->tx_max_coalesced_frames; c 4004 drivers/net/ethernet/marvell/mvneta.c struct ethtool_coalesce *c) c 4008 drivers/net/ethernet/marvell/mvneta.c c->rx_coalesce_usecs = pp->rxqs[0].time_coal; c 4009 drivers/net/ethernet/marvell/mvneta.c c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal; c 4011 drivers/net/ethernet/marvell/mvneta.c c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal; c 4040 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c struct ethtool_coalesce *c) c 4048 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c rxq->time_coal = c->rx_coalesce_usecs; c 4049 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c rxq->pkts_coal = c->rx_max_coalesced_frames; c 4055 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c port->tx_time_coal = c->tx_coalesce_usecs; c 4062 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c txq->done_pkts_coal = c->tx_max_coalesced_frames; c 4073 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c struct ethtool_coalesce *c) c 4077 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c c->rx_coalesce_usecs = port->rxqs[0]->time_coal; c 4078 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal; c 4079 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal; c 4080 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c c->tx_coalesce_usecs = 
port->tx_time_coal; c 181 drivers/net/ethernet/marvell/octeontx2/af/common.h #define NIX_CHAN_CGX_LMAC_CHX(a, b, c) (0x800 + 0x100 * (a) + 0x10 * (b) + (c)) c 460 drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h #define NPC_AF_KPUX_ENTRYX_CAMX(a, b, c) \ c 461 drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h (0x100000 | (a) << 14 | (b) << 6 | (c) << 3) c 468 drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h #define NPC_AF_INTFX_LIDX_LTX_LDX_CFG(a, b, c, d) \ c 469 drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h (0x900000 | (a) << 16 | (b) << 12 | (c) << 5 | (d) << 3) c 470 drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h #define NPC_AF_INTFX_LDATAX_FLAGSX_CFG(a, b, c) \ c 471 drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h (0x980000 | (a) << 16 | (b) << 12 | (c) << 3) c 472 drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h #define NPC_AF_MCAMEX_BANKX_CAMX_INTF(a, b, c) \ c 473 drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h (0x1000000ull | (a) << 10 | (b) << 6 | (c) << 3) c 474 drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h #define NPC_AF_MCAMEX_BANKX_CAMX_W0(a, b, c) \ c 475 drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h (0x1000010ull | (a) << 10 | (b) << 6 | (c) << 3) c 476 drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h #define NPC_AF_MCAMEX_BANKX_CAMX_W1(a, b, c) \ c 477 drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h (0x1000020ull | (a) << 10 | (b) << 6 | (c) << 3) c 746 drivers/net/ethernet/mellanox/mlx5/core/en.h struct mlx5e_channel **c; c 1011 drivers/net/ethernet/mellanox/mlx5/core/en.h int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params, c 1019 drivers/net/ethernet/mellanox/mlx5/core/en.h int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params, c 1022 drivers/net/ethernet/mellanox/mlx5/core/en.h int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params, c 1028 drivers/net/ethernet/mellanox/mlx5/core/en.h int mlx5e_open_cq(struct mlx5e_channel *c, struct dim_cq_moder moder, c 317 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c generic_rq = &priv->channels.c[0]->rq; c 358 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c struct mlx5e_rq *rq = &priv->channels.c[i]->rq; c 257 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c struct mlx5e_channel *c = priv->channels.c[i]; c 260 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c struct mlx5e_txqsq *sq = &c->sq[tc]; c 492 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c sq = &priv->channels.c[sq_num]->xdpsq; c 66 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c struct mlx5e_channel *c) c 81 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->xskrq.cq); c 85 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c err = mlx5e_open_rq(c, params, &cparam->rq, xsk, umem, &c->xskrq); c 89 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xsksq.cq); c 99 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, umem, &c->xsksq, true); c 103 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->xskicosq.cq); c 110 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->xskicosq); c 116 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c spin_lock_init(&c->xskicosq_lock); c 118 
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c set_bit(MLX5E_CHANNEL_STATE_XSK, c->state); c 123 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c mlx5e_close_cq(&c->xskicosq.cq); c 126 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c mlx5e_close_xdpsq(&c->xsksq); c 129 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c mlx5e_close_cq(&c->xsksq.cq); c 132 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c mlx5e_close_rq(&c->xskrq); c 135 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c mlx5e_close_cq(&c->xskrq.cq); c 143 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c void mlx5e_close_xsk(struct mlx5e_channel *c) c 145 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state); c 146 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c napi_synchronize(&c->napi); c 149 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c mlx5e_close_rq(&c->xskrq); c 150 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c mlx5e_close_cq(&c->xskrq.cq); c 151 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c mlx5e_close_icosq(&c->xskicosq); c 152 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c mlx5e_close_cq(&c->xskicosq.cq); c 153 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c mlx5e_close_xdpsq(&c->xsksq); c 154 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c mlx5e_close_cq(&c->xsksq.cq); c 156 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c memset(&c->xskrq, 0, sizeof(c->xskrq)); c 157 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c memset(&c->xsksq, 0, sizeof(c->xsksq)); c 158 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c memset(&c->xskicosq, 0, sizeof(c->xskicosq)); c 161 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c void mlx5e_activate_xsk(struct mlx5e_channel *c) c 163 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c mlx5e_activate_icosq(&c->xskicosq); c 164 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state); c 167 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c spin_lock(&c->xskicosq_lock); c 168 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c mlx5e_trigger_irq(&c->xskicosq); c 169 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c spin_unlock(&c->xskicosq_lock); c 172 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c void mlx5e_deactivate_xsk(struct mlx5e_channel *c) c 174 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c mlx5e_deactivate_rq(&c->xskrq); c 176 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c mlx5e_deactivate_icosq(&c->xskicosq); c 193 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c int mlx5e_xsk_redirect_rqt_to_channel(struct mlx5e_priv *priv, struct mlx5e_channel *c) c 195 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c return mlx5e_redirect_xsk_rqt(priv, c->ix, c->xskrq.rqn); c 211 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c struct mlx5e_channel *c = chs->c[i]; c 213 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c if (!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) c 216 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c err = mlx5e_xsk_redirect_rqt_to_channel(priv, c); c 225 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c if (!test_bit(MLX5E_CHANNEL_STATE_XSK, chs->c[i]->state)) c 242 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c if (!test_bit(MLX5E_CHANNEL_STATE_XSK, chs->c[i]->state)) c 16 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h struct mlx5e_channel *c); c 17 
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h void mlx5e_close_xsk(struct mlx5e_channel *c); c 18 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h void mlx5e_activate_xsk(struct mlx5e_channel *c); c 19 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h void mlx5e_deactivate_xsk(struct mlx5e_channel *c); c 20 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h int mlx5e_xsk_redirect_rqt_to_channel(struct mlx5e_priv *priv, struct mlx5e_channel *c); c 14 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c struct mlx5e_channel *c; c 23 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c c = priv->channels.c[ix]; c 25 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c if (unlikely(!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))) c 28 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c if (!napi_if_scheduled_mark_missed(&c->napi)) { c 33 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &c->xskicosq.state))) c 36 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->xskicosq.state)) c 39 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c spin_lock(&c->xskicosq_lock); c 40 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c mlx5e_trigger_irq(&c->xskicosq); c 41 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c spin_unlock(&c->xskicosq_lock); c 107 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c struct mlx5e_channel *c; c 138 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c c = priv->channels.c[ix]; c 140 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c err = mlx5e_open_xsk(priv, params, &xsk, umem, c); c 144 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c mlx5e_activate_xsk(c); c 150 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c err = mlx5e_xsk_redirect_rqt_to_channel(priv, priv->channels.c[ix]); c 157 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c mlx5e_deactivate_xsk(c); c 158 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c mlx5e_close_xsk(c); c 184 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c struct mlx5e_channel *c; c 196 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c c = priv->channels.c[ix]; c 198 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c mlx5e_deactivate_xsk(c); c 199 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c mlx5e_close_xsk(c); c 521 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c struct mlx5e_channel *c = priv->channels.c[i]; c 523 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c for (tc = 0; tc < c->num_tc; tc++) { c 525 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c &c->sq[tc].cq.mcq, c 530 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq, c 1850 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c struct mlx5e_channel *c; c 1858 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c c = channels->c[i]; c 1860 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); c 1862 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c __clear_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); c 252 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_channel *c) c 258 drivers/net/ethernet/mellanox/mlx5/core/en_main.c GFP_KERNEL, cpu_to_node(c->cpu)); c 262 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_build_umr_wqe(rq, &c->icosq, &rq->mpwqe.umr_wqe); c 373 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_alloc_rq(struct 
mlx5e_channel *c, c 381 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5_core_dev *mdev = c->mdev; c 391 drivers/net/ethernet/mellanox/mlx5/core/en_main.c rqp->wq.db_numa_node = cpu_to_node(c->cpu); c 394 drivers/net/ethernet/mellanox/mlx5/core/en_main.c rq->pdev = c->pdev; c 395 drivers/net/ethernet/mellanox/mlx5/core/en_main.c rq->netdev = c->netdev; c 396 drivers/net/ethernet/mellanox/mlx5/core/en_main.c rq->tstamp = c->tstamp; c 398 drivers/net/ethernet/mellanox/mlx5/core/en_main.c rq->channel = c; c 399 drivers/net/ethernet/mellanox/mlx5/core/en_main.c rq->ix = c->ix; c 402 drivers/net/ethernet/mellanox/mlx5/core/en_main.c rq->xdpsq = &c->rq_xdpsq; c 406 drivers/net/ethernet/mellanox/mlx5/core/en_main.c rq->stats = &c->priv->channel_stats[c->ix].xskrq; c 408 drivers/net/ethernet/mellanox/mlx5/core/en_main.c rq->stats = &c->priv->channel_stats[c->ix].rq; c 451 drivers/net/ethernet/mellanox/mlx5/core/en_main.c rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe; c 455 drivers/net/ethernet/mellanox/mlx5/core/en_main.c netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n"); c 461 drivers/net/ethernet/mellanox/mlx5/core/en_main.c netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err); c 480 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_rq_alloc_mpwqe_info(rq, c); c 501 drivers/net/ethernet/mellanox/mlx5/core/en_main.c GFP_KERNEL, cpu_to_node(c->cpu)); c 507 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_init_di_list(rq, wq_sz, c->cpu); c 515 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (c->priv->ipsec) c 519 drivers/net/ethernet/mellanox/mlx5/core/en_main.c rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe; c 522 drivers/net/ethernet/mellanox/mlx5/core/en_main.c netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err); c 531 drivers/net/ethernet/mellanox/mlx5/core/en_main.c rq->mkey_be = c->mkey_be; c 551 drivers/net/ethernet/mellanox/mlx5/core/en_main.c pp_params.nid = cpu_to_node(c->cpu); c 552 drivers/net/ethernet/mellanox/mlx5/core/en_main.c pp_params.dev = c->pdev; c 743 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_channel *c = rq->channel; c 744 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_priv *priv = c->priv; c 774 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_channel *c = rq->channel; c 775 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5_core_dev *mdev = c->mdev; c 809 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_channel *c = rq->channel; c 820 drivers/net/ethernet/mellanox/mlx5/core/en_main.c netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n", c 821 drivers/net/ethernet/mellanox/mlx5/core/en_main.c c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes); c 882 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params, c 888 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_alloc_rq(c, params, xsk, umem, param, rq); c 900 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (MLX5_CAP_ETH(c->mdev, cqe_checksum_full)) c 901 drivers/net/ethernet/mellanox/mlx5/core/en_main.c __set_bit(MLX5E_RQ_STATE_CSUM_FULL, &c->rq.state); c 904 drivers/net/ethernet/mellanox/mlx5/core/en_main.c __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); c 910 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp) 
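(Illustrative aside, not part of the listing.) The en_main.c entries around lines 900-911 show mlx5e_open_rq() conditionally setting per-RQ state bits after the queue is created: CSUM_FULL only when the device reports cqe_checksum_full, AM when dynamic interrupt moderation is enabled, and NO_CSUM_COMPLETE when the pflag is set or XDP is attached. Below is a hedged fragment of that conditional bit setup, to be read in the driver's context; the example_* name and the bool parameters are invented here, while the state bits and __set_bit() come from the excerpt.

/* Hypothetical helper, not part of mlx5e; shows the conditional
 * __set_bit() pattern applied to rq->state in the listing. */
static void example_init_rq_state(unsigned long *state, bool csum_full_cap,
				  bool dim_enabled, bool no_csum_complete)
{
	if (csum_full_cap)
		__set_bit(MLX5E_RQ_STATE_CSUM_FULL, state);
	if (dim_enabled)
		__set_bit(MLX5E_RQ_STATE_AM, state);
	if (no_csum_complete)
		__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, state);
}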
c 911 drivers/net/ethernet/mellanox/mlx5/core/en_main.c __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); c 988 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c, c 996 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5_core_dev *mdev = c->mdev; c 1000 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->pdev = c->pdev; c 1001 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->mkey_be = c->mkey_be; c 1002 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->channel = c; c 1009 drivers/net/ethernet/mellanox/mlx5/core/en_main.c &c->priv->channel_stats[c->ix].xsksq : c 1011 drivers/net/ethernet/mellanox/mlx5/core/en_main.c &c->priv->channel_stats[c->ix].xdpsq : c 1012 drivers/net/ethernet/mellanox/mlx5/core/en_main.c &c->priv->channel_stats[c->ix].rq_xdpsq; c 1014 drivers/net/ethernet/mellanox/mlx5/core/en_main.c param->wq.db_numa_node = cpu_to_node(c->cpu); c 1020 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu)); c 1064 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_alloc_icosq(struct mlx5e_channel *c, c 1069 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5_core_dev *mdev = c->mdev; c 1073 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->channel = c; c 1076 drivers/net/ethernet/mellanox/mlx5/core/en_main.c param->wq.db_numa_node = cpu_to_node(c->cpu); c 1082 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu)); c 1130 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, c 1138 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5_core_dev *mdev = c->mdev; c 1142 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->pdev = c->pdev; c 1143 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->tstamp = c->tstamp; c 1145 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->mkey_be = c->mkey_be; c 1146 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->channel = c; c 1147 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->ch_ix = c->ix; c 1152 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->stats = &c->priv->channel_stats[c->ix].sq[tc]; c 1157 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (MLX5_IPSEC_DEV(c->priv->mdev)) c 1160 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (mlx5_accel_is_tls_device(c->priv->mdev)) { c 1168 drivers/net/ethernet/mellanox/mlx5/core/en_main.c param->wq.db_numa_node = cpu_to_node(c->cpu); c 1174 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu)); c 1308 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_open_txqsq(struct mlx5e_channel *c, c 1320 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq, tc); c 1329 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn); c 1333 drivers/net/ethernet/mellanox/mlx5/core/en_main.c tx_rate = c->priv->tx_rates[sq->txq_ix]; c 1335 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate); c 1365 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_channel *c = sq->channel; c 1370 drivers/net/ethernet/mellanox/mlx5/core/en_main.c napi_synchronize(&c->napi); c 1391 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_channel *c = sq->channel; c 1392 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct 
mlx5_core_dev *mdev = c->mdev; c 1414 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params, c 1420 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_alloc_icosq(c, param, sq); c 1427 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn); c 1446 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_channel *c = icosq->channel; c 1449 drivers/net/ethernet/mellanox/mlx5/core/en_main.c napi_synchronize(&c->napi); c 1454 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_channel *c = sq->channel; c 1456 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_destroy_sq(c->mdev, sq->sqn); c 1460 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params, c 1467 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_alloc_xdpsq(c, params, umem, param, sq, is_redirect); c 1472 drivers/net/ethernet/mellanox/mlx5/core/en_main.c csp.tisn = c->priv->tisn[c->lag_port][0]; /* tc = 0 */ c 1477 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn); c 1523 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_channel *c = sq->channel; c 1526 drivers/net/ethernet/mellanox/mlx5/core/en_main.c napi_synchronize(&c->napi); c 1528 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_destroy_sq(c->mdev, sq->sqn); c 1573 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_alloc_cq(struct mlx5e_channel *c, c 1577 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5_core_dev *mdev = c->priv->mdev; c 1580 drivers/net/ethernet/mellanox/mlx5/core/en_main.c param->wq.buf_numa_node = cpu_to_node(c->cpu); c 1581 drivers/net/ethernet/mellanox/mlx5/core/en_main.c param->wq.db_numa_node = cpu_to_node(c->cpu); c 1582 drivers/net/ethernet/mellanox/mlx5/core/en_main.c param->eq_ix = c->ix; c 1586 drivers/net/ethernet/mellanox/mlx5/core/en_main.c cq->napi = &c->napi; c 1587 drivers/net/ethernet/mellanox/mlx5/core/en_main.c cq->channel = c; c 1651 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int mlx5e_open_cq(struct mlx5e_channel *c, struct dim_cq_moder moder, c 1654 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5_core_dev *mdev = c->mdev; c 1657 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_alloc_cq(c, param, cq); c 1681 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_open_tx_cqs(struct mlx5e_channel *c, c 1688 drivers/net/ethernet/mellanox/mlx5/core/en_main.c for (tc = 0; tc < c->num_tc; tc++) { c 1689 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_open_cq(c, params->tx_cq_moderation, c 1690 drivers/net/ethernet/mellanox/mlx5/core/en_main.c &cparam->tx_cq, &c->sq[tc].cq); c 1699 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_cq(&c->sq[tc].cq); c 1704 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_close_tx_cqs(struct mlx5e_channel *c) c 1708 drivers/net/ethernet/mellanox/mlx5/core/en_main.c for (tc = 0; tc < c->num_tc; tc++) c 1709 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_cq(&c->sq[tc].cq); c 1712 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_open_sqs(struct mlx5e_channel *c, c 1719 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int txq_ix = c->ix + tc * params->num_channels; c 1721 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_open_txqsq(c, 
c->priv->tisn[c->lag_port][tc], txq_ix, c 1722 drivers/net/ethernet/mellanox/mlx5/core/en_main.c params, &cparam->sq, &c->sq[tc], tc); c 1731 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_txqsq(&c->sq[tc]); c 1736 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_close_sqs(struct mlx5e_channel *c) c 1740 drivers/net/ethernet/mellanox/mlx5/core/en_main.c for (tc = 0; tc < c->num_tc; tc++) c 1741 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_txqsq(&c->sq[tc]); c 1825 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_alloc_xps_cpumask(struct mlx5e_channel *c, c 1828 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int num_comp_vectors = mlx5_comp_vectors_count(c->mdev); c 1831 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (!zalloc_cpumask_var(&c->xps_cpumask, GFP_KERNEL)) c 1834 drivers/net/ethernet/mellanox/mlx5/core/en_main.c for (irq = c->ix; irq < num_comp_vectors; irq += params->num_channels) { c 1835 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(c->mdev, irq)); c 1837 drivers/net/ethernet/mellanox/mlx5/core/en_main.c cpumask_set_cpu(cpu, c->xps_cpumask); c 1843 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_free_xps_cpumask(struct mlx5e_channel *c) c 1845 drivers/net/ethernet/mellanox/mlx5/core/en_main.c free_cpumask_var(c->xps_cpumask); c 1848 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_open_queues(struct mlx5e_channel *c, c 1855 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq); c 1859 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_open_tx_cqs(c, params, cparam); c 1863 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xdpsq.cq); c 1867 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq); c 1872 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = c->xdp ? 
mlx5e_open_cq(c, params->tx_cq_moderation, c 1873 drivers/net/ethernet/mellanox/mlx5/core/en_main.c &cparam->tx_cq, &c->rq_xdpsq.cq) : 0; c 1877 drivers/net/ethernet/mellanox/mlx5/core/en_main.c napi_enable(&c->napi); c 1879 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq); c 1883 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_open_sqs(c, params, cparam); c 1887 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (c->xdp) { c 1888 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, c 1889 drivers/net/ethernet/mellanox/mlx5/core/en_main.c &c->rq_xdpsq, false); c 1894 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_open_rq(c, params, &cparam->rq, NULL, NULL, &c->rq); c 1898 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, &c->xdpsq, true); c 1905 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_rq(&c->rq); c 1908 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (c->xdp) c 1909 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_xdpsq(&c->rq_xdpsq); c 1912 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_sqs(c); c 1915 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_icosq(&c->icosq); c 1918 drivers/net/ethernet/mellanox/mlx5/core/en_main.c napi_disable(&c->napi); c 1920 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (c->xdp) c 1921 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_cq(&c->rq_xdpsq.cq); c 1924 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_cq(&c->rq.cq); c 1927 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_cq(&c->xdpsq.cq); c 1930 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_tx_cqs(c); c 1933 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_cq(&c->icosq.cq); c 1938 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_close_queues(struct mlx5e_channel *c) c 1940 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_xdpsq(&c->xdpsq); c 1941 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_rq(&c->rq); c 1942 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (c->xdp) c 1943 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_xdpsq(&c->rq_xdpsq); c 1944 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_sqs(c); c 1945 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_icosq(&c->icosq); c 1946 drivers/net/ethernet/mellanox/mlx5/core/en_main.c napi_disable(&c->napi); c 1947 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (c->xdp) c 1948 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_cq(&c->rq_xdpsq.cq); c 1949 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_cq(&c->rq.cq); c 1950 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_cq(&c->xdpsq.cq); c 1951 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_tx_cqs(c); c 1952 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_cq(&c->icosq.cq); c 1971 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_channel *c; c 1980 drivers/net/ethernet/mellanox/mlx5/core/en_main.c c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); c 1981 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (!c) c 1984 drivers/net/ethernet/mellanox/mlx5/core/en_main.c c->priv = priv; c 1985 drivers/net/ethernet/mellanox/mlx5/core/en_main.c c->mdev = priv->mdev; c 1986 
drivers/net/ethernet/mellanox/mlx5/core/en_main.c c->tstamp = &priv->tstamp; c 1987 drivers/net/ethernet/mellanox/mlx5/core/en_main.c c->ix = ix; c 1988 drivers/net/ethernet/mellanox/mlx5/core/en_main.c c->cpu = cpu; c 1989 drivers/net/ethernet/mellanox/mlx5/core/en_main.c c->pdev = priv->mdev->device; c 1990 drivers/net/ethernet/mellanox/mlx5/core/en_main.c c->netdev = priv->netdev; c 1991 drivers/net/ethernet/mellanox/mlx5/core/en_main.c c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); c 1992 drivers/net/ethernet/mellanox/mlx5/core/en_main.c c->num_tc = params->num_tc; c 1993 drivers/net/ethernet/mellanox/mlx5/core/en_main.c c->xdp = !!params->xdp_prog; c 1994 drivers/net/ethernet/mellanox/mlx5/core/en_main.c c->stats = &priv->channel_stats[ix].ch; c 1995 drivers/net/ethernet/mellanox/mlx5/core/en_main.c c->irq_desc = irq_to_desc(irq); c 1996 drivers/net/ethernet/mellanox/mlx5/core/en_main.c c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix); c 1998 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_alloc_xps_cpumask(c, params); c 2002 drivers/net/ethernet/mellanox/mlx5/core/en_main.c netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64); c 2004 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_open_queues(c, params, cparam); c 2010 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_open_xsk(priv, params, &xsk, umem, c); c 2015 drivers/net/ethernet/mellanox/mlx5/core/en_main.c *cp = c; c 2020 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_queues(c); c 2023 drivers/net/ethernet/mellanox/mlx5/core/en_main.c netif_napi_del(&c->napi); c 2024 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_free_xps_cpumask(c); c 2027 drivers/net/ethernet/mellanox/mlx5/core/en_main.c kvfree(c); c 2032 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_activate_channel(struct mlx5e_channel *c) c 2036 drivers/net/ethernet/mellanox/mlx5/core/en_main.c for (tc = 0; tc < c->num_tc; tc++) c 2037 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_activate_txqsq(&c->sq[tc]); c 2038 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_activate_icosq(&c->icosq); c 2039 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_activate_rq(&c->rq); c 2040 drivers/net/ethernet/mellanox/mlx5/core/en_main.c netif_set_xps_queue(c->netdev, c->xps_cpumask, c->ix); c 2042 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) c 2043 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_activate_xsk(c); c 2046 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_deactivate_channel(struct mlx5e_channel *c) c 2050 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) c 2051 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_deactivate_xsk(c); c 2053 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_deactivate_rq(&c->rq); c 2054 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_deactivate_icosq(&c->icosq); c 2055 drivers/net/ethernet/mellanox/mlx5/core/en_main.c for (tc = 0; tc < c->num_tc; tc++) c 2056 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_deactivate_txqsq(&c->sq[tc]); c 2059 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_close_channel(struct mlx5e_channel *c) c 2061 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) c 2062 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_xsk(c); c 2063 
drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_queues(c); c 2064 drivers/net/ethernet/mellanox/mlx5/core/en_main.c netif_napi_del(&c->napi); c 2065 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_free_xps_cpumask(c); c 2067 drivers/net/ethernet/mellanox/mlx5/core/en_main.c kvfree(c); c 2355 drivers/net/ethernet/mellanox/mlx5/core/en_main.c chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL); c 2357 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (!chs->c || !cparam) c 2367 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_open_channel(priv, i, &chs->params, cparam, umem, &chs->c[i]); c 2378 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_channel(chs->c[i]); c 2381 drivers/net/ethernet/mellanox/mlx5/core/en_main.c kfree(chs->c); c 2392 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_activate_channel(chs->c[i]); c 2405 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq, timeout); c 2420 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_deactivate_channel(chs->c[i]); c 2428 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_channel(chs->c[i]); c 2430 drivers/net/ethernet/mellanox/mlx5/core/en_main.c kfree(chs->c); c 2544 drivers/net/ethernet/mellanox/mlx5/core/en_main.c rqn = rrp.rss.channels->c[ix]->rq.rqn; c 2586 drivers/net/ethernet/mellanox/mlx5/core/en_main.c return rrp.rss.channels->c[ix]->rq.rqn; c 2933 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_channel *c = priv->channels.c[i]; c 2934 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_txqsq *sq = &c->sq[tc]; c 3438 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable); c 3452 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd); c 4476 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_channel *c = priv->channels.c[i]; c 4477 drivers/net/ethernet/mellanox/mlx5/core/en_main.c bool xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state); c 4479 drivers/net/ethernet/mellanox/mlx5/core/en_main.c clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state); c 4481 drivers/net/ethernet/mellanox/mlx5/core/en_main.c clear_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state); c 4482 drivers/net/ethernet/mellanox/mlx5/core/en_main.c napi_synchronize(&c->napi); c 4485 drivers/net/ethernet/mellanox/mlx5/core/en_main.c old_prog = xchg(&c->rq.xdp_prog, prog); c 4490 drivers/net/ethernet/mellanox/mlx5/core/en_main.c old_prog = xchg(&c->xskrq.xdp_prog, prog); c 4495 drivers/net/ethernet/mellanox/mlx5/core/en_main.c set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state); c 4497 drivers/net/ethernet/mellanox/mlx5/core/en_main.c set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state); c 4499 drivers/net/ethernet/mellanox/mlx5/core/en_main.c napi_schedule(&c->napi); c 470 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c struct mlx5e_channel *c; c 480 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c c = priv->channels.c[n]; c 481 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c for (tc = 0; tc < c->num_tc; tc++) c 482 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c sqs[num_sqs++] = c->sq[tc].sqn; c 376 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c #define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c) c 447 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c #define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c) c 532 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c #define 
PPORT_802_3_OFF(c) \ c 534 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c counter_set.eth_802_3_cntrs_grp_data_layout.c##_high) c 604 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c #define PPORT_2863_OFF(c) \ c 606 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c counter_set.eth_2863_cntrs_grp_data_layout.c##_high) c 655 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c #define PPORT_2819_OFF(c) \ c 657 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c counter_set.eth_2819_cntrs_grp_data_layout.c##_high) c 719 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c #define PPORT_PHY_STATISTICAL_OFF(c) \ c 721 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c counter_set.phys_layer_statistical_cntrs.c##_high) c 827 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c #define PPORT_ETH_EXT_OFF(c) \ c 829 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c counter_set.eth_extended_cntrs_grp_data_layout.c##_high) c 886 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c #define PCIE_PERF_OFF(c) \ c 887 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c) c 893 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c #define PCIE_PERF_OFF64(c) \ c 894 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high) c 989 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c #define PPORT_PER_TC_PRIO_OFF(c) \ c 991 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c counter_set.eth_per_tc_prio_grp_data_layout.c##_high) c 999 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c #define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \ c 1001 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high) c 1130 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c #define PPORT_PER_PRIO_OFF(c) \ c 1132 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c counter_set.eth_per_prio_grp_data_layout.c##_high) c 176 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h #define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \ c 177 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h vstats->query_vport_out, c) c 183 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h #define PPORT_802_3_GET(pstats, c) \ c 185 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h counter_set.eth_802_3_cntrs_grp_data_layout.c##_high) c 186 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h #define PPORT_2863_GET(pstats, c) \ c 188 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h counter_set.eth_2863_cntrs_grp_data_layout.c##_high) c 189 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h #define PPORT_2819_GET(pstats, c) \ c 191 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h counter_set.eth_2819_cntrs_grp_data_layout.c##_high) c 192 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h #define PPORT_PHY_STATISTICAL_GET(pstats, c) \ c 194 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h counter_set.phys_layer_statistical_cntrs.c##_high) c 195 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h #define PPORT_PER_PRIO_GET(pstats, prio, c) \ c 197 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h counter_set.eth_per_prio_grp_data_layout.c##_high) c 199 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h #define PPORT_ETH_EXT_GET(pstats, c) \ c 201 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h counter_set.eth_extended_cntrs_grp_data_layout.c##_high) c 215 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h #define PCIE_PERF_GET(pcie_stats, c) \ c 217 
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h counter_set.pcie_perf_cntrs_grp_data_layout.c) c 219 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h #define PCIE_PERF_GET64(pcie_stats, c) \ c 221 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h counter_set.pcie_perf_cntrs_grp_data_layout.c##_high) c 39 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c) c 45 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c idata = irq_desc_get_irq_data(c->irq_desc); c 111 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel, c 113 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c struct mlx5e_ch_stats *ch_stats = c->stats; c 114 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c struct mlx5e_xdpsq *xsksq = &c->xsksq; c 115 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c struct mlx5e_rq *xskrq = &c->xskrq; c 116 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c struct mlx5e_rq *rq = &c->rq; c 117 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c bool xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state); c 126 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c for (i = 0; i < c->num_tc; i++) c 127 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget); c 129 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq); c 131 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c if (c->xdp) c 132 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c busy |= mlx5e_poll_xdpsq_cq(&c->rq_xdpsq.cq); c 144 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c mlx5e_poll_ico_cq(&c->icosq.cq); c 148 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c if (mlx5e_poll_ico_cq(&c->xskicosq.cq)) c 152 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->xskicosq.state); c 160 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c if (likely(mlx5e_channel_no_affinity_change(c))) c 173 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c for (i = 0; i < c->num_tc; i++) { c 174 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c mlx5e_handle_tx_dim(&c->sq[i]); c 175 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c mlx5e_cq_arm(&c->sq[i].cq); c 181 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c mlx5e_cq_arm(&c->icosq.cq); c 182 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c mlx5e_cq_arm(&c->xdpsq.cq); c 186 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c mlx5e_cq_arm(&c->xskicosq.cq); c 192 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c mlx5e_trigger_irq(&c->icosq); c 211 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c struct mlx5e_channel *c = cq->channel; c 212 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c struct net_device *netdev = c->netdev; c 436 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c struct mlx5_fc_cache c; c 438 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c c = counter->cache; c 440 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c *bytes = c.bytes - counter->lastbytes; c 441 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c *packets = c.packets - counter->lastpackets; c 442 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c *lastuse = c.lastuse; c 444 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c counter->lastbytes = c.bytes; c 445 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c counter->lastpackets = c.packets; c 74 drivers/net/ethernet/mellanox/mlxsw/pci_hw.h MLXSW_ITEM32(pci, wqe, c, 0x00, 31, 1); c 104 
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c static u16 mlxsw_sp_acl_bf_crc_byte(u16 crc, u8 c) c 106 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c return (crc << 8) ^ mlxsw_sp_acl_bf_crc_tab[(crc >> 8) ^ c]; c 768 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c const struct mr_mfc *c) c 770 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c struct mfc_cache *mfc = (struct mfc_cache *) c; c 786 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c struct mr_mfc *c) c 788 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c const struct mfc_cache *mfc = (struct mfc_cache *) c; c 815 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c const struct mr_mfc *c) c 817 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c struct mfc6_cache *mfc = (struct mfc6_cache *) c; c 833 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c struct mr_mfc *c) c 835 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c const struct mfc6_cache *mfc = (struct mfc6_cache *) c; c 89 drivers/net/ethernet/microchip/lan743x_ptp.c int c, i; c 92 drivers/net/ethernet/microchip/lan743x_ptp.c c = ptp->tx_ts_skb_queue_size; c 94 drivers/net/ethernet/microchip/lan743x_ptp.c if (c > ptp->tx_ts_queue_size) c 95 drivers/net/ethernet/microchip/lan743x_ptp.c c = ptp->tx_ts_queue_size; c 96 drivers/net/ethernet/microchip/lan743x_ptp.c if (c <= 0) c 99 drivers/net/ethernet/microchip/lan743x_ptp.c for (i = 0; i < c; i++) { c 123 drivers/net/ethernet/microchip/lan743x_ptp.c ptp->tx_ts_ignore_sync_queue >>= c; c 124 drivers/net/ethernet/microchip/lan743x_ptp.c for (i = c; i < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS; i++) { c 125 drivers/net/ethernet/microchip/lan743x_ptp.c ptp->tx_ts_skb_queue[i - c] = ptp->tx_ts_skb_queue[i]; c 126 drivers/net/ethernet/microchip/lan743x_ptp.c ptp->tx_ts_seconds_queue[i - c] = ptp->tx_ts_seconds_queue[i]; c 127 drivers/net/ethernet/microchip/lan743x_ptp.c ptp->tx_ts_nseconds_queue[i - c] = ptp->tx_ts_nseconds_queue[i]; c 128 drivers/net/ethernet/microchip/lan743x_ptp.c ptp->tx_ts_header_queue[i - c] = ptp->tx_ts_header_queue[i]; c 135 drivers/net/ethernet/microchip/lan743x_ptp.c ptp->tx_ts_skb_queue_size -= c; c 136 drivers/net/ethernet/microchip/lan743x_ptp.c ptp->tx_ts_queue_size -= c; c 138 drivers/net/ethernet/microchip/lan743x_ptp.c ptp->pending_tx_timestamps -= c; c 219 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c struct ethtool_link_ksettings *c) c 223 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c ethtool_link_ksettings_add_link_mode(c, supported, FEC_NONE); c 225 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c ethtool_link_ksettings_add_link_mode(c, advertising, FEC_NONE); c 231 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c ethtool_link_ksettings_add_link_mode(c, supported, FEC_BASER); c 232 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c ethtool_link_ksettings_add_link_mode(c, advertising, FEC_BASER); c 236 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c ethtool_link_ksettings_add_link_mode(c, supported, FEC_RS); c 237 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c ethtool_link_ksettings_add_link_mode(c, advertising, FEC_RS); c 21 drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h u8 c = total_len & 0xff; c 23 drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h crc = crc32_be(crc, &c, 1); c 41 drivers/net/ethernet/qlogic/netxen/netxen_nic.h #define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) c 46 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h #define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) c 2378 
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c #define IS_QLC_83XX_USED(a, b, c) (((1 << a->portnum) & b) || ((c >> 6) & 0x1)) c 788 drivers/net/ethernet/realtek/r8169_main.c static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c, c 795 drivers/net/ethernet/realtek/r8169_main.c if (c->check(tp) == high) c 800 drivers/net/ethernet/realtek/r8169_main.c c->msg, !high, n, d); c 805 drivers/net/ethernet/realtek/r8169_main.c const struct rtl_cond *c, c 808 drivers/net/ethernet/realtek/r8169_main.c return rtl_loop_wait(tp, c, rtl_udelay, d, n, true); c 812 drivers/net/ethernet/realtek/r8169_main.c const struct rtl_cond *c, c 815 drivers/net/ethernet/realtek/r8169_main.c return rtl_loop_wait(tp, c, rtl_udelay, d, n, false); c 819 drivers/net/ethernet/realtek/r8169_main.c const struct rtl_cond *c, c 822 drivers/net/ethernet/realtek/r8169_main.c return rtl_loop_wait(tp, c, msleep, d, n, true); c 826 drivers/net/ethernet/realtek/r8169_main.c const struct rtl_cond *c, c 829 drivers/net/ethernet/realtek/r8169_main.c return rtl_loop_wait(tp, c, msleep, d, n, false); c 538 drivers/net/ethernet/sfc/bitfield.h #define EFX_OWORD32(a, b, c, d) \ c 540 drivers/net/ethernet/sfc/bitfield.h cpu_to_le32(c), cpu_to_le32(d) } } c 535 drivers/net/ethernet/sfc/falcon/bitfield.h #define EF4_OWORD32(a, b, c, d) \ c 537 drivers/net/ethernet/sfc/falcon/bitfield.h cpu_to_le32(c), cpu_to_le32(d) } } c 350 drivers/net/ethernet/smsc/smc91c92_cs.c int i, j, da, c; c 357 drivers/net/ethernet/smsc/smc91c92_cs.c c = *s++; c 359 drivers/net/ethernet/smsc/smc91c92_cs.c da += ((c >= '0') && (c <= '9')) ? c 360 drivers/net/ethernet/smsc/smc91c92_cs.c (c - '0') : ((c & 0x0f) + 9); c 29 drivers/net/ethernet/ti/cpts.c #define cpts_read32(c, r) readl_relaxed(&c->reg->r) c 30 drivers/net/ethernet/ti/cpts.c #define cpts_write32(c, v, r) writel_relaxed(v, &c->reg->r) c 734 drivers/net/ethernet/toshiba/ps3_gelic_net.c static unsigned int c; c 738 drivers/net/ethernet/toshiba/ps3_gelic_net.c pr_debug("%s: hd=%d c=%ud\n", __func__, skb_headroom(skb), c); c 31 drivers/net/fddi/skfp/h/types.h #define outp(p,c) iowrite8(c,p) c 558 drivers/net/fddi/skfp/pmf.c char c ; c 895 drivers/net/fddi/skfp/pmf.c while ((c = *swap++)) { c 896 drivers/net/fddi/skfp/pmf.c switch(c) { c 910 drivers/net/fddi/skfp/pmf.c if (c == 'r') { c 1081 drivers/net/fddi/skfp/pmf.c char c ; c 1168 drivers/net/fddi/skfp/pmf.c while (swap && (c = *swap++)) { c 1169 drivers/net/fddi/skfp/pmf.c switch(c) { c 1189 drivers/net/fddi/skfp/pmf.c if (c == 'r') { c 1560 drivers/net/fddi/skfp/pmf.c char *c ; c 1612 drivers/net/fddi/skfp/pmf.c c = (char *)(pa+1) ; c 1613 drivers/net/fddi/skfp/pmf.c dump_hex(c,16) ; c 1616 drivers/net/fddi/skfp/pmf.c c += 16 ; c 1626 drivers/net/fddi/skfp/pmf.c dump_hex(c,nn) ; c 1630 drivers/net/fddi/skfp/pmf.c c += 16 ; c 25 drivers/net/fddi/skfp/queue.c #define PRINTF(a,b,c) c 163 drivers/net/hamradio/mkiss.c unsigned char c; c 173 drivers/net/hamradio/mkiss.c switch (c = *s++) { c 183 drivers/net/hamradio/mkiss.c *ptr++ = c; c 202 drivers/net/hamradio/mkiss.c unsigned char c=0; c 207 drivers/net/hamradio/mkiss.c c = *s++; c 209 drivers/net/hamradio/mkiss.c c = crc >> 8; c 211 drivers/net/hamradio/mkiss.c c = crc & 0xff; c 215 drivers/net/hamradio/mkiss.c switch (c) { c 225 drivers/net/hamradio/mkiss.c *ptr++ = c; c 559 drivers/net/hamradio/yam.c unsigned char c = yp->rx_crcl; c 560 drivers/net/hamradio/yam.c yp->rx_crcl = (chktabl[c] ^ yp->rx_crch); c 561 drivers/net/hamradio/yam.c yp->rx_crch = (chktabh[c] ^ rxb); c 479 
drivers/net/ieee802154/at86rf230.c struct at86rf2xx_chip_data *c = lp->data; c 505 drivers/net/ieee802154/at86rf230.c tim = c->t_off_to_aack * NSEC_PER_USEC; c 514 drivers/net/ieee802154/at86rf230.c tim = c->t_off_to_tx_on * NSEC_PER_USEC; c 534 drivers/net/ieee802154/at86rf230.c tim = (c->t_frame + c->t_p_ack) * NSEC_PER_USEC; c 546 drivers/net/ieee802154/at86rf230.c tim = c->t_reset_to_off * NSEC_PER_USEC; c 143 drivers/net/phy/icplus.c int c; c 145 drivers/net/phy/icplus.c c = ip1xx_reset(phydev); c 146 drivers/net/phy/icplus.c if (c < 0) c 147 drivers/net/phy/icplus.c return c; c 150 drivers/net/phy/icplus.c c = phy_read(phydev, IP1001_SPEC_CTRL_STATUS_2); c 151 drivers/net/phy/icplus.c if (c < 0) c 152 drivers/net/phy/icplus.c return c; c 153 drivers/net/phy/icplus.c c |= IP1001_APS_ON; c 154 drivers/net/phy/icplus.c c = phy_write(phydev, IP1001_SPEC_CTRL_STATUS_2, c); c 155 drivers/net/phy/icplus.c if (c < 0) c 156 drivers/net/phy/icplus.c return c; c 160 drivers/net/phy/icplus.c c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS); c 161 drivers/net/phy/icplus.c if (c < 0) c 162 drivers/net/phy/icplus.c return c; c 164 drivers/net/phy/icplus.c c &= ~(IP1001_RXPHASE_SEL | IP1001_TXPHASE_SEL); c 167 drivers/net/phy/icplus.c c |= (IP1001_RXPHASE_SEL | IP1001_TXPHASE_SEL); c 169 drivers/net/phy/icplus.c c |= IP1001_RXPHASE_SEL; c 171 drivers/net/phy/icplus.c c |= IP1001_TXPHASE_SEL; c 173 drivers/net/phy/icplus.c c = phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c); c 174 drivers/net/phy/icplus.c if (c < 0) c 175 drivers/net/phy/icplus.c return c; c 234 drivers/net/phy/icplus.c int err, c; c 236 drivers/net/phy/icplus.c c = ip1xx_reset(phydev); c 237 drivers/net/phy/icplus.c if (c < 0) c 238 drivers/net/phy/icplus.c return c; c 269 drivers/net/phy/icplus.c c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS); c 270 drivers/net/phy/icplus.c c |= IP101A_G_APS_ON; c 272 drivers/net/phy/icplus.c return phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c); c 575 drivers/net/ppp/bsd_comp.c unsigned char c; c 656 drivers/net/ppp/bsd_comp.c c = *rptr++; c 657 drivers/net/ppp/bsd_comp.c fcode = BSD_KEY (ent, c); c 658 drivers/net/ppp/bsd_comp.c hval = BSD_HASH (ent, c, hshift); c 736 drivers/net/ppp/bsd_comp.c ent = c; c 520 drivers/net/ppp/ppp_async.c #define PUT_BYTE(ap, buf, c, islcp) do { \ c 521 drivers/net/ppp/ppp_async.c if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\ c 523 drivers/net/ppp/ppp_async.c *buf++ = c ^ PPP_TRANS; \ c 525 drivers/net/ppp/ppp_async.c *buf++ = c; \ c 531 drivers/net/ppp/ppp_async.c int fcs, i, count, c, proto; c 584 drivers/net/ppp/ppp_async.c c = data[i++]; c 585 drivers/net/ppp/ppp_async.c if (i == 1 && c == 0 && (ap->flags & SC_COMP_PROT)) c 587 drivers/net/ppp/ppp_async.c fcs = PPP_FCS(fcs, c); c 588 drivers/net/ppp/ppp_async.c PUT_BYTE(ap, buf, c, islcp); c 605 drivers/net/ppp/ppp_async.c c = fcs & 0xff; c 606 drivers/net/ppp/ppp_async.c PUT_BYTE(ap, buf, c, islcp); c 607 drivers/net/ppp/ppp_async.c c = (fcs >> 8) & 0xff; c 608 drivers/net/ppp/ppp_async.c PUT_BYTE(ap, buf, c, islcp); c 752 drivers/net/ppp/ppp_async.c int i, c; c 755 drivers/net/ppp/ppp_async.c c = buf[i]; c 756 drivers/net/ppp/ppp_async.c if (c == PPP_ESCAPE || c == PPP_FLAG || c 757 drivers/net/ppp/ppp_async.c (c < 0x20 && (ap->raccm & (1 << c)) != 0)) c 835 drivers/net/ppp/ppp_async.c int c, i, j, n, s, f; c 842 drivers/net/ppp/ppp_async.c c = buf[i]; c 845 drivers/net/ppp/ppp_async.c s |= (c & 0x80)? 
SC_RCV_B7_1: SC_RCV_B7_0; c 846 drivers/net/ppp/ppp_async.c c = ((c >> 4) ^ c) & 0xf; c 847 drivers/net/ppp/ppp_async.c s |= (0x6996 & (1 << c))? SC_RCV_ODDP: SC_RCV_EVNP; c 904 drivers/net/ppp/ppp_async.c c = buf[n]; c 907 drivers/net/ppp/ppp_async.c } else if (c == PPP_FLAG) { c 909 drivers/net/ppp/ppp_async.c } else if (c == PPP_ESCAPE) { c 912 drivers/net/ppp/ppp_async.c if (c == START_CHAR(ap->tty)) c 914 drivers/net/ppp/ppp_async.c else if (c == STOP_CHAR(ap->tty)) c 1279 drivers/net/ppp/ppp_generic.c ppp->xcomp->comp_stat(ppp->xc_state, &cstats.c); c 103 drivers/net/slip/slip.c static void slip_unesc(struct slip *sl, unsigned char c); c 106 drivers/net/slip/slip.c static void slip_unesc6(struct slip *sl, unsigned char c); c 328 drivers/net/slip/slip.c unsigned char c = sl->rbuff[0]; c 329 drivers/net/slip/slip.c if (c & SL_TYPE_COMPRESSED_TCP) { c 344 drivers/net/slip/slip.c } else if (c >= SL_TYPE_UNCOMPRESSED_TCP) { c 924 drivers/net/slip/slip.c unsigned char c; c 940 drivers/net/slip/slip.c switch (c = *s++) { c 950 drivers/net/slip/slip.c *ptr++ = c; c 1007 drivers/net/slip/slip.c unsigned char c; c 1029 drivers/net/slip/slip.c c = 0x30 + ((v >> bits) & 0x3F); c 1030 drivers/net/slip/slip.c *ptr++ = c; c 1034 drivers/net/slip/slip.c c = 0x30 + ((v << (6 - bits)) & 0x3F); c 1035 drivers/net/slip/slip.c *ptr++ = c; c 1043 drivers/net/slip/slip.c unsigned char c; c 1063 drivers/net/slip/slip.c c = (unsigned char)(sl->xdata >> sl->xbits); c 1066 drivers/net/slip/slip.c sl->rbuff[sl->rcount++] = c; c 195 drivers/net/team/team_mode_loadbalance.c unsigned char *c; c 201 drivers/net/team/team_mode_loadbalance.c c = (char *) &lhash; c 202 drivers/net/team/team_mode_loadbalance.c return c[0] ^ c[1] ^ c[2] ^ c[3]; c 227 drivers/net/usb/cdc_mbim.c u8 *c; c 274 drivers/net/usb/cdc_mbim.c c = (u8 *)&sign; c 275 drivers/net/usb/cdc_mbim.c c[3] = tci; c 281 drivers/net/usb/cdc_mbim.c c = (u8 *)&sign; c 282 drivers/net/usb/cdc_mbim.c c[3] = tci; c 423 drivers/net/usb/cdc_mbim.c u8 *c; c 439 drivers/net/usb/cdc_mbim.c c = (u8 *)&ndp16->dwSignature; c 440 drivers/net/usb/cdc_mbim.c tci = c[3]; c 446 drivers/net/usb/cdc_mbim.c c = (u8 *)&ndp16->dwSignature; c 447 drivers/net/usb/cdc_mbim.c tci = c[3] + 256; c 1403 drivers/net/wan/cosa.c char c; c 1405 drivers/net/wan/cosa.c if (get_user(c, microcode)) c 1408 drivers/net/wan/cosa.c c = *microcode; c 1410 drivers/net/wan/cosa.c if (put_wait_data(cosa, c) == -1) c 1475 drivers/net/wan/cosa.c char c; c 1481 drivers/net/wan/cosa.c c=i; c 1483 drivers/net/wan/cosa.c if (put_user(c, microcode)) c 1486 drivers/net/wan/cosa.c *microcode = c; c 61 drivers/net/wan/hostess_sv11.c static void hostess_input(struct z8530_channel *c, struct sk_buff *skb) c 65 drivers/net/wan/hostess_sv11.c skb->protocol = hdlc_type_trans(skb, c->netdevice); c 67 drivers/net/wan/hostess_sv11.c skb->dev = c->netdevice; c 1185 drivers/net/wan/ixp4xx_hss.c static u32 check_clock(u32 rate, u32 a, u32 b, u32 c, c 1192 drivers/net/wan/ixp4xx_hss.c new_rate = ixp4xx_timer_freq * (u64)(c + 1); c 1193 drivers/net/wan/ixp4xx_hss.c do_div(new_rate, a * (c + 1) + b + 1); c 1199 drivers/net/wan/ixp4xx_hss.c *reg = (a << 22) | (b << 12) | c; c 1225 drivers/net/wan/ixp4xx_hss.c u64 c = (b + 1) * (u64)rate; c 1226 drivers/net/wan/ixp4xx_hss.c do_div(c, ixp4xx_timer_freq - rate * a); c 1227 drivers/net/wan/ixp4xx_hss.c c--; c 1228 drivers/net/wan/ixp4xx_hss.c if (c >= 0xFFF) { /* 12-bit - no need to check more 'b's */ c 1235 drivers/net/wan/ixp4xx_hss.c if (!check_clock(rate, a, b, c, best, &diff, 
reg)) c 1237 drivers/net/wan/ixp4xx_hss.c if (!check_clock(rate, a, b, c + 1, best, &diff, reg)) c 810 drivers/net/wan/lmc/lmc_media.c write_av9110_bit (lmc_softc_t * sc, int c) c 816 drivers/net/wan/lmc/lmc_media.c if (c & 0x01) c 135 drivers/net/wan/sbni.h #define CRC32(c,crc) (crc32tab[((size_t)(crc) ^ (c)) & 0xff] ^ (((crc) >> 8) & 0x00FFFFFF)) c 61 drivers/net/wan/sealevel.c static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb) c 65 drivers/net/wan/sealevel.c skb->protocol = hdlc_type_trans(skb, c->netdevice); c 67 drivers/net/wan/sealevel.c skb->dev = c->netdevice; c 48 drivers/net/wan/x25_asy.c static void x25_asy_unesc(struct x25_asy *sl, unsigned char c); c 620 drivers/net/wan/x25_asy.c unsigned char c; c 636 drivers/net/wan/x25_asy.c switch (c = *s++) { c 646 drivers/net/wan/x25_asy.c *ptr++ = c; c 108 drivers/net/wan/z85230.c static void z8530_rx_done(struct z8530_channel *c); c 109 drivers/net/wan/z85230.c static void z8530_tx_done(struct z8530_channel *c); c 123 drivers/net/wan/z85230.c static inline u8 read_zsreg(struct z8530_channel *c, u8 reg) c 126 drivers/net/wan/z85230.c z8530_write_port(c->ctrlio, reg); c 127 drivers/net/wan/z85230.c return z8530_read_port(c->ctrlio); c 138 drivers/net/wan/z85230.c static inline u8 read_zsdata(struct z8530_channel *c) c 141 drivers/net/wan/z85230.c r=z8530_read_port(c->dataio); c 157 drivers/net/wan/z85230.c static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val) c 160 drivers/net/wan/z85230.c z8530_write_port(c->ctrlio, reg); c 161 drivers/net/wan/z85230.c z8530_write_port(c->ctrlio, val); c 173 drivers/net/wan/z85230.c static inline void write_zsctrl(struct z8530_channel *c, u8 val) c 175 drivers/net/wan/z85230.c z8530_write_port(c->ctrlio, val); c 187 drivers/net/wan/z85230.c static inline void write_zsdata(struct z8530_channel *c, u8 val) c 189 drivers/net/wan/z85230.c z8530_write_port(c->dataio, val); c 273 drivers/net/wan/z85230.c static void z8530_flush_fifo(struct z8530_channel *c) c 275 drivers/net/wan/z85230.c read_zsreg(c, R1); c 276 drivers/net/wan/z85230.c read_zsreg(c, R1); c 277 drivers/net/wan/z85230.c read_zsreg(c, R1); c 278 drivers/net/wan/z85230.c read_zsreg(c, R1); c 279 drivers/net/wan/z85230.c if(c->dev->type==Z85230) c 281 drivers/net/wan/z85230.c read_zsreg(c, R1); c 282 drivers/net/wan/z85230.c read_zsreg(c, R1); c 283 drivers/net/wan/z85230.c read_zsreg(c, R1); c 284 drivers/net/wan/z85230.c read_zsreg(c, R1); c 299 drivers/net/wan/z85230.c static void z8530_rtsdtr(struct z8530_channel *c, int set) c 302 drivers/net/wan/z85230.c c->regs[5] |= (RTS | DTR); c 304 drivers/net/wan/z85230.c c->regs[5] &= ~(RTS | DTR); c 305 drivers/net/wan/z85230.c write_zsreg(c, R5, c->regs[5]); c 332 drivers/net/wan/z85230.c static void z8530_rx(struct z8530_channel *c) c 339 drivers/net/wan/z85230.c if(!(read_zsreg(c, R0)&1)) c 341 drivers/net/wan/z85230.c ch=read_zsdata(c); c 342 drivers/net/wan/z85230.c stat=read_zsreg(c, R1); c 347 drivers/net/wan/z85230.c if(c->count < c->max) c 349 drivers/net/wan/z85230.c *c->dptr++=ch; c 350 drivers/net/wan/z85230.c c->count++; c 362 drivers/net/wan/z85230.c if(c->skb) c 363 drivers/net/wan/z85230.c c->dptr=c->skb->data; c 364 drivers/net/wan/z85230.c c->count=0; c 367 drivers/net/wan/z85230.c pr_warn("%s: overrun\n", c->dev->name); c 368 drivers/net/wan/z85230.c c->rx_overrun++; c 372 drivers/net/wan/z85230.c c->rx_crc_err++; c 383 drivers/net/wan/z85230.c z8530_rx_done(c); c 384 drivers/net/wan/z85230.c write_zsctrl(c, RES_Rx_CRC); c 391 
drivers/net/wan/z85230.c write_zsctrl(c, ERR_RES); c 392 drivers/net/wan/z85230.c write_zsctrl(c, RES_H_IUS); c 406 drivers/net/wan/z85230.c static void z8530_tx(struct z8530_channel *c) c 408 drivers/net/wan/z85230.c while(c->txcount) { c 410 drivers/net/wan/z85230.c if(!(read_zsreg(c, R0)&4)) c 412 drivers/net/wan/z85230.c c->txcount--; c 416 drivers/net/wan/z85230.c write_zsreg(c, R8, *c->tx_ptr++); c 417 drivers/net/wan/z85230.c write_zsctrl(c, RES_H_IUS); c 419 drivers/net/wan/z85230.c if(c->txcount==0) c 421 drivers/net/wan/z85230.c write_zsctrl(c, RES_EOM_L); c 422 drivers/net/wan/z85230.c write_zsreg(c, R10, c->regs[10]&~ABUNDER); c 431 drivers/net/wan/z85230.c write_zsctrl(c, RES_Tx_P); c 433 drivers/net/wan/z85230.c z8530_tx_done(c); c 434 drivers/net/wan/z85230.c write_zsctrl(c, RES_H_IUS); c 625 drivers/net/wan/z85230.c static void z8530_rx_clear(struct z8530_channel *c) c 632 drivers/net/wan/z85230.c read_zsdata(c); c 633 drivers/net/wan/z85230.c stat=read_zsreg(c, R1); c 636 drivers/net/wan/z85230.c write_zsctrl(c, RES_Rx_CRC); c 640 drivers/net/wan/z85230.c write_zsctrl(c, ERR_RES); c 641 drivers/net/wan/z85230.c write_zsctrl(c, RES_H_IUS); c 653 drivers/net/wan/z85230.c static void z8530_tx_clear(struct z8530_channel *c) c 655 drivers/net/wan/z85230.c write_zsctrl(c, RES_Tx_P); c 656 drivers/net/wan/z85230.c write_zsctrl(c, RES_H_IUS); c 785 drivers/net/wan/z85230.c int z8530_sync_open(struct net_device *dev, struct z8530_channel *c) c 789 drivers/net/wan/z85230.c spin_lock_irqsave(c->lock, flags); c 791 drivers/net/wan/z85230.c c->sync = 1; c 792 drivers/net/wan/z85230.c c->mtu = dev->mtu+64; c 793 drivers/net/wan/z85230.c c->count = 0; c 794 drivers/net/wan/z85230.c c->skb = NULL; c 795 drivers/net/wan/z85230.c c->skb2 = NULL; c 796 drivers/net/wan/z85230.c c->irqs = &z8530_sync; c 799 drivers/net/wan/z85230.c z8530_rx_done(c); /* Load the frame ring */ c 800 drivers/net/wan/z85230.c z8530_rx_done(c); /* Load the backup frame */ c 801 drivers/net/wan/z85230.c z8530_rtsdtr(c,1); c 802 drivers/net/wan/z85230.c c->dma_tx = 0; c 803 drivers/net/wan/z85230.c c->regs[R1]|=TxINT_ENAB; c 804 drivers/net/wan/z85230.c write_zsreg(c, R1, c->regs[R1]); c 805 drivers/net/wan/z85230.c write_zsreg(c, R3, c->regs[R3]|RxENABLE); c 807 drivers/net/wan/z85230.c spin_unlock_irqrestore(c->lock, flags); c 823 drivers/net/wan/z85230.c int z8530_sync_close(struct net_device *dev, struct z8530_channel *c) c 828 drivers/net/wan/z85230.c spin_lock_irqsave(c->lock, flags); c 829 drivers/net/wan/z85230.c c->irqs = &z8530_nop; c 830 drivers/net/wan/z85230.c c->max = 0; c 831 drivers/net/wan/z85230.c c->sync = 0; c 833 drivers/net/wan/z85230.c chk=read_zsreg(c,R0); c 834 drivers/net/wan/z85230.c write_zsreg(c, R3, c->regs[R3]); c 835 drivers/net/wan/z85230.c z8530_rtsdtr(c,0); c 837 drivers/net/wan/z85230.c spin_unlock_irqrestore(c->lock, flags); c 853 drivers/net/wan/z85230.c int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c) c 857 drivers/net/wan/z85230.c c->sync = 1; c 858 drivers/net/wan/z85230.c c->mtu = dev->mtu+64; c 859 drivers/net/wan/z85230.c c->count = 0; c 860 drivers/net/wan/z85230.c c->skb = NULL; c 861 drivers/net/wan/z85230.c c->skb2 = NULL; c 865 drivers/net/wan/z85230.c c->rxdma_on = 0; c 866 drivers/net/wan/z85230.c c->txdma_on = 0; c 874 drivers/net/wan/z85230.c if(c->mtu > PAGE_SIZE/2) c 877 drivers/net/wan/z85230.c c->rx_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA); c 878 drivers/net/wan/z85230.c if(c->rx_buf[0]==NULL) c 880 drivers/net/wan/z85230.c 
c->rx_buf[1]=c->rx_buf[0]+PAGE_SIZE/2; c 882 drivers/net/wan/z85230.c c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA); c 883 drivers/net/wan/z85230.c if(c->tx_dma_buf[0]==NULL) c 885 drivers/net/wan/z85230.c free_page((unsigned long)c->rx_buf[0]); c 886 drivers/net/wan/z85230.c c->rx_buf[0]=NULL; c 889 drivers/net/wan/z85230.c c->tx_dma_buf[1]=c->tx_dma_buf[0]+PAGE_SIZE/2; c 891 drivers/net/wan/z85230.c c->tx_dma_used=0; c 892 drivers/net/wan/z85230.c c->dma_tx = 1; c 893 drivers/net/wan/z85230.c c->dma_num=0; c 894 drivers/net/wan/z85230.c c->dma_ready=1; c 900 drivers/net/wan/z85230.c spin_lock_irqsave(c->lock, cflags); c 906 drivers/net/wan/z85230.c c->regs[R14]|= DTRREQ; c 907 drivers/net/wan/z85230.c write_zsreg(c, R14, c->regs[R14]); c 909 drivers/net/wan/z85230.c c->regs[R1]&= ~TxINT_ENAB; c 910 drivers/net/wan/z85230.c write_zsreg(c, R1, c->regs[R1]); c 916 drivers/net/wan/z85230.c c->regs[R1]|= WT_FN_RDYFN; c 917 drivers/net/wan/z85230.c c->regs[R1]|= WT_RDY_RT; c 918 drivers/net/wan/z85230.c c->regs[R1]|= INT_ERR_Rx; c 919 drivers/net/wan/z85230.c c->regs[R1]&= ~TxINT_ENAB; c 920 drivers/net/wan/z85230.c write_zsreg(c, R1, c->regs[R1]); c 921 drivers/net/wan/z85230.c c->regs[R1]|= WT_RDY_ENAB; c 922 drivers/net/wan/z85230.c write_zsreg(c, R1, c->regs[R1]); c 934 drivers/net/wan/z85230.c disable_dma(c->rxdma); c 935 drivers/net/wan/z85230.c clear_dma_ff(c->rxdma); c 936 drivers/net/wan/z85230.c set_dma_mode(c->rxdma, DMA_MODE_READ|0x10); c 937 drivers/net/wan/z85230.c set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0])); c 938 drivers/net/wan/z85230.c set_dma_count(c->rxdma, c->mtu); c 939 drivers/net/wan/z85230.c enable_dma(c->rxdma); c 941 drivers/net/wan/z85230.c disable_dma(c->txdma); c 942 drivers/net/wan/z85230.c clear_dma_ff(c->txdma); c 943 drivers/net/wan/z85230.c set_dma_mode(c->txdma, DMA_MODE_WRITE); c 944 drivers/net/wan/z85230.c disable_dma(c->txdma); c 952 drivers/net/wan/z85230.c c->rxdma_on = 1; c 953 drivers/net/wan/z85230.c c->txdma_on = 1; c 954 drivers/net/wan/z85230.c c->tx_dma_used = 1; c 956 drivers/net/wan/z85230.c c->irqs = &z8530_dma_sync; c 957 drivers/net/wan/z85230.c z8530_rtsdtr(c,1); c 958 drivers/net/wan/z85230.c write_zsreg(c, R3, c->regs[R3]|RxENABLE); c 960 drivers/net/wan/z85230.c spin_unlock_irqrestore(c->lock, cflags); c 976 drivers/net/wan/z85230.c int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c) c 981 drivers/net/wan/z85230.c c->irqs = &z8530_nop; c 982 drivers/net/wan/z85230.c c->max = 0; c 983 drivers/net/wan/z85230.c c->sync = 0; c 990 drivers/net/wan/z85230.c disable_dma(c->rxdma); c 991 drivers/net/wan/z85230.c clear_dma_ff(c->rxdma); c 993 drivers/net/wan/z85230.c c->rxdma_on = 0; c 995 drivers/net/wan/z85230.c disable_dma(c->txdma); c 996 drivers/net/wan/z85230.c clear_dma_ff(c->txdma); c 999 drivers/net/wan/z85230.c c->txdma_on = 0; c 1000 drivers/net/wan/z85230.c c->tx_dma_used = 0; c 1002 drivers/net/wan/z85230.c spin_lock_irqsave(c->lock, flags); c 1008 drivers/net/wan/z85230.c c->regs[R1]&= ~WT_RDY_ENAB; c 1009 drivers/net/wan/z85230.c write_zsreg(c, R1, c->regs[R1]); c 1010 drivers/net/wan/z85230.c c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx); c 1011 drivers/net/wan/z85230.c c->regs[R1]|= INT_ALL_Rx; c 1012 drivers/net/wan/z85230.c write_zsreg(c, R1, c->regs[R1]); c 1013 drivers/net/wan/z85230.c c->regs[R14]&= ~DTRREQ; c 1014 drivers/net/wan/z85230.c write_zsreg(c, R14, c->regs[R14]); c 1016 drivers/net/wan/z85230.c if(c->rx_buf[0]) c 1018 drivers/net/wan/z85230.c free_page((unsigned 
long)c->rx_buf[0]); c 1019 drivers/net/wan/z85230.c c->rx_buf[0]=NULL; c 1021 drivers/net/wan/z85230.c if(c->tx_dma_buf[0]) c 1023 drivers/net/wan/z85230.c free_page((unsigned long)c->tx_dma_buf[0]); c 1024 drivers/net/wan/z85230.c c->tx_dma_buf[0]=NULL; c 1026 drivers/net/wan/z85230.c chk=read_zsreg(c,R0); c 1027 drivers/net/wan/z85230.c write_zsreg(c, R3, c->regs[R3]); c 1028 drivers/net/wan/z85230.c z8530_rtsdtr(c,0); c 1030 drivers/net/wan/z85230.c spin_unlock_irqrestore(c->lock, flags); c 1047 drivers/net/wan/z85230.c int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c) c 1052 drivers/net/wan/z85230.c c->sync = 1; c 1053 drivers/net/wan/z85230.c c->mtu = dev->mtu+64; c 1054 drivers/net/wan/z85230.c c->count = 0; c 1055 drivers/net/wan/z85230.c c->skb = NULL; c 1056 drivers/net/wan/z85230.c c->skb2 = NULL; c 1064 drivers/net/wan/z85230.c if(c->mtu > PAGE_SIZE/2) c 1067 drivers/net/wan/z85230.c c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA); c 1068 drivers/net/wan/z85230.c if(c->tx_dma_buf[0]==NULL) c 1071 drivers/net/wan/z85230.c c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE/2; c 1074 drivers/net/wan/z85230.c spin_lock_irqsave(c->lock, cflags); c 1080 drivers/net/wan/z85230.c z8530_rx_done(c); c 1081 drivers/net/wan/z85230.c z8530_rx_done(c); c 1087 drivers/net/wan/z85230.c c->rxdma_on = 0; c 1088 drivers/net/wan/z85230.c c->txdma_on = 0; c 1090 drivers/net/wan/z85230.c c->tx_dma_used=0; c 1091 drivers/net/wan/z85230.c c->dma_num=0; c 1092 drivers/net/wan/z85230.c c->dma_ready=1; c 1093 drivers/net/wan/z85230.c c->dma_tx = 1; c 1102 drivers/net/wan/z85230.c c->regs[R14]|= DTRREQ; c 1103 drivers/net/wan/z85230.c write_zsreg(c, R14, c->regs[R14]); c 1105 drivers/net/wan/z85230.c c->regs[R1]&= ~TxINT_ENAB; c 1106 drivers/net/wan/z85230.c write_zsreg(c, R1, c->regs[R1]); c 1114 drivers/net/wan/z85230.c disable_dma(c->txdma); c 1115 drivers/net/wan/z85230.c clear_dma_ff(c->txdma); c 1116 drivers/net/wan/z85230.c set_dma_mode(c->txdma, DMA_MODE_WRITE); c 1117 drivers/net/wan/z85230.c disable_dma(c->txdma); c 1125 drivers/net/wan/z85230.c c->rxdma_on = 0; c 1126 drivers/net/wan/z85230.c c->txdma_on = 1; c 1127 drivers/net/wan/z85230.c c->tx_dma_used = 1; c 1129 drivers/net/wan/z85230.c c->irqs = &z8530_txdma_sync; c 1130 drivers/net/wan/z85230.c z8530_rtsdtr(c,1); c 1131 drivers/net/wan/z85230.c write_zsreg(c, R3, c->regs[R3]|RxENABLE); c 1132 drivers/net/wan/z85230.c spin_unlock_irqrestore(c->lock, cflags); c 1148 drivers/net/wan/z85230.c int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c) c 1154 drivers/net/wan/z85230.c spin_lock_irqsave(c->lock, cflags); c 1156 drivers/net/wan/z85230.c c->irqs = &z8530_nop; c 1157 drivers/net/wan/z85230.c c->max = 0; c 1158 drivers/net/wan/z85230.c c->sync = 0; c 1166 drivers/net/wan/z85230.c disable_dma(c->txdma); c 1167 drivers/net/wan/z85230.c clear_dma_ff(c->txdma); c 1168 drivers/net/wan/z85230.c c->txdma_on = 0; c 1169 drivers/net/wan/z85230.c c->tx_dma_used = 0; c 1177 drivers/net/wan/z85230.c c->regs[R1]&= ~WT_RDY_ENAB; c 1178 drivers/net/wan/z85230.c write_zsreg(c, R1, c->regs[R1]); c 1179 drivers/net/wan/z85230.c c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx); c 1180 drivers/net/wan/z85230.c c->regs[R1]|= INT_ALL_Rx; c 1181 drivers/net/wan/z85230.c write_zsreg(c, R1, c->regs[R1]); c 1182 drivers/net/wan/z85230.c c->regs[R14]&= ~DTRREQ; c 1183 drivers/net/wan/z85230.c write_zsreg(c, R14, c->regs[R14]); c 1185 drivers/net/wan/z85230.c if(c->tx_dma_buf[0]) c 1187 
drivers/net/wan/z85230.c free_page((unsigned long)c->tx_dma_buf[0]); c 1188 drivers/net/wan/z85230.c c->tx_dma_buf[0]=NULL; c 1190 drivers/net/wan/z85230.c chk=read_zsreg(c,R0); c 1191 drivers/net/wan/z85230.c write_zsreg(c, R3, c->regs[R3]); c 1192 drivers/net/wan/z85230.c z8530_rtsdtr(c,0); c 1194 drivers/net/wan/z85230.c spin_unlock_irqrestore(c->lock, cflags); c 1378 drivers/net/wan/z85230.c int z8530_channel_load(struct z8530_channel *c, u8 *rtable) c 1382 drivers/net/wan/z85230.c spin_lock_irqsave(c->lock, flags); c 1388 drivers/net/wan/z85230.c write_zsreg(c, R15, c->regs[15]|1); c 1389 drivers/net/wan/z85230.c write_zsreg(c, reg&0x0F, *rtable); c 1391 drivers/net/wan/z85230.c write_zsreg(c, R15, c->regs[15]&~1); c 1392 drivers/net/wan/z85230.c c->regs[reg]=*rtable++; c 1394 drivers/net/wan/z85230.c c->rx_function=z8530_null_rx; c 1395 drivers/net/wan/z85230.c c->skb=NULL; c 1396 drivers/net/wan/z85230.c c->tx_skb=NULL; c 1397 drivers/net/wan/z85230.c c->tx_next_skb=NULL; c 1398 drivers/net/wan/z85230.c c->mtu=1500; c 1399 drivers/net/wan/z85230.c c->max=0; c 1400 drivers/net/wan/z85230.c c->count=0; c 1401 drivers/net/wan/z85230.c c->status=read_zsreg(c, R0); c 1402 drivers/net/wan/z85230.c c->sync=1; c 1403 drivers/net/wan/z85230.c write_zsreg(c, R3, c->regs[R3]|RxENABLE); c 1405 drivers/net/wan/z85230.c spin_unlock_irqrestore(c->lock, flags); c 1426 drivers/net/wan/z85230.c static void z8530_tx_begin(struct z8530_channel *c) c 1429 drivers/net/wan/z85230.c if(c->tx_skb) c 1432 drivers/net/wan/z85230.c c->tx_skb=c->tx_next_skb; c 1433 drivers/net/wan/z85230.c c->tx_next_skb=NULL; c 1434 drivers/net/wan/z85230.c c->tx_ptr=c->tx_next_ptr; c 1436 drivers/net/wan/z85230.c if(c->tx_skb==NULL) c 1439 drivers/net/wan/z85230.c if(c->dma_tx) c 1442 drivers/net/wan/z85230.c disable_dma(c->txdma); c 1446 drivers/net/wan/z85230.c if (get_dma_residue(c->txdma)) c 1448 drivers/net/wan/z85230.c c->netdevice->stats.tx_dropped++; c 1449 drivers/net/wan/z85230.c c->netdevice->stats.tx_fifo_errors++; c 1453 drivers/net/wan/z85230.c c->txcount=0; c 1457 drivers/net/wan/z85230.c c->txcount=c->tx_skb->len; c 1460 drivers/net/wan/z85230.c if(c->dma_tx) c 1470 drivers/net/wan/z85230.c disable_dma(c->txdma); c 1477 drivers/net/wan/z85230.c if(c->dev->type!=Z85230) c 1479 drivers/net/wan/z85230.c write_zsctrl(c, RES_Tx_CRC); c 1480 drivers/net/wan/z85230.c write_zsctrl(c, RES_EOM_L); c 1482 drivers/net/wan/z85230.c write_zsreg(c, R10, c->regs[10]&~ABUNDER); c 1483 drivers/net/wan/z85230.c clear_dma_ff(c->txdma); c 1484 drivers/net/wan/z85230.c set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr)); c 1485 drivers/net/wan/z85230.c set_dma_count(c->txdma, c->txcount); c 1486 drivers/net/wan/z85230.c enable_dma(c->txdma); c 1488 drivers/net/wan/z85230.c write_zsctrl(c, RES_EOM_L); c 1489 drivers/net/wan/z85230.c write_zsreg(c, R5, c->regs[R5]|TxENAB); c 1495 drivers/net/wan/z85230.c write_zsreg(c, R10, c->regs[10]); c 1496 drivers/net/wan/z85230.c write_zsctrl(c, RES_Tx_CRC); c 1498 drivers/net/wan/z85230.c while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP)) c 1500 drivers/net/wan/z85230.c write_zsreg(c, R8, *c->tx_ptr++); c 1501 drivers/net/wan/z85230.c c->txcount--; c 1509 drivers/net/wan/z85230.c netif_wake_queue(c->netdevice); c 1523 drivers/net/wan/z85230.c static void z8530_tx_done(struct z8530_channel *c) c 1528 drivers/net/wan/z85230.c if (c->tx_skb == NULL) c 1531 drivers/net/wan/z85230.c skb = c->tx_skb; c 1532 drivers/net/wan/z85230.c c->tx_skb = NULL; c 1533 drivers/net/wan/z85230.c z8530_tx_begin(c); c 
1534 drivers/net/wan/z85230.c c->netdevice->stats.tx_packets++; c 1535 drivers/net/wan/z85230.c c->netdevice->stats.tx_bytes += skb->len; c 1548 drivers/net/wan/z85230.c void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb) c 1568 drivers/net/wan/z85230.c static void z8530_rx_done(struct z8530_channel *c) c 1577 drivers/net/wan/z85230.c if(c->rxdma_on) c 1584 drivers/net/wan/z85230.c int ready=c->dma_ready; c 1585 drivers/net/wan/z85230.c unsigned char *rxb=c->rx_buf[c->dma_num]; c 1594 drivers/net/wan/z85230.c disable_dma(c->rxdma); c 1595 drivers/net/wan/z85230.c clear_dma_ff(c->rxdma); c 1596 drivers/net/wan/z85230.c c->rxdma_on=0; c 1597 drivers/net/wan/z85230.c ct=c->mtu-get_dma_residue(c->rxdma); c 1600 drivers/net/wan/z85230.c c->dma_ready=0; c 1609 drivers/net/wan/z85230.c c->dma_num^=1; c 1610 drivers/net/wan/z85230.c set_dma_mode(c->rxdma, DMA_MODE_READ|0x10); c 1611 drivers/net/wan/z85230.c set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num])); c 1612 drivers/net/wan/z85230.c set_dma_count(c->rxdma, c->mtu); c 1613 drivers/net/wan/z85230.c c->rxdma_on = 1; c 1614 drivers/net/wan/z85230.c enable_dma(c->rxdma); c 1617 drivers/net/wan/z85230.c write_zsreg(c, R0, RES_Rx_CRC); c 1622 drivers/net/wan/z85230.c netdev_warn(c->netdevice, "DMA flip overrun!\n"); c 1636 drivers/net/wan/z85230.c c->netdevice->stats.rx_dropped++; c 1637 drivers/net/wan/z85230.c netdev_warn(c->netdevice, "Memory squeeze\n"); c 1641 drivers/net/wan/z85230.c c->netdevice->stats.rx_packets++; c 1642 drivers/net/wan/z85230.c c->netdevice->stats.rx_bytes += ct; c 1644 drivers/net/wan/z85230.c c->dma_ready = 1; c 1647 drivers/net/wan/z85230.c skb = c->skb; c 1661 drivers/net/wan/z85230.c ct=c->count; c 1663 drivers/net/wan/z85230.c c->skb = c->skb2; c 1664 drivers/net/wan/z85230.c c->count = 0; c 1665 drivers/net/wan/z85230.c c->max = c->mtu; c 1666 drivers/net/wan/z85230.c if (c->skb) { c 1667 drivers/net/wan/z85230.c c->dptr = c->skb->data; c 1668 drivers/net/wan/z85230.c c->max = c->mtu; c 1670 drivers/net/wan/z85230.c c->count = 0; c 1671 drivers/net/wan/z85230.c c->max = 0; c 1675 drivers/net/wan/z85230.c c->skb2 = dev_alloc_skb(c->mtu); c 1676 drivers/net/wan/z85230.c if (c->skb2 == NULL) c 1677 drivers/net/wan/z85230.c netdev_warn(c->netdevice, "memory squeeze\n"); c 1679 drivers/net/wan/z85230.c skb_put(c->skb2, c->mtu); c 1680 drivers/net/wan/z85230.c c->netdevice->stats.rx_packets++; c 1681 drivers/net/wan/z85230.c c->netdevice->stats.rx_bytes += ct; c 1688 drivers/net/wan/z85230.c c->rx_function(c, skb); c 1690 drivers/net/wan/z85230.c c->netdevice->stats.rx_dropped++; c 1691 drivers/net/wan/z85230.c netdev_err(c->netdevice, "Lost a frame\n"); c 1726 drivers/net/wan/z85230.c netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb) c 1730 drivers/net/wan/z85230.c netif_stop_queue(c->netdevice); c 1731 drivers/net/wan/z85230.c if(c->tx_next_skb) c 1742 drivers/net/wan/z85230.c if(c->dma_tx && ((unsigned long)(virt_to_bus(skb->data+skb->len))>=16*1024*1024 || spans_boundary(skb))) c 1751 drivers/net/wan/z85230.c c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used]; c 1752 drivers/net/wan/z85230.c c->tx_dma_used^=1; /* Flip temp buffer */ c 1753 drivers/net/wan/z85230.c skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len); c 1756 drivers/net/wan/z85230.c c->tx_next_ptr=skb->data; c 1758 drivers/net/wan/z85230.c c->tx_next_skb=skb; c 1761 drivers/net/wan/z85230.c spin_lock_irqsave(c->lock, flags); c 1762 drivers/net/wan/z85230.c z8530_tx_begin(c); c 1763 drivers/net/wan/z85230.c 
spin_unlock_irqrestore(c->lock, flags); c 410 drivers/net/wan/z85230.h netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb); c 411 drivers/net/wan/z85230.h void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb); c 103 drivers/net/wireless/ath/ath10k/debug.h #define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++) c 195 drivers/net/wireless/ath/ath10k/debug.h #define ATH10K_DFS_STAT_INC(ar, c) do { } while (0) c 4504 drivers/net/wireless/ath/ath10k/wmi.c char buf[101], c; c 4511 drivers/net/wireless/ath/ath10k/wmi.c c = skb->data[i]; c 4513 drivers/net/wireless/ath/ath10k/wmi.c if (c == '\0') c 4516 drivers/net/wireless/ath/ath10k/wmi.c if (isascii(c) && isprint(c)) c 4517 drivers/net/wireless/ath/ath10k/wmi.c buf[i] = c; c 43 drivers/net/wireless/ath/ath5k/attach.c int i, c; c 49 drivers/net/wireless/ath/ath5k/attach.c for (c = 0; c < 2; c++) { c 51 drivers/net/wireless/ath/ath5k/attach.c cur_reg = regs[c]; c 1023 drivers/net/wireless/ath/ath5k/eeprom.c u8 i, c; c 1081 drivers/net/wireless/ath/ath5k/eeprom.c for (c = 0; c < AR5K_EEPROM_N_XPD0_POINTS; c++) { c 1083 drivers/net/wireless/ath/ath5k/eeprom.c chan_pcal_info->pwr_x0[c] = (s8) (val & 0xff); c 1084 drivers/net/wireless/ath/ath5k/eeprom.c chan_pcal_info->pwr_x0[++c] = (s8) ((val >> 8) & 0xff); c 1317 drivers/net/wireless/ath/ath5k/phy.c u16 c; c 1320 drivers/net/wireless/ath/ath5k/phy.c c = channel->center_freq; c 1327 drivers/net/wireless/ath/ath5k/phy.c if (c < 4800) { c 1330 drivers/net/wireless/ath/ath5k/phy.c if (!((c - 2224) % 5)) { c 1332 drivers/net/wireless/ath/ath5k/phy.c data0 = ((2 * (c - 704)) - 3040) / 10; c 1336 drivers/net/wireless/ath/ath5k/phy.c } else if (!((c - 2192) % 5)) { c 1338 drivers/net/wireless/ath/ath5k/phy.c data0 = ((2 * (c - 672)) - 3040) / 10; c 1353 drivers/net/wireless/ath/ath5k/phy.c } else if ((c % 5) != 2 || c > 5435) { c 1354 drivers/net/wireless/ath/ath5k/phy.c if (!(c % 20) && c >= 5120) { c 1355 drivers/net/wireless/ath/ath5k/phy.c data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8); c 1357 drivers/net/wireless/ath/ath5k/phy.c } else if (!(c % 10)) { c 1358 drivers/net/wireless/ath/ath5k/phy.c data0 = ath5k_hw_bitswap(((c - 4800) / 10 << 1), 8); c 1360 drivers/net/wireless/ath/ath5k/phy.c } else if (!(c % 5)) { c 1361 drivers/net/wireless/ath/ath5k/phy.c data0 = ath5k_hw_bitswap((c - 4800) / 5, 8); c 1366 drivers/net/wireless/ath/ath5k/phy.c data0 = ath5k_hw_bitswap((10 * (c - 2 - 4800)) / 25 + 1, 8); c 1391 drivers/net/wireless/ath/ath5k/phy.c u16 c; c 1394 drivers/net/wireless/ath/ath5k/phy.c c = channel->center_freq; c 1396 drivers/net/wireless/ath/ath5k/phy.c if (c < 4800) { c 1397 drivers/net/wireless/ath/ath5k/phy.c data0 = ath5k_hw_bitswap((c - 2272), 8); c 1400 drivers/net/wireless/ath/ath5k/phy.c } else if ((c % 5) != 2 || c > 5435) { c 1401 drivers/net/wireless/ath/ath5k/phy.c if (!(c % 20) && c < 5120) c 1402 drivers/net/wireless/ath/ath5k/phy.c data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8); c 1403 drivers/net/wireless/ath/ath5k/phy.c else if (!(c % 10)) c 1404 drivers/net/wireless/ath/ath5k/phy.c data0 = ath5k_hw_bitswap(((c - 4800) / 10 << 1), 8); c 1405 drivers/net/wireless/ath/ath5k/phy.c else if (!(c % 5)) c 1406 drivers/net/wireless/ath/ath5k/phy.c data0 = ath5k_hw_bitswap((c - 4800) / 5, 8); c 1411 drivers/net/wireless/ath/ath5k/phy.c data0 = ath5k_hw_bitswap((10 * (c - 2 - 4800)) / 25 + 1, 8); c 406 drivers/net/wireless/ath/ath9k/calib.c struct ieee80211_channel *c = chan->chan; c 418 drivers/net/wireless/ath/ath9k/calib.c if 
(ath9k_hw_get_nf_thresh(ah, c->band, &nfThresh) c 93 drivers/net/wireless/ath/ath9k/common-debug.c #define RX_PHY_ERR_INC(c) rxstats->phy_err_stats[c]++ c 94 drivers/net/wireless/ath/ath9k/common-debug.c #define RX_CMN_STAT_INC(c) (rxstats->c++) c 28 drivers/net/wireless/ath/ath9k/debug.h #define TX_STAT_INC(sc, q, c) do { (sc)->debug.stats.txstats[q].c++; } while (0) c 29 drivers/net/wireless/ath/ath9k/debug.h #define RX_STAT_INC(sc, c) do { (sc)->debug.stats.rxstats.c++; } while (0) c 31 drivers/net/wireless/ath/ath9k/debug.h #define ANT_STAT_INC(sc, i, c) do { (sc)->debug.stats.ant_stats[i].c++; } while (0) c 32 drivers/net/wireless/ath/ath9k/debug.h #define ANT_LNA_INC(sc, i, c) do { (sc)->debug.stats.ant_stats[i].lna_recv_cnt[c]++; } while (0) c 34 drivers/net/wireless/ath/ath9k/debug.h #define TX_STAT_INC(sc, q, c) do { (void)(sc); } while (0) c 35 drivers/net/wireless/ath/ath9k/debug.h #define RX_STAT_INC(sc, c) do { (void)(sc); } while (0) c 37 drivers/net/wireless/ath/ath9k/debug.h #define ANT_STAT_INC(sc, i, c) do { (void)(sc); } while (0) c 38 drivers/net/wireless/ath/ath9k/debug.h #define ANT_LNA_INC(sc, i, c) do { (void)(sc); } while (0) c 58 drivers/net/wireless/ath/ath9k/dfs_debug.h #define DFS_STAT_INC(sc, c) (sc->debug.stats.dfs_stats.c++) c 65 drivers/net/wireless/ath/ath9k/dfs_debug.h #define DFS_STAT_INC(sc, c) do { } while (0) c 329 drivers/net/wireless/ath/ath9k/htc.h #define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++) c 330 drivers/net/wireless/ath/ath9k/htc.h #define TX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c += a) c 331 drivers/net/wireless/ath/ath9k/htc.h #define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c++) c 332 drivers/net/wireless/ath/ath9k/htc.h #define RX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c += a) c 375 drivers/net/wireless/ath/ath9k/htc.h #define TX_STAT_INC(c) do { } while (0) c 376 drivers/net/wireless/ath/ath9k/htc.h #define TX_STAT_ADD(c, a) do { } while (0) c 377 drivers/net/wireless/ath/ath9k/htc.h #define RX_STAT_INC(c) do { } while (0) c 378 drivers/net/wireless/ath/ath9k/htc.h #define RX_STAT_ADD(c, a) do { } while (0) c 381 drivers/net/wireless/ath/ath9k/htc.h #define TX_QSTAT_INC(c) do { } while (0) c 1320 drivers/net/wireless/ath/carl9170/phy.c #define EDGES(c, n) (ar->eeprom.ctl_data[c].control_edges[n]) c 1352 drivers/net/wireless/ath/carl9170/phy.c u8 c = ctl_grp | modes[i].ctl_mode; c 1354 drivers/net/wireless/ath/carl9170/phy.c if (c == ar->eeprom.ctl_index[ctl_idx]) c 26 drivers/net/wireless/ath/dfs_pri_detector.c #define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++) c 27 drivers/net/wireless/ath/dfs_pri_detector.c #define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--) c 1050 drivers/net/wireless/ath/wil6210/cfg80211.c struct cfg80211_crypto_settings *c) c 1055 drivers/net/wireless/ath/wil6210/cfg80211.c c->wpa_versions, c->cipher_group); c 1056 drivers/net/wireless/ath/wil6210/cfg80211.c wil_dbg_misc(wil, "Pairwise ciphers [%d] {\n", c->n_ciphers_pairwise); c 1057 drivers/net/wireless/ath/wil6210/cfg80211.c n = min_t(int, c->n_ciphers_pairwise, ARRAY_SIZE(c->ciphers_pairwise)); c 1060 drivers/net/wireless/ath/wil6210/cfg80211.c c->ciphers_pairwise[i]); c 1062 drivers/net/wireless/ath/wil6210/cfg80211.c wil_dbg_misc(wil, "AKM suites [%d] {\n", c->n_akm_suites); c 1063 drivers/net/wireless/ath/wil6210/cfg80211.c n = min_t(int, c->n_akm_suites, ARRAY_SIZE(c->akm_suites)); c 1066 drivers/net/wireless/ath/wil6210/cfg80211.c 
c->akm_suites[i]); c 1069 drivers/net/wireless/ath/wil6210/cfg80211.c c->control_port, be16_to_cpu(c->control_port_ethertype), c 1070 drivers/net/wireless/ath/wil6210/cfg80211.c c->control_port_no_encrypt); c 1596 drivers/net/wireless/ath/wil6210/debugfs.c struct wil_tid_crypto_rx *c) c 1601 drivers/net/wireless/ath/wil6210/debugfs.c struct wil_tid_crypto_rx_single *cc = &c->key_id[i]; c 1615 drivers/net/wireless/ath/wil6210/debugfs.c struct wil_tid_crypto_rx_single *cc = &c->key_id[i]; c 1668 drivers/net/wireless/ath/wil6210/debugfs.c struct wil_tid_crypto_rx *c = c 1676 drivers/net/wireless/ath/wil6210/debugfs.c wil_print_rxtid_crypto(s, tid, c); c 680 drivers/net/wireless/ath/wil6210/txrx.c struct wil_tid_crypto_rx *c = mc ? &s->group_crypto_rx : c 682 drivers/net/wireless/ath/wil6210/txrx.c struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id]; c 547 drivers/net/wireless/ath/wil6210/txrx_edma.c struct wil_tid_crypto_rx *c; c 562 drivers/net/wireless/ath/wil6210/txrx_edma.c c = mc ? &s->group_crypto_rx : &s->tid_crypto_rx[tid]; c 563 drivers/net/wireless/ath/wil6210/txrx_edma.c cc = &c->key_id[key_id]; c 703 drivers/net/wireless/atmel/at76c50x-usb.c #define MAKE_CMD_CASE(c) case (c): return #c c 748 drivers/net/wireless/atmel/at76c50x-usb.c #define MAKE_CMD_STATUS_CASE(c) case (c): return #c c 1407 drivers/net/wireless/atmel/atmel.c char *s, *r, *c; c 1428 drivers/net/wireless/atmel/atmel.c c = "Parallel flash"; c 1431 drivers/net/wireless/atmel/atmel.c c = "SPI flash\n"; c 1434 drivers/net/wireless/atmel/atmel.c c = "EEPROM"; c 1437 drivers/net/wireless/atmel/atmel.c c = "<unknown>"; c 1445 drivers/net/wireless/atmel/atmel.c seq_printf(m, "MAC memory type:\t%s\n", c); c 31 drivers/net/wireless/broadcom/b43/lo.c struct b43_lo_calib *c; c 33 drivers/net/wireless/broadcom/b43/lo.c list_for_each_entry(c, &lo->calib_list, list) { c 34 drivers/net/wireless/broadcom/b43/lo.c if (!b43_compare_bbatt(&c->bbatt, bbatt)) c 36 drivers/net/wireless/broadcom/b43/lo.c if (!b43_compare_rfatt(&c->rfatt, rfatt)) c 38 drivers/net/wireless/broadcom/b43/lo.c return c; c 791 drivers/net/wireless/broadcom/b43/lo.c struct b43_lo_calib *c; c 793 drivers/net/wireless/broadcom/b43/lo.c c = b43_find_lo_calib(lo, bbatt, rfatt); c 794 drivers/net/wireless/broadcom/b43/lo.c if (c) c 795 drivers/net/wireless/broadcom/b43/lo.c return c; c 798 drivers/net/wireless/broadcom/b43/lo.c c = b43_calibrate_lo_setting(dev, bbatt, rfatt); c 799 drivers/net/wireless/broadcom/b43/lo.c if (!c) c 801 drivers/net/wireless/broadcom/b43/lo.c list_add(&c->list, &lo->calib_list); c 803 drivers/net/wireless/broadcom/b43/lo.c return c; c 513 drivers/net/wireless/broadcom/b43/phy_common.c unsigned int a, b, c, d; c 520 drivers/net/wireless/broadcom/b43/phy_common.c c = (tmp >> 16) & 0xFF; c 524 drivers/net/wireless/broadcom/b43/phy_common.c c == 0 || c == B43_TSSI_MAX || c 535 drivers/net/wireless/broadcom/b43/phy_common.c c = (c + 32) & 0x3F; c 540 drivers/net/wireless/broadcom/b43/phy_common.c average = (a + b + c + d + 2) / 4; c 631 drivers/net/wireless/broadcom/b43/phy_ht.c int i, c; c 634 drivers/net/wireless/broadcom/b43/phy_ht.c for (c = 0; c < 3; c++) { c 635 drivers/net/wireless/broadcom/b43/phy_ht.c target[c] = sprom->core_pwr_info[c].maxpwr_2g; c 636 drivers/net/wireless/broadcom/b43/phy_ht.c a1[c] = sprom->core_pwr_info[c].pa_2g[0]; c 637 drivers/net/wireless/broadcom/b43/phy_ht.c b0[c] = sprom->core_pwr_info[c].pa_2g[1]; c 638 drivers/net/wireless/broadcom/b43/phy_ht.c b1[c] = sprom->core_pwr_info[c].pa_2g[2]; c 641 
drivers/net/wireless/broadcom/b43/phy_ht.c for (c = 0; c < 3; c++) { c 642 drivers/net/wireless/broadcom/b43/phy_ht.c target[c] = sprom->core_pwr_info[c].maxpwr_5gl; c 643 drivers/net/wireless/broadcom/b43/phy_ht.c a1[c] = sprom->core_pwr_info[c].pa_5gl[0]; c 644 drivers/net/wireless/broadcom/b43/phy_ht.c b0[c] = sprom->core_pwr_info[c].pa_5gl[1]; c 645 drivers/net/wireless/broadcom/b43/phy_ht.c b1[c] = sprom->core_pwr_info[c].pa_5gl[2]; c 648 drivers/net/wireless/broadcom/b43/phy_ht.c for (c = 0; c < 3; c++) { c 649 drivers/net/wireless/broadcom/b43/phy_ht.c target[c] = sprom->core_pwr_info[c].maxpwr_5g; c 650 drivers/net/wireless/broadcom/b43/phy_ht.c a1[c] = sprom->core_pwr_info[c].pa_5g[0]; c 651 drivers/net/wireless/broadcom/b43/phy_ht.c b0[c] = sprom->core_pwr_info[c].pa_5g[1]; c 652 drivers/net/wireless/broadcom/b43/phy_ht.c b1[c] = sprom->core_pwr_info[c].pa_5g[2]; c 655 drivers/net/wireless/broadcom/b43/phy_ht.c for (c = 0; c < 3; c++) { c 656 drivers/net/wireless/broadcom/b43/phy_ht.c target[c] = sprom->core_pwr_info[c].maxpwr_5gh; c 657 drivers/net/wireless/broadcom/b43/phy_ht.c a1[c] = sprom->core_pwr_info[c].pa_5gh[0]; c 658 drivers/net/wireless/broadcom/b43/phy_ht.c b0[c] = sprom->core_pwr_info[c].pa_5gh[1]; c 659 drivers/net/wireless/broadcom/b43/phy_ht.c b1[c] = sprom->core_pwr_info[c].pa_5gh[2]; c 715 drivers/net/wireless/broadcom/b43/phy_ht.c for (c = 0; c < 3; c++) { c 720 drivers/net/wireless/broadcom/b43/phy_ht.c num = 8 * (16 * b0[c] + b1[c] * i); c 721 drivers/net/wireless/broadcom/b43/phy_ht.c den = 32768 + a1[c] * i; c 725 drivers/net/wireless/broadcom/b43/phy_ht.c b43_httab_write_bulk(dev, B43_HTTAB16(26 + c, 0), 64, regval); c 4059 drivers/net/wireless/broadcom/b43/phy_n.c u8 i, c; c 4090 drivers/net/wireless/broadcom/b43/phy_n.c for (c = 0; c < 2; c++) { c 4091 drivers/net/wireless/broadcom/b43/phy_n.c idle[c] = nphy->pwr_ctl_info[c].idle_tssi_2g; c 4092 drivers/net/wireless/broadcom/b43/phy_n.c target[c] = sprom->core_pwr_info[c].maxpwr_2g; c 4093 drivers/net/wireless/broadcom/b43/phy_n.c a1[c] = sprom->core_pwr_info[c].pa_2g[0]; c 4094 drivers/net/wireless/broadcom/b43/phy_n.c b0[c] = sprom->core_pwr_info[c].pa_2g[1]; c 4095 drivers/net/wireless/broadcom/b43/phy_n.c b1[c] = sprom->core_pwr_info[c].pa_2g[2]; c 4098 drivers/net/wireless/broadcom/b43/phy_n.c for (c = 0; c < 2; c++) { c 4099 drivers/net/wireless/broadcom/b43/phy_n.c idle[c] = nphy->pwr_ctl_info[c].idle_tssi_5g; c 4100 drivers/net/wireless/broadcom/b43/phy_n.c target[c] = sprom->core_pwr_info[c].maxpwr_5gl; c 4101 drivers/net/wireless/broadcom/b43/phy_n.c a1[c] = sprom->core_pwr_info[c].pa_5gl[0]; c 4102 drivers/net/wireless/broadcom/b43/phy_n.c b0[c] = sprom->core_pwr_info[c].pa_5gl[1]; c 4103 drivers/net/wireless/broadcom/b43/phy_n.c b1[c] = sprom->core_pwr_info[c].pa_5gl[2]; c 4106 drivers/net/wireless/broadcom/b43/phy_n.c for (c = 0; c < 2; c++) { c 4107 drivers/net/wireless/broadcom/b43/phy_n.c idle[c] = nphy->pwr_ctl_info[c].idle_tssi_5g; c 4108 drivers/net/wireless/broadcom/b43/phy_n.c target[c] = sprom->core_pwr_info[c].maxpwr_5g; c 4109 drivers/net/wireless/broadcom/b43/phy_n.c a1[c] = sprom->core_pwr_info[c].pa_5g[0]; c 4110 drivers/net/wireless/broadcom/b43/phy_n.c b0[c] = sprom->core_pwr_info[c].pa_5g[1]; c 4111 drivers/net/wireless/broadcom/b43/phy_n.c b1[c] = sprom->core_pwr_info[c].pa_5g[2]; c 4114 drivers/net/wireless/broadcom/b43/phy_n.c for (c = 0; c < 2; c++) { c 4115 drivers/net/wireless/broadcom/b43/phy_n.c idle[c] = nphy->pwr_ctl_info[c].idle_tssi_5g; c 4116 
drivers/net/wireless/broadcom/b43/phy_n.c target[c] = sprom->core_pwr_info[c].maxpwr_5gh; c 4117 drivers/net/wireless/broadcom/b43/phy_n.c a1[c] = sprom->core_pwr_info[c].pa_5gh[0]; c 4118 drivers/net/wireless/broadcom/b43/phy_n.c b0[c] = sprom->core_pwr_info[c].pa_5gh[1]; c 4119 drivers/net/wireless/broadcom/b43/phy_n.c b1[c] = sprom->core_pwr_info[c].pa_5gh[2]; c 4141 drivers/net/wireless/broadcom/b43/phy_n.c for (c = 0; c < 2; c++) { c 4142 drivers/net/wireless/broadcom/b43/phy_n.c r = c ? 0x190 : 0x170; c 4197 drivers/net/wireless/broadcom/b43/phy_n.c for (c = 0; c < 2; c++) { c 4199 drivers/net/wireless/broadcom/b43/phy_n.c num = 8 * (16 * b0[c] + b1[c] * i); c 4200 drivers/net/wireless/broadcom/b43/phy_n.c den = 32768 + a1[c] * i; c 4202 drivers/net/wireless/broadcom/b43/phy_n.c if (dev->phy.rev < 3 && (i <= (31 - idle[c] + 1))) c 4203 drivers/net/wireless/broadcom/b43/phy_n.c pwr = max(pwr, target[c] + 1); c 4206 drivers/net/wireless/broadcom/b43/phy_n.c b43_ntab_write_bulk(dev, B43_NTAB32(26 + c, 0), 64, regval); c 6476 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c int i, c, n_combos; c 6490 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c c = 0; c 6499 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c combo[c].num_different_channels = 2; c 6501 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c combo[c].num_different_channels = 1; c 6511 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c combo[c].num_different_channels = 1; c 6515 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c combo[c].max_interfaces = i; c 6516 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c combo[c].n_limits = i; c 6517 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c combo[c].limits = c0_limits; c 6520 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c c++; c 6533 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c combo[c].num_different_channels = 1; c 6534 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c combo[c].max_interfaces = i; c 6535 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c combo[c].n_limits = i; c 6536 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c combo[c].limits = p2p_limits; c 6540 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c c++; c 6547 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c combo[c].beacon_int_infra_match = true; c 6548 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c combo[c].num_different_channels = 1; c 6549 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c combo[c].max_interfaces = 4; c 6550 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c combo[c].n_limits = i; c 6551 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c combo[c].limits = mbss_limits; c 68 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c static bool is_nvram_char(char c) c 71 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c if (c == '#') c 75 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c return (c >= 0x20 && c < 0x7f); c 78 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c static bool is_whitespace(char c) c 80 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c return (c == ' ' || c == '\r' || c == '\n' || c == '\t'); c 85 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c char c; c 87 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c c = nvp->data[nvp->pos]; c 88 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c if (c == 
'\n') c 90 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c if (is_whitespace(c) || c == '\0') c 92 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c if (c == '#') c 94 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c if (is_nvram_char(c)) { c 109 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c char c; c 111 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c c = nvp->data[nvp->pos]; c 112 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c if (c == '=') { c 124 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c } else if (!is_nvram_char(c) || c == ' ') { c 138 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c char c; c 143 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c c = nvp->data[nvp->pos]; c 144 drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c if (!is_nvram_char(c)) { c 2812 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c struct brcmf_console *c = &bus->console; c 2823 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&c->log_le, c 2824 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c sizeof(c->log_le)); c 2829 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c if (c->buf == NULL) { c 2830 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c c->bufsize = le32_to_cpu(c->log_le.buf_size); c 2831 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c c->buf = kmalloc(c->bufsize, GFP_ATOMIC); c 2832 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c if (c->buf == NULL) c 2836 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c idx = le32_to_cpu(c->log_le.idx); c 2839 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c if (idx > c->bufsize) c 2844 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c if (idx == c->last) c 2848 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c addr = le32_to_cpu(c->log_le.buf); c 2849 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, c->buf, c->bufsize); c 2853 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c while (c->last != idx) { c 2855 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c if (c->last == idx) { c 2861 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c if (c->last >= n) c 2862 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c c->last -= n; c 2864 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c c->last = c->bufsize - n; c 2867 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c ch = c->buf[c->last]; c 2868 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c c->last = (c->last + 1) % c->bufsize; c 55 drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c #define CHANNEL_POWER_IDX_5G(c) (((c) < 52) ? 0 : \ c 56 drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c (((c) < 62) ? 1 : \ c 57 drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c (((c) < 100) ? 2 : \ c 58 drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c (((c) < 149) ? 
3 : 4)))) c 259 drivers/net/wireless/broadcom/brcm80211/brcmsmac/types.h #define bcma_wflush16(c, o, v) \ c 260 drivers/net/wireless/broadcom/brcm80211/brcmsmac/types.h ({ bcma_write16(c, o, v); (void)bcma_read16(c, o); }) c 262 drivers/net/wireless/broadcom/brcm80211/brcmsmac/types.h #define bcma_wflush16(c, o, v) bcma_write16(c, o, v) c 256 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c char c; c 262 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c c = (brev & 0xf000) == 0x1000 ? 'P' : 'A'; c 263 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c snprintf(buf, BRCMU_BOARDREV_LEN, "%c%03x", c, brev & 0xfff); c 630 drivers/net/wireless/intel/ipw2x00/ipw2100.c char c; c 647 drivers/net/wireless/intel/ipw2x00/ipw2100.c c = data[(i * 8 + j)]; c 648 drivers/net/wireless/intel/ipw2x00/ipw2100.c if (!isascii(c) || !isprint(c)) c 649 drivers/net/wireless/intel/ipw2x00/ipw2100.c c = '.'; c 651 drivers/net/wireless/intel/ipw2x00/ipw2100.c out += snprintf(buf + out, count - out, "%c", c); c 6643 drivers/net/wireless/intel/ipw2x00/ipw2100.c int c = 0; c 6645 drivers/net/wireless/intel/ipw2x00/ipw2100.c while ((c < REG_MAX_CHANNEL) && c 6646 drivers/net/wireless/intel/ipw2x00/ipw2100.c (f != ipw2100_frequencies[c])) c 6647 drivers/net/wireless/intel/ipw2x00/ipw2100.c c++; c 6651 drivers/net/wireless/intel/ipw2x00/ipw2100.c fwrq->m = c + 1; c 224 drivers/net/wireless/intel/ipw2x00/ipw2200.c char c; c 241 drivers/net/wireless/intel/ipw2x00/ipw2200.c c = data[(i * 8 + j)]; c 242 drivers/net/wireless/intel/ipw2x00/ipw2200.c if (!isascii(c) || !isprint(c)) c 243 drivers/net/wireless/intel/ipw2x00/ipw2200.c c = '.'; c 245 drivers/net/wireless/intel/ipw2x00/ipw2200.c out += snprintf(buf + out, count - out, "%c", c); c 300 drivers/net/wireless/intel/ipw2x00/ipw2200.c static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c) c 303 drivers/net/wireless/intel/ipw2x00/ipw2200.c __LINE__, (u32) (b), (u32) (c)); c 304 drivers/net/wireless/intel/ipw2x00/ipw2200.c _ipw_write_reg8(a, b, c); c 309 drivers/net/wireless/intel/ipw2x00/ipw2200.c static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c) c 312 drivers/net/wireless/intel/ipw2x00/ipw2200.c __LINE__, (u32) (b), (u32) (c)); c 313 drivers/net/wireless/intel/ipw2x00/ipw2200.c _ipw_write_reg16(a, b, c); c 318 drivers/net/wireless/intel/ipw2x00/ipw2200.c static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c) c 321 drivers/net/wireless/intel/ipw2x00/ipw2200.c __LINE__, (u32) (b), (u32) (c)); c 322 drivers/net/wireless/intel/ipw2x00/ipw2200.c _ipw_write_reg32(a, b, c); c 408 drivers/net/wireless/intel/ipw2x00/ipw2200.c #define ipw_read_indirect(a, b, c, d) ({ \ c 411 drivers/net/wireless/intel/ipw2x00/ipw2200.c _ipw_read_indirect(a, b, c, d); \ c 417 drivers/net/wireless/intel/ipw2x00/ipw2200.c #define ipw_write_indirect(a, b, c, d) do { \ c 420 drivers/net/wireless/intel/ipw2x00/ipw2200.c _ipw_write_indirect(a, b, c, d); \ c 289 drivers/net/wireless/intel/iwlegacy/3945.h s32 a, b, c, d, e; /* coefficients for voltage->power c 670 drivers/net/wireless/intel/iwlegacy/4965.c u32 c; c 691 drivers/net/wireless/intel/iwlegacy/4965.c for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) { c 694 drivers/net/wireless/intel/iwlegacy/4965.c measurements[c][m]); c 696 drivers/net/wireless/intel/iwlegacy/4965.c measurements[c][m]); c 697 drivers/net/wireless/intel/iwlegacy/4965.c omeas = &(chan_info->measurements[c][m]); c 717 drivers/net/wireless/intel/iwlegacy/4965.c D_TXPOWER("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, c 720 
drivers/net/wireless/intel/iwlegacy/4965.c D_TXPOWER("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, c 723 drivers/net/wireless/intel/iwlegacy/4965.c D_TXPOWER("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, c 725 drivers/net/wireless/intel/iwlegacy/4965.c D_TXPOWER("chain %d meas %d T1=%d T2=%d T=%d\n", c, c 1016 drivers/net/wireless/intel/iwlegacy/4965.c int c; c 1117 drivers/net/wireless/intel/iwlegacy/4965.c for (c = 0; c < 2; c++) { c 1118 drivers/net/wireless/intel/iwlegacy/4965.c measurement = &ch_eeprom_info.measurements[c][1]; c 1126 drivers/net/wireless/intel/iwlegacy/4965.c &temperature_comp[c]); c 1128 drivers/net/wireless/intel/iwlegacy/4965.c factory_gain_idx[c] = measurement->gain_idx; c 1129 drivers/net/wireless/intel/iwlegacy/4965.c factory_actual_pwr[c] = measurement->actual_pow; c 1131 drivers/net/wireless/intel/iwlegacy/4965.c D_TXPOWER("chain = %d\n", c); c 1133 drivers/net/wireless/intel/iwlegacy/4965.c factory_temp, current_temp, temperature_comp[c]); c 1135 drivers/net/wireless/intel/iwlegacy/4965.c D_TXPOWER("fctry idx %d, fctry pwr %d\n", factory_gain_idx[c], c 1136 drivers/net/wireless/intel/iwlegacy/4965.c factory_actual_pwr[c]); c 1173 drivers/net/wireless/intel/iwlegacy/4965.c for (c = 0; c < 2; c++) { c 1179 drivers/net/wireless/intel/iwlegacy/4965.c tx_atten[txatten_grp][c]); c 1185 drivers/net/wireless/intel/iwlegacy/4965.c (u8) (factory_gain_idx[c] - c 1186 drivers/net/wireless/intel/iwlegacy/4965.c (target_power - factory_actual_pwr[c]) - c 1187 drivers/net/wireless/intel/iwlegacy/4965.c temperature_comp[c] - voltage_compensation + c 1216 drivers/net/wireless/intel/iwlegacy/4965.c tx_power.s.radio_tx_gain[c] = c 1218 drivers/net/wireless/intel/iwlegacy/4965.c tx_power.s.dsp_predis_atten[c] = c 1222 drivers/net/wireless/intel/iwlegacy/4965.c "gain 0x%02x dsp %d\n", c, atten_value, c 1223 drivers/net/wireless/intel/iwlegacy/4965.c power_idx, tx_power.s.radio_tx_gain[c], c 1224 drivers/net/wireless/intel/iwlegacy/4965.c tx_power.s.dsp_predis_atten[c]); c 1305 drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c int c = ieee80211_csa_update_counter(csa_vif); c 1311 drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c u32 rel_time = (c + 1) * c 121 drivers/net/wireless/intel/iwlwifi/mvm/offloading.c int c; c 141 drivers/net/wireless/intel/iwlwifi/mvm/offloading.c for (i = 0, c = 0; c 143 drivers/net/wireless/intel/iwlwifi/mvm/offloading.c i < n_addrs && c < n_nsc; i++) { c 155 drivers/net/wireless/intel/iwlwifi/mvm/offloading.c for (j = 0; j < c; j++) c 159 drivers/net/wireless/intel/iwlwifi/mvm/offloading.c if (j == c) c 160 drivers/net/wireless/intel/iwlwifi/mvm/offloading.c c++; c 310 drivers/net/wireless/intersil/hostap/hostap_proc.c unsigned char c = p[i]; c 311 drivers/net/wireless/intersil/hostap/hostap_proc.c if (c >= 32 && c < 127) c 312 drivers/net/wireless/intersil/hostap/hostap_proc.c seq_putc(m, c); c 314 drivers/net/wireless/intersil/hostap/hostap_proc.c seq_printf(m, "<%02x>", c); c 606 drivers/net/wireless/intersil/orinoco/orinoco_usb.c struct request_context *c; c 609 drivers/net/wireless/intersil/orinoco/orinoco_usb.c c = list_entry(item, struct request_context, list); c 611 drivers/net/wireless/intersil/orinoco/orinoco_usb.c ezusb_reply_inc(c->buf->req_reply_count); c 613 drivers/net/wireless/intersil/orinoco/orinoco_usb.c && (le16_to_cpu(ans->hermes_rid) == c->in_rid)) { c 614 drivers/net/wireless/intersil/orinoco/orinoco_usb.c ctx = c; c 618 drivers/net/wireless/intersil/orinoco/orinoco_usb.c le16_to_cpu(ans->hermes_rid), c->in_rid, c 294 
drivers/net/wireless/intersil/prism54/isl_ioctl.c u32 c; c 298 drivers/net/wireless/intersil/prism54/isl_ioctl.c c = fwrq->m; c 300 drivers/net/wireless/intersil/prism54/isl_ioctl.c c = (fwrq->e == 1) ? channel_of_freq(fwrq->m / 100000) : 0; c 302 drivers/net/wireless/intersil/prism54/isl_ioctl.c rvalue = c ? mgt_set_request(priv, DOT11_OID_CHANNEL, 0, &c) : -EINVAL; c 24 drivers/net/wireless/intersil/prism54/oid_mgt.c int c = 0; c 27 drivers/net/wireless/intersil/prism54/oid_mgt.c while ((c < 14) && (f != frequency_list_bg[c])) c 28 drivers/net/wireless/intersil/prism54/oid_mgt.c c++; c 29 drivers/net/wireless/intersil/prism54/oid_mgt.c return (c >= 14) ? 0 : ++c; c 236 drivers/net/wireless/mac80211_hwsim.c static inline void hwsim_check_chanctx_magic(struct ieee80211_chanctx_conf *c) c 238 drivers/net/wireless/mac80211_hwsim.c struct hwsim_chanctx_priv *cp = (void *)c->drv_priv; c 242 drivers/net/wireless/mac80211_hwsim.c static inline void hwsim_set_chanctx_magic(struct ieee80211_chanctx_conf *c) c 244 drivers/net/wireless/mac80211_hwsim.c struct hwsim_chanctx_priv *cp = (void *)c->drv_priv; c 248 drivers/net/wireless/mac80211_hwsim.c static inline void hwsim_clear_chanctx_magic(struct ieee80211_chanctx_conf *c) c 250 drivers/net/wireless/mac80211_hwsim.c struct hwsim_chanctx_priv *cp = (void *)c->drv_priv; c 1088 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c u8 c[4]; c 1093 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c sprintf(fw_ver, "%u.%u.%u.p%u", ver.c[2], ver.c[1], ver.c[0], ver.c[3]); c 606 drivers/net/wireless/mediatek/mt76/mt76.h mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c) c 611 drivers/net/wireless/mediatek/mt76/mt76.h if (c->band == NL80211_BAND_2GHZ) c 616 drivers/net/wireless/mediatek/mt76/mt76.h idx = c - &msband->sband.channels[0]; c 2558 drivers/net/wireless/ray_cs.c UCHAR c[33]; c 2581 drivers/net/wireless/ray_cs.c c[i] = local->sparm.b5.a_current_ess_id[i]; c 2582 drivers/net/wireless/ray_cs.c c[32] = 0; c 2584 drivers/net/wireless/ray_cs.c nettype[local->sparm.b5.a_network_type], c); c 2744 drivers/net/wireless/ray_cs.c unsigned int c = *p - '0'; c 2745 drivers/net/wireless/ray_cs.c if (c > 9) c 2747 drivers/net/wireless/ray_cs.c nr = nr * 10 + c; c 164 drivers/net/wireless/realtek/rtlwifi/debug.c RTL_DEBUG_IMPL_BB_SERIES(c, 0x0c00); c 603 drivers/net/wireless/realtek/rtw88/debug.c rtw_debug_impl_bb(c, 0x0c00); c 16 drivers/net/wireless/ti/wl12xx/debugfs.c #define WL12XX_DEBUGFS_FWSTATS_FILE(a, b, c) \ c 17 drivers/net/wireless/ti/wl12xx/debugfs.c DEBUGFS_FWSTATS_FILE(a, b, c, wl12xx_acx_statistics) c 18 drivers/net/wireless/ti/wl12xx/scan.c struct conf_scan_settings *c = &wl->conf.scan; c 52 drivers/net/wireless/ti/wl12xx/scan.c cpu_to_le32(c->min_dwell_time_active); c 54 drivers/net/wireless/ti/wl12xx/scan.c cpu_to_le32(c->max_dwell_time_active); c 57 drivers/net/wireless/ti/wl12xx/scan.c cpu_to_le32(c->dwell_time_passive); c 59 drivers/net/wireless/ti/wl12xx/scan.c cpu_to_le32(c->dwell_time_passive); c 314 drivers/net/wireless/ti/wl12xx/scan.c struct conf_sched_scan_settings *c = &wl->conf.sched_scan; c 325 drivers/net/wireless/ti/wl12xx/scan.c cfg->rssi_threshold = c->rssi_threshold; c 326 drivers/net/wireless/ti/wl12xx/scan.c cfg->snr_threshold = c->snr_threshold; c 327 drivers/net/wireless/ti/wl12xx/scan.c cfg->n_probe_reqs = c->num_probe_reqs; c 21 drivers/net/wireless/ti/wl18xx/debugfs.c #define WL18XX_DEBUGFS_FWSTATS_FILE(a, b, c) \ c 22 drivers/net/wireless/ti/wl18xx/debugfs.c DEBUGFS_FWSTATS_FILE(a, b, c, 
wl18xx_acx_statistics) c 23 drivers/net/wireless/ti/wl18xx/debugfs.c #define WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(a, b, c) \ c 24 drivers/net/wireless/ti/wl18xx/debugfs.c DEBUGFS_FWSTATS_FILE_ARRAY(a, b, c, wl18xx_acx_statistics) c 162 drivers/net/wireless/ti/wl18xx/scan.c struct conf_sched_scan_settings *c = &wl->conf.sched_scan; c 186 drivers/net/wireless/ti/wl18xx/scan.c cmd->rssi_threshold = c->rssi_threshold; c 187 drivers/net/wireless/ti/wl18xx/scan.c cmd->snr_threshold = c->snr_threshold; c 200 drivers/net/wireless/ti/wl18xx/scan.c cmd->n_probe_reqs = c->num_probe_reqs; c 216 drivers/net/wireless/ti/wl18xx/scan.c if (c->num_short_intervals && c->long_interval && c 217 drivers/net/wireless/ti/wl18xx/scan.c c->long_interval > req->scan_plans[0].interval * MSEC_PER_SEC) { c 220 drivers/net/wireless/ti/wl18xx/scan.c cmd->long_cycles_msec = cpu_to_le16(c->long_interval); c 221 drivers/net/wireless/ti/wl18xx/scan.c cmd->short_cycles_count = c->num_short_intervals; c 314 drivers/net/wireless/ti/wlcore/acx.c struct conf_itrim_settings *c = &wl->conf.itrim; c 325 drivers/net/wireless/ti/wlcore/acx.c dco->enable = c->enable; c 326 drivers/net/wireless/ti/wlcore/acx.c dco->timeout = cpu_to_le32(c->timeout); c 510 drivers/net/wireless/ti/wlcore/acx.c struct conf_sg_settings *c = &wl->conf.sg; c 523 drivers/net/wireless/ti/wlcore/acx.c param->params[i] = cpu_to_le32(c->params[i]); c 725 drivers/net/wireless/ti/wlcore/acx.c struct conf_tx_rate_class *c = &wl->conf.tx.sta_rc_conf; c 743 drivers/net/wireless/ti/wlcore/acx.c acx->rate_policy.short_retry_limit = c->short_retry_limit; c 744 drivers/net/wireless/ti/wlcore/acx.c acx->rate_policy.long_retry_limit = c->long_retry_limit; c 745 drivers/net/wireless/ti/wlcore/acx.c acx->rate_policy.aflags = c->aflags; c 759 drivers/net/wireless/ti/wlcore/acx.c acx->rate_policy.short_retry_limit = c->short_retry_limit; c 760 drivers/net/wireless/ti/wlcore/acx.c acx->rate_policy.long_retry_limit = c->long_retry_limit; c 761 drivers/net/wireless/ti/wlcore/acx.c acx->rate_policy.aflags = c->aflags; c 777 drivers/net/wireless/ti/wlcore/acx.c acx->rate_policy.short_retry_limit = c->short_retry_limit; c 778 drivers/net/wireless/ti/wlcore/acx.c acx->rate_policy.long_retry_limit = c->long_retry_limit; c 779 drivers/net/wireless/ti/wlcore/acx.c acx->rate_policy.aflags = c->aflags; c 792 drivers/net/wireless/ti/wlcore/acx.c int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c, c 799 drivers/net/wireless/ti/wlcore/acx.c idx, c->enabled_rates); c 807 drivers/net/wireless/ti/wlcore/acx.c acx->rate_policy.enabled_rates = cpu_to_le32(c->enabled_rates); c 808 drivers/net/wireless/ti/wlcore/acx.c acx->rate_policy.short_retry_limit = c->short_retry_limit; c 809 drivers/net/wireless/ti/wlcore/acx.c acx->rate_policy.long_retry_limit = c->long_retry_limit; c 810 drivers/net/wireless/ti/wlcore/acx.c acx->rate_policy.aflags = c->aflags; c 1126 drivers/net/wireless/ti/wlcore/acx.c struct conf_pm_config_settings *c = &wl->conf.pm_config; c 1137 drivers/net/wireless/ti/wlcore/acx.c acx->host_clk_settling_time = cpu_to_le32(c->host_clk_settling_time); c 1138 drivers/net/wireless/ti/wlcore/acx.c acx->host_fast_wakeup_support = c->host_fast_wakeup_support; c 1257 drivers/net/wireless/ti/wlcore/acx.c struct conf_roam_trigger_settings *c = &wl->conf.roam_trigger; c 1269 drivers/net/wireless/ti/wlcore/acx.c acx->rssi_beacon = c->avg_weight_rssi_beacon; c 1270 drivers/net/wireless/ti/wlcore/acx.c acx->rssi_data = c->avg_weight_rssi_data; c 1271 
drivers/net/wireless/ti/wlcore/acx.c acx->snr_beacon = c->avg_weight_snr_beacon; c 1272 drivers/net/wireless/ti/wlcore/acx.c acx->snr_data = c->avg_weight_snr_data; c 1066 drivers/net/wireless/ti/wlcore/acx.h int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c, c 144 drivers/net/wireless/ti/wlcore/scan.c struct conf_scan_settings *c = &wl->conf.scan; c 148 drivers/net/wireless/ti/wlcore/scan.c c->min_dwell_time_active : c 149 drivers/net/wireless/ti/wlcore/scan.c c->min_dwell_time_active_long; c 151 drivers/net/wireless/ti/wlcore/scan.c c->max_dwell_time_active : c 152 drivers/net/wireless/ti/wlcore/scan.c c->max_dwell_time_active_long; c 153 drivers/net/wireless/ti/wlcore/scan.c dwell_time_passive = c->dwell_time_passive; c 154 drivers/net/wireless/ti/wlcore/scan.c dwell_time_dfs = c->dwell_time_dfs; c 156 drivers/net/wireless/ti/wlcore/scan.c struct conf_sched_scan_settings *c = &wl->conf.sched_scan; c 160 drivers/net/wireless/ti/wlcore/scan.c delta_per_probe = c->dwell_time_delta_per_probe_5; c 162 drivers/net/wireless/ti/wlcore/scan.c delta_per_probe = c->dwell_time_delta_per_probe; c 164 drivers/net/wireless/ti/wlcore/scan.c min_dwell_time_active = c->base_dwell_time + c 165 drivers/net/wireless/ti/wlcore/scan.c n_ssids * c->num_probe_reqs * delta_per_probe; c 168 drivers/net/wireless/ti/wlcore/scan.c c->max_dwell_time_delta; c 169 drivers/net/wireless/ti/wlcore/scan.c dwell_time_passive = c->dwell_time_passive; c 170 drivers/net/wireless/ti/wlcore/scan.c dwell_time_dfs = c->dwell_time_dfs; c 70 drivers/net/wireless/wl3501_cs.c #define wl3501_outsb(a, b, c) { outsb(a, b, c); slow_down_io(); } c 183 drivers/nubus/nubus.c unsigned char c = nubus_get_rom(&p, 1, dirent->mask); c 185 drivers/nubus/nubus.c if (!c) c 187 drivers/nubus/nubus.c *t++ = c; c 806 drivers/nvdimm/dimm_devs.c static int count_dimms(struct device *dev, void *c) c 808 drivers/nvdimm/dimm_devs.c int *count = c; c 482 drivers/nvdimm/security.c #define C(a, b, c) a c 485 drivers/nvdimm/security.c #define C(a, b, c) { b, c } c 504 drivers/nvme/host/core.c struct nvme_command c; c 506 drivers/nvme/host/core.c memset(&c, 0, sizeof(c)); c 508 drivers/nvme/host/core.c c.directive.opcode = nvme_admin_directive_send; c 509 drivers/nvme/host/core.c c.directive.nsid = cpu_to_le32(NVME_NSID_ALL); c 510 drivers/nvme/host/core.c c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE; c 511 drivers/nvme/host/core.c c.directive.dtype = NVME_DIR_IDENTIFY; c 512 drivers/nvme/host/core.c c.directive.tdtype = NVME_DIR_STREAMS; c 513 drivers/nvme/host/core.c c.directive.endir = enable ? 
NVME_DIR_ENDIR : 0; c 515 drivers/nvme/host/core.c return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0); c 531 drivers/nvme/host/core.c struct nvme_command c; c 533 drivers/nvme/host/core.c memset(&c, 0, sizeof(c)); c 536 drivers/nvme/host/core.c c.directive.opcode = nvme_admin_directive_recv; c 537 drivers/nvme/host/core.c c.directive.nsid = cpu_to_le32(nsid); c 538 drivers/nvme/host/core.c c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1); c 539 drivers/nvme/host/core.c c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM; c 540 drivers/nvme/host/core.c c.directive.dtype = NVME_DIR_STREAMS; c 542 drivers/nvme/host/core.c return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s)); c 1037 drivers/nvme/host/core.c struct nvme_command c = { }; c 1041 drivers/nvme/host/core.c c.identify.opcode = nvme_admin_identify; c 1042 drivers/nvme/host/core.c c.identify.cns = NVME_ID_CNS_CTRL; c 1048 drivers/nvme/host/core.c error = nvme_submit_sync_cmd(dev->admin_q, &c, *id, c 1058 drivers/nvme/host/core.c struct nvme_command c = { }; c 1064 drivers/nvme/host/core.c c.identify.opcode = nvme_admin_identify; c 1065 drivers/nvme/host/core.c c.identify.nsid = cpu_to_le32(nsid); c 1066 drivers/nvme/host/core.c c.identify.cns = NVME_ID_CNS_NS_DESC_LIST; c 1072 drivers/nvme/host/core.c status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data, c 1138 drivers/nvme/host/core.c struct nvme_command c = { }; c 1140 drivers/nvme/host/core.c c.identify.opcode = nvme_admin_identify; c 1141 drivers/nvme/host/core.c c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST; c 1142 drivers/nvme/host/core.c c.identify.nsid = cpu_to_le32(nsid); c 1143 drivers/nvme/host/core.c return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, c 1150 drivers/nvme/host/core.c struct nvme_command c = { }; c 1154 drivers/nvme/host/core.c c.identify.opcode = nvme_admin_identify; c 1155 drivers/nvme/host/core.c c.identify.nsid = cpu_to_le32(nsid); c 1156 drivers/nvme/host/core.c c.identify.cns = NVME_ID_CNS_NS; c 1162 drivers/nvme/host/core.c error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id)); c 1175 drivers/nvme/host/core.c struct nvme_command c; c 1178 drivers/nvme/host/core.c memset(&c, 0, sizeof(c)); c 1179 drivers/nvme/host/core.c c.features.opcode = op; c 1180 drivers/nvme/host/core.c c.features.fid = cpu_to_le32(fid); c 1181 drivers/nvme/host/core.c c.features.dword11 = cpu_to_le32(dword11); c 1183 drivers/nvme/host/core.c ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, c 1272 drivers/nvme/host/core.c struct nvme_command c; c 1302 drivers/nvme/host/core.c memset(&c, 0, sizeof(c)); c 1303 drivers/nvme/host/core.c c.rw.opcode = io.opcode; c 1304 drivers/nvme/host/core.c c.rw.flags = io.flags; c 1305 drivers/nvme/host/core.c c.rw.nsid = cpu_to_le32(ns->head->ns_id); c 1306 drivers/nvme/host/core.c c.rw.slba = cpu_to_le64(io.slba); c 1307 drivers/nvme/host/core.c c.rw.length = cpu_to_le16(io.nblocks); c 1308 drivers/nvme/host/core.c c.rw.control = cpu_to_le16(io.control); c 1309 drivers/nvme/host/core.c c.rw.dsmgmt = cpu_to_le32(io.dsmgmt); c 1310 drivers/nvme/host/core.c c.rw.reftag = cpu_to_le32(io.reftag); c 1311 drivers/nvme/host/core.c c.rw.apptag = cpu_to_le16(io.apptag); c 1312 drivers/nvme/host/core.c c.rw.appmask = cpu_to_le16(io.appmask); c 1314 drivers/nvme/host/core.c return nvme_submit_user_cmd(ns->queue, &c, c 1404 drivers/nvme/host/core.c struct nvme_command c; c 1417 drivers/nvme/host/core.c memset(&c, 0, sizeof(c)); c 1418 drivers/nvme/host/core.c c.common.opcode = cmd.opcode; c 1419 drivers/nvme/host/core.c 
c.common.flags = cmd.flags; c 1420 drivers/nvme/host/core.c c.common.nsid = cpu_to_le32(cmd.nsid); c 1421 drivers/nvme/host/core.c c.common.cdw2[0] = cpu_to_le32(cmd.cdw2); c 1422 drivers/nvme/host/core.c c.common.cdw2[1] = cpu_to_le32(cmd.cdw3); c 1423 drivers/nvme/host/core.c c.common.cdw10 = cpu_to_le32(cmd.cdw10); c 1424 drivers/nvme/host/core.c c.common.cdw11 = cpu_to_le32(cmd.cdw11); c 1425 drivers/nvme/host/core.c c.common.cdw12 = cpu_to_le32(cmd.cdw12); c 1426 drivers/nvme/host/core.c c.common.cdw13 = cpu_to_le32(cmd.cdw13); c 1427 drivers/nvme/host/core.c c.common.cdw14 = cpu_to_le32(cmd.cdw14); c 1428 drivers/nvme/host/core.c c.common.cdw15 = cpu_to_le32(cmd.cdw15); c 1434 drivers/nvme/host/core.c status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, c 1452 drivers/nvme/host/core.c struct nvme_command c; c 1464 drivers/nvme/host/core.c memset(&c, 0, sizeof(c)); c 1465 drivers/nvme/host/core.c c.common.opcode = cmd.opcode; c 1466 drivers/nvme/host/core.c c.common.flags = cmd.flags; c 1467 drivers/nvme/host/core.c c.common.nsid = cpu_to_le32(cmd.nsid); c 1468 drivers/nvme/host/core.c c.common.cdw2[0] = cpu_to_le32(cmd.cdw2); c 1469 drivers/nvme/host/core.c c.common.cdw2[1] = cpu_to_le32(cmd.cdw3); c 1470 drivers/nvme/host/core.c c.common.cdw10 = cpu_to_le32(cmd.cdw10); c 1471 drivers/nvme/host/core.c c.common.cdw11 = cpu_to_le32(cmd.cdw11); c 1472 drivers/nvme/host/core.c c.common.cdw12 = cpu_to_le32(cmd.cdw12); c 1473 drivers/nvme/host/core.c c.common.cdw13 = cpu_to_le32(cmd.cdw13); c 1474 drivers/nvme/host/core.c c.common.cdw14 = cpu_to_le32(cmd.cdw14); c 1475 drivers/nvme/host/core.c c.common.cdw15 = cpu_to_le32(cmd.cdw15); c 1481 drivers/nvme/host/core.c status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, c 1937 drivers/nvme/host/core.c struct nvme_command c; c 1948 drivers/nvme/host/core.c memset(&c, 0, sizeof(c)); c 1949 drivers/nvme/host/core.c c.common.opcode = op; c 1950 drivers/nvme/host/core.c c.common.nsid = cpu_to_le32(ns->head->ns_id); c 1951 drivers/nvme/host/core.c c.common.cdw10 = cpu_to_le32(cdw10); c 1953 drivers/nvme/host/core.c ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16); c 2702 drivers/nvme/host/core.c struct nvme_command c = { }; c 2705 drivers/nvme/host/core.c c.get_log_page.opcode = nvme_admin_get_log_page; c 2706 drivers/nvme/host/core.c c.get_log_page.nsid = cpu_to_le32(nsid); c 2707 drivers/nvme/host/core.c c.get_log_page.lid = log_page; c 2708 drivers/nvme/host/core.c c.get_log_page.lsp = lsp; c 2709 drivers/nvme/host/core.c c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1)); c 2710 drivers/nvme/host/core.c c.get_log_page.numdu = cpu_to_le16(dwlen >> 16); c 2711 drivers/nvme/host/core.c c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset)); c 2712 drivers/nvme/host/core.c c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset)); c 2714 drivers/nvme/host/core.c return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size); c 435 drivers/nvme/host/lightnvm.c struct nvme_nvm_command c = {}; c 438 drivers/nvme/host/lightnvm.c c.identity.opcode = nvme_nvm_admin_identity; c 439 drivers/nvme/host/lightnvm.c c.identity.nsid = cpu_to_le32(ns->head->ns_id); c 445 drivers/nvme/host/lightnvm.c ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c, c 482 drivers/nvme/host/lightnvm.c struct nvme_nvm_command c = {}; c 488 drivers/nvme/host/lightnvm.c c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl; c 489 drivers/nvme/host/lightnvm.c c.get_bb.nsid = cpu_to_le32(ns->head->ns_id); c 490 drivers/nvme/host/lightnvm.c 
c.get_bb.spba = cpu_to_le64(ppa.ppa); c 496 drivers/nvme/host/lightnvm.c ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c, c 535 drivers/nvme/host/lightnvm.c struct nvme_nvm_command c = {}; c 538 drivers/nvme/host/lightnvm.c c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl; c 539 drivers/nvme/host/lightnvm.c c.set_bb.nsid = cpu_to_le32(ns->head->ns_id); c 540 drivers/nvme/host/lightnvm.c c.set_bb.spba = cpu_to_le64(ppas->ppa); c 541 drivers/nvme/host/lightnvm.c c.set_bb.nlb = cpu_to_le16(nr_ppas - 1); c 542 drivers/nvme/host/lightnvm.c c.set_bb.value = type; c 544 drivers/nvme/host/lightnvm.c ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c, c 625 drivers/nvme/host/lightnvm.c struct nvme_nvm_command *c) c 627 drivers/nvme/host/lightnvm.c c->ph_rw.opcode = rqd->opcode; c 628 drivers/nvme/host/lightnvm.c c->ph_rw.nsid = cpu_to_le32(ns->head->ns_id); c 629 drivers/nvme/host/lightnvm.c c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa); c 630 drivers/nvme/host/lightnvm.c c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list); c 631 drivers/nvme/host/lightnvm.c c->ph_rw.control = cpu_to_le16(rqd->flags); c 632 drivers/nvme/host/lightnvm.c c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1); c 857 drivers/nvme/host/lightnvm.c struct nvme_nvm_command c; c 866 drivers/nvme/host/lightnvm.c memset(&c, 0, sizeof(c)); c 867 drivers/nvme/host/lightnvm.c c.ph_rw.opcode = vio.opcode; c 868 drivers/nvme/host/lightnvm.c c.ph_rw.nsid = cpu_to_le32(ns->head->ns_id); c 869 drivers/nvme/host/lightnvm.c c.ph_rw.control = cpu_to_le16(vio.control); c 870 drivers/nvme/host/lightnvm.c c.ph_rw.length = cpu_to_le16(vio.nppas); c 874 drivers/nvme/host/lightnvm.c ret = nvme_nvm_submit_user_cmd(ns->queue, ns, &c, c 891 drivers/nvme/host/lightnvm.c struct nvme_nvm_command c; c 903 drivers/nvme/host/lightnvm.c memset(&c, 0, sizeof(c)); c 904 drivers/nvme/host/lightnvm.c c.common.opcode = vcmd.opcode; c 905 drivers/nvme/host/lightnvm.c c.common.nsid = cpu_to_le32(ns->head->ns_id); c 906 drivers/nvme/host/lightnvm.c c.common.cdw2[0] = cpu_to_le32(vcmd.cdw2); c 907 drivers/nvme/host/lightnvm.c c.common.cdw2[1] = cpu_to_le32(vcmd.cdw3); c 909 drivers/nvme/host/lightnvm.c c.ph_rw.length = cpu_to_le16(vcmd.nppas); c 910 drivers/nvme/host/lightnvm.c c.ph_rw.control = cpu_to_le16(vcmd.control); c 911 drivers/nvme/host/lightnvm.c c.common.cdw13 = cpu_to_le32(vcmd.cdw13); c 912 drivers/nvme/host/lightnvm.c c.common.cdw14 = cpu_to_le32(vcmd.cdw14); c 913 drivers/nvme/host/lightnvm.c c.common.cdw15 = cpu_to_le32(vcmd.cdw15); c 921 drivers/nvme/host/lightnvm.c (struct nvme_nvm_command *)&c, c 284 drivers/nvme/host/pci.c struct nvme_command c; c 289 drivers/nvme/host/pci.c memset(&c, 0, sizeof(c)); c 290 drivers/nvme/host/pci.c c.dbbuf.opcode = nvme_admin_dbbuf; c 291 drivers/nvme/host/pci.c c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr); c 292 drivers/nvme/host/pci.c c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr); c 294 drivers/nvme/host/pci.c if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) { c 1097 drivers/nvme/host/pci.c struct nvme_command c; c 1099 drivers/nvme/host/pci.c memset(&c, 0, sizeof(c)); c 1100 drivers/nvme/host/pci.c c.common.opcode = nvme_admin_async_event; c 1101 drivers/nvme/host/pci.c c.common.command_id = NVME_AQ_BLK_MQ_DEPTH; c 1102 drivers/nvme/host/pci.c nvme_submit_cmd(nvmeq, &c, true); c 1107 drivers/nvme/host/pci.c struct nvme_command c; c 1109 drivers/nvme/host/pci.c memset(&c, 0, sizeof(c)); c 1110 drivers/nvme/host/pci.c c.delete_queue.opcode = opcode; c 1111 
drivers/nvme/host/pci.c c.delete_queue.qid = cpu_to_le16(id); c 1113 drivers/nvme/host/pci.c return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); c 1119 drivers/nvme/host/pci.c struct nvme_command c; c 1129 drivers/nvme/host/pci.c memset(&c, 0, sizeof(c)); c 1130 drivers/nvme/host/pci.c c.create_cq.opcode = nvme_admin_create_cq; c 1131 drivers/nvme/host/pci.c c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); c 1132 drivers/nvme/host/pci.c c.create_cq.cqid = cpu_to_le16(qid); c 1133 drivers/nvme/host/pci.c c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); c 1134 drivers/nvme/host/pci.c c.create_cq.cq_flags = cpu_to_le16(flags); c 1135 drivers/nvme/host/pci.c c.create_cq.irq_vector = cpu_to_le16(vector); c 1137 drivers/nvme/host/pci.c return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); c 1144 drivers/nvme/host/pci.c struct nvme_command c; c 1159 drivers/nvme/host/pci.c memset(&c, 0, sizeof(c)); c 1160 drivers/nvme/host/pci.c c.create_sq.opcode = nvme_admin_create_sq; c 1161 drivers/nvme/host/pci.c c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); c 1162 drivers/nvme/host/pci.c c.create_sq.sqid = cpu_to_le16(qid); c 1163 drivers/nvme/host/pci.c c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); c 1164 drivers/nvme/host/pci.c c.create_sq.sq_flags = cpu_to_le16(flags); c 1165 drivers/nvme/host/pci.c c.create_sq.cqid = cpu_to_le16(qid); c 1167 drivers/nvme/host/pci.c return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); c 1851 drivers/nvme/host/pci.c struct nvme_command c; c 1854 drivers/nvme/host/pci.c memset(&c, 0, sizeof(c)); c 1855 drivers/nvme/host/pci.c c.features.opcode = nvme_admin_set_features; c 1856 drivers/nvme/host/pci.c c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF); c 1857 drivers/nvme/host/pci.c c.features.dword11 = cpu_to_le32(bits); c 1858 drivers/nvme/host/pci.c c.features.dword12 = cpu_to_le32(dev->host_mem_size >> c 1860 drivers/nvme/host/pci.c c.features.dword13 = cpu_to_le32(lower_32_bits(dma_addr)); c 1861 drivers/nvme/host/pci.c c.features.dword14 = cpu_to_le32(upper_32_bits(dma_addr)); c 1862 drivers/nvme/host/pci.c c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs); c 1864 drivers/nvme/host/pci.c ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); c 1170 drivers/nvme/host/rdma.c static int nvme_rdma_set_sg_null(struct nvme_command *c) c 1172 drivers/nvme/host/rdma.c struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; c 1182 drivers/nvme/host/rdma.c struct nvme_rdma_request *req, struct nvme_command *c, c 1185 drivers/nvme/host/rdma.c struct nvme_sgl_desc *sg = &c->common.dptr.sgl; c 1207 drivers/nvme/host/rdma.c struct nvme_rdma_request *req, struct nvme_command *c) c 1209 drivers/nvme/host/rdma.c struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; c 1219 drivers/nvme/host/rdma.c struct nvme_rdma_request *req, struct nvme_command *c, c 1222 drivers/nvme/host/rdma.c struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; c 1265 drivers/nvme/host/rdma.c struct request *rq, struct nvme_command *c) c 1275 drivers/nvme/host/rdma.c c->common.flags |= NVME_CMD_SGL_METABUF; c 1278 drivers/nvme/host/rdma.c return nvme_rdma_set_sg_null(c); c 1301 drivers/nvme/host/rdma.c ret = nvme_rdma_map_sg_inline(queue, req, c, count); c 1306 drivers/nvme/host/rdma.c ret = nvme_rdma_map_sg_single(queue, req, c); c 1311 drivers/nvme/host/rdma.c ret = nvme_rdma_map_sg_fr(queue, req, c, count); c 1740 drivers/nvme/host/rdma.c struct nvme_command *c = sqe->data; c 1763 drivers/nvme/host/rdma.c ret = nvme_setup_cmd(ns, rq, c); c 1769 
drivers/nvme/host/rdma.c err = nvme_rdma_map_data(queue, rq, c); c 1986 drivers/nvme/host/tcp.c static void nvme_tcp_set_sg_null(struct nvme_command *c) c 1988 drivers/nvme/host/tcp.c struct nvme_sgl_desc *sg = &c->common.dptr.sgl; c 1997 drivers/nvme/host/tcp.c struct nvme_command *c, u32 data_len) c 1999 drivers/nvme/host/tcp.c struct nvme_sgl_desc *sg = &c->common.dptr.sgl; c 2006 drivers/nvme/host/tcp.c static void nvme_tcp_set_sg_host_data(struct nvme_command *c, c 2009 drivers/nvme/host/tcp.c struct nvme_sgl_desc *sg = &c->common.dptr.sgl; c 2087 drivers/nvme/host/tcp.c struct nvme_command *c = &pdu->cmd; c 2089 drivers/nvme/host/tcp.c c->common.flags |= NVME_CMD_SGL_METABUF; c 2092 drivers/nvme/host/tcp.c nvme_tcp_set_sg_null(c); c 2095 drivers/nvme/host/tcp.c nvme_tcp_set_sg_inline(queue, c, req->data_len); c 2097 drivers/nvme/host/tcp.c nvme_tcp_set_sg_host_data(c, req->data_len); c 104 drivers/nvme/target/fabrics-cmd.c struct nvmf_connect_command *c = &req->cmd->connect; c 105 drivers/nvme/target/fabrics-cmd.c u16 qid = le16_to_cpu(c->qid); c 106 drivers/nvme/target/fabrics-cmd.c u16 sqsize = le16_to_cpu(c->sqsize); c 127 drivers/nvme/target/fabrics-cmd.c if (c->cattr & NVME_CONNECT_DISABLE_SQFLOW) { c 150 drivers/nvme/target/fabrics-cmd.c struct nvmf_connect_command *c = &req->cmd->connect; c 168 drivers/nvme/target/fabrics-cmd.c if (c->recfmt != 0) { c 170 drivers/nvme/target/fabrics-cmd.c le16_to_cpu(c->recfmt)); c 185 drivers/nvme/target/fabrics-cmd.c le32_to_cpu(c->kato), &ctrl); c 213 drivers/nvme/target/fabrics-cmd.c struct nvmf_connect_command *c = &req->cmd->connect; c 216 drivers/nvme/target/fabrics-cmd.c u16 qid = le16_to_cpu(c->qid); c 232 drivers/nvme/target/fabrics-cmd.c if (c->recfmt != 0) { c 234 drivers/nvme/target/fabrics-cmd.c le16_to_cpu(c->recfmt)); c 215 drivers/nvme/target/rdma.c struct nvmet_rdma_cmd *c) c 224 drivers/nvme/target/rdma.c sg = c->inline_sg; c 225 drivers/nvme/target/rdma.c sge = &c->sge[1]; c 237 drivers/nvme/target/rdma.c struct nvmet_rdma_cmd *c) c 248 drivers/nvme/target/rdma.c sg = c->inline_sg; c 250 drivers/nvme/target/rdma.c sge = &c->sge[1]; c 280 drivers/nvme/target/rdma.c struct nvmet_rdma_cmd *c, bool admin) c 283 drivers/nvme/target/rdma.c c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL); c 284 drivers/nvme/target/rdma.c if (!c->nvme_cmd) c 287 drivers/nvme/target/rdma.c c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd, c 288 drivers/nvme/target/rdma.c sizeof(*c->nvme_cmd), DMA_FROM_DEVICE); c 289 drivers/nvme/target/rdma.c if (ib_dma_mapping_error(ndev->device, c->sge[0].addr)) c 292 drivers/nvme/target/rdma.c c->sge[0].length = sizeof(*c->nvme_cmd); c 293 drivers/nvme/target/rdma.c c->sge[0].lkey = ndev->pd->local_dma_lkey; c 295 drivers/nvme/target/rdma.c if (!admin && nvmet_rdma_alloc_inline_pages(ndev, c)) c 298 drivers/nvme/target/rdma.c c->cqe.done = nvmet_rdma_recv_done; c 300 drivers/nvme/target/rdma.c c->wr.wr_cqe = &c->cqe; c 301 drivers/nvme/target/rdma.c c->wr.sg_list = c->sge; c 302 drivers/nvme/target/rdma.c c->wr.num_sge = admin ? 
1 : ndev->inline_page_count + 1; c 307 drivers/nvme/target/rdma.c ib_dma_unmap_single(ndev->device, c->sge[0].addr, c 308 drivers/nvme/target/rdma.c sizeof(*c->nvme_cmd), DMA_FROM_DEVICE); c 310 drivers/nvme/target/rdma.c kfree(c->nvme_cmd); c 317 drivers/nvme/target/rdma.c struct nvmet_rdma_cmd *c, bool admin) c 320 drivers/nvme/target/rdma.c nvmet_rdma_free_inline_pages(ndev, c); c 321 drivers/nvme/target/rdma.c ib_dma_unmap_single(ndev->device, c->sge[0].addr, c 322 drivers/nvme/target/rdma.c sizeof(*c->nvme_cmd), DMA_FROM_DEVICE); c 323 drivers/nvme/target/rdma.c kfree(c->nvme_cmd); c 147 drivers/nvme/target/tcp.c static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c); c 1210 drivers/nvme/target/tcp.c struct nvmet_tcp_cmd *c) c 1214 drivers/nvme/target/tcp.c c->queue = queue; c 1215 drivers/nvme/target/tcp.c c->req.port = queue->port->nport; c 1217 drivers/nvme/target/tcp.c c->cmd_pdu = page_frag_alloc(&queue->pf_cache, c 1218 drivers/nvme/target/tcp.c sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); c 1219 drivers/nvme/target/tcp.c if (!c->cmd_pdu) c 1221 drivers/nvme/target/tcp.c c->req.cmd = &c->cmd_pdu->cmd; c 1223 drivers/nvme/target/tcp.c c->rsp_pdu = page_frag_alloc(&queue->pf_cache, c 1224 drivers/nvme/target/tcp.c sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); c 1225 drivers/nvme/target/tcp.c if (!c->rsp_pdu) c 1227 drivers/nvme/target/tcp.c c->req.cqe = &c->rsp_pdu->cqe; c 1229 drivers/nvme/target/tcp.c c->data_pdu = page_frag_alloc(&queue->pf_cache, c 1230 drivers/nvme/target/tcp.c sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); c 1231 drivers/nvme/target/tcp.c if (!c->data_pdu) c 1234 drivers/nvme/target/tcp.c c->r2t_pdu = page_frag_alloc(&queue->pf_cache, c 1235 drivers/nvme/target/tcp.c sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO); c 1236 drivers/nvme/target/tcp.c if (!c->r2t_pdu) c 1239 drivers/nvme/target/tcp.c c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL; c 1241 drivers/nvme/target/tcp.c list_add_tail(&c->entry, &queue->free_list); c 1245 drivers/nvme/target/tcp.c page_frag_free(c->data_pdu); c 1247 drivers/nvme/target/tcp.c page_frag_free(c->rsp_pdu); c 1249 drivers/nvme/target/tcp.c page_frag_free(c->cmd_pdu); c 1253 drivers/nvme/target/tcp.c static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c) c 1255 drivers/nvme/target/tcp.c page_frag_free(c->r2t_pdu); c 1256 drivers/nvme/target/tcp.c page_frag_free(c->data_pdu); c 1257 drivers/nvme/target/tcp.c page_frag_free(c->rsp_pdu); c 1258 drivers/nvme/target/tcp.c page_frag_free(c->cmd_pdu); c 868 drivers/nvmem/core.c struct nvmem_cell **c = res; c 870 drivers/nvmem/core.c if (WARN_ON(!c || !*c)) c 873 drivers/nvmem/core.c return *c == data; c 74 drivers/nvmem/imx-ocotp.c u32 c, mask; c 79 drivers/nvmem/imx-ocotp.c c = readl(base + IMX_OCOTP_ADDR_CTRL); c 80 drivers/nvmem/imx-ocotp.c if (!(c & mask)) c 100 drivers/nvmem/imx-ocotp.c if (c & IMX_OCOTP_BM_CTRL_ERROR) c 110 drivers/nvmem/imx-ocotp.c u32 c; c 112 drivers/nvmem/imx-ocotp.c c = readl(base + IMX_OCOTP_ADDR_CTRL); c 113 drivers/nvmem/imx-ocotp.c if (!(c & IMX_OCOTP_BM_CTRL_ERROR)) c 1431 drivers/of/base.c int c; c 1433 drivers/of/base.c c = of_phandle_iterator_args(&it, c 1437 drivers/of/base.c out_args->args_count = c; c 203 drivers/of/device.c char *c; c 227 drivers/of/device.c for (c = str; c; ) { c 228 drivers/of/device.c c = strchr(c, ' '); c 229 drivers/of/device.c if (c) c 230 drivers/of/device.c *c++ = '_'; c 80 drivers/parisc/eisa_enumerator.c u_int8_t c; c 87 drivers/parisc/eisa_enumerator.c c = get_8(buf+len); c 
106 drivers/parisc/eisa_enumerator.c if (!(c & HPEE_MEMORY_MORE)) { c 118 drivers/parisc/eisa_enumerator.c u_int8_t c; c 124 drivers/parisc/eisa_enumerator.c c = get_8(buf+len); c 126 drivers/parisc/eisa_enumerator.c pr_cont("IRQ %d ", c & HPEE_IRQ_CHANNEL_MASK); c 127 drivers/parisc/eisa_enumerator.c if (c & HPEE_IRQ_TRIG_LEVEL) { c 128 drivers/parisc/eisa_enumerator.c eisa_make_irq_level(c & HPEE_IRQ_CHANNEL_MASK); c 130 drivers/parisc/eisa_enumerator.c eisa_make_irq_edge(c & HPEE_IRQ_CHANNEL_MASK); c 137 drivers/parisc/eisa_enumerator.c if (!(c & HPEE_IRQ_MORE)) { c 149 drivers/parisc/eisa_enumerator.c u_int8_t c; c 155 drivers/parisc/eisa_enumerator.c c = get_8(buf+len); c 156 drivers/parisc/eisa_enumerator.c pr_cont("DMA %d ", c&HPEE_DMA_CHANNEL_MASK); c 159 drivers/parisc/eisa_enumerator.c if (!(c & HPEE_DMA_MORE)) { c 171 drivers/parisc/eisa_enumerator.c u_int8_t c; c 179 drivers/parisc/eisa_enumerator.c c = get_8(buf+len); c 184 drivers/parisc/eisa_enumerator.c res->end = get_16(buf+len+1)+(c&HPEE_PORT_SIZE_MASK)+1; c 195 drivers/parisc/eisa_enumerator.c if (!(c & HPEE_PORT_MORE)) { c 212 drivers/parisc/eisa_enumerator.c u_int8_t c; c 216 drivers/parisc/eisa_enumerator.c c = get_8(buf+len); c 218 drivers/parisc/eisa_enumerator.c switch (c & HPEE_PORT_INIT_WIDTH_MASK) { c 221 drivers/parisc/eisa_enumerator.c if (c & HPEE_PORT_INIT_MASK) { c 234 drivers/parisc/eisa_enumerator.c if (c & HPEE_PORT_INIT_MASK) { c 246 drivers/parisc/eisa_enumerator.c if (c & HPEE_PORT_INIT_MASK) { c 257 drivers/parisc/eisa_enumerator.c printk(KERN_ERR "Invalid port init word %02x\n", c); c 261 drivers/parisc/eisa_enumerator.c if (c & HPEE_PORT_INIT_MASK) { c 266 drivers/parisc/eisa_enumerator.c if (!(c & HPEE_PORT_INIT_MORE)) { c 50 drivers/parisc/gsc.c int c = irq; c 56 drivers/parisc/gsc.c printk("cannot claim irq %d\n", c); c 748 drivers/parisc/pdc_stable.c char c; c 773 drivers/parisc/pdc_stable.c c = *temp++ - '0'; c 774 drivers/parisc/pdc_stable.c if ((c != 0) && (c != 1)) c 776 drivers/parisc/pdc_stable.c if (c == 0) c 802 drivers/parport/parport_ip32.c unsigned int c) c 805 drivers/parport/parport_ip32.c writeb(c, priv->regs.ecr); c 821 drivers/parport/parport_ip32.c unsigned int c; c 822 drivers/parport/parport_ip32.c c = (parport_ip32_read_econtrol(p) & ~mask) ^ val; c 823 drivers/parport/parport_ip32.c parport_ip32_write_econtrol(p, c); c 899 drivers/parport/parport_ip32.c unsigned int c) c 902 drivers/parport/parport_ip32.c CHECK_EXTRA_BITS(p, c, priv->dcr_writable); c 903 drivers/parport/parport_ip32.c c &= priv->dcr_writable; /* only writable bits */ c 904 drivers/parport/parport_ip32.c writeb(c, priv->regs.dcr); c 905 drivers/parport/parport_ip32.c priv->dcr_cache = c; /* update soft copy */ c 922 drivers/parport/parport_ip32.c unsigned int c; c 923 drivers/parport/parport_ip32.c c = (__parport_ip32_read_control(p) & ~mask) ^ val; c 924 drivers/parport/parport_ip32.c __parport_ip32_write_control(p, c); c 950 drivers/parport/parport_ip32.c unsigned char c) c 954 drivers/parport/parport_ip32.c CHECK_EXTRA_BITS(p, c, wm); c 955 drivers/parport/parport_ip32.c __parport_ip32_frob_control(p, wm, c & wm); c 258 drivers/parport/parport_pc.c register unsigned char c = s->u.pc.ctr & priv->ctr_writable; c 259 drivers/parport/parport_pc.c outb(c, CONTROL(p)); c 260 drivers/parport/parport_pc.c priv->ctr = c; c 60 drivers/parport/share.c unsigned char c) { return 0; } c 54 drivers/pci/controller/pcie-cadence.h #define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \ c 55 
drivers/pci/controller/pcie-cadence.h (((c) << ((b) * 8 + 5)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)) c 66 drivers/pci/controller/pcie-cadence.h #define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(c) \ c 67 drivers/pci/controller/pcie-cadence.h (((c) << 6) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK) c 72 drivers/pci/controller/pcie-cadence.h #define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(c) \ c 73 drivers/pci/controller/pcie-cadence.h (((c) << 14) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK) c 274 drivers/pci/controller/pcie-rockchip.h #define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \ c 275 drivers/pci/controller/pcie-rockchip.h (((c) << ((b) * 8 + 5)) & \ c 639 drivers/pci/hotplug/ibmphp.h #define CTLR_WORKING(c) ((u8) ((c & HPC_CTLR_WORKING) \ c 641 drivers/pci/hotplug/ibmphp.h #define CTLR_FINISHED(c) ((u8) ((c & HPC_CTLR_FINISHED) \ c 643 drivers/pci/hotplug/ibmphp.h #define CTLR_RESULT(c) ((u8) ((c & HPC_CTLR_RESULT1) \ c 644 drivers/pci/hotplug/ibmphp.h ? ((c & HPC_CTLR_RESULT0) ? HPC_CTLR_RESULT_NORESP \ c 646 drivers/pci/hotplug/ibmphp.h : ((c & HPC_CTLR_RESULT0) ? HPC_CTLR_RESULT_FAILED \ c 650 drivers/pci/hotplug/ibmphp.h #define NEEDTOCHECK_CMDSTATUS(c) ((c == HPC_SLOT_OFF) || \ c 651 drivers/pci/hotplug/ibmphp.h (c == HPC_SLOT_ON) || \ c 652 drivers/pci/hotplug/ibmphp.h (c == HPC_CTLR_RESET) || \ c 653 drivers/pci/hotplug/ibmphp.h (c == HPC_BUS_33CONVMODE) || \ c 654 drivers/pci/hotplug/ibmphp.h (c == HPC_BUS_66CONVMODE) || \ c 655 drivers/pci/hotplug/ibmphp.h (c == HPC_BUS_66PCIXMODE) || \ c 656 drivers/pci/hotplug/ibmphp.h (c == HPC_BUS_100PCIXMODE) || \ c 657 drivers/pci/hotplug/ibmphp.h (c == HPC_BUS_133PCIXMODE) || \ c 658 drivers/pci/hotplug/ibmphp.h (c == HPC_ALLSLOT_OFF) || \ c 659 drivers/pci/hotplug/ibmphp.h (c == HPC_ALLSLOT_ON)) c 741 drivers/pcmcia/cs.c int pccard_register_pcmcia(struct pcmcia_socket *s, struct pcmcia_callback *c) c 748 drivers/pcmcia/cs.c if (c) { c 755 drivers/pcmcia/cs.c s->callback = c; c 118 drivers/pcmcia/cs_internal.h int pccard_register_pcmcia(struct pcmcia_socket *s, struct pcmcia_callback *c); c 232 drivers/pcmcia/ds.c struct config_t *c = container_of(ref, struct config_t, ref); c 234 drivers/pcmcia/ds.c kfree(c); c 551 drivers/pcmcia/ds.c config_t *c; c 553 drivers/pcmcia/ds.c c = kzalloc(sizeof(struct config_t), GFP_KERNEL); c 554 drivers/pcmcia/ds.c if (!c) { c 558 drivers/pcmcia/ds.c p_dev->function_config = c; c 559 drivers/pcmcia/ds.c kref_init(&c->ref); c 561 drivers/pcmcia/ds.c c->io[i].name = p_dev->devname; c 562 drivers/pcmcia/ds.c c->io[i].flags = IORESOURCE_IO; c 565 drivers/pcmcia/ds.c c->mem[i].name = p_dev->devname; c 566 drivers/pcmcia/ds.c c->mem[i].flags = IORESOURCE_MEM; c 162 drivers/pcmcia/pcmcia_resource.c config_t *c; c 169 drivers/pcmcia/pcmcia_resource.c c = p_dev->function_config; c 171 drivers/pcmcia/pcmcia_resource.c if (!(c->state & CONFIG_LOCKED)) { c 350 drivers/pcmcia/pcmcia_resource.c config_t *c; c 354 drivers/pcmcia/pcmcia_resource.c c = p_dev->function_config; c 364 drivers/pcmcia/pcmcia_resource.c if (c->state & CONFIG_LOCKED) { c 365 drivers/pcmcia/pcmcia_resource.c c->state &= ~CONFIG_LOCKED; c 366 drivers/pcmcia/pcmcia_resource.c if (c->state & CONFIG_IO_REQ) c 397 drivers/pcmcia/pcmcia_resource.c config_t *c; c 403 drivers/pcmcia/pcmcia_resource.c c = p_dev->function_config; c 405 drivers/pcmcia/pcmcia_resource.c release_io_space(s, &c->io[0]); c 407 drivers/pcmcia/pcmcia_resource.c if (c->io[1].end) c 408 drivers/pcmcia/pcmcia_resource.c release_io_space(s, &c->io[1]); c 411 
drivers/pcmcia/pcmcia_resource.c c->state &= ~CONFIG_IO_REQ; c 485 drivers/pcmcia/pcmcia_resource.c config_t *c; c 496 drivers/pcmcia/pcmcia_resource.c c = p_dev->function_config; c 497 drivers/pcmcia/pcmcia_resource.c if (c->state & CONFIG_LOCKED) { c 576 drivers/pcmcia/pcmcia_resource.c u8 b = c->io[0].start & 0xff; c 578 drivers/pcmcia/pcmcia_resource.c b = (c->io[0].start >> 8) & 0xff; c 582 drivers/pcmcia/pcmcia_resource.c u8 b = resource_size(&c->io[0]) + resource_size(&c->io[1]) - 1; c 587 drivers/pcmcia/pcmcia_resource.c if (c->state & CONFIG_IO_REQ) { c 608 drivers/pcmcia/pcmcia_resource.c c->state |= CONFIG_LOCKED; c 629 drivers/pcmcia/pcmcia_resource.c config_t *c = p_dev->function_config; c 634 drivers/pcmcia/pcmcia_resource.c &c->io[0], &c->io[1]); c 641 drivers/pcmcia/pcmcia_resource.c if (c->state & CONFIG_LOCKED) { c 645 drivers/pcmcia/pcmcia_resource.c if (c->state & CONFIG_IO_REQ) { c 650 drivers/pcmcia/pcmcia_resource.c ret = alloc_io_space(s, &c->io[0], p_dev->io_lines); c 654 drivers/pcmcia/pcmcia_resource.c if (c->io[1].end) { c 655 drivers/pcmcia/pcmcia_resource.c ret = alloc_io_space(s, &c->io[1], p_dev->io_lines); c 657 drivers/pcmcia/pcmcia_resource.c struct resource tmp = c->io[0]; c 659 drivers/pcmcia/pcmcia_resource.c release_io_space(s, &c->io[0]); c 661 drivers/pcmcia/pcmcia_resource.c c->io[0].end = resource_size(&tmp); c 662 drivers/pcmcia/pcmcia_resource.c c->io[0].start = tmp.start; c 663 drivers/pcmcia/pcmcia_resource.c c->io[0].flags = tmp.flags; c 667 drivers/pcmcia/pcmcia_resource.c c->io[1].start = 0; c 669 drivers/pcmcia/pcmcia_resource.c c->state |= CONFIG_IO_REQ; c 673 drivers/pcmcia/pcmcia_resource.c &c->io[0], &c->io[1]); c 115 drivers/perf/arm-cci.c #define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu)) c 131 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c u32 c; c 146 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c c = a - (b * whole); c 148 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c b = c; c 354 drivers/phy/mscc/phy-ocelot-serdes.c #define SERDES_MUX_SGMII(i, p, m, c) \ c 355 drivers/phy/mscc/phy-ocelot-serdes.c SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_SGMII, m, c) c 356 drivers/phy/mscc/phy-ocelot-serdes.c #define SERDES_MUX_QSGMII(i, p, m, c) \ c 357 drivers/phy/mscc/phy-ocelot-serdes.c SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_QSGMII, m, c) c 375 drivers/pinctrl/bcm/pinctrl-bcm281xx.c #define BCM281XX_PIN_DESC(a, b, c) \ c 376 drivers/pinctrl/bcm/pinctrl-bcm281xx.c { .number = a, .name = b, .drv_data = &c##_pin } c 47 drivers/pinctrl/freescale/pinctrl-mxs.h #define CONFIG_TO_PULL(c) ((c) >> PULL_SHIFT & 0x1) c 48 drivers/pinctrl/freescale/pinctrl-mxs.h #define CONFIG_TO_VOL(c) ((c) >> VOL_SHIFT & 0x1) c 49 drivers/pinctrl/freescale/pinctrl-mxs.h #define CONFIG_TO_MA(c) ((c) >> MA_SHIFT & 0x3) c 121 drivers/pinctrl/intel/pinctrl-intel.c #define pin_to_padno(c, p) ((p) - (c)->pin_base) c 69 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c const struct mtk_pin_field_calc *c, *e; c 82 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c c = rc->range; c 83 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c e = c + rc->nranges; c 85 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c while (c < e) { c 86 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c if (desc->number >= c->s_pin && desc->number <= c->e_pin) c 88 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c c++; c 91 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c if (c >= e) { c 97 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c if (c->i_base > hw->nbase - 1) { 
c 108 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c bits = c->fixed ? c->s_bit : c->s_bit + c 109 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c (desc->number - c->s_pin) * (c->x_bits); c 114 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c pfd->index = c->i_base; c 115 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c pfd->offset = c->s_addr + c->x_addrs * (bits / c->sz_reg); c 116 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c pfd->bitpos = bits % c->sz_reg; c 117 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c pfd->mask = (1 << c->x_bits) - 1; c 123 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c pfd->next = pfd->bitpos + c->x_bits > c->sz_reg ? c->x_addrs : 0; c 97 drivers/pinctrl/nomadik/pinctrl-abx500.h #define GPIO_IRQ_CLUSTER(a, b, c) \ c 101 drivers/pinctrl/nomadik/pinctrl-abx500.h .to_irq = c, \ c 135 drivers/pinctrl/nomadik/pinctrl-abx500.h #define ABX500_PINRANGE(a, b, c) { .offset = a, .npins = b, .altfunc = c } c 897 drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c #define NPCM7XX_PINCFG(a, b, c, d, e, f, g, h, i, j, k) \ c 898 drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c [a] { .fn0 = fn_ ## b, .reg0 = NPCM7XX_GCR_ ## c, .bit0 = d, \ c 41 drivers/pinctrl/pinctrl-falcon.c #define pad_w32_mask(c, clear, set, reg) \ c 42 drivers/pinctrl/pinctrl-falcon.c pad_w32(c, (pad_r32(c, reg) & ~(clear)) | (set), reg) c 352 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(c,0, R, USB1, R, ENET, LCD, R, R, SDMMC, ADC1|1, ND); c 353 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(c,1, USB1, R, UART1, ENET, GPIO, R, TIMER3, SDMMC, 0, ND); c 354 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(c,2, USB1, R, UART1, ENET, GPIO, R, R, SDMMC, 0, ND); c 355 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(c,3, USB1, R, UART1, ENET, GPIO, R, R, SDMMC, ADC1|0, ND); c 356 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(c,4, R, USB1, R, ENET, GPIO, R, TIMER3, SDMMC, 0, ND); c 357 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(c,5, R, USB1, R, ENET, GPIO, R, TIMER3, SDMMC, 0, ND); c 358 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(c,6, R, USB1, R, ENET, GPIO, R, TIMER3, SDMMC, 0, ND); c 359 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(c,7, R, USB1, R, ENET, GPIO, R, TIMER3, SDMMC, 0, ND); c 360 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(c,8, R, USB1, R, ENET, GPIO, R, TIMER3, SDMMC, 0, ND); c 361 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(c,9, R, USB1, R, ENET, GPIO, R, TIMER3, SDMMC, 0, ND); c 362 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(c,10, R, USB1, UART1, R, GPIO, R, TIMER3, SDMMC, 0, ND); c 363 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(c,11, R, USB1, UART1, R, GPIO, R, R, SDMMC, 0, ND); c 364 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(c,12, R, R, UART1, R, GPIO, SGPIO, I2S0_TX_SDA,SDMMC, 0, ND); c 365 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(c,13, R, R, UART1, R, GPIO, SGPIO, I2S0_TX_WS, SDMMC, 0, ND); c 366 drivers/pinctrl/pinctrl-lpc18xx.c LPC_P(c,14, R, R, UART1, R, GPIO, SGPIO, ENET, SDMMC, 0, ND); c 562 drivers/pinctrl/pinctrl-lpc18xx.c LPC18XX_PIN_P(c,0), c 563 drivers/pinctrl/pinctrl-lpc18xx.c LPC18XX_PIN_P(c,1), c 564 drivers/pinctrl/pinctrl-lpc18xx.c LPC18XX_PIN_P(c,2), c 565 drivers/pinctrl/pinctrl-lpc18xx.c LPC18XX_PIN_P(c,3), c 566 drivers/pinctrl/pinctrl-lpc18xx.c LPC18XX_PIN_P(c,4), c 567 drivers/pinctrl/pinctrl-lpc18xx.c LPC18XX_PIN_P(c,5), c 568 drivers/pinctrl/pinctrl-lpc18xx.c LPC18XX_PIN_P(c,6), c 569 drivers/pinctrl/pinctrl-lpc18xx.c LPC18XX_PIN_P(c,7), c 570 drivers/pinctrl/pinctrl-lpc18xx.c LPC18XX_PIN_P(c,8), c 571 drivers/pinctrl/pinctrl-lpc18xx.c LPC18XX_PIN_P(c,9), c 572 drivers/pinctrl/pinctrl-lpc18xx.c LPC18XX_PIN_P(c,10), c 573 
drivers/pinctrl/pinctrl-lpc18xx.c LPC18XX_PIN_P(c,11), c 574 drivers/pinctrl/pinctrl-lpc18xx.c LPC18XX_PIN_P(c,12), c 575 drivers/pinctrl/pinctrl-lpc18xx.c LPC18XX_PIN_P(c,13), c 576 drivers/pinctrl/pinctrl-lpc18xx.c LPC18XX_PIN_P(c,14), c 152 drivers/pinctrl/uniphier/pinctrl-uniphier.h #define UNIPHIER_PINCTRL_PIN(a, b, c, d, e, f, g) \ c 156 drivers/pinctrl/uniphier/pinctrl-uniphier.h .drv_data = (void *)UNIPHIER_PIN_ATTR_PACKED(c, d, e, f, g), \ c 326 drivers/platform/x86/eeepc-laptop.c static int get_cpufv(struct eeepc_laptop *eeepc, struct eeepc_cpufv *c) c 328 drivers/platform/x86/eeepc-laptop.c c->cur = get_acpi(eeepc, CM_ASL_CPUFV); c 329 drivers/platform/x86/eeepc-laptop.c if (c->cur < 0) c 332 drivers/platform/x86/eeepc-laptop.c c->num = (c->cur >> 8) & 0xff; c 333 drivers/platform/x86/eeepc-laptop.c c->cur &= 0xff; c 334 drivers/platform/x86/eeepc-laptop.c if (c->num == 0 || c->num > 12) c 344 drivers/platform/x86/eeepc-laptop.c struct eeepc_cpufv c; c 348 drivers/platform/x86/eeepc-laptop.c if (get_cpufv(eeepc, &c)) c 350 drivers/platform/x86/eeepc-laptop.c for (i = 0; i < c.num; i++) c 361 drivers/platform/x86/eeepc-laptop.c struct eeepc_cpufv c; c 363 drivers/platform/x86/eeepc-laptop.c if (get_cpufv(eeepc, &c)) c 365 drivers/platform/x86/eeepc-laptop.c return sprintf(buf, "%#x\n", (c.num << 8) | c.cur); c 373 drivers/platform/x86/eeepc-laptop.c struct eeepc_cpufv c; c 378 drivers/platform/x86/eeepc-laptop.c if (get_cpufv(eeepc, &c)) c 383 drivers/platform/x86/eeepc-laptop.c if (value < 0 || value >= c.num) c 4086 drivers/platform/x86/sony-laptop.c unsigned char c; c 4098 drivers/platform/x86/sony-laptop.c (kfifo_out_locked(&sonypi_compat.fifo, &c, sizeof(c), c 4099 drivers/platform/x86/sony-laptop.c &sonypi_compat.fifo_lock) == sizeof(c))) { c 4100 drivers/platform/x86/sony-laptop.c if (put_user(c, buf++)) c 597 drivers/platform/x86/thinkpad_acpi.c char c = *(fmt++); c 598 drivers/platform/x86/thinkpad_acpi.c switch (c) { c 606 drivers/platform/x86/thinkpad_acpi.c c); c 23 drivers/pnp/pnpacpi/core.c #define TEST_HEX(c) \ c 24 drivers/pnp/pnpacpi/core.c if (!(('0' <= (c) && (c) <= '9') || ('A' <= (c) && (c) <= 'F'))) \ c 26 drivers/pnp/pnpacpi/core.c #define TEST_ALPHA(c) \ c 27 drivers/pnp/pnpacpi/core.c if (!('A' <= (c) && (c) <= 'Z')) \ c 1293 drivers/powercap/intel_rapl_common.c struct cpuinfo_x86 *c = &cpu_data(cpu); c 1310 drivers/powercap/intel_rapl_common.c "package-%d-die-%d", c->phys_proc_id, c->cpu_die_id); c 1313 drivers/powercap/intel_rapl_common.c c->phys_proc_id); c 41 drivers/pwm/pwm-atmel-hlcdc.c static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm, c 44 drivers/pwm/pwm-atmel-hlcdc.c struct atmel_hlcdc_pwm *chip = to_atmel_hlcdc_pwm(c); c 42 drivers/pwm/pwm-crc.c static int crc_pwm_enable(struct pwm_chip *c, struct pwm_device *pwm) c 44 drivers/pwm/pwm-crc.c struct crystalcove_pwm *crc_pwm = to_crc_pwm(c); c 51 drivers/pwm/pwm-crc.c static void crc_pwm_disable(struct pwm_chip *c, struct pwm_device *pwm) c 53 drivers/pwm/pwm-crc.c struct crystalcove_pwm *crc_pwm = to_crc_pwm(c); c 58 drivers/pwm/pwm-crc.c static int crc_pwm_config(struct pwm_chip *c, struct pwm_device *pwm, c 61 drivers/pwm/pwm-crc.c struct crystalcove_pwm *crc_pwm = to_crc_pwm(c); c 74 drivers/pwm/pwm-crc.c crc_pwm_disable(c, pwm); c 81 drivers/pwm/pwm-crc.c crc_pwm_enable(c, pwm); c 28 drivers/pwm/pwm-cros-ec.c static inline struct cros_ec_pwm_device *pwm_to_cros_ec_pwm(struct pwm_chip *c) c 30 drivers/pwm/pwm-cros-ec.c return container_of(c, struct cros_ec_pwm_device, 
chip); c 66 drivers/pwm/pwm-ep93xx.c unsigned long long c; c 82 drivers/pwm/pwm-ep93xx.c c = clk_get_rate(ep93xx_pwm->clk); c 83 drivers/pwm/pwm-ep93xx.c c *= period_ns; c 84 drivers/pwm/pwm-ep93xx.c do_div(c, 1000000000); c 85 drivers/pwm/pwm-ep93xx.c period_cycles = c; c 87 drivers/pwm/pwm-ep93xx.c c = period_cycles; c 88 drivers/pwm/pwm-ep93xx.c c *= duty_ns; c 89 drivers/pwm/pwm-ep93xx.c do_div(c, period_ns); c 90 drivers/pwm/pwm-ep93xx.c duty_cycles = c; c 23 drivers/pwm/pwm-fsl-ftm.c #define FTM_SC_CLK(c) (((c) + 1) << FTM_SC_CLK_MASK_SHIFT) c 138 drivers/pwm/pwm-fsl-ftm.c unsigned long long c; c 141 drivers/pwm/pwm-fsl-ftm.c c = clk_get_rate(fpc->clk[index]); c 142 drivers/pwm/pwm-fsl-ftm.c c = c * period_ns; c 143 drivers/pwm/pwm-fsl-ftm.c do_div(c, 1000000000UL); c 145 drivers/pwm/pwm-fsl-ftm.c if (c == 0) c 148 drivers/pwm/pwm-fsl-ftm.c for (ps = 0; ps < 8 ; ++ps, c >>= 1) { c 149 drivers/pwm/pwm-fsl-ftm.c if (c <= 0x10000) { c 152 drivers/pwm/pwm-fsl-ftm.c periodcfg->mod_period = c - 1; c 182 drivers/pwm/pwm-imx-tpm.c struct pwm_state c; c 218 drivers/pwm/pwm-imx-tpm.c pwm_imx_tpm_get_state(chip, pwm, &c); c 221 drivers/pwm/pwm-imx-tpm.c if (c.enabled && c.polarity != state->polarity) c 224 drivers/pwm/pwm-imx-tpm.c if (state->duty_cycle != c.duty_cycle) { c 275 drivers/pwm/pwm-imx-tpm.c if (state->enabled != c.enabled) { c 217 drivers/pwm/pwm-imx27.c unsigned long long c; c 224 drivers/pwm/pwm-imx27.c c = clk_get_rate(imx->clk_per); c 225 drivers/pwm/pwm-imx27.c c *= state->period; c 227 drivers/pwm/pwm-imx27.c do_div(c, 1000000000); c 228 drivers/pwm/pwm-imx27.c period_cycles = c; c 233 drivers/pwm/pwm-imx27.c c = (unsigned long long)period_cycles * state->duty_cycle; c 234 drivers/pwm/pwm-imx27.c do_div(c, state->period); c 235 drivers/pwm/pwm-imx27.c duty_cycles = c; c 33 drivers/pwm/pwm-lpc32xx.c unsigned long long c; c 36 drivers/pwm/pwm-lpc32xx.c c = clk_get_rate(lpc32xx->clk); c 39 drivers/pwm/pwm-lpc32xx.c period_cycles = div64_u64(c * period_ns, c 86 drivers/pwm/pwm-lpss.c unsigned long c = lpwm->info->clk_rate, base_unit_range; c 99 drivers/pwm/pwm-lpss.c base_unit = DIV_ROUND_CLOSEST_ULL(freq, c); c 204 drivers/pwm/pwm-lpss.c unsigned long c; c 220 drivers/pwm/pwm-lpss.c c = lpwm->info->clk_rate; c 221 drivers/pwm/pwm-lpss.c if (!c) c 51 drivers/pwm/pwm-mxs.c unsigned long long c; c 55 drivers/pwm/pwm-mxs.c c = rate / cdiv[div]; c 56 drivers/pwm/pwm-mxs.c c = c * period_ns; c 57 drivers/pwm/pwm-mxs.c do_div(c, 1000000000); c 58 drivers/pwm/pwm-mxs.c if (c < PERIOD_PERIOD_MAX) c 65 drivers/pwm/pwm-mxs.c period_cycles = c; c 66 drivers/pwm/pwm-mxs.c c *= duty_ns; c 67 drivers/pwm/pwm-mxs.c do_div(c, period_ns); c 68 drivers/pwm/pwm-mxs.c duty_cycles = c; c 43 drivers/pwm/pwm-puv3.c unsigned long long c; c 45 drivers/pwm/pwm-puv3.c c = clk_get_rate(puv3->clk); c 46 drivers/pwm/pwm-puv3.c c = c * period_ns; c 47 drivers/pwm/pwm-puv3.c do_div(c, 1000000000); c 48 drivers/pwm/pwm-puv3.c period_cycles = c; c 64 drivers/pwm/pwm-pxa.c unsigned long long c; c 71 drivers/pwm/pwm-pxa.c c = clk_get_rate(pc->clk); c 72 drivers/pwm/pwm-pxa.c c = c * period_ns; c 73 drivers/pwm/pwm-pxa.c do_div(c, 1000000000); c 74 drivers/pwm/pwm-pxa.c period_cycles = c; c 90 drivers/pwm/pwm-renesas-tpu.c #define to_tpu_device(c) container_of(c, struct tpu_device, chip) c 55 drivers/pwm/pwm-rockchip.c static inline struct rockchip_pwm_chip *to_rockchip_pwm_chip(struct pwm_chip *c) c 57 drivers/pwm/pwm-rockchip.c return container_of(c, struct rockchip_pwm_chip, chip); c 56 drivers/pwm/pwm-sifive.c 
struct pwm_sifive_ddata *pwm_sifive_chip_to_ddata(struct pwm_chip *c) c 58 drivers/pwm/pwm-sifive.c return container_of(c, struct pwm_sifive_ddata, chip); c 70 drivers/pwm/pwm-tegra.c unsigned long long c = duty_ns, hz; c 80 drivers/pwm/pwm-tegra.c c *= (1 << PWM_DUTY_WIDTH); c 81 drivers/pwm/pwm-tegra.c c = DIV_ROUND_CLOSEST_ULL(c, period_ns); c 83 drivers/pwm/pwm-tegra.c val = (u32)c << PWM_DUTY_SHIFT; c 55 drivers/pwm/pwm-tiecap.c unsigned long long c; c 61 drivers/pwm/pwm-tiecap.c c = pc->clk_rate; c 62 drivers/pwm/pwm-tiecap.c c = c * period_ns; c 63 drivers/pwm/pwm-tiecap.c do_div(c, NSEC_PER_SEC); c 64 drivers/pwm/pwm-tiecap.c period_cycles = (u32)c; c 70 drivers/pwm/pwm-tiecap.c c = pc->clk_rate; c 71 drivers/pwm/pwm-tiecap.c c = c * duty_ns; c 72 drivers/pwm/pwm-tiecap.c do_div(c, NSEC_PER_SEC); c 73 drivers/pwm/pwm-tiecap.c duty_cycles = (u32)c; c 225 drivers/pwm/pwm-tiehrpwm.c unsigned long long c; c 230 drivers/pwm/pwm-tiehrpwm.c c = pc->clk_rate; c 231 drivers/pwm/pwm-tiehrpwm.c c = c * period_ns; c 232 drivers/pwm/pwm-tiehrpwm.c do_div(c, NSEC_PER_SEC); c 233 drivers/pwm/pwm-tiehrpwm.c period_cycles = (unsigned long)c; c 239 drivers/pwm/pwm-tiehrpwm.c c = pc->clk_rate; c 240 drivers/pwm/pwm-tiehrpwm.c c = c * duty_ns; c 241 drivers/pwm/pwm-tiehrpwm.c do_div(c, NSEC_PER_SEC); c 242 drivers/pwm/pwm-tiehrpwm.c duty_cycles = (unsigned long)c; c 76 drivers/pwm/pwm-vt8500.c unsigned long long c; c 87 drivers/pwm/pwm-vt8500.c c = clk_get_rate(vt8500->clk); c 88 drivers/pwm/pwm-vt8500.c c = c * period_ns; c 89 drivers/pwm/pwm-vt8500.c do_div(c, 1000000000); c 90 drivers/pwm/pwm-vt8500.c period_cycles = c; c 104 drivers/pwm/pwm-vt8500.c c = (unsigned long long)pv * duty_ns; c 105 drivers/pwm/pwm-vt8500.c do_div(c, period_ns); c 106 drivers/pwm/pwm-vt8500.c dc = c; c 102 drivers/pwm/pwm-zx.c unsigned long long c; c 110 drivers/pwm/pwm-zx.c c = rate / div; c 111 drivers/pwm/pwm-zx.c c = c * period_ns; c 112 drivers/pwm/pwm-zx.c do_div(c, NSEC_PER_SEC); c 114 drivers/pwm/pwm-zx.c if (c < ZX_PWM_PERIOD_MAX) c 124 drivers/pwm/pwm-zx.c period_cycles = c; c 125 drivers/pwm/pwm-zx.c c *= duty_ns; c 126 drivers/pwm/pwm-zx.c do_div(c, period_ns); c 127 drivers/pwm/pwm-zx.c duty_cycles = c; c 1528 drivers/regulator/core.c const struct regulation_constraints *c = regulator->rdev->constraints; c 1532 drivers/regulator/core.c if (!c) c 1547 drivers/regulator/core.c c->always_on, c 1548 drivers/regulator/core.c c->boot_on, c 1549 drivers/regulator/core.c c->apply_uV, c 1550 drivers/regulator/core.c c->ramp_disable, c 1551 drivers/regulator/core.c c->soft_start, c 1552 drivers/regulator/core.c c->pull_down, c 1553 drivers/regulator/core.c c->over_current_protection); c 5446 drivers/regulator/core.c struct regulation_constraints *c; c 5465 drivers/regulator/core.c c = rdev->constraints; c 5466 drivers/regulator/core.c if (c) { c 5470 drivers/regulator/core.c c->min_uV / 1000, c->max_uV / 1000); c 5474 drivers/regulator/core.c c->min_uA / 1000, c->max_uA / 1000); c 5675 drivers/regulator/core.c struct regulation_constraints *c = rdev->constraints; c 5678 drivers/regulator/core.c if (c && c->always_on) c 505 drivers/regulator/ti-abb-regulator.c struct regulation_constraints *c = &rinit_data->constraints; c 616 drivers/regulator/ti-abb-regulator.c c->min_uV = min_uV; c 617 drivers/regulator/ti-abb-regulator.c c->max_uV = max_uV; c 695 drivers/regulator/ti-abb-regulator.c struct regulation_constraints *c; c 854 drivers/regulator/ti-abb-regulator.c c = &initdata->constraints; c 856 
drivers/regulator/ti-abb-regulator.c c->valid_ops_mask |= REGULATOR_CHANGE_VOLTAGE; c 857 drivers/regulator/ti-abb-regulator.c c->always_on = true; c 590 drivers/regulator/twl-regulator.c struct regulation_constraints *c; c 611 drivers/regulator/twl-regulator.c c = &initdata->constraints; c 612 drivers/regulator/twl-regulator.c c->valid_modes_mask &= REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY; c 613 drivers/regulator/twl-regulator.c c->valid_ops_mask &= REGULATOR_CHANGE_VOLTAGE c 624 drivers/regulator/twl-regulator.c c->always_on = true; c 675 drivers/regulator/twl6030-regulator.c struct regulation_constraints *c; c 696 drivers/regulator/twl6030-regulator.c c = &initdata->constraints; c 697 drivers/regulator/twl6030-regulator.c c->valid_modes_mask &= REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY; c 698 drivers/regulator/twl6030-regulator.c c->valid_ops_mask &= REGULATOR_CHANGE_VOLTAGE c 193 drivers/remoteproc/remoteproc_debugfs.c struct fw_rsc_carveout *c; c 211 drivers/remoteproc/remoteproc_debugfs.c c = rsc; c 213 drivers/remoteproc/remoteproc_debugfs.c seq_printf(seq, " Device Address 0x%x\n", c->da); c 214 drivers/remoteproc/remoteproc_debugfs.c seq_printf(seq, " Physical Address 0x%x\n", c->pa); c 215 drivers/remoteproc/remoteproc_debugfs.c seq_printf(seq, " Length 0x%x Bytes\n", c->len); c 216 drivers/remoteproc/remoteproc_debugfs.c seq_printf(seq, " Flags 0x%x\n", c->flags); c 217 drivers/remoteproc/remoteproc_debugfs.c seq_printf(seq, " Reserved (should be zero) [%d]\n", c->reserved); c 218 drivers/remoteproc/remoteproc_debugfs.c seq_printf(seq, " Name %s\n\n", c->name); c 45 drivers/rtc/rtc-ds1216.c unsigned char c; c 49 drivers/rtc/rtc-ds1216.c c = 0; c 51 drivers/rtc/rtc-ds1216.c c |= (readb(ioaddr) & 0x1) << j; c 52 drivers/rtc/rtc-ds1216.c buf[i] = c; c 58 drivers/rtc/rtc-ds1216.c unsigned char c; c 62 drivers/rtc/rtc-ds1216.c c = buf[i]; c 64 drivers/rtc/rtc-ds1216.c writeb(c, ioaddr); c 65 drivers/rtc/rtc-ds1216.c c = c >> 1; c 24 drivers/rtc/rtc-fsl-ftm-alarm.c #define FTM_SC_CLK(c) ((c) << FTM_SC_CLK_MASK_SHIFT) c 103 drivers/rtc/rtc-ls1x.c unsigned long v, t, c; c 113 drivers/rtc/rtc-ls1x.c c = 0x10000; c 115 drivers/rtc/rtc-ls1x.c while ((readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_TS) && --c) c 118 drivers/rtc/rtc-ls1x.c if (!c) { c 125 drivers/rtc/rtc-ls1x.c c = 0x10000; c 126 drivers/rtc/rtc-ls1x.c while ((readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_TS) && --c) c 129 drivers/rtc/rtc-ls1x.c if (!c) { c 44 drivers/s390/block/dasd_fba.h __u8 c; c 54 drivers/s390/block/dasd_fba.h __u8 c; c 885 drivers/s390/block/dasd_int.h #define dasd_eer_write(d,c,i) do { } while (0) c 526 drivers/s390/char/con3215.c int c, count; c 538 drivers/s390/char/con3215.c c = min_t(int, count, c 541 drivers/s390/char/con3215.c if (c <= 0) c 543 drivers/s390/char/con3215.c memcpy(raw->buffer + raw->head, str, c); c 544 drivers/s390/char/con3215.c ASCEBC(raw->buffer + raw->head, c); c 545 drivers/s390/char/con3215.c raw->head = (raw->head + c) & (RAW3215_BUFFER_SIZE - 1); c 546 drivers/s390/char/con3215.c raw->count += c; c 547 drivers/s390/char/con3215.c raw->line_pos += c; c 548 drivers/s390/char/con3215.c str += c; c 549 drivers/s390/char/con3215.c count -= c; c 845 drivers/s390/char/con3215.c static struct tty_driver *con3215_device(struct console *c, int *index) c 847 drivers/s390/char/con3215.c *index = c->index; c 449 drivers/s390/char/con3270.c con3270_cline_insert(struct con3270 *cp, unsigned char c) c 452 drivers/s390/char/con3270.c cp->view.ascebc[(c < ' ') ? 
' ' : c]; c 495 drivers/s390/char/con3270.c unsigned char c; c 500 drivers/s390/char/con3270.c c = *str++; c 503 drivers/s390/char/con3270.c if (c != '\n') c 504 drivers/s390/char/con3270.c con3270_cline_insert(cp, c); c 505 drivers/s390/char/con3270.c if (c == '\n' || cp->cline->len >= cp->view.cols) c 516 drivers/s390/char/con3270.c con3270_device(struct console *c, int *index) c 518 drivers/s390/char/con3270.c *index = c->index; c 286 drivers/s390/char/keyboard.c to_utf8(struct tty_port *port, ushort c) c 288 drivers/s390/char/keyboard.c if (c < 0x80) c 290 drivers/s390/char/keyboard.c kbd_put_queue(port, c); c 291 drivers/s390/char/keyboard.c else if (c < 0x800) { c 293 drivers/s390/char/keyboard.c kbd_put_queue(port, 0xc0 | (c >> 6)); c 294 drivers/s390/char/keyboard.c kbd_put_queue(port, 0x80 | (c & 0x3f)); c 297 drivers/s390/char/keyboard.c kbd_put_queue(port, 0xe0 | (c >> 12)); c 298 drivers/s390/char/keyboard.c kbd_put_queue(port, 0x80 | ((c >> 6) & 0x3f)); c 299 drivers/s390/char/keyboard.c kbd_put_queue(port, 0x80 | (c & 0x3f)); c 221 drivers/s390/char/sclp_con.c sclp_console_device(struct console *c, int *index) c 223 drivers/s390/char/sclp_con.c *index = c->index; c 315 drivers/s390/char/sclp_vt220.c unsigned char c; c 333 drivers/s390/char/sclp_vt220.c c = msg[from]; c 335 drivers/s390/char/sclp_vt220.c if (c == 0x0a) { c 337 drivers/s390/char/sclp_vt220.c ((unsigned char *) buffer)[to++] = c; c 343 drivers/s390/char/sclp_vt220.c ((unsigned char *) buffer)[to++] = c; c 847 drivers/s390/char/sclp_vt220.c sclp_vt220_con_device(struct console *c, int *index) c 899 drivers/s390/cio/chsc.c int c, int m, void *page) c 904 drivers/s390/cio/chsc.c if ((rfmt == 1 || rfmt == 0) && c == 1 && c 920 drivers/s390/cio/chsc.c scpd_area->c = c; c 936 drivers/s390/cio/chsc.c #define chsc_det_chp_desc(FMT, c) \ c 946 drivers/s390/cio/chsc.c ret = chsc_determine_channel_path_desc(chpid, 0, FMT, c, 0, \ c 111 drivers/s390/cio/chsc.h u32 c:1; c 154 drivers/s390/cio/chsc.h int c, int m, void *page); c 804 drivers/s390/cio/chsc_sch.c chpd->rfmt, chpd->c, chpd->m, c 1087 drivers/s390/cio/cmf.c size_t c) c 1108 drivers/s390/cio/cmf.c return ret ? 
ret : c; c 282 drivers/s390/cio/device_pgid.c char *c; c 284 drivers/s390/cio/device_pgid.c for (c = (char *)p + 1; c < (char *)(p + 1); c++) { c 285 drivers/s390/cio/device_pgid.c if (*c != 0) c 202 drivers/s390/net/ctcm_main.c struct channel **c = &channels; c 212 drivers/s390/net/ctcm_main.c while (*c) { c 213 drivers/s390/net/ctcm_main.c if (*c == ch) { c 214 drivers/s390/net/ctcm_main.c *c = ch->next; c 236 drivers/s390/net/ctcm_main.c c = &((*c)->next); c 1349 drivers/s390/net/ctcm_main.c struct channel **c = &channels; c 1463 drivers/s390/net/ctcm_main.c while (*c && ctcm_less_than((*c)->id, ch->id)) c 1464 drivers/s390/net/ctcm_main.c c = &(*c)->next; c 1466 drivers/s390/net/ctcm_main.c if (*c && (!strncmp((*c)->id, ch->id, CTCM_ID_SIZE))) { c 1469 drivers/s390/net/ctcm_main.c __func__, (*c)->id); c 1484 drivers/s390/net/ctcm_main.c ch->next = *c; c 1485 drivers/s390/net/ctcm_main.c *c = ch; c 190 drivers/s390/net/qeth_core.h #define qeth_adp_supported(c, f) \ c 191 drivers/s390/net/qeth_core.h qeth_is_adp_supported(&c->options.adp, f) c 192 drivers/s390/net/qeth_core.h #define qeth_is_supported(c, f) \ c 193 drivers/s390/net/qeth_core.h qeth_is_ipa_supported(&c->options.ipa4, f) c 194 drivers/s390/net/qeth_core.h #define qeth_is_enabled(c, f) \ c 195 drivers/s390/net/qeth_core.h qeth_is_ipa_enabled(&c->options.ipa4, f) c 196 drivers/s390/net/qeth_core.h #define qeth_is_supported6(c, f) \ c 197 drivers/s390/net/qeth_core.h qeth_is_ipa_supported(&c->options.ipa6, f) c 198 drivers/s390/net/qeth_core.h #define qeth_is_enabled6(c, f) \ c 199 drivers/s390/net/qeth_core.h qeth_is_ipa_enabled(&c->options.ipa6, f) c 200 drivers/s390/net/qeth_core.h #define qeth_is_ipafunc_supported(c, prot, f) \ c 202 drivers/s390/net/qeth_core.h qeth_is_supported6(c, f) : qeth_is_supported(c, f)) c 203 drivers/s390/net/qeth_core.h #define qeth_is_ipafunc_enabled(c, prot, f) \ c 205 drivers/s390/net/qeth_core.h qeth_is_enabled6(c, f) : qeth_is_enabled(c, f)) c 403 drivers/s390/net/qeth_core_main.c struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending; c 405 drivers/s390/net/qeth_core_main.c while (c) { c 407 drivers/s390/net/qeth_core_main.c atomic_read(&c->state) == c 409 drivers/s390/net/qeth_core_main.c struct qeth_qdio_out_buffer *f = c; c 415 drivers/s390/net/qeth_core_main.c qeth_tx_complete_buf(c, forced_cleanup, 0); c 417 drivers/s390/net/qeth_core_main.c c = f->next_pending; c 419 drivers/s390/net/qeth_core_main.c head->next_pending = c; c 422 drivers/s390/net/qeth_core_main.c head = c; c 423 drivers/s390/net/qeth_core_main.c c = c->next_pending; c 93 drivers/sbus/char/openprom.c char c; c 105 drivers/sbus/char/openprom.c if (get_user(c, &info->oprom_array[bufsize])) { c 109 drivers/sbus/char/openprom.c if (c == '\0') c 111 drivers/sbus/char/openprom.c (*opp_p)->oprom_array[bufsize++] = c; c 461 drivers/scsi/3w-9xxx.h #define TW_PRINTK(h,a,b,c) { \ c 463 drivers/scsi/3w-9xxx.h printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s.\n",h->host_no,a,b,c); \ c 465 drivers/scsi/3w-9xxx.h printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s.\n",a,b,c); \ c 191 drivers/scsi/3w-sas.h #define TW_PRINTK(h,a,b,c) { \ c 193 drivers/scsi/3w-sas.h printk(KERN_WARNING "3w-sas: scsi%d: ERROR: (0x%02X:0x%04X): %s.\n",h->host_no,a,b,c); \ c 195 drivers/scsi/3w-sas.h printk(KERN_WARNING "3w-sas: ERROR: (0x%02X:0x%04X): %s.\n",a,b,c); \ c 1208 drivers/scsi/3w-xxxx.c unsigned char c = 1; c 1243 drivers/scsi/3w-xxxx.c error = tw_setfeature(tw_dev, 2, 1, &c); c 1247 drivers/scsi/NCR5380.c int c = 
*count; c 1292 drivers/scsi/NCR5380.c if (!((p & SR_MSG) && c > 1)) { c 1326 drivers/scsi/NCR5380.c if (!(p == PHASE_MSGIN && c == 1)) { c 1327 drivers/scsi/NCR5380.c if (p == PHASE_MSGOUT && c > 1) c 1332 drivers/scsi/NCR5380.c } while (--c); c 1334 drivers/scsi/NCR5380.c dsprintk(NDEBUG_PIO, instance, "residual %d\n", c); c 1336 drivers/scsi/NCR5380.c *count = c; c 1343 drivers/scsi/NCR5380.c if ((tmp & SR_REQ) || ((tmp & SR_IO) && c == 0)) c 1348 drivers/scsi/NCR5380.c if (!c || (*phase == p)) c 1468 drivers/scsi/NCR5380.c int c = *count; c 1483 drivers/scsi/NCR5380.c c -= hostdata->read_overruns; c 1485 drivers/scsi/NCR5380.c --c; c 1489 drivers/scsi/NCR5380.c (p & SR_IO) ? "receive" : "send", c, d); c 1493 drivers/scsi/NCR5380.c sun3scsi_dma_start(c, *data); c 1505 drivers/scsi/NCR5380.c result = NCR5380_dma_recv_setup(hostdata, d, c); c 1507 drivers/scsi/NCR5380.c result = NCR5380_dma_send_setup(hostdata, d, c); c 1539 drivers/scsi/NCR5380.c result = NCR5380_dma_recv_setup(hostdata, d, c); c 1541 drivers/scsi/NCR5380.c result = NCR5380_dma_send_setup(hostdata, d, c); c 1555 drivers/scsi/NCR5380.c hostdata->dma_len = c; c 902 drivers/scsi/aacraid/aachba.c int c; c 914 drivers/scsi/aacraid/aachba.c c = sizeof(str->vid); c 915 drivers/scsi/aacraid/aachba.c while (*cp && *cp != ' ' && --c) c 917 drivers/scsi/aacraid/aachba.c c = *cp; c 920 drivers/scsi/aacraid/aachba.c *cp = c; c 3218 drivers/scsi/advansys.c ASC_DVC_CFG *c; c 3223 drivers/scsi/advansys.c c = &boardp->dvc_cfg.asc_dvc_cfg; c 3224 drivers/scsi/advansys.c chip_scsi_id = c->chip_scsi_id; c 3232 drivers/scsi/advansys.c c->chip_version, c->mcode_date, c->mcode_version, c 3348 drivers/scsi/advansys.c ADV_DVC_CFG *c; c 3360 drivers/scsi/advansys.c c = &boardp->dvc_cfg.adv_dvc_cfg; c 3375 drivers/scsi/advansys.c "mcode_version 0x%x\n", c->chip_version, c 3376 drivers/scsi/advansys.c c->mcode_date, c->mcode_version); c 366 drivers/scsi/aic7xxx/aic79xx_osm.c static int aic79xx_setup(char *c); c 160 drivers/scsi/aic7xxx/aiclib.h #define ID_C(x, c) \ c 163 drivers/scsi/aic7xxx/aiclib.h (c) << 8, 0xFFFF00, 0 \ c 550 drivers/scsi/aic94xx/aic94xx_sds.c int c; c 553 drivers/scsi/aic94xx/aic94xx_sds.c for (c = 5000; c > 0; c--) { c 64 drivers/scsi/arm/scsi.h char c = *SCp->ptr; c 69 drivers/scsi/arm/scsi.h return c; c 72 drivers/scsi/arm/scsi.h static inline void put_next_SCp_byte(struct scsi_pointer *SCp, unsigned char c) c 74 drivers/scsi/arm/scsi.h *SCp->ptr = c; c 44 drivers/scsi/atp870u.c static void send_s870(struct atp_unit *dev,unsigned char c); c 45 drivers/scsi/atp870u.c static void atp_is(struct atp_unit *dev, unsigned char c, bool wide_chip, unsigned char lvdmode); c 122 drivers/scsi/atp870u.c unsigned char i, j, c, target_id, lun,cmdp; c 132 drivers/scsi/atp870u.c for (c = 0; c < 2; c++) { c 133 drivers/scsi/atp870u.c j = atp_readb_io(dev, c, 0x1f); c 136 drivers/scsi/atp870u.c dev->in_int[c] = 0; c 143 drivers/scsi/atp870u.c dev->in_int[c] = 1; c 144 drivers/scsi/atp870u.c cmdp = atp_readb_io(dev, c, 0x10); c 145 drivers/scsi/atp870u.c if (dev->working[c] != 0) { c 147 drivers/scsi/atp870u.c if ((atp_readb_io(dev, c, 0x16) & 0x80) == 0) c 148 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x16, (atp_readb_io(dev, c, 0x16) | 0x80)); c 150 drivers/scsi/atp870u.c if ((atp_readb_pci(dev, c, 0x00) & 0x08) != 0) c 153 drivers/scsi/atp870u.c if ((atp_readb_pci(dev, c, 2) & 0x08) == 0) c 155 drivers/scsi/atp870u.c if ((atp_readb_pci(dev, c, 2) & 0x01) == 0) c 159 drivers/scsi/atp870u.c atp_writeb_pci(dev, c, 0, 0x00); c 161 
drivers/scsi/atp870u.c i = atp_readb_io(dev, c, 0x17); c 164 drivers/scsi/atp870u.c atp_writeb_pci(dev, c, 2, 0x06); c 166 drivers/scsi/atp870u.c target_id = atp_readb_io(dev, c, 0x15); c 179 drivers/scsi/atp870u.c if (dev->last_cmd[c] == 0xff) { c 180 drivers/scsi/atp870u.c dev->last_cmd[c] = target_id; c 182 drivers/scsi/atp870u.c dev->last_cmd[c] |= 0x40; c 185 drivers/scsi/atp870u.c dev->r1f[c][target_id] |= j; c 190 drivers/scsi/atp870u.c if ((dev->last_cmd[c] & 0xf0) != 0x40) { c 191 drivers/scsi/atp870u.c dev->last_cmd[c] = 0xff; c 195 drivers/scsi/atp870u.c ((unsigned char *) &adrcnt)[2] = atp_readb_io(dev, c, 0x12); c 196 drivers/scsi/atp870u.c ((unsigned char *) &adrcnt)[1] = atp_readb_io(dev, c, 0x13); c 197 drivers/scsi/atp870u.c ((unsigned char *) &adrcnt)[0] = atp_readb_io(dev, c, 0x14); c 198 drivers/scsi/atp870u.c if (dev->id[c][target_id].last_len != adrcnt) { c 199 drivers/scsi/atp870u.c k = dev->id[c][target_id].last_len; c 201 drivers/scsi/atp870u.c dev->id[c][target_id].tran_len = k; c 202 drivers/scsi/atp870u.c dev->id[c][target_id].last_len = adrcnt; c 205 drivers/scsi/atp870u.c printk("dev->id[c][target_id].last_len = %d dev->id[c][target_id].tran_len = %d\n",dev->id[c][target_id].last_len,dev->id[c][target_id].tran_len); c 212 drivers/scsi/atp870u.c if (dev->wide_id[c] != 0) { c 213 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x1b, 0x01); c 214 drivers/scsi/atp870u.c while ((atp_readb_io(dev, c, 0x1b) & 0x01) != 0x01) c 215 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x1b, 0x01); c 221 drivers/scsi/atp870u.c if (((dev->quhd[c] != dev->quend[c]) || (dev->last_cmd[c] != 0xff)) && c 222 drivers/scsi/atp870u.c (dev->in_snd[c] == 0)) { c 226 drivers/scsi/atp870u.c send_s870(dev,c); c 232 drivers/scsi/atp870u.c dev->in_int[c] = 0; c 240 drivers/scsi/atp870u.c dev->last_cmd[c] |= 0x40; c 241 drivers/scsi/atp870u.c dev->in_int[c] = 0; c 246 drivers/scsi/atp870u.c if ((dev->last_cmd[c] & 0xf0) != 0x40) { c 247 drivers/scsi/atp870u.c dev->last_cmd[c] = 0xff; c 250 drivers/scsi/atp870u.c ((unsigned char *) &adrcnt)[2] = atp_readb_io(dev, c, 0x12); c 251 drivers/scsi/atp870u.c ((unsigned char *) &adrcnt)[1] = atp_readb_io(dev, c, 0x13); c 252 drivers/scsi/atp870u.c ((unsigned char *) &adrcnt)[0] = atp_readb_io(dev, c, 0x14); c 253 drivers/scsi/atp870u.c k = dev->id[c][target_id].last_len; c 255 drivers/scsi/atp870u.c dev->id[c][target_id].tran_len = k; c 256 drivers/scsi/atp870u.c dev->id[c][target_id].last_len = adrcnt; c 257 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x10, 0x41); c 258 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, 0x08); c 259 drivers/scsi/atp870u.c dev->in_int[c] = 0; c 278 drivers/scsi/atp870u.c lun = atp_readb_io(dev, c, 0x1d) & 0x07; c 280 drivers/scsi/atp870u.c if ((dev->last_cmd[c] & 0xf0) != 0x40) { c 281 drivers/scsi/atp870u.c dev->last_cmd[c] = 0xff; c 288 drivers/scsi/atp870u.c ((unsigned char *) &adrcnt)[2] = atp_readb_io(dev, c, 0x12); c 289 drivers/scsi/atp870u.c ((unsigned char *) &adrcnt)[1] = atp_readb_io(dev, c, 0x13); c 290 drivers/scsi/atp870u.c ((unsigned char *) &adrcnt)[0] = atp_readb_io(dev, c, 0x14); c 291 drivers/scsi/atp870u.c k = dev->id[c][target_id].last_len; c 293 drivers/scsi/atp870u.c dev->id[c][target_id].tran_len = k; c 294 drivers/scsi/atp870u.c dev->id[c][target_id].last_len = adrcnt; c 295 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, 0x08); c 296 drivers/scsi/atp870u.c dev->in_int[c] = 0; c 302 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x10, 0x46); c 303 drivers/scsi/atp870u.c dev->id[c][target_id].dirct 
= 0x00; c 304 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x12, 0x00); c 305 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x13, 0x00); c 306 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x14, 0x00); c 307 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, 0x08); c 308 drivers/scsi/atp870u.c dev->in_int[c] = 0; c 312 drivers/scsi/atp870u.c if (dev->last_cmd[c] != 0xff) { c 313 drivers/scsi/atp870u.c dev->last_cmd[c] |= 0x40; c 319 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x10, 0x45); c 321 drivers/scsi/atp870u.c target_id = atp_readb_io(dev, c, 0x16); c 331 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x10, 0x45); c 332 drivers/scsi/atp870u.c workreq = dev->id[c][target_id].curr_req; c 340 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x0f, lun); c 341 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x11, dev->id[c][target_id].devsp); c 342 drivers/scsi/atp870u.c adrcnt = dev->id[c][target_id].tran_len; c 343 drivers/scsi/atp870u.c k = dev->id[c][target_id].last_len; c 345 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x12, ((unsigned char *) &k)[2]); c 346 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x13, ((unsigned char *) &k)[1]); c 347 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x14, ((unsigned char *) &k)[0]); c 349 drivers/scsi/atp870u.c printk("k %x, k[0] 0x%x k[1] 0x%x k[2] 0x%x\n", k, atp_readb_io(dev, c, 0x14), atp_readb_io(dev, c, 0x13), atp_readb_io(dev, c, 0x12)); c 357 drivers/scsi/atp870u.c j |= dev->id[c][target_id].dirct; c 358 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x15, j); c 359 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x16, 0x80); c 363 drivers/scsi/atp870u.c i = atp_readb_pci(dev, c, 1) & 0xf3; c 368 drivers/scsi/atp870u.c atp_writeb_pci(dev, c, 1, i); c 386 drivers/scsi/atp870u.c if ((id & dev->wide_id[c]) != 0) { c 389 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x1b, j); c 390 drivers/scsi/atp870u.c while ((atp_readb_io(dev, c, 0x1b) & 0x01) != j) c 391 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x1b, j); c 392 drivers/scsi/atp870u.c if (dev->id[c][target_id].last_len == 0) { c 393 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, 0x08); c 394 drivers/scsi/atp870u.c dev->in_int[c] = 0; c 403 drivers/scsi/atp870u.c prd = dev->id[c][target_id].prd_pos; c 416 drivers/scsi/atp870u.c dev->id[c][target_id].prd_pos = prd; c 419 drivers/scsi/atp870u.c dev->id[c][target_id].prdaddr += 0x08; c 422 drivers/scsi/atp870u.c dev->id[c][target_id].prd_pos = prd; c 426 drivers/scsi/atp870u.c atp_writel_pci(dev, c, 0x04, dev->id[c][target_id].prdaddr); c 428 drivers/scsi/atp870u.c printk("dev->id[%d][%d].prdaddr 0x%8x\n", c, target_id, dev->id[c][target_id].prdaddr); c 431 drivers/scsi/atp870u.c atp_writeb_pci(dev, c, 2, 0x06); c 432 drivers/scsi/atp870u.c atp_writeb_pci(dev, c, 2, 0x00); c 437 drivers/scsi/atp870u.c if (dev->id[c][target_id].dirct != 0) { c 438 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, 0x08); c 439 drivers/scsi/atp870u.c atp_writeb_pci(dev, c, 0, 0x01); c 440 drivers/scsi/atp870u.c dev->in_int[c] = 0; c 446 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, 0x08); c 447 drivers/scsi/atp870u.c atp_writeb_pci(dev, c, 0, 0x09); c 448 drivers/scsi/atp870u.c dev->in_int[c] = 0; c 459 drivers/scsi/atp870u.c workreq = dev->id[c][target_id].curr_req; c 462 drivers/scsi/atp870u.c if ((dev->last_cmd[c] & 0xf0) != 0x40) { c 463 drivers/scsi/atp870u.c dev->last_cmd[c] = 0xff; c 466 drivers/scsi/atp870u.c workreq->result = atp_readb_io(dev, c, 0x0f); c 467 drivers/scsi/atp870u.c if (((dev->r1f[c][target_id] & 0x10) != 0) && is885(dev)) { c 491 
drivers/scsi/atp870u.c dev->id[c][target_id].curr_req = NULL; c 492 drivers/scsi/atp870u.c dev->working[c]--; c 497 drivers/scsi/atp870u.c if (dev->wide_id[c] != 0) { c 498 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x1b, 0x01); c 499 drivers/scsi/atp870u.c while ((atp_readb_io(dev, c, 0x1b) & 0x01) != 0x01) c 500 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x1b, 0x01); c 506 drivers/scsi/atp870u.c if (((dev->last_cmd[c] != 0xff) || (dev->quhd[c] != dev->quend[c])) && c 507 drivers/scsi/atp870u.c (dev->in_snd[c] == 0)) { c 511 drivers/scsi/atp870u.c send_s870(dev,c); c 514 drivers/scsi/atp870u.c dev->in_int[c] = 0; c 517 drivers/scsi/atp870u.c if ((dev->last_cmd[c] & 0xf0) != 0x40) { c 518 drivers/scsi/atp870u.c dev->last_cmd[c] = 0xff; c 525 drivers/scsi/atp870u.c atp_writel_pci(dev, c, 4, dev->id[c][target_id].prdaddr); c 526 drivers/scsi/atp870u.c atp_writeb_pci(dev, c, 2, 0x06); c 527 drivers/scsi/atp870u.c atp_writeb_pci(dev, c, 2, 0x00); c 528 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x10, 0x41); c 530 drivers/scsi/atp870u.c k = dev->id[c][target_id].last_len; c 531 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x12, ((unsigned char *) (&k))[2]); c 532 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x13, ((unsigned char *) (&k))[1]); c 533 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x14, ((unsigned char *) (&k))[0]); c 534 drivers/scsi/atp870u.c dev->id[c][target_id].dirct = 0x00; c 536 drivers/scsi/atp870u.c dev->id[c][target_id].dirct = 0x00; c 538 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, 0x08); c 539 drivers/scsi/atp870u.c atp_writeb_pci(dev, c, 0, 0x09); c 540 drivers/scsi/atp870u.c dev->in_int[c] = 0; c 544 drivers/scsi/atp870u.c atp_writel_pci(dev, c, 4, dev->id[c][target_id].prdaddr); c 545 drivers/scsi/atp870u.c atp_writeb_pci(dev, c, 2, 0x06); c 546 drivers/scsi/atp870u.c atp_writeb_pci(dev, c, 2, 0x00); c 547 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x10, 0x41); c 549 drivers/scsi/atp870u.c k = dev->id[c][target_id].last_len; c 550 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x12, ((unsigned char *) (&k))[2]); c 551 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x13, ((unsigned char *) (&k))[1]); c 552 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x14, ((unsigned char *) (&k))[0]); c 554 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x15, atp_readb_io(dev, c, 0x15) | 0x20); c 555 drivers/scsi/atp870u.c dev->id[c][target_id].dirct = 0x20; c 556 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, 0x08); c 557 drivers/scsi/atp870u.c atp_writeb_pci(dev, c, 0, 0x01); c 558 drivers/scsi/atp870u.c dev->in_int[c] = 0; c 562 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x10, 0x30); c 564 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x10, 0x46); c 565 drivers/scsi/atp870u.c dev->id[c][target_id].dirct = 0x00; c 566 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x12, 0x00); c 567 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x13, 0x00); c 568 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x14, 0x00); c 569 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, 0x08); c 571 drivers/scsi/atp870u.c dev->in_int[c] = 0; c 585 drivers/scsi/atp870u.c unsigned char c; c 590 drivers/scsi/atp870u.c c = scmd_channel(req_p); c 614 drivers/scsi/atp870u.c if ((m & dev->active_id[c]) == 0) { c 634 drivers/scsi/atp870u.c dev->quend[c]++; c 635 drivers/scsi/atp870u.c if (dev->quend[c] >= qcnt) { c 636 drivers/scsi/atp870u.c dev->quend[c] = 0; c 642 drivers/scsi/atp870u.c if (dev->quhd[c] == dev->quend[c]) { c 643 drivers/scsi/atp870u.c if (dev->quend[c] == 0) { c 644 drivers/scsi/atp870u.c 
dev->quend[c] = qcnt; c 649 drivers/scsi/atp870u.c dev->quend[c]--; c 654 drivers/scsi/atp870u.c dev->quereq[c][dev->quend[c]] = req_p; c 656 drivers/scsi/atp870u.c printk("dev->ioport[c] = %x atp_readb_io(dev, c, 0x1c) = %x dev->in_int[%d] = %d dev->in_snd[%d] = %d\n",dev->ioport[c],atp_readb_io(dev, c, 0x1c),c,dev->in_int[c],c,dev->in_snd[c]); c 658 drivers/scsi/atp870u.c if ((atp_readb_io(dev, c, 0x1c) == 0) && (dev->in_int[c] == 0) && (dev->in_snd[c] == 0)) { c 662 drivers/scsi/atp870u.c send_s870(dev,c); c 681 drivers/scsi/atp870u.c static void send_s870(struct atp_unit *dev,unsigned char c) c 691 drivers/scsi/atp870u.c if (dev->in_snd[c] != 0) { c 700 drivers/scsi/atp870u.c dev->in_snd[c] = 1; c 701 drivers/scsi/atp870u.c if ((dev->last_cmd[c] != 0xff) && ((dev->last_cmd[c] & 0x40) != 0)) { c 702 drivers/scsi/atp870u.c dev->last_cmd[c] &= 0x0f; c 703 drivers/scsi/atp870u.c workreq = dev->id[c][dev->last_cmd[c]].curr_req; c 705 drivers/scsi/atp870u.c dev->last_cmd[c] = 0xff; c 706 drivers/scsi/atp870u.c if (dev->quhd[c] == dev->quend[c]) { c 707 drivers/scsi/atp870u.c dev->in_snd[c] = 0; c 713 drivers/scsi/atp870u.c if ((dev->last_cmd[c] != 0xff) && (dev->working[c] != 0)) { c 714 drivers/scsi/atp870u.c dev->in_snd[c] = 0; c 717 drivers/scsi/atp870u.c dev->working[c]++; c 718 drivers/scsi/atp870u.c j = dev->quhd[c]; c 719 drivers/scsi/atp870u.c dev->quhd[c]++; c 720 drivers/scsi/atp870u.c if (dev->quhd[c] >= qcnt) c 721 drivers/scsi/atp870u.c dev->quhd[c] = 0; c 722 drivers/scsi/atp870u.c workreq = dev->quereq[c][dev->quhd[c]]; c 723 drivers/scsi/atp870u.c if (dev->id[c][scmd_id(workreq)].curr_req != NULL) { c 724 drivers/scsi/atp870u.c dev->quhd[c] = j; c 725 drivers/scsi/atp870u.c dev->working[c]--; c 726 drivers/scsi/atp870u.c dev->in_snd[c] = 0; c 729 drivers/scsi/atp870u.c dev->id[c][scmd_id(workreq)].curr_req = workreq; c 730 drivers/scsi/atp870u.c dev->last_cmd[c] = scmd_id(workreq); c 732 drivers/scsi/atp870u.c if ((atp_readb_io(dev, c, 0x1f) & 0xb0) != 0 || atp_readb_io(dev, c, 0x1c) != 0) { c 736 drivers/scsi/atp870u.c dev->last_cmd[c] |= 0x40; c 737 drivers/scsi/atp870u.c dev->in_snd[c] = 0; c 753 drivers/scsi/atp870u.c dev->r1f[c][scmd_id(workreq)] = 0; c 772 drivers/scsi/atp870u.c if ((w & dev->wide_id[c]) != 0) { c 775 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x1b, j); c 776 drivers/scsi/atp870u.c while ((atp_readb_io(dev, c, 0x1b) & 0x01) != j) { c 777 drivers/scsi/atp870u.c atp_writeb_pci(dev, c, 0x1b, j); c 786 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x00, workreq->cmd_len); c 787 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x01, 0x2c); c 789 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x02, 0x7f); c 791 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x02, 0xcf); c 793 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x03 + i, workreq->cmnd[i]); c 794 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x0f, workreq->device->lun); c 798 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x11, dev->id[c][target_id].devsp); c 800 drivers/scsi/atp870u.c printk("dev->id[%d][%d].devsp = %2x\n",c,target_id,dev->id[c][target_id].devsp); c 807 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x12, ((unsigned char *) (&l))[2]); c 808 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x13, ((unsigned char *) (&l))[1]); c 809 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x14, ((unsigned char *) (&l))[0]); c 811 drivers/scsi/atp870u.c dev->id[c][j].last_len = l; c 812 drivers/scsi/atp870u.c dev->id[c][j].tran_len = 0; c 814 drivers/scsi/atp870u.c printk("dev->id[%2d][%2d].last_len = 
%d\n",c,j,dev->id[c][j].last_len); c 826 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x15, j | 0x20); c 828 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x15, j); c 829 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x16, atp_readb_io(dev, c, 0x16) | 0x80); c 830 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x16, 0x80); c 831 drivers/scsi/atp870u.c dev->id[c][target_id].dirct = 0; c 833 drivers/scsi/atp870u.c if (atp_readb_io(dev, c, 0x1c) == 0) { c 837 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, 0x08); c 839 drivers/scsi/atp870u.c dev->last_cmd[c] |= 0x40; c 840 drivers/scsi/atp870u.c dev->in_snd[c] = 0; c 843 drivers/scsi/atp870u.c prd = dev->id[c][target_id].prd_table; c 844 drivers/scsi/atp870u.c dev->id[c][target_id].prd_pos = prd; c 880 drivers/scsi/atp870u.c printk("send_s870: prdaddr_2 0x%8x target_id %d\n", dev->id[c][target_id].prdaddr,target_id); c 882 drivers/scsi/atp870u.c dev->id[c][target_id].prdaddr = dev->id[c][target_id].prd_bus; c 883 drivers/scsi/atp870u.c atp_writel_pci(dev, c, 4, dev->id[c][target_id].prdaddr); c 884 drivers/scsi/atp870u.c atp_writeb_pci(dev, c, 2, 0x06); c 885 drivers/scsi/atp870u.c atp_writeb_pci(dev, c, 2, 0x00); c 887 drivers/scsi/atp870u.c j = atp_readb_pci(dev, c, 1) & 0xf3; c 892 drivers/scsi/atp870u.c atp_writeb_pci(dev, c, 1, j); c 906 drivers/scsi/atp870u.c dev->id[c][target_id].dirct = 0x20; c 907 drivers/scsi/atp870u.c if (atp_readb_io(dev, c, 0x1c) == 0) { c 908 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, 0x08); c 909 drivers/scsi/atp870u.c atp_writeb_pci(dev, c, 0, 0x01); c 914 drivers/scsi/atp870u.c dev->last_cmd[c] |= 0x40; c 916 drivers/scsi/atp870u.c dev->in_snd[c] = 0; c 919 drivers/scsi/atp870u.c if (atp_readb_io(dev, c, 0x1c) == 0) { c 920 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, 0x08); c 921 drivers/scsi/atp870u.c atp_writeb_pci(dev, c, 0, 0x09); c 926 drivers/scsi/atp870u.c dev->last_cmd[c] |= 0x40; c 928 drivers/scsi/atp870u.c dev->in_snd[c] = 0; c 1205 drivers/scsi/atp870u.c int c,k; c 1206 drivers/scsi/atp870u.c for(c=0;c < 2;c++) { c 1208 drivers/scsi/atp870u.c atp_dev->id[c][k].prd_table = dma_alloc_coherent(&atp_dev->pdev->dev, 1024, &(atp_dev->id[c][k].prd_bus), GFP_KERNEL); c 1209 drivers/scsi/atp870u.c if (!atp_dev->id[c][k].prd_table) { c 1214 drivers/scsi/atp870u.c atp_dev->id[c][k].prdaddr = atp_dev->id[c][k].prd_bus; c 1215 drivers/scsi/atp870u.c atp_dev->id[c][k].devsp=0x20; c 1216 drivers/scsi/atp870u.c atp_dev->id[c][k].devtype = 0x7f; c 1217 drivers/scsi/atp870u.c atp_dev->id[c][k].curr_req = NULL; c 1220 drivers/scsi/atp870u.c atp_dev->active_id[c] = 0; c 1221 drivers/scsi/atp870u.c atp_dev->wide_id[c] = 0; c 1222 drivers/scsi/atp870u.c atp_dev->host_id[c] = 0x07; c 1223 drivers/scsi/atp870u.c atp_dev->quhd[c] = 0; c 1224 drivers/scsi/atp870u.c atp_dev->quend[c] = 0; c 1225 drivers/scsi/atp870u.c atp_dev->last_cmd[c] = 0xff; c 1226 drivers/scsi/atp870u.c atp_dev->in_snd[c] = 0; c 1227 drivers/scsi/atp870u.c atp_dev->in_int[c] = 0; c 1230 drivers/scsi/atp870u.c atp_dev->quereq[c][k] = NULL; c 1233 drivers/scsi/atp870u.c atp_dev->id[c][k].curr_req = NULL; c 1234 drivers/scsi/atp870u.c atp_dev->sp[c][k] = 0x04; c 1240 drivers/scsi/atp870u.c static void atp_set_host_id(struct atp_unit *atp, u8 c, u8 host_id) c 1242 drivers/scsi/atp870u.c atp_writeb_io(atp, c, 0, host_id | 0x08); c 1243 drivers/scsi/atp870u.c atp_writeb_io(atp, c, 0x18, 0); c 1244 drivers/scsi/atp870u.c while ((atp_readb_io(atp, c, 0x1f) & 0x80) == 0) c 1246 drivers/scsi/atp870u.c atp_readb_io(atp, c, 0x17); c 1247 
drivers/scsi/atp870u.c atp_writeb_io(atp, c, 1, 8); c 1248 drivers/scsi/atp870u.c atp_writeb_io(atp, c, 2, 0x7f); c 1249 drivers/scsi/atp870u.c atp_writeb_io(atp, c, 0x11, 0x20); c 1392 drivers/scsi/atp870u.c unsigned char k, m, c; c 1404 drivers/scsi/atp870u.c c = atp_readb_base(atpdev, 0x29); c 1405 drivers/scsi/atp870u.c atp_writeb_base(atpdev, 0x29, c | 0x04); c 1425 drivers/scsi/atp870u.c c = atp_readb_base(atpdev, 0x29); c 1426 drivers/scsi/atp870u.c atp_writeb_base(atpdev, 0x29, c & 0xfb); c 1427 drivers/scsi/atp870u.c for (c = 0; c < 2; c++) { c 1428 drivers/scsi/atp870u.c atpdev->ultra_map[c] = 0; c 1429 drivers/scsi/atp870u.c atpdev->async[c] = 0; c 1432 drivers/scsi/atp870u.c if (atpdev->sp[c][k] > 1) c 1433 drivers/scsi/atp870u.c atpdev->ultra_map[c] |= n; c 1435 drivers/scsi/atp870u.c if (atpdev->sp[c][k] == 0) c 1436 drivers/scsi/atp870u.c atpdev->async[c] |= n; c 1438 drivers/scsi/atp870u.c atpdev->async[c] = ~(atpdev->async[c]); c 1440 drivers/scsi/atp870u.c if (atpdev->global_map[c] == 0) { c 1441 drivers/scsi/atp870u.c k = setupdata[c][1]; c 1443 drivers/scsi/atp870u.c atpdev->global_map[c] |= 0x20; c 1445 drivers/scsi/atp870u.c atpdev->global_map[c] |= k; c 1446 drivers/scsi/atp870u.c if ((setupdata[c][2] & 0x04) != 0) c 1447 drivers/scsi/atp870u.c atpdev->global_map[c] |= 0x08; c 1448 drivers/scsi/atp870u.c atpdev->host_id[c] = setupdata[c][0] & 0x07; c 1587 drivers/scsi/atp870u.c unsigned char j, k, c; c 1594 drivers/scsi/atp870u.c c = scmd_channel(SCpnt); c 1595 drivers/scsi/atp870u.c printk(" atp870u: abort Channel = %x \n", c); c 1596 drivers/scsi/atp870u.c printk("working=%x last_cmd=%x ", dev->working[c], dev->last_cmd[c]); c 1597 drivers/scsi/atp870u.c printk(" quhdu=%x quendu=%x ", dev->quhd[c], dev->quend[c]); c 1599 drivers/scsi/atp870u.c printk(" r%2x=%2x", j, atp_readb_io(dev, c, j)); c 1601 drivers/scsi/atp870u.c printk(" r1c=%2x", atp_readb_io(dev, c, 0x1c)); c 1602 drivers/scsi/atp870u.c printk(" r1f=%2x in_snd=%2x ", atp_readb_io(dev, c, 0x1f), dev->in_snd[c]); c 1603 drivers/scsi/atp870u.c printk(" d00=%2x", atp_readb_pci(dev, c, 0x00)); c 1604 drivers/scsi/atp870u.c printk(" d02=%2x", atp_readb_pci(dev, c, 0x02)); c 1606 drivers/scsi/atp870u.c if (dev->id[c][j].curr_req != NULL) { c 1607 drivers/scsi/atp870u.c workrequ = dev->id[c][j].curr_req; c 1612 drivers/scsi/atp870u.c printk(" last_lenu= %x ",(unsigned int)dev->id[c][j].last_len); c 1712 drivers/scsi/atp870u.c static void atp_is(struct atp_unit *dev, unsigned char c, bool wide_chip, unsigned char lvdmode) c 1732 drivers/scsi/atp870u.c if ((m & dev->active_id[c]) != 0) { c 1735 drivers/scsi/atp870u.c if (i == dev->host_id[c]) { c 1736 drivers/scsi/atp870u.c printk(KERN_INFO " ID: %2d Host Adapter\n", dev->host_id[c]); c 1739 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x1b, wide_chip ? 
0x01 : 0x00); c 1740 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 1, 0x08); c 1741 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 2, 0x7f); c 1742 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 3, satn[0]); c 1743 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 4, satn[1]); c 1744 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 5, satn[2]); c 1745 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 6, satn[3]); c 1746 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 7, satn[4]); c 1747 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 8, satn[5]); c 1748 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x0f, 0); c 1749 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x11, dev->id[c][i].devsp); c 1750 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x12, 0); c 1751 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x13, satn[6]); c 1752 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x14, satn[7]); c 1757 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x15, j); c 1758 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, satn[8]); c 1760 drivers/scsi/atp870u.c while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) c 1763 drivers/scsi/atp870u.c if (atp_readb_io(dev, c, 0x17) != 0x11 && atp_readb_io(dev, c, 0x17) != 0x8e) c 1766 drivers/scsi/atp870u.c while (atp_readb_io(dev, c, 0x17) != 0x8e) c 1769 drivers/scsi/atp870u.c dev->active_id[c] |= m; c 1771 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x10, 0x30); c 1773 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x14, 0x00); c 1775 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x04, 0x00); c 1778 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, 0x08); c 1780 drivers/scsi/atp870u.c while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) c 1783 drivers/scsi/atp870u.c j = atp_readb_io(dev, c, 0x17); c 1785 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x10, 0x41); c 1789 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 3, inqd[0]); c 1790 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 4, inqd[1]); c 1791 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 5, inqd[2]); c 1792 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 6, inqd[3]); c 1793 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 7, inqd[4]); c 1794 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 8, inqd[5]); c 1795 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x0f, 0); c 1796 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x11, dev->id[c][i].devsp); c 1797 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x12, 0); c 1798 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x13, inqd[6]); c 1799 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x14, inqd[7]); c 1800 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, inqd[8]); c 1802 drivers/scsi/atp870u.c while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) c 1805 drivers/scsi/atp870u.c if (atp_readb_io(dev, c, 0x17) != 0x11 && atp_readb_io(dev, c, 0x17) != 0x8e) c 1808 drivers/scsi/atp870u.c while (atp_readb_io(dev, c, 0x17) != 0x8e) c 1812 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x1b, 0x00); c 1814 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, 0x08); c 1817 drivers/scsi/atp870u.c k = atp_readb_io(dev, c, 0x1f); c 1819 drivers/scsi/atp870u.c mbuf[j++] = atp_readb_io(dev, c, 0x19); c 1825 drivers/scsi/atp870u.c j = atp_readb_io(dev, c, 0x17); c 1829 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x10, 0x46); c 1830 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x12, 0); c 1831 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x13, 0); c 1832 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x14, 0); c 1833 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, 0x08); c 1835 drivers/scsi/atp870u.c while ((atp_readb_io(dev, c, 0x1f) & 
0x80) == 0x00) c 1838 drivers/scsi/atp870u.c if (atp_readb_io(dev, c, 0x17) != 0x16) c 1844 drivers/scsi/atp870u.c dev->id[c][i].devtype = mbuf[0]; c 1853 drivers/scsi/atp870u.c if ((i < 8) && ((dev->global_map[c] & 0x20) == 0)) c 1856 drivers/scsi/atp870u.c if ((dev->global_map[c] & 0x20) == 0) c 1862 drivers/scsi/atp870u.c if (dev->sp[c][i] != 0x04) // force u2 c 1867 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x1b, 0x01); c 1868 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 3, satn[0]); c 1869 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 4, satn[1]); c 1870 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 5, satn[2]); c 1871 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 6, satn[3]); c 1872 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 7, satn[4]); c 1873 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 8, satn[5]); c 1874 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x0f, 0); c 1875 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x11, dev->id[c][i].devsp); c 1876 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x12, 0); c 1877 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x13, satn[6]); c 1878 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x14, satn[7]); c 1879 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, satn[8]); c 1881 drivers/scsi/atp870u.c while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) c 1884 drivers/scsi/atp870u.c if (atp_readb_io(dev, c, 0x17) != 0x11 && atp_readb_io(dev, c, 0x17) != 0x8e) c 1887 drivers/scsi/atp870u.c while (atp_readb_io(dev, c, 0x17) != 0x8e) c 1892 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x14, 0x09); c 1893 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, 0x20); c 1895 drivers/scsi/atp870u.c while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0) { c 1896 drivers/scsi/atp870u.c if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0) c 1897 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x19, u3[j++]); c 1901 drivers/scsi/atp870u.c while ((atp_readb_io(dev, c, 0x17) & 0x80) == 0x00) c 1904 drivers/scsi/atp870u.c j = atp_readb_io(dev, c, 0x17) & 0x0f; c 1916 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, 0x20); c 1917 drivers/scsi/atp870u.c while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0) { c 1918 drivers/scsi/atp870u.c if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0) c 1919 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x19, 0); c 1922 drivers/scsi/atp870u.c j = atp_readb_io(dev, c, 0x17) & 0x0f; c 1934 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x14, 0x09); c 1935 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, 0x20); c 1938 drivers/scsi/atp870u.c j = atp_readb_io(dev, c, 0x1f); c 1940 drivers/scsi/atp870u.c mbuf[k++] = atp_readb_io(dev, c, 0x19); c 1946 drivers/scsi/atp870u.c j = atp_readb_io(dev, c, 0x17) & 0x0f; c 1958 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x10, 0x30); c 1959 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x14, 0x00); c 1960 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, 0x08); c 1962 drivers/scsi/atp870u.c while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00); c 1964 drivers/scsi/atp870u.c j = atp_readb_io(dev, c, 0x17); c 1983 drivers/scsi/atp870u.c dev->wide_id[c] |= m; c 1984 drivers/scsi/atp870u.c dev->id[c][i].devsp = 0xce; c 1986 drivers/scsi/atp870u.c printk("dev->id[%2d][%2d].devsp = %2x\n",c,i,dev->id[c][i].devsp); c 1991 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x1b, 0x01); c 1992 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 3, satn[0]); c 1993 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 4, satn[1]); c 1994 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 5, satn[2]); c 1995 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 6, satn[3]); 
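The atp870u entries around this point repeat one idiom: write a command or message byte with atp_writeb_io() and then spin on status register 0x1f until bit 0x80 is raised. The following self-contained C sketch only imitates that poll-until-ready shape; read_status() is a stand-in simulation, not the driver's port I/O.

    #include <stdint.h>
    #include <stdio.h>

    /* Simulated status register: in the listed driver code this would be
     * atp_readb_io(dev, c, 0x1f). Here a counter pretends the hardware
     * raises the 0x80 "done" bit after a few polls. */
    static uint8_t read_status(void)
    {
        static int polls;
        return (++polls >= 3) ? 0x80 : 0x00;
    }

    int main(void)
    {
        int spins = 0;

        /* Same shape as: while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) ; */
        while ((read_status() & 0x80) == 0x00)
            spins++;

        printf("status bit 0x80 set after %d extra polls\n", spins);
        return 0;
    }

In the real driver the loop body is empty and the read hits a device register, so the loop simply busy-waits for the controller to signal completion before the next register write in the sequence.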
c 1996 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 7, satn[4]); c 1997 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 8, satn[5]); c 1998 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x0f, 0); c 1999 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x11, dev->id[c][i].devsp); c 2000 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x12, 0); c 2001 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x13, satn[6]); c 2002 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x14, satn[7]); c 2003 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, satn[8]); c 2005 drivers/scsi/atp870u.c while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) c 2008 drivers/scsi/atp870u.c if (atp_readb_io(dev, c, 0x17) != 0x11 && atp_readb_io(dev, c, 0x17) != 0x8e) c 2011 drivers/scsi/atp870u.c while (atp_readb_io(dev, c, 0x17) != 0x8e) c 2016 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x14, 0x05); c 2017 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, 0x20); c 2019 drivers/scsi/atp870u.c while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0) { c 2020 drivers/scsi/atp870u.c if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0) c 2021 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x19, wide[j++]); c 2025 drivers/scsi/atp870u.c while ((atp_readb_io(dev, c, 0x17) & 0x80) == 0x00) c 2028 drivers/scsi/atp870u.c j = atp_readb_io(dev, c, 0x17) & 0x0f; c 2040 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, 0x20); c 2041 drivers/scsi/atp870u.c while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0) { c 2042 drivers/scsi/atp870u.c if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0) c 2043 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x19, 0); c 2046 drivers/scsi/atp870u.c j = atp_readb_io(dev, c, 0x17) & 0x0f; c 2058 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x14, 0xff); c 2059 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, 0x20); c 2062 drivers/scsi/atp870u.c j = atp_readb_io(dev, c, 0x1f); c 2064 drivers/scsi/atp870u.c mbuf[k++] = atp_readb_io(dev, c, 0x19); c 2070 drivers/scsi/atp870u.c j = atp_readb_io(dev, c, 0x17) & 0x0f; c 2082 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x10, 0x30); c 2083 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x14, 0x00); c 2084 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, 0x08); c 2086 drivers/scsi/atp870u.c while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) c 2089 drivers/scsi/atp870u.c j = atp_readb_io(dev, c, 0x17); c 2110 drivers/scsi/atp870u.c dev->wide_id[c] |= m; c 2112 drivers/scsi/atp870u.c if ((dev->id[c][i].devtype == 0x00) || (dev->id[c][i].devtype == 0x07) || ((dev->id[c][i].devtype == 0x05) && ((n & 0x10) != 0))) { c 2115 drivers/scsi/atp870u.c if ((dev->async[c] & m) != 0) { c 2121 drivers/scsi/atp870u.c if ((!is885(dev) && !is880(dev)) || (dev->sp[c][i] == 0x02)) { c 2125 drivers/scsi/atp870u.c if (dev->sp[c][i] >= 0x03) { c 2131 drivers/scsi/atp870u.c if ((m & dev->wide_id[c]) != 0) { c 2134 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x1b, j); c 2135 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 3, satn[0]); c 2136 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 4, satn[1]); c 2137 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 5, satn[2]); c 2138 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 6, satn[3]); c 2139 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 7, satn[4]); c 2140 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 8, satn[5]); c 2141 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x0f, 0); c 2142 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x11, dev->id[c][i].devsp); c 2143 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x12, 0); c 2144 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x13, satn[6]); c 2145 
drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x14, satn[7]); c 2146 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, satn[8]); c 2148 drivers/scsi/atp870u.c while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) c 2151 drivers/scsi/atp870u.c if (atp_readb_io(dev, c, 0x17) != 0x11 && atp_readb_io(dev, c, 0x17) != 0x8e) c 2154 drivers/scsi/atp870u.c while (atp_readb_io(dev, c, 0x17) != 0x8e) c 2159 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x14, 0x06); c 2160 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, 0x20); c 2162 drivers/scsi/atp870u.c while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0) { c 2163 drivers/scsi/atp870u.c if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0) { c 2164 drivers/scsi/atp870u.c if ((m & dev->wide_id[c]) != 0) { c 2166 drivers/scsi/atp870u.c if ((m & dev->ultra_map[c]) != 0) { c 2167 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x19, synuw[j++]); c 2169 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x19, synw[j++]); c 2172 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x19, synw_870[j++]); c 2174 drivers/scsi/atp870u.c if ((m & dev->ultra_map[c]) != 0) { c 2175 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x19, synu[j++]); c 2177 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x19, synn[j++]); c 2183 drivers/scsi/atp870u.c while ((atp_readb_io(dev, c, 0x17) & 0x80) == 0x00) c 2186 drivers/scsi/atp870u.c j = atp_readb_io(dev, c, 0x17) & 0x0f; c 2198 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, 0x20); c 2199 drivers/scsi/atp870u.c while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) { c 2200 drivers/scsi/atp870u.c if ((atp_readb_io(dev, c, 0x1f) & 0x01) != 0x00) c 2201 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x19, 0x00); c 2204 drivers/scsi/atp870u.c j = atp_readb_io(dev, c, 0x17); c 2221 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x14, 0x06); c 2223 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x14, 0xff); c 2224 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, 0x20); c 2227 drivers/scsi/atp870u.c j = atp_readb_io(dev, c, 0x1f); c 2229 drivers/scsi/atp870u.c mbuf[k++] = atp_readb_io(dev, c, 0x19); c 2236 drivers/scsi/atp870u.c while ((atp_readb_io(dev, c, 0x17) & 0x80) == 0x00); c 2238 drivers/scsi/atp870u.c j = atp_readb_io(dev, c, 0x17); c 2254 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x10, 0x30); c 2256 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x14, 0x00); c 2257 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x18, 0x08); c 2259 drivers/scsi/atp870u.c while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00) c 2262 drivers/scsi/atp870u.c j = atp_readb_io(dev, c, 0x17); c 2287 drivers/scsi/atp870u.c dev->id[c][i].devsp = mbuf[4]; c 2311 drivers/scsi/atp870u.c dev->id[c][i].devsp = (dev->id[c][i].devsp & 0x0f) | j; c 2313 drivers/scsi/atp870u.c printk("dev->id[%2d][%2d].devsp = %2x\n",c,i,dev->id[c][i].devsp); c 309 drivers/scsi/constants.c #define SENSE_CODE(c, s) {c, sizeof(s)}, c 315 drivers/scsi/constants.c #define SENSE_CODE(c, s) s "\0" c 532 drivers/scsi/csiostor/csio_hw.c uint32_t i, c, left, val, offset = addr & 0xff; c 547 drivers/scsi/csiostor/csio_hw.c for (left = n; left; left -= c) { c 548 drivers/scsi/csiostor/csio_hw.c c = min(left, 4U); c 549 drivers/scsi/csiostor/csio_hw.c for (val = 0, i = 0; i < c; ++i) c 552 drivers/scsi/csiostor/csio_hw.c ret = csio_hw_sf1_write(hw, c, c != left, 1, val); c 2246 drivers/scsi/csiostor/csio_hw.c int k, int c) c 2255 drivers/scsi/csiostor/csio_hw.c if (k > c) { c 2265 drivers/scsi/csiostor/csio_hw.c FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c), c 2266 drivers/scsi/csiostor/csio_hw.c 
FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason, c 2370 drivers/scsi/csiostor/csio_hw.c uint32_t d, c, k; c 2373 drivers/scsi/csiostor/csio_hw.c c = be32_to_cpu(card_fw->fw_ver); c 2383 drivers/scsi/csiostor/csio_hw.c FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c), c 2384 drivers/scsi/csiostor/csio_hw.c FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), c 329 drivers/scsi/cxlflash/common.h int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t c, res_hndl_t r, u8 mode); c 734 drivers/scsi/fnic/fcpio.h u8 *c = ((u8 *) fw_req) + sizeof(struct fcpio_fw_req) - 1; c 737 drivers/scsi/fnic/fcpio.h *c |= 0x80; c 739 drivers/scsi/fnic/fcpio.h *c &= ~0x80; c 744 drivers/scsi/fnic/fcpio.h u8 *c = ((u8 *) fw_req) + sizeof(struct fcpio_fw_req) - 1; c 746 drivers/scsi/fnic/fcpio.h *color = *c >> 7; c 36 drivers/scsi/fnic/fnic_res.c struct vnic_fc_config *c = &fnic->config; c 43 drivers/scsi/fnic/fnic_res.c sizeof(c->m), &c->m); \ c 73 drivers/scsi/fnic/fnic_res.c c->wq_enet_desc_count = c 76 drivers/scsi/fnic/fnic_res.c c->wq_enet_desc_count)); c 77 drivers/scsi/fnic/fnic_res.c c->wq_enet_desc_count = ALIGN(c->wq_enet_desc_count, 16); c 79 drivers/scsi/fnic/fnic_res.c c->wq_copy_desc_count = c 82 drivers/scsi/fnic/fnic_res.c c->wq_copy_desc_count)); c 83 drivers/scsi/fnic/fnic_res.c c->wq_copy_desc_count = ALIGN(c->wq_copy_desc_count, 16); c 85 drivers/scsi/fnic/fnic_res.c c->rq_desc_count = c 88 drivers/scsi/fnic/fnic_res.c c->rq_desc_count)); c 89 drivers/scsi/fnic/fnic_res.c c->rq_desc_count = ALIGN(c->rq_desc_count, 16); c 91 drivers/scsi/fnic/fnic_res.c c->maxdatafieldsize = c 94 drivers/scsi/fnic/fnic_res.c c->maxdatafieldsize)); c 95 drivers/scsi/fnic/fnic_res.c c->ed_tov = c 98 drivers/scsi/fnic/fnic_res.c c->ed_tov)); c 100 drivers/scsi/fnic/fnic_res.c c->ra_tov = c 103 drivers/scsi/fnic/fnic_res.c c->ra_tov)); c 105 drivers/scsi/fnic/fnic_res.c c->flogi_retries = c 106 drivers/scsi/fnic/fnic_res.c min_t(u32, VNIC_FNIC_FLOGI_RETRIES_MAX, c->flogi_retries); c 108 drivers/scsi/fnic/fnic_res.c c->flogi_timeout = c 111 drivers/scsi/fnic/fnic_res.c c->flogi_timeout)); c 113 drivers/scsi/fnic/fnic_res.c c->plogi_retries = c 114 drivers/scsi/fnic/fnic_res.c min_t(u32, VNIC_FNIC_PLOGI_RETRIES_MAX, c->plogi_retries); c 116 drivers/scsi/fnic/fnic_res.c c->plogi_timeout = c 119 drivers/scsi/fnic/fnic_res.c c->plogi_timeout)); c 121 drivers/scsi/fnic/fnic_res.c c->io_throttle_count = c 124 drivers/scsi/fnic/fnic_res.c c->io_throttle_count)); c 126 drivers/scsi/fnic/fnic_res.c c->link_down_timeout = c 128 drivers/scsi/fnic/fnic_res.c c->link_down_timeout); c 130 drivers/scsi/fnic/fnic_res.c c->port_down_timeout = c 132 drivers/scsi/fnic/fnic_res.c c->port_down_timeout); c 134 drivers/scsi/fnic/fnic_res.c c->port_down_io_retries = c 136 drivers/scsi/fnic/fnic_res.c c->port_down_io_retries); c 138 drivers/scsi/fnic/fnic_res.c c->luns_per_tgt = c 141 drivers/scsi/fnic/fnic_res.c c->luns_per_tgt)); c 143 drivers/scsi/fnic/fnic_res.c c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer); c 144 drivers/scsi/fnic/fnic_res.c c->intr_timer_type = c->intr_timer_type; c 150 drivers/scsi/fnic/fnic_res.c c->wq_enet_desc_count, c->wq_copy_desc_count, c 151 drivers/scsi/fnic/fnic_res.c c->rq_desc_count); c 154 drivers/scsi/fnic/fnic_res.c c->node_wwn, c->port_wwn); c 157 drivers/scsi/fnic/fnic_res.c c->ed_tov, c->ra_tov); c 160 drivers/scsi/fnic/fnic_res.c c->maxdatafieldsize, c->intr_timer); c 163 drivers/scsi/fnic/fnic_res.c c->flags, c->luns_per_tgt); c 166 drivers/scsi/fnic/fnic_res.c 
c->flogi_retries, c->flogi_timeout); c 169 drivers/scsi/fnic/fnic_res.c c->plogi_retries, c->plogi_timeout); c 172 drivers/scsi/fnic/fnic_res.c c->io_throttle_count, c->link_down_timeout); c 175 drivers/scsi/fnic/fnic_res.c c->port_down_io_retries, c->port_down_timeout); c 3184 drivers/scsi/gdth.c int val = 0, c = *++cur_str; c 3186 drivers/scsi/gdth.c if (c == 'n' || c == 'N') c 3188 drivers/scsi/gdth.c else if (c == 'y' || c == 'Y') c 127 drivers/scsi/hisi_sas/hisi_sas.h #define HISI_SAS_RST_WORK_INIT(r, c) \ c 129 drivers/scsi/hisi_sas/hisi_sas.h .completion = &c, \ c 136 drivers/scsi/hisi_sas/hisi_sas.h DECLARE_COMPLETION_ONSTACK(c); \ c 137 drivers/scsi/hisi_sas/hisi_sas.h struct hisi_sas_rst r = HISI_SAS_RST_WORK_INIT(r, c) c 3199 drivers/scsi/hisi_sas/hisi_sas_main.c int c; c 3220 drivers/scsi/hisi_sas/hisi_sas_main.c for (c = 0; c < hisi_hba->queue_count; c++) { c 3221 drivers/scsi/hisi_sas/hisi_sas_main.c snprintf(name, 256, "%d", c); c 3223 drivers/scsi/hisi_sas/hisi_sas_main.c debugfs_create_file(name, 0400, dentry, &hisi_hba->cq[c], c 3720 drivers/scsi/hisi_sas/hisi_sas_main.c int p, c, d; c 3754 drivers/scsi/hisi_sas/hisi_sas_main.c for (c = 0; c < hisi_hba->queue_count; c++) { c 3755 drivers/scsi/hisi_sas/hisi_sas_main.c hisi_hba->debugfs_complete_hdr[c] = c 3758 drivers/scsi/hisi_sas/hisi_sas_main.c if (!hisi_hba->debugfs_complete_hdr[c]) c 263 drivers/scsi/hpsa.c static void cmd_free(struct ctlr_info *h, struct CommandList *c); c 265 drivers/scsi/hpsa.c static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c); c 268 drivers/scsi/hpsa.c static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, c 288 drivers/scsi/hpsa.c struct CommandList *c); c 290 drivers/scsi/hpsa.c struct CommandList *c); c 309 drivers/scsi/hpsa.c static inline void finish_cmd(struct CommandList *c); c 316 drivers/scsi/hpsa.c struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, c 327 drivers/scsi/hpsa.c static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c, c 343 drivers/scsi/hpsa.c static inline bool hpsa_is_cmd_idle(struct CommandList *c) c 345 drivers/scsi/hpsa.c return c->scsi_cmd == SCSI_CMD_IDLE; c 371 drivers/scsi/hpsa.c struct CommandList *c) c 376 drivers/scsi/hpsa.c if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo)) c 377 drivers/scsi/hpsa.c sense_len = sizeof(c->err_info->SenseInfo); c 379 drivers/scsi/hpsa.c sense_len = c->err_info->SenseLen; c 381 drivers/scsi/hpsa.c decode_sense_data(c->err_info->SenseInfo, sense_len, c 423 drivers/scsi/hpsa.c static int check_for_busy(struct ctlr_info *h, struct CommandList *c) c 425 drivers/scsi/hpsa.c if (c->err_info->CommandStatus != CMD_TARGET_STATUS || c 426 drivers/scsi/hpsa.c (c->err_info->ScsiStatus != SAM_STAT_BUSY && c 427 drivers/scsi/hpsa.c c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL)) c 1037 drivers/scsi/hpsa.c static void set_performant_mode(struct ctlr_info *h, struct CommandList *c, c 1041 drivers/scsi/hpsa.c c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); c 1044 drivers/scsi/hpsa.c c->Header.ReplyQueue = reply_queue; c 1049 drivers/scsi/hpsa.c struct CommandList *c, c 1052 drivers/scsi/hpsa.c struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; c 1065 drivers/scsi/hpsa.c c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) | c 1070 drivers/scsi/hpsa.c struct CommandList *c, c 1074 drivers/scsi/hpsa.c &h->ioaccel2_cmd_pool[c->cmdindex]; c 1085 drivers/scsi/hpsa.c c->busaddr |= h->ioaccel2_blockFetchTable[0]; c 1089 
drivers/scsi/hpsa.c struct CommandList *c, c 1092 drivers/scsi/hpsa.c struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; c 1105 drivers/scsi/hpsa.c c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]); c 1122 drivers/scsi/hpsa.c struct CommandList *c) c 1124 drivers/scsi/hpsa.c if (!is_firmware_flash_cmd(c->Request.CDB)) c 1131 drivers/scsi/hpsa.c struct CommandList *c) c 1133 drivers/scsi/hpsa.c if (is_firmware_flash_cmd(c->Request.CDB) && c 1139 drivers/scsi/hpsa.c struct CommandList *c, int reply_queue) c 1141 drivers/scsi/hpsa.c dial_down_lockup_detection_during_fw_flash(h, c); c 1143 drivers/scsi/hpsa.c if (c->device) c 1144 drivers/scsi/hpsa.c atomic_inc(&c->device->commands_outstanding); c 1147 drivers/scsi/hpsa.c switch (c->cmd_type) { c 1149 drivers/scsi/hpsa.c set_ioaccel1_performant_mode(h, c, reply_queue); c 1150 drivers/scsi/hpsa.c writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); c 1153 drivers/scsi/hpsa.c set_ioaccel2_performant_mode(h, c, reply_queue); c 1154 drivers/scsi/hpsa.c writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); c 1157 drivers/scsi/hpsa.c set_ioaccel2_tmf_performant_mode(h, c, reply_queue); c 1158 drivers/scsi/hpsa.c writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); c 1161 drivers/scsi/hpsa.c set_performant_mode(h, c, reply_queue); c 1162 drivers/scsi/hpsa.c h->access.submit_command(h, c); c 1166 drivers/scsi/hpsa.c static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c) c 1168 drivers/scsi/hpsa.c __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE); c 1821 drivers/scsi/hpsa.c struct CommandList *c = h->cmd_pool + i; c 1822 drivers/scsi/hpsa.c int refcount = atomic_inc_return(&c->refcount); c 1824 drivers/scsi/hpsa.c if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, c 1829 drivers/scsi/hpsa.c if (!hpsa_is_cmd_idle(c)) c 1834 drivers/scsi/hpsa.c cmd_free(h, c); c 2248 drivers/scsi/hpsa.c struct io_accel2_cmd *cp, struct CommandList *c) c 2254 drivers/scsi/hpsa.c chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex]; c 2281 drivers/scsi/hpsa.c struct CommandList *c) c 2287 drivers/scsi/hpsa.c chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; c 2288 drivers/scsi/hpsa.c chain_block = h->cmd_sg_list[c->cmdindex]; c 2291 drivers/scsi/hpsa.c (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries); c 2305 drivers/scsi/hpsa.c struct CommandList *c) c 2309 drivers/scsi/hpsa.c if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries) c 2312 drivers/scsi/hpsa.c chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; c 2323 drivers/scsi/hpsa.c struct CommandList *c, c 2439 drivers/scsi/hpsa.c struct CommandList *c) c 2441 drivers/scsi/hpsa.c struct hpsa_scsi_dev_t *dev = c->device; c 2448 drivers/scsi/hpsa.c c->scsi_cmd = SCSI_CMD_IDLE; c 2459 drivers/scsi/hpsa.c struct CommandList *c) c 2461 drivers/scsi/hpsa.c hpsa_cmd_resolve_events(h, c); c 2462 drivers/scsi/hpsa.c cmd_tagged_free(h, c); c 2466 drivers/scsi/hpsa.c struct CommandList *c, struct scsi_cmnd *cmd) c 2468 drivers/scsi/hpsa.c hpsa_cmd_resolve_and_free(h, c); c 2473 drivers/scsi/hpsa.c static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c) c 2475 drivers/scsi/hpsa.c INIT_WORK(&c->work, hpsa_command_resubmit_worker); c 2476 drivers/scsi/hpsa.c queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work); c 2480 drivers/scsi/hpsa.c struct CommandList *c, struct scsi_cmnd *cmd, c 2483 drivers/scsi/hpsa.c struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; c 2489 drivers/scsi/hpsa.c return hpsa_cmd_free_and_done(h, c, cmd); c 2508 
drivers/scsi/hpsa.c return hpsa_cmd_free_and_done(h, c, cmd); c 2511 drivers/scsi/hpsa.c return hpsa_retry_cmd(h, c); c 2514 drivers/scsi/hpsa.c if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev)) c 2515 drivers/scsi/hpsa.c return hpsa_retry_cmd(h, c); c 2517 drivers/scsi/hpsa.c return hpsa_cmd_free_and_done(h, c, cmd); c 2627 drivers/scsi/hpsa.c struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex]; c 2630 drivers/scsi/hpsa.c cp->Request.CDBLen = le16_to_cpu(c->io_flags) & c 2632 drivers/scsi/hpsa.c cp->Header.tag = c->tag; c 2633 drivers/scsi/hpsa.c memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8); c 2634 drivers/scsi/hpsa.c memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen); c 2787 drivers/scsi/hpsa.c static void hpsa_pci_unmap(struct pci_dev *pdev, struct CommandList *c, c 2793 drivers/scsi/hpsa.c dma_unmap_single(&pdev->dev, le64_to_cpu(c->SG[i].Addr), c 2794 drivers/scsi/hpsa.c le32_to_cpu(c->SG[i].Len), c 2830 drivers/scsi/hpsa.c struct CommandList *c, int reply_queue, unsigned long timeout_msecs) c 2834 drivers/scsi/hpsa.c c->waiting = &wait; c 2835 drivers/scsi/hpsa.c __enqueue_cmd_and_start_io(h, c, reply_queue); c 2849 drivers/scsi/hpsa.c static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c, c 2853 drivers/scsi/hpsa.c c->err_info->CommandStatus = CMD_CTLR_LOCKUP; c 2856 drivers/scsi/hpsa.c return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs); c 2873 drivers/scsi/hpsa.c struct CommandList *c, enum dma_data_direction data_direction, c 2880 drivers/scsi/hpsa.c memset(c->err_info, 0, sizeof(*c->err_info)); c 2881 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, c 2891 drivers/scsi/hpsa.c } while ((check_for_unit_attention(h, c) || c 2892 drivers/scsi/hpsa.c check_for_busy(h, c)) && c 2894 drivers/scsi/hpsa.c hpsa_pci_unmap(h->pdev, c, 1, data_direction); c 2901 drivers/scsi/hpsa.c struct CommandList *c) c 2903 drivers/scsi/hpsa.c const u8 *cdb = c->Request.CDB; c 2904 drivers/scsi/hpsa.c const u8 *lun = c->Header.LUN.LunAddrBytes; c 2989 drivers/scsi/hpsa.c struct CommandList *c; c 2992 drivers/scsi/hpsa.c c = cmd_alloc(h); c 2993 drivers/scsi/hpsa.c if (fill_cmd(c, RECEIVE_DIAGNOSTIC, h, buf, bufsize, c 2998 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, c 3002 drivers/scsi/hpsa.c ei = c->err_info; c 3004 drivers/scsi/hpsa.c hpsa_scsi_interpret_error(h, c); c 3008 drivers/scsi/hpsa.c cmd_free(h, c); c 3041 drivers/scsi/hpsa.c struct CommandList *c; c 3044 drivers/scsi/hpsa.c c = cmd_alloc(h); c 3046 drivers/scsi/hpsa.c if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, c 3051 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, c 3055 drivers/scsi/hpsa.c ei = c->err_info; c 3057 drivers/scsi/hpsa.c hpsa_scsi_interpret_error(h, c); c 3061 drivers/scsi/hpsa.c cmd_free(h, c); c 3069 drivers/scsi/hpsa.c struct CommandList *c; c 3072 drivers/scsi/hpsa.c c = cmd_alloc(h); c 3073 drivers/scsi/hpsa.c c->device = dev; c 3076 drivers/scsi/hpsa.c (void) fill_cmd(c, reset_type, h, NULL, 0, 0, dev->scsi3addr, TYPE_MSG); c 3077 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); c 3084 drivers/scsi/hpsa.c ei = c->err_info; c 3086 drivers/scsi/hpsa.c hpsa_scsi_interpret_error(h, c); c 3090 drivers/scsi/hpsa.c cmd_free(h, c); c 3094 drivers/scsi/hpsa.c static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c, c 3100 drivers/scsi/hpsa.c struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; c 3103 
drivers/scsi/hpsa.c if (hpsa_is_cmd_idle(c)) c 3106 drivers/scsi/hpsa.c switch (c->cmd_type) { c 3109 drivers/scsi/hpsa.c match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes, c 3110 drivers/scsi/hpsa.c sizeof(c->Header.LUN.LunAddrBytes)); c 3115 drivers/scsi/hpsa.c if (c->phys_disk == dev) { c 3128 drivers/scsi/hpsa.c match = dev->phys_disk[i] == c->phys_disk; c 3146 drivers/scsi/hpsa.c c->cmd_type); c 3295 drivers/scsi/hpsa.c struct CommandList *c; c 3298 drivers/scsi/hpsa.c c = cmd_alloc(h); c 3300 drivers/scsi/hpsa.c if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map, c 3304 drivers/scsi/hpsa.c cmd_free(h, c); c 3307 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, c 3311 drivers/scsi/hpsa.c ei = c->err_info; c 3313 drivers/scsi/hpsa.c hpsa_scsi_interpret_error(h, c); c 3317 drivers/scsi/hpsa.c cmd_free(h, c); c 3328 drivers/scsi/hpsa.c cmd_free(h, c); c 3337 drivers/scsi/hpsa.c struct CommandList *c; c 3340 drivers/scsi/hpsa.c c = cmd_alloc(h); c 3342 drivers/scsi/hpsa.c rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize, c 3347 drivers/scsi/hpsa.c c->Request.CDB[2] = bmic_device_index & 0xff; c 3348 drivers/scsi/hpsa.c c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; c 3350 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, c 3354 drivers/scsi/hpsa.c ei = c->err_info; c 3356 drivers/scsi/hpsa.c hpsa_scsi_interpret_error(h, c); c 3360 drivers/scsi/hpsa.c cmd_free(h, c); c 3368 drivers/scsi/hpsa.c struct CommandList *c; c 3371 drivers/scsi/hpsa.c c = cmd_alloc(h); c 3373 drivers/scsi/hpsa.c rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize, c 3378 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, c 3382 drivers/scsi/hpsa.c ei = c->err_info; c 3384 drivers/scsi/hpsa.c hpsa_scsi_interpret_error(h, c); c 3388 drivers/scsi/hpsa.c cmd_free(h, c); c 3397 drivers/scsi/hpsa.c struct CommandList *c; c 3400 drivers/scsi/hpsa.c c = cmd_alloc(h); c 3401 drivers/scsi/hpsa.c rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize, c 3406 drivers/scsi/hpsa.c c->Request.CDB[2] = bmic_device_index & 0xff; c 3407 drivers/scsi/hpsa.c c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; c 3409 drivers/scsi/hpsa.c hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, c 3411 drivers/scsi/hpsa.c ei = c->err_info; c 3413 drivers/scsi/hpsa.c hpsa_scsi_interpret_error(h, c); c 3417 drivers/scsi/hpsa.c cmd_free(h, c); c 3434 drivers/scsi/hpsa.c struct CommandList *c = NULL; c 3472 drivers/scsi/hpsa.c c = cmd_alloc(h); c 3474 drivers/scsi/hpsa.c rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp, c 3481 drivers/scsi/hpsa.c c->Request.CDB[5] = id_phys->box_index; c 3483 drivers/scsi/hpsa.c c->Request.CDB[5] = 0; c 3485 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, c 3490 drivers/scsi/hpsa.c ei = c->err_info; c 3505 drivers/scsi/hpsa.c if (c) c 3506 drivers/scsi/hpsa.c cmd_free(h, c); c 3717 drivers/scsi/hpsa.c struct CommandList *c; c 3721 drivers/scsi/hpsa.c c = cmd_alloc(h); c 3725 drivers/scsi/hpsa.c if (fill_cmd(c, logical ? 
HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, c 3731 drivers/scsi/hpsa.c c->Request.CDB[1] = extended_response; c 3732 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, c 3736 drivers/scsi/hpsa.c ei = c->err_info; c 3739 drivers/scsi/hpsa.c hpsa_scsi_interpret_error(h, c); c 3756 drivers/scsi/hpsa.c cmd_free(h, c); c 3853 drivers/scsi/hpsa.c struct CommandList *c; c 3864 drivers/scsi/hpsa.c c = cmd_alloc(h); c 3866 drivers/scsi/hpsa.c (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD); c 3867 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, c 3870 drivers/scsi/hpsa.c cmd_free(h, c); c 3873 drivers/scsi/hpsa.c sense = c->err_info->SenseInfo; c 3874 drivers/scsi/hpsa.c if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo)) c 3875 drivers/scsi/hpsa.c sense_len = sizeof(c->err_info->SenseInfo); c 3877 drivers/scsi/hpsa.c sense_len = c->err_info->SenseLen; c 3879 drivers/scsi/hpsa.c cmd_status = c->err_info->CommandStatus; c 3880 drivers/scsi/hpsa.c scsi_status = c->err_info->ScsiStatus; c 3881 drivers/scsi/hpsa.c cmd_free(h, c); c 4702 drivers/scsi/hpsa.c struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, c 4705 drivers/scsi/hpsa.c struct scsi_cmnd *cmd = c->scsi_cmd; c 4706 drivers/scsi/hpsa.c struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; c 4734 drivers/scsi/hpsa.c c->cmd_type = CMD_IOACCEL1; c 4737 drivers/scsi/hpsa.c c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle + c 4738 drivers/scsi/hpsa.c (c->cmdindex * sizeof(*cp)); c 4739 drivers/scsi/hpsa.c BUG_ON(c->busaddr & 0x0000007F); c 4780 drivers/scsi/hpsa.c c->Header.SGList = use_sg; c 4790 drivers/scsi/hpsa.c enqueue_cmd_and_start_io(h, c); c 4799 drivers/scsi/hpsa.c struct CommandList *c) c 4801 drivers/scsi/hpsa.c struct scsi_cmnd *cmd = c->scsi_cmd; c 4807 drivers/scsi/hpsa.c c->phys_disk = dev; c 4812 drivers/scsi/hpsa.c return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle, c 4820 drivers/scsi/hpsa.c struct CommandList *c, struct io_accel2_cmd *cp) c 4822 drivers/scsi/hpsa.c struct scsi_cmnd *cmd = c->scsi_cmd; c 4876 drivers/scsi/hpsa.c struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, c 4879 drivers/scsi/hpsa.c struct scsi_cmnd *cmd = c->scsi_cmd; c 4880 drivers/scsi/hpsa.c struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; c 4907 drivers/scsi/hpsa.c c->cmd_type = CMD_IOACCEL2; c 4909 drivers/scsi/hpsa.c c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + c 4910 drivers/scsi/hpsa.c (c->cmdindex * sizeof(*cp)); c 4911 drivers/scsi/hpsa.c BUG_ON(c->busaddr & 0x0000007F); c 4926 drivers/scsi/hpsa.c h->ioaccel2_cmd_sg_list[c->cmdindex]->address); c 4934 drivers/scsi/hpsa.c curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex]; c 4979 drivers/scsi/hpsa.c set_encrypt_ioaccel2(h, c, cp); c 4982 drivers/scsi/hpsa.c cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT); c 4986 drivers/scsi/hpsa.c cp->err_ptr = cpu_to_le64(c->busaddr + c 4994 drivers/scsi/hpsa.c if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) { c 5007 drivers/scsi/hpsa.c enqueue_cmd_and_start_io(h, c); c 5015 drivers/scsi/hpsa.c struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, c 5018 drivers/scsi/hpsa.c if (!c->scsi_cmd->device) c 5021 drivers/scsi/hpsa.c if (!c->scsi_cmd->device->hostdata) c 5034 drivers/scsi/hpsa.c return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle, c 5038 drivers/scsi/hpsa.c return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle, c 5073 drivers/scsi/hpsa.c struct CommandList *c) c 5075 
drivers/scsi/hpsa.c struct scsi_cmnd *cmd = c->scsi_cmd; c 5355 drivers/scsi/hpsa.c c->phys_disk = dev->phys_disk[map_index]; c 5356 drivers/scsi/hpsa.c if (!c->phys_disk) c 5405 drivers/scsi/hpsa.c return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len, c 5416 drivers/scsi/hpsa.c struct CommandList *c, struct scsi_cmnd *cmd, c 5419 drivers/scsi/hpsa.c cmd->host_scribble = (unsigned char *) c; c 5420 drivers/scsi/hpsa.c c->cmd_type = CMD_SCSI; c 5421 drivers/scsi/hpsa.c c->scsi_cmd = cmd; c 5422 drivers/scsi/hpsa.c c->Header.ReplyQueue = 0; /* unused in simple mode */ c 5423 drivers/scsi/hpsa.c memcpy(&c->Header.LUN.LunAddrBytes[0], &dev->scsi3addr[0], 8); c 5424 drivers/scsi/hpsa.c c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT)); c 5428 drivers/scsi/hpsa.c c->Request.Timeout = 0; c 5429 drivers/scsi/hpsa.c BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); c 5430 drivers/scsi/hpsa.c c->Request.CDBLen = cmd->cmd_len; c 5431 drivers/scsi/hpsa.c memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); c 5434 drivers/scsi/hpsa.c c->Request.type_attr_dir = c 5438 drivers/scsi/hpsa.c c->Request.type_attr_dir = c 5442 drivers/scsi/hpsa.c c->Request.type_attr_dir = c 5451 drivers/scsi/hpsa.c c->Request.type_attr_dir = c 5470 drivers/scsi/hpsa.c if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */ c 5471 drivers/scsi/hpsa.c hpsa_cmd_resolve_and_free(h, c); c 5476 drivers/scsi/hpsa.c hpsa_cmd_resolve_and_free(h, c); c 5480 drivers/scsi/hpsa.c c->device = dev; c 5482 drivers/scsi/hpsa.c enqueue_cmd_and_start_io(h, c); c 5488 drivers/scsi/hpsa.c struct CommandList *c) c 5493 drivers/scsi/hpsa.c memset(c, 0, offsetof(struct CommandList, refcount)); c 5494 drivers/scsi/hpsa.c c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT)); c 5495 drivers/scsi/hpsa.c cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c); c 5496 drivers/scsi/hpsa.c c->err_info = h->errinfo_pool + index; c 5497 drivers/scsi/hpsa.c memset(c->err_info, 0, sizeof(*c->err_info)); c 5499 drivers/scsi/hpsa.c + index * sizeof(*c->err_info); c 5500 drivers/scsi/hpsa.c c->cmdindex = index; c 5501 drivers/scsi/hpsa.c c->busaddr = (u32) cmd_dma_handle; c 5502 drivers/scsi/hpsa.c c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle); c 5503 drivers/scsi/hpsa.c c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info)); c 5504 drivers/scsi/hpsa.c c->h = h; c 5505 drivers/scsi/hpsa.c c->scsi_cmd = SCSI_CMD_IDLE; c 5513 drivers/scsi/hpsa.c struct CommandList *c = h->cmd_pool + i; c 5515 drivers/scsi/hpsa.c hpsa_cmd_init(h, i, c); c 5516 drivers/scsi/hpsa.c atomic_set(&c->refcount, 0); c 5521 drivers/scsi/hpsa.c struct CommandList *c) c 5523 drivers/scsi/hpsa.c dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c); c 5525 drivers/scsi/hpsa.c BUG_ON(c->cmdindex != index); c 5527 drivers/scsi/hpsa.c memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); c 5528 drivers/scsi/hpsa.c memset(c->err_info, 0, sizeof(*c->err_info)); c 5529 drivers/scsi/hpsa.c c->busaddr = (u32) cmd_dma_handle; c 5533 drivers/scsi/hpsa.c struct CommandList *c, struct scsi_cmnd *cmd) c 5547 drivers/scsi/hpsa.c cmd->host_scribble = (unsigned char *) c; c 5550 drivers/scsi/hpsa.c hpsa_cmd_init(h, c->cmdindex, c); c 5551 drivers/scsi/hpsa.c c->cmd_type = CMD_SCSI; c 5552 drivers/scsi/hpsa.c c->scsi_cmd = cmd; c 5553 drivers/scsi/hpsa.c c->device = dev; c 5554 drivers/scsi/hpsa.c rc = hpsa_scsi_ioaccel_raid_map(h, c); c 5558 drivers/scsi/hpsa.c hpsa_cmd_init(h, c->cmdindex, c); c 5559 drivers/scsi/hpsa.c c->cmd_type = CMD_SCSI; 
c 5560 drivers/scsi/hpsa.c c->scsi_cmd = cmd; c 5561 drivers/scsi/hpsa.c c->device = dev; c 5562 drivers/scsi/hpsa.c rc = hpsa_scsi_ioaccel_direct_map(h, c); c 5573 drivers/scsi/hpsa.c struct CommandList *c = container_of(work, struct CommandList, work); c 5575 drivers/scsi/hpsa.c cmd = c->scsi_cmd; c 5579 drivers/scsi/hpsa.c return hpsa_cmd_free_and_done(c->h, c, cmd); c 5584 drivers/scsi/hpsa.c return hpsa_cmd_free_and_done(c->h, c, cmd); c 5587 drivers/scsi/hpsa.c if (c->cmd_type == CMD_IOACCEL2) { c 5588 drivers/scsi/hpsa.c struct ctlr_info *h = c->h; c 5589 drivers/scsi/hpsa.c struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; c 5594 drivers/scsi/hpsa.c rc = hpsa_ioaccel_submit(h, c, cmd); c 5604 drivers/scsi/hpsa.c return hpsa_cmd_free_and_done(h, c, cmd); c 5609 drivers/scsi/hpsa.c hpsa_cmd_partial_init(c->h, c->cmdindex, c); c 5610 drivers/scsi/hpsa.c if (hpsa_ciss_submit(c->h, c, cmd, dev)) { c 5629 drivers/scsi/hpsa.c struct CommandList *c; c 5659 drivers/scsi/hpsa.c c = cmd_tagged_alloc(h, cmd); c 5660 drivers/scsi/hpsa.c if (c == NULL) c 5676 drivers/scsi/hpsa.c rc = hpsa_ioaccel_submit(h, c, cmd); c 5680 drivers/scsi/hpsa.c hpsa_cmd_resolve_and_free(h, c); c 5684 drivers/scsi/hpsa.c return hpsa_ciss_submit(h, c, cmd, dev); c 5852 drivers/scsi/hpsa.c struct CommandList *c, unsigned char lunaddr[], c 5858 drivers/scsi/hpsa.c (void) fill_cmd(c, TEST_UNIT_READY, h, c 5860 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); c 5866 drivers/scsi/hpsa.c if (c->err_info->CommandStatus == CMD_SUCCESS) c 5874 drivers/scsi/hpsa.c if (c->err_info->CommandStatus == CMD_TARGET_STATUS && c 5875 drivers/scsi/hpsa.c c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION && c 5876 drivers/scsi/hpsa.c (c->err_info->SenseInfo[2] == NO_SENSE || c 5877 drivers/scsi/hpsa.c c->err_info->SenseInfo[2] == UNIT_ATTENTION)) c 5888 drivers/scsi/hpsa.c struct CommandList *c, c 5904 drivers/scsi/hpsa.c rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue); c 5928 drivers/scsi/hpsa.c struct CommandList *c; c 5930 drivers/scsi/hpsa.c c = cmd_alloc(h); c 5946 drivers/scsi/hpsa.c rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq); c 5956 drivers/scsi/hpsa.c cmd_free(h, c); c 6076 drivers/scsi/hpsa.c struct CommandList *c = h->cmd_pool + idx; c 6087 drivers/scsi/hpsa.c if (unlikely(!hpsa_is_cmd_idle(c))) { c 6104 drivers/scsi/hpsa.c atomic_inc(&c->refcount); c 6106 drivers/scsi/hpsa.c hpsa_cmd_partial_init(h, idx, c); c 6107 drivers/scsi/hpsa.c return c; c 6110 drivers/scsi/hpsa.c static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c) c 6116 drivers/scsi/hpsa.c (void)atomic_dec(&c->refcount); c 6130 drivers/scsi/hpsa.c struct CommandList *c; c 6161 drivers/scsi/hpsa.c c = h->cmd_pool + i; c 6162 drivers/scsi/hpsa.c refcount = atomic_inc_return(&c->refcount); c 6164 drivers/scsi/hpsa.c cmd_free(h, c); /* already in use */ c 6172 drivers/scsi/hpsa.c hpsa_cmd_partial_init(h, i, c); c 6173 drivers/scsi/hpsa.c c->device = NULL; c 6174 drivers/scsi/hpsa.c return c; c 6183 drivers/scsi/hpsa.c static void cmd_free(struct ctlr_info *h, struct CommandList *c) c 6185 drivers/scsi/hpsa.c if (atomic_dec_and_test(&c->refcount)) { c 6188 drivers/scsi/hpsa.c i = c - h->cmd_pool; c 6343 drivers/scsi/hpsa.c struct CommandList *c; c 6373 drivers/scsi/hpsa.c c = cmd_alloc(h); c 6376 drivers/scsi/hpsa.c c->cmd_type = CMD_IOCTL_PEND; c 6377 drivers/scsi/hpsa.c c->scsi_cmd = SCSI_CMD_BUSY; c 6379 drivers/scsi/hpsa.c c->Header.ReplyQueue = 0; /* unused in simple mode */ c 
6381 drivers/scsi/hpsa.c c->Header.SGList = 1; c 6382 drivers/scsi/hpsa.c c->Header.SGTotal = cpu_to_le16(1); c 6384 drivers/scsi/hpsa.c c->Header.SGList = 0; c 6385 drivers/scsi/hpsa.c c->Header.SGTotal = cpu_to_le16(0); c 6387 drivers/scsi/hpsa.c memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN)); c 6390 drivers/scsi/hpsa.c memcpy(&c->Request, &iocommand.Request, c 6391 drivers/scsi/hpsa.c sizeof(c->Request)); c 6398 drivers/scsi/hpsa.c c->SG[0].Addr = cpu_to_le64(0); c 6399 drivers/scsi/hpsa.c c->SG[0].Len = cpu_to_le32(0); c 6403 drivers/scsi/hpsa.c c->SG[0].Addr = cpu_to_le64(temp64); c 6404 drivers/scsi/hpsa.c c->SG[0].Len = cpu_to_le32(iocommand.buf_size); c 6405 drivers/scsi/hpsa.c c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */ c 6407 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, c 6410 drivers/scsi/hpsa.c hpsa_pci_unmap(h->pdev, c, 1, DMA_BIDIRECTIONAL); c 6411 drivers/scsi/hpsa.c check_ioctl_unit_attention(h, c); c 6418 drivers/scsi/hpsa.c memcpy(&iocommand.error_info, c->err_info, c 6433 drivers/scsi/hpsa.c cmd_free(h, c); c 6442 drivers/scsi/hpsa.c struct CommandList *c; c 6506 drivers/scsi/hpsa.c c = cmd_alloc(h); c 6508 drivers/scsi/hpsa.c c->cmd_type = CMD_IOCTL_PEND; c 6509 drivers/scsi/hpsa.c c->scsi_cmd = SCSI_CMD_BUSY; c 6510 drivers/scsi/hpsa.c c->Header.ReplyQueue = 0; c 6511 drivers/scsi/hpsa.c c->Header.SGList = (u8) sg_used; c 6512 drivers/scsi/hpsa.c c->Header.SGTotal = cpu_to_le16(sg_used); c 6513 drivers/scsi/hpsa.c memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); c 6514 drivers/scsi/hpsa.c memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); c 6522 drivers/scsi/hpsa.c c->SG[i].Addr = cpu_to_le64(0); c 6523 drivers/scsi/hpsa.c c->SG[i].Len = cpu_to_le32(0); c 6524 drivers/scsi/hpsa.c hpsa_pci_unmap(h->pdev, c, i, c 6529 drivers/scsi/hpsa.c c->SG[i].Addr = cpu_to_le64(temp64); c 6530 drivers/scsi/hpsa.c c->SG[i].Len = cpu_to_le32(buff_size[i]); c 6531 drivers/scsi/hpsa.c c->SG[i].Ext = cpu_to_le32(0); c 6533 drivers/scsi/hpsa.c c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST); c 6535 drivers/scsi/hpsa.c status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, c 6538 drivers/scsi/hpsa.c hpsa_pci_unmap(h->pdev, c, sg_used, DMA_BIDIRECTIONAL); c 6539 drivers/scsi/hpsa.c check_ioctl_unit_attention(h, c); c 6546 drivers/scsi/hpsa.c memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); c 6566 drivers/scsi/hpsa.c cmd_free(h, c); c 6581 drivers/scsi/hpsa.c struct CommandList *c) c 6583 drivers/scsi/hpsa.c if (c->err_info->CommandStatus == CMD_TARGET_STATUS && c 6584 drivers/scsi/hpsa.c c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) c 6585 drivers/scsi/hpsa.c (void) check_for_unit_attention(h, c); c 6629 drivers/scsi/hpsa.c struct CommandList *c; c 6631 drivers/scsi/hpsa.c c = cmd_alloc(h); c 6634 drivers/scsi/hpsa.c (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, c 6636 drivers/scsi/hpsa.c c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */ c 6637 drivers/scsi/hpsa.c c->waiting = NULL; c 6638 drivers/scsi/hpsa.c enqueue_cmd_and_start_io(h, c); c 6646 drivers/scsi/hpsa.c static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, c 6652 drivers/scsi/hpsa.c c->cmd_type = CMD_IOCTL_PEND; c 6653 drivers/scsi/hpsa.c c->scsi_cmd = SCSI_CMD_BUSY; c 6654 drivers/scsi/hpsa.c c->Header.ReplyQueue = 0; c 6656 drivers/scsi/hpsa.c c->Header.SGList = 1; c 6657 drivers/scsi/hpsa.c c->Header.SGTotal = cpu_to_le16(1); c 6659 drivers/scsi/hpsa.c 
c->Header.SGList = 0; c 6660 drivers/scsi/hpsa.c c->Header.SGTotal = cpu_to_le16(0); c 6662 drivers/scsi/hpsa.c memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); c 6669 drivers/scsi/hpsa.c c->Request.CDB[1] = 0x01; c 6670 drivers/scsi/hpsa.c c->Request.CDB[2] = (page_code & 0xff); c 6672 drivers/scsi/hpsa.c c->Request.CDBLen = 6; c 6673 drivers/scsi/hpsa.c c->Request.type_attr_dir = c 6675 drivers/scsi/hpsa.c c->Request.Timeout = 0; c 6676 drivers/scsi/hpsa.c c->Request.CDB[0] = HPSA_INQUIRY; c 6677 drivers/scsi/hpsa.c c->Request.CDB[4] = size & 0xFF; c 6680 drivers/scsi/hpsa.c c->Request.CDBLen = 6; c 6681 drivers/scsi/hpsa.c c->Request.type_attr_dir = c 6683 drivers/scsi/hpsa.c c->Request.Timeout = 0; c 6684 drivers/scsi/hpsa.c c->Request.CDB[0] = cmd; c 6685 drivers/scsi/hpsa.c c->Request.CDB[1] = 1; c 6686 drivers/scsi/hpsa.c c->Request.CDB[2] = 1; c 6687 drivers/scsi/hpsa.c c->Request.CDB[3] = (size >> 8) & 0xFF; c 6688 drivers/scsi/hpsa.c c->Request.CDB[4] = size & 0xFF; c 6695 drivers/scsi/hpsa.c c->Request.CDBLen = 12; c 6696 drivers/scsi/hpsa.c c->Request.type_attr_dir = c 6698 drivers/scsi/hpsa.c c->Request.Timeout = 0; c 6699 drivers/scsi/hpsa.c c->Request.CDB[0] = cmd; c 6700 drivers/scsi/hpsa.c c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ c 6701 drivers/scsi/hpsa.c c->Request.CDB[7] = (size >> 16) & 0xFF; c 6702 drivers/scsi/hpsa.c c->Request.CDB[8] = (size >> 8) & 0xFF; c 6703 drivers/scsi/hpsa.c c->Request.CDB[9] = size & 0xFF; c 6706 drivers/scsi/hpsa.c c->Request.CDBLen = 16; c 6707 drivers/scsi/hpsa.c c->Request.type_attr_dir = c 6709 drivers/scsi/hpsa.c c->Request.Timeout = 0; c 6711 drivers/scsi/hpsa.c c->Request.CDB[0] = BMIC_READ; c 6712 drivers/scsi/hpsa.c c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS; c 6715 drivers/scsi/hpsa.c c->Request.CDBLen = 16; c 6716 drivers/scsi/hpsa.c c->Request.type_attr_dir = c 6719 drivers/scsi/hpsa.c c->Request.Timeout = 0; c 6720 drivers/scsi/hpsa.c c->Request.CDB[0] = BMIC_WRITE; c 6721 drivers/scsi/hpsa.c c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS; c 6724 drivers/scsi/hpsa.c c->Request.CDBLen = 12; c 6725 drivers/scsi/hpsa.c c->Request.type_attr_dir = c 6728 drivers/scsi/hpsa.c c->Request.Timeout = 0; c 6729 drivers/scsi/hpsa.c c->Request.CDB[0] = BMIC_WRITE; c 6730 drivers/scsi/hpsa.c c->Request.CDB[6] = BMIC_CACHE_FLUSH; c 6731 drivers/scsi/hpsa.c c->Request.CDB[7] = (size >> 8) & 0xFF; c 6732 drivers/scsi/hpsa.c c->Request.CDB[8] = size & 0xFF; c 6735 drivers/scsi/hpsa.c c->Request.CDBLen = 6; c 6736 drivers/scsi/hpsa.c c->Request.type_attr_dir = c 6738 drivers/scsi/hpsa.c c->Request.Timeout = 0; c 6741 drivers/scsi/hpsa.c c->Request.CDBLen = 12; c 6742 drivers/scsi/hpsa.c c->Request.type_attr_dir = c 6744 drivers/scsi/hpsa.c c->Request.Timeout = 0; c 6745 drivers/scsi/hpsa.c c->Request.CDB[0] = HPSA_CISS_READ; c 6746 drivers/scsi/hpsa.c c->Request.CDB[1] = cmd; c 6747 drivers/scsi/hpsa.c c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ c 6748 drivers/scsi/hpsa.c c->Request.CDB[7] = (size >> 16) & 0xFF; c 6749 drivers/scsi/hpsa.c c->Request.CDB[8] = (size >> 8) & 0xFF; c 6750 drivers/scsi/hpsa.c c->Request.CDB[9] = size & 0xFF; c 6753 drivers/scsi/hpsa.c c->Request.CDBLen = 10; c 6754 drivers/scsi/hpsa.c c->Request.type_attr_dir = c 6756 drivers/scsi/hpsa.c c->Request.Timeout = 0; c 6757 drivers/scsi/hpsa.c c->Request.CDB[0] = BMIC_READ; c 6758 drivers/scsi/hpsa.c c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS; c 6759 drivers/scsi/hpsa.c c->Request.CDB[7] = (size >> 16) & 0xFF; c 6760 drivers/scsi/hpsa.c 
c->Request.CDB[8] = (size >> 8) & 0xFF; c 6763 drivers/scsi/hpsa.c c->Request.CDBLen = 10; c 6764 drivers/scsi/hpsa.c c->Request.type_attr_dir = c 6766 drivers/scsi/hpsa.c c->Request.Timeout = 0; c 6767 drivers/scsi/hpsa.c c->Request.CDB[0] = BMIC_READ; c 6768 drivers/scsi/hpsa.c c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE; c 6769 drivers/scsi/hpsa.c c->Request.CDB[7] = (size >> 16) & 0xFF; c 6770 drivers/scsi/hpsa.c c->Request.CDB[8] = (size >> 8) & 0XFF; c 6773 drivers/scsi/hpsa.c c->Request.CDBLen = 10; c 6774 drivers/scsi/hpsa.c c->Request.type_attr_dir = c 6776 drivers/scsi/hpsa.c c->Request.Timeout = 0; c 6777 drivers/scsi/hpsa.c c->Request.CDB[0] = BMIC_READ; c 6778 drivers/scsi/hpsa.c c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION; c 6779 drivers/scsi/hpsa.c c->Request.CDB[7] = (size >> 16) & 0xFF; c 6780 drivers/scsi/hpsa.c c->Request.CDB[8] = (size >> 8) & 0XFF; c 6783 drivers/scsi/hpsa.c c->Request.CDBLen = 10; c 6784 drivers/scsi/hpsa.c c->Request.type_attr_dir = c 6786 drivers/scsi/hpsa.c c->Request.Timeout = 0; c 6787 drivers/scsi/hpsa.c c->Request.CDB[0] = BMIC_READ; c 6788 drivers/scsi/hpsa.c c->Request.CDB[6] = BMIC_SENSE_STORAGE_BOX_PARAMS; c 6789 drivers/scsi/hpsa.c c->Request.CDB[7] = (size >> 16) & 0xFF; c 6790 drivers/scsi/hpsa.c c->Request.CDB[8] = (size >> 8) & 0XFF; c 6793 drivers/scsi/hpsa.c c->Request.CDBLen = 10; c 6794 drivers/scsi/hpsa.c c->Request.type_attr_dir = c 6796 drivers/scsi/hpsa.c c->Request.Timeout = 0; c 6797 drivers/scsi/hpsa.c c->Request.CDB[0] = BMIC_READ; c 6798 drivers/scsi/hpsa.c c->Request.CDB[1] = 0; c 6799 drivers/scsi/hpsa.c c->Request.CDB[2] = 0; c 6800 drivers/scsi/hpsa.c c->Request.CDB[3] = 0; c 6801 drivers/scsi/hpsa.c c->Request.CDB[4] = 0; c 6802 drivers/scsi/hpsa.c c->Request.CDB[5] = 0; c 6803 drivers/scsi/hpsa.c c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER; c 6804 drivers/scsi/hpsa.c c->Request.CDB[7] = (size >> 16) & 0xFF; c 6805 drivers/scsi/hpsa.c c->Request.CDB[8] = (size >> 8) & 0XFF; c 6806 drivers/scsi/hpsa.c c->Request.CDB[9] = 0; c 6816 drivers/scsi/hpsa.c c->Request.CDBLen = 16; c 6817 drivers/scsi/hpsa.c c->Request.type_attr_dir = c 6819 drivers/scsi/hpsa.c c->Request.Timeout = 0; /* Don't time out */ c 6820 drivers/scsi/hpsa.c memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); c 6821 drivers/scsi/hpsa.c c->Request.CDB[0] = HPSA_RESET; c 6822 drivers/scsi/hpsa.c c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE; c 6824 drivers/scsi/hpsa.c c->Request.CDB[4] = 0x00; c 6825 drivers/scsi/hpsa.c c->Request.CDB[5] = 0x00; c 6826 drivers/scsi/hpsa.c c->Request.CDB[6] = 0x00; c 6827 drivers/scsi/hpsa.c c->Request.CDB[7] = 0x00; c 6830 drivers/scsi/hpsa.c c->Request.CDBLen = 16; c 6831 drivers/scsi/hpsa.c c->Request.type_attr_dir = c 6833 drivers/scsi/hpsa.c c->Request.Timeout = 0; /* Don't time out */ c 6834 drivers/scsi/hpsa.c memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); c 6835 drivers/scsi/hpsa.c c->Request.CDB[0] = cmd; c 6836 drivers/scsi/hpsa.c c->Request.CDB[1] = HPSA_RESET_TYPE_LUN; c 6839 drivers/scsi/hpsa.c c->Request.CDB[4] = 0x00; c 6840 drivers/scsi/hpsa.c c->Request.CDB[5] = 0x00; c 6841 drivers/scsi/hpsa.c c->Request.CDB[6] = 0x00; c 6842 drivers/scsi/hpsa.c c->Request.CDB[7] = 0x00; c 6854 drivers/scsi/hpsa.c switch (GET_DIR(c->Request.type_attr_dir)) { c 6867 drivers/scsi/hpsa.c if (hpsa_map_one(h->pdev, c, buff, size, dir)) c 6911 drivers/scsi/hpsa.c static inline void finish_cmd(struct CommandList *c) c 6913 drivers/scsi/hpsa.c dial_up_lockup_detection_on_fw_flash_complete(c->h, c); c 6914 
drivers/scsi/hpsa.c if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI c 6915 drivers/scsi/hpsa.c || c->cmd_type == CMD_IOACCEL2)) c 6916 drivers/scsi/hpsa.c complete_scsi_command(c); c 6917 drivers/scsi/hpsa.c else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF) c 6918 drivers/scsi/hpsa.c complete(c->waiting); c 6926 drivers/scsi/hpsa.c struct CommandList *c; c 6930 drivers/scsi/hpsa.c c = h->cmd_pool + tag_index; c 6931 drivers/scsi/hpsa.c finish_cmd(c); c 8189 drivers/scsi/hpsa.c struct CommandList *c; c 8194 drivers/scsi/hpsa.c c = h->cmd_pool + i; c 8195 drivers/scsi/hpsa.c refcount = atomic_inc_return(&c->refcount); c 8197 drivers/scsi/hpsa.c c->err_info->CommandStatus = CMD_CTLR_LOCKUP; c 8198 drivers/scsi/hpsa.c finish_cmd(c); c 8202 drivers/scsi/hpsa.c cmd_free(h, c); c 8877 drivers/scsi/hpsa.c struct CommandList *c; c 8886 drivers/scsi/hpsa.c c = cmd_alloc(h); c 8888 drivers/scsi/hpsa.c if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, c 8892 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE, c 8896 drivers/scsi/hpsa.c if (c->err_info->CommandStatus != 0) c 8900 drivers/scsi/hpsa.c cmd_free(h, c); c 8910 drivers/scsi/hpsa.c struct CommandList *c; c 8921 drivers/scsi/hpsa.c c = cmd_alloc(h); c 8924 drivers/scsi/hpsa.c if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0, c 8928 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, c 8930 drivers/scsi/hpsa.c if ((rc != 0) || (c->err_info->CommandStatus != 0)) c 8936 drivers/scsi/hpsa.c if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0, c 8940 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE, c 8942 drivers/scsi/hpsa.c if ((rc != 0) || (c->err_info->CommandStatus != 0)) c 8946 drivers/scsi/hpsa.c if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0, c 8950 drivers/scsi/hpsa.c rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, c 8952 drivers/scsi/hpsa.c if ((rc != 0) || (c->err_info->CommandStatus != 0)) c 8962 drivers/scsi/hpsa.c cmd_free(h, c); c 9488 drivers/scsi/hpsa.c static int is_accelerated_cmd(struct CommandList *c) c 9490 drivers/scsi/hpsa.c return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2; c 9495 drivers/scsi/hpsa.c struct CommandList *c = NULL; c 9502 drivers/scsi/hpsa.c c = h->cmd_pool + i; c 9503 drivers/scsi/hpsa.c refcount = atomic_inc_return(&c->refcount); c 9505 drivers/scsi/hpsa.c accel_cmds_out += is_accelerated_cmd(c); c 9506 drivers/scsi/hpsa.c cmd_free(h, c); c 31 drivers/scsi/hpsa.h struct CommandList *c); c 420 drivers/scsi/hpsa.h struct CommandList *c) c 422 drivers/scsi/hpsa.h writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); c 427 drivers/scsi/hpsa.h struct CommandList *c) c 429 drivers/scsi/hpsa.h writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); c 433 drivers/scsi/hpsa.h struct CommandList *c) c 435 drivers/scsi/hpsa.h writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); c 3304 drivers/scsi/lpfc/lpfc_ct.c char c; c 3323 drivers/scsi/lpfc/lpfc_ct.c c = 'N'; c 3326 drivers/scsi/lpfc/lpfc_ct.c c = 'A'; c 3329 drivers/scsi/lpfc/lpfc_ct.c c = 'B'; c 3332 drivers/scsi/lpfc/lpfc_ct.c c = 'X'; c 3335 drivers/scsi/lpfc/lpfc_ct.c c = 0; c 3354 drivers/scsi/lpfc/lpfc_ct.c if (c == 0) { c 3364 drivers/scsi/lpfc/lpfc_ct.c b1, b2, b3, c, c 3368 drivers/scsi/lpfc/lpfc_ct.c b1, b2, b3, c, b4); c 3376 drivers/scsi/lpfc/lpfc_ct.c c = (rev & 0x0000ff00) >> 8; c 3379 drivers/scsi/lpfc/lpfc_ct.c sprintf(fwrevision, "%d.%d%d%c%d", b1, b2, b3, 
c, b4); c 122 drivers/scsi/lpfc/lpfc_sli.c #define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c) c 1419 drivers/scsi/megaraid.c u8 c; c 1551 drivers/scsi/megaraid.c c = *(unsigned char *) sg_virt(&sgl[0]); c 1554 drivers/scsi/megaraid.c c = 0; c 1558 drivers/scsi/megaraid.c ((c & 0x1F ) == TYPE_DISK)) { c 2188 drivers/scsi/megaraid/megaraid_mbox.c uint8_t c; c 2262 drivers/scsi/megaraid/megaraid_mbox.c c = *(unsigned char *) sg_virt(&sgl[0]); c 2267 drivers/scsi/megaraid/megaraid_mbox.c c = 0; c 2270 drivers/scsi/megaraid/megaraid_mbox.c if ((c & 0x1F ) == TYPE_DISK) { c 3362 drivers/scsi/megaraid/megaraid_mbox.c uint8_t c; c 3377 drivers/scsi/megaraid/megaraid_mbox.c for (c = 0; c < adapter->max_channel; c++) c 3379 drivers/scsi/megaraid/megaraid_mbox.c adapter->device_ids[c][t] = (c << 8) | t; c 300 drivers/scsi/mesh.c #define MKWORD(a, b, c, d) (((a) << 24) + ((b) << 16) + ((c) << 8) + (d)) c 953 drivers/scsi/myrs.h struct myrs_hba *c, void __iomem *base); c 657 drivers/scsi/ncr53c8xx.c int i, val, c; c 665 drivers/scsi/ncr53c8xx.c c = *++pv; c 667 drivers/scsi/ncr53c8xx.c if (c == 'n') c 669 drivers/scsi/ncr53c8xx.c else if (c == 'y') c 789 drivers/scsi/ncr53c8xx.c int c, h, t, u, v; c 796 drivers/scsi/ncr53c8xx.c while ((c = *p++) != 0) { c 798 drivers/scsi/ncr53c8xx.c switch(c) { c 341 drivers/scsi/pm8001/pm8001_ctl.c #define AAP1_MEMMAP(r, c) \ c 343 drivers/scsi/pm8001/pm8001_ctl.c + (c))) c 378 drivers/scsi/pm8001/pm8001_ctl.c #define IB_MEMMAP(c) \ c 381 drivers/scsi/pm8001/pm8001_ctl.c pm8001_ha->evtlog_ib_offset + (c))) c 411 drivers/scsi/pm8001/pm8001_ctl.c #define OB_MEMMAP(c) \ c 414 drivers/scsi/pm8001/pm8001_ctl.c pm8001_ha->evtlog_ob_offset + (c))) c 568 drivers/scsi/qla1280.c #define qla1280_dump_buffer(a, b, c) do{}while(0) c 3929 drivers/scsi/qla1280.c u8 c; c 3937 drivers/scsi/qla1280.c c = *b++; c 3939 drivers/scsi/qla1280.c printk("0x%02x", c); c 502 drivers/scsi/qla2xxx/qla_dbg.c struct qla2xxx_offld_chain *c = ptr; c 507 drivers/scsi/qla2xxx/qla_dbg.c *last_chain = &c->type; c 509 drivers/scsi/qla2xxx/qla_dbg.c c->type = cpu_to_be32(DUMP_CHAIN_EXLOGIN); c 510 drivers/scsi/qla2xxx/qla_dbg.c c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) + c 512 drivers/scsi/qla2xxx/qla_dbg.c c->size = cpu_to_be32(ha->exlogin_size); c 513 drivers/scsi/qla2xxx/qla_dbg.c c->addr = cpu_to_be64(ha->exlogin_buf_dma); c 518 drivers/scsi/qla2xxx/qla_dbg.c return (char *)ptr + cpu_to_be32(c->size); c 524 drivers/scsi/qla2xxx/qla_dbg.c struct qla2xxx_offld_chain *c = ptr; c 529 drivers/scsi/qla2xxx/qla_dbg.c *last_chain = &c->type; c 531 drivers/scsi/qla2xxx/qla_dbg.c c->type = cpu_to_be32(DUMP_CHAIN_EXCHG); c 532 drivers/scsi/qla2xxx/qla_dbg.c c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) + c 534 drivers/scsi/qla2xxx/qla_dbg.c c->size = cpu_to_be32(ha->exchoffld_size); c 535 drivers/scsi/qla2xxx/qla_dbg.c c->addr = cpu_to_be64(ha->exchoffld_buf_dma); c 540 drivers/scsi/qla2xxx/qla_dbg.c return (char *)ptr + cpu_to_be32(c->size); c 3119 drivers/scsi/qla2xxx/qla_gs.c struct srb_iocb *c = &sp->u.iocb_cmd; c 3123 drivers/scsi/qla2xxx/qla_gs.c qla2x00_els_dcmd2_free(vha, &c->u.els_plogi); c 6245 drivers/scsi/qla2xxx/qla_mbx.c struct srb_iocb *c; c 6257 drivers/scsi/qla2xxx/qla_mbx.c c = &sp->u.iocb_cmd; c 6258 drivers/scsi/qla2xxx/qla_mbx.c c->timeout = qla2x00_async_iocb_timeout; c 6259 drivers/scsi/qla2xxx/qla_mbx.c init_completion(&c->u.mbx.comp); c 6278 drivers/scsi/qla2xxx/qla_mbx.c wait_for_completion(&c->u.mbx.comp); c 6281 
drivers/scsi/qla2xxx/qla_mbx.c rval = c->u.mbx.rc; c 6518 drivers/scsi/qla2xxx/qla_mbx.c int rval, c; c 6525 drivers/scsi/qla2xxx/qla_mbx.c offset = c = 0; c 6544 drivers/scsi/qla2xxx/qla_mbx.c if (buf && (c < count)) { c 6547 drivers/scsi/qla2xxx/qla_mbx.c if ((count - c) >= SFP_BLOCK_SIZE) c 6550 drivers/scsi/qla2xxx/qla_mbx.c sz = count - c; c 6554 drivers/scsi/qla2xxx/qla_mbx.c c += sz; c 621 drivers/scsi/qla2xxx/qla_target.c char *c = NULL; c 627 drivers/scsi/qla2xxx/qla_target.c c = "PLOGI"; c 632 drivers/scsi/qla2xxx/qla_target.c c = "PRLI"; c 636 drivers/scsi/qla2xxx/qla_target.c c = "LOGO"; c 655 drivers/scsi/qla2xxx/qla_target.c sp->name, fcport->port_name, sp->handle, c); c 3833 drivers/scsi/qla2xxx/qla_target.c struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio; c 3835 drivers/scsi/qla2xxx/qla_target.c term = !(c->flags & c 47 drivers/scsi/qla2xxx/tcm_qla2xxx.c char c; c 55 drivers/scsi/qla2xxx/tcm_qla2xxx.c c = *cp; c 56 drivers/scsi/qla2xxx/tcm_qla2xxx.c if (c == '\n' && cp[1] == '\0') c 60 drivers/scsi/qla2xxx/tcm_qla2xxx.c if (c == ':') c 65 drivers/scsi/qla2xxx/tcm_qla2xxx.c if (c == '\0') { c 72 drivers/scsi/qla2xxx/tcm_qla2xxx.c if (isdigit(c)) c 73 drivers/scsi/qla2xxx/tcm_qla2xxx.c nibble = c - '0'; c 74 drivers/scsi/qla2xxx/tcm_qla2xxx.c else if (isxdigit(c) && (islower(c) || !strict)) c 75 drivers/scsi/qla2xxx/tcm_qla2xxx.c nibble = tolower(c) - 'a' + 10; c 16 drivers/scsi/qla4xxx/ql4_dbg.c uint8_t *c = b; c 22 drivers/scsi/qla4xxx/ql4_dbg.c for (cnt = 0; cnt < size; c++) { c 23 drivers/scsi/qla4xxx/ql4_dbg.c printk("%02x", *c); c 2802 drivers/scsi/scsi_debug.c unsigned char c = buf[i+j]; c 2804 drivers/scsi/scsi_debug.c if (c >= 0x20 && c < 0x7e) c 1822 drivers/scsi/scsi_scan.c static void do_scan_async(void *_data, async_cookie_t c) c 63 drivers/scsi/snic/snic_ctl.c char c; c 70 drivers/scsi/snic/snic_ctl.c while ((c = *p++)) { c 71 drivers/scsi/snic/snic_ctl.c if (c == '.') { c 76 drivers/scsi/snic/snic_ctl.c if (i > 3 || !isdigit(c)) c 79 drivers/scsi/snic/snic_ctl.c v[i] = v[i] * 10 + (c - '0'); c 504 drivers/scsi/snic/snic_fwint.h u8 *c = ((u8 *) req) + sizeof(struct snic_fw_req) - 1; c 507 drivers/scsi/snic/snic_fwint.h *c |= 0x80; c 509 drivers/scsi/snic/snic_fwint.h *c &= ~0x80; c 515 drivers/scsi/snic/snic_fwint.h u8 *c = ((u8 *) req) + sizeof(struct snic_fw_req) - 1; c 517 drivers/scsi/snic/snic_fwint.h *color = *c >> 7; c 35 drivers/scsi/snic/snic_res.c struct vnic_snic_config *c = &snic->config; c 42 drivers/scsi/snic/snic_res.c sizeof(c->m), \ c 43 drivers/scsi/snic/snic_res.c &c->m); \ c 63 drivers/scsi/snic/snic_res.c c->wq_enet_desc_count = min_t(u32, c 67 drivers/scsi/snic/snic_res.c c->wq_enet_desc_count)); c 69 drivers/scsi/snic/snic_res.c c->wq_enet_desc_count = ALIGN(c->wq_enet_desc_count, 16); c 71 drivers/scsi/snic/snic_res.c c->maxdatafieldsize = min_t(u32, c 75 drivers/scsi/snic/snic_res.c c->maxdatafieldsize)); c 77 drivers/scsi/snic/snic_res.c c->io_throttle_count = min_t(u32, c 81 drivers/scsi/snic/snic_res.c c->io_throttle_count)); c 83 drivers/scsi/snic/snic_res.c c->port_down_timeout = min_t(u32, c 85 drivers/scsi/snic/snic_res.c c->port_down_timeout); c 87 drivers/scsi/snic/snic_res.c c->port_down_io_retries = min_t(u32, c 89 drivers/scsi/snic/snic_res.c c->port_down_io_retries); c 91 drivers/scsi/snic/snic_res.c c->luns_per_tgt = min_t(u32, c 95 drivers/scsi/snic/snic_res.c c->luns_per_tgt)); c 97 drivers/scsi/snic/snic_res.c c->intr_timer = min_t(u32, VNIC_INTR_TIMER_MAX, c->intr_timer); c 99 drivers/scsi/snic/snic_res.c 
SNIC_INFO("vNIC resources wq %d\n", c->wq_enet_desc_count); c 101 drivers/scsi/snic/snic_res.c c->maxdatafieldsize, c 102 drivers/scsi/snic/snic_res.c c->intr_timer); c 105 drivers/scsi/snic/snic_res.c c->flags, c 106 drivers/scsi/snic/snic_res.c c->luns_per_tgt); c 108 drivers/scsi/snic/snic_res.c SNIC_INFO("vNIC io throttle count %d\n", c->io_throttle_count); c 110 drivers/scsi/snic/snic_res.c c->port_down_timeout, c 111 drivers/scsi/snic/snic_res.c c->port_down_io_retries); c 113 drivers/scsi/snic/snic_res.c SNIC_INFO("vNIC back end type = %d\n", c->xpt_type); c 114 drivers/scsi/snic/snic_res.c SNIC_INFO("vNIC hid = %d\n", c->hid); c 980 drivers/scsi/sym53c8xx_2/sym_glue.c int cnt, c; c 982 drivers/scsi/sym53c8xx_2/sym_glue.c for (cnt = len; cnt > 0 && (c = *ptr++) && isspace(c); cnt--); c 305 drivers/scsi/ufs/ufshci.h #define INT_AGGR_COUNTER_THLD_VAL(c) (((c) & 0x1F) << 8) c 24 drivers/sh/intc/internals.h #define INTC_REG(d, x, c) (d->reg[(x)] + ((d->smp[(x)] & 0xff) * c)) c 28 drivers/sh/intc/internals.h #define INTC_REG(d, x, c) (d->reg[(x)]) c 527 drivers/soc/fsl/qbman/bman.c const struct bm_portal_config *c) c 538 drivers/soc/fsl/qbman/bman.c p->addr.ce = c->addr_virt_ce; c 539 drivers/soc/fsl/qbman/bman.c p->addr.ce_be = c->addr_virt_ce; c 540 drivers/soc/fsl/qbman/bman.c p->addr.ci = c->addr_virt_ci; c 542 drivers/soc/fsl/qbman/bman.c dev_err(c->dev, "RCR initialisation failed\n"); c 546 drivers/soc/fsl/qbman/bman.c dev_err(c->dev, "MC initialisation failed\n"); c 560 drivers/soc/fsl/qbman/bman.c snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu); c 561 drivers/soc/fsl/qbman/bman.c if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) { c 562 drivers/soc/fsl/qbman/bman.c dev_err(c->dev, "request_irq() failed\n"); c 566 drivers/soc/fsl/qbman/bman.c if (dpaa_set_portal_irq_affinity(c->dev, c->irq, c->cpu)) c 572 drivers/soc/fsl/qbman/bman.c dev_err(c->dev, "RCR unclean\n"); c 576 drivers/soc/fsl/qbman/bman.c portal->config = c; c 585 drivers/soc/fsl/qbman/bman.c free_irq(c->irq, portal); c 594 drivers/soc/fsl/qbman/bman.c struct bman_portal *bman_create_affine_portal(const struct bm_portal_config *c) c 599 drivers/soc/fsl/qbman/bman.c portal = &per_cpu(bman_affine_portal, c->cpu); c 600 drivers/soc/fsl/qbman/bman.c err = bman_create_portal(portal, c); c 605 drivers/soc/fsl/qbman/bman.c cpumask_set_cpu(c->cpu, &affine_mask); c 1229 drivers/soc/fsl/qbman/qman.c const struct qm_portal_config *c, c 1249 drivers/soc/fsl/qbman/qman.c p->addr.ce = c->addr_virt_ce; c 1250 drivers/soc/fsl/qbman/qman.c p->addr.ce_be = c->addr_virt_ce; c 1251 drivers/soc/fsl/qbman/qman.c p->addr.ci = c->addr_virt_ci; c 1258 drivers/soc/fsl/qbman/qman.c dev_err(c->dev, "EQCR initialisation failed\n"); c 1261 drivers/soc/fsl/qbman/qman.c if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb, c 1263 drivers/soc/fsl/qbman/qman.c dev_err(c->dev, "DQRR initialisation failed\n"); c 1267 drivers/soc/fsl/qbman/qman.c dev_err(c->dev, "MR initialisation failed\n"); c 1271 drivers/soc/fsl/qbman/qman.c dev_err(c->dev, "MC initialisation failed\n"); c 1300 drivers/soc/fsl/qbman/qman.c snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu); c 1302 drivers/soc/fsl/qbman/qman.c if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) { c 1303 drivers/soc/fsl/qbman/qman.c dev_err(c->dev, "request_irq() failed\n"); c 1307 drivers/soc/fsl/qbman/qman.c if (dpaa_set_portal_irq_affinity(c->dev, c->irq, c->cpu)) c 1315 drivers/soc/fsl/qbman/qman.c dev_err(c->dev, "EQCR unclean\n"); c 1321 
drivers/soc/fsl/qbman/qman.c dev_dbg(c->dev, "DQRR unclean\n"); c 1328 drivers/soc/fsl/qbman/qman.c dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n", c 1333 drivers/soc/fsl/qbman/qman.c portal->config = c; c 1345 drivers/soc/fsl/qbman/qman.c free_irq(c->irq, portal); c 1360 drivers/soc/fsl/qbman/qman.c struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c, c 1366 drivers/soc/fsl/qbman/qman.c portal = &per_cpu(qman_affine_portal, c->cpu); c 1367 drivers/soc/fsl/qbman/qman.c err = qman_create_portal(portal, c, cgrs); c 1372 drivers/soc/fsl/qbman/qman.c cpumask_set_cpu(c->cpu, &affine_mask); c 1373 drivers/soc/fsl/qbman/qman.c affine_channels[c->cpu] = c->channel; c 1374 drivers/soc/fsl/qbman/qman.c affine_portals[c->cpu] = portal; c 1460 drivers/soc/fsl/qbman/qman.c struct qman_cgrs rr, c; c 1477 drivers/soc/fsl/qbman/qman.c qman_cgrs_xor(&c, &rr, &p->cgrs[1]); c 1482 drivers/soc/fsl/qbman/qman.c if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid)) c 108 drivers/soc/fsl/qbman/qman_priv.h static inline void qman_cgrs_init(struct qman_cgrs *c) c 110 drivers/soc/fsl/qbman/qman_priv.h memset(c, 0, sizeof(*c)); c 113 drivers/soc/fsl/qbman/qman_priv.h static inline void qman_cgrs_fill(struct qman_cgrs *c) c 115 drivers/soc/fsl/qbman/qman_priv.h memset(c, 0xff, sizeof(*c)); c 118 drivers/soc/fsl/qbman/qman_priv.h static inline int qman_cgrs_get(struct qman_cgrs *c, u8 cgr) c 120 drivers/soc/fsl/qbman/qman_priv.h return c->q.state[CGR_WORD(cgr)] & CGR_BIT(cgr); c 419 drivers/soc/mediatek/mtk-scpsys.c struct clk *c = clk[data->clk_id[j]]; c 421 drivers/soc/mediatek/mtk-scpsys.c if (IS_ERR(c)) { c 424 drivers/soc/mediatek/mtk-scpsys.c return ERR_CAST(c); c 427 drivers/soc/mediatek/mtk-scpsys.c scpd->clk[j] = c; c 45 drivers/soc/qcom/trace-rpmh.h const struct tcs_cmd *c), c 47 drivers/soc/qcom/trace-rpmh.h TP_ARGS(d, m, n, h, c), c 64 drivers/soc/qcom/trace-rpmh.h __entry->addr = c->addr; c 65 drivers/soc/qcom/trace-rpmh.h __entry->data = c->data; c 66 drivers/soc/qcom/trace-rpmh.h __entry->wait = c->wait; c 927 drivers/soundwire/cadence_master.c int c; c 931 drivers/soundwire/cadence_master.c c = sdw_find_col_index(n_cols) & CDNS_MCP_FRAME_SHAPE_COL_MASK; c 933 drivers/soundwire/cadence_master.c val = (r << CDNS_MCP_FRAME_SHAPE_ROW_OFFSET) | c; c 259 drivers/spi/spi-fsl-qspi.c struct completion c; c 338 drivers/spi/spi-fsl-qspi.c complete(&q->c); c 598 drivers/spi/spi-fsl-qspi.c init_completion(&q->c); c 609 drivers/spi/spi-fsl-qspi.c if (!wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000))) c 60 drivers/spi/spi-mxs.c struct completion c; c 150 drivers/spi/spi-mxs.c complete(&spi->c); c 189 drivers/spi/spi-mxs.c reinit_completion(&spi->c); c 277 drivers/spi/spi-mxs.c if (!wait_for_completion_timeout(&spi->c, c 586 drivers/spi/spi-mxs.c init_completion(&spi->c); c 334 drivers/spi/spi-nxp-fspi.c struct completion c; c 374 drivers/spi/spi-nxp-fspi.c complete(&f->c); c 448 drivers/spi/spi-nxp-fspi.c u32 timeout_us, bool c) c 455 drivers/spi/spi-nxp-fspi.c if (c) c 760 drivers/spi/spi-nxp-fspi.c init_completion(&f->c); c 777 drivers/spi/spi-nxp-fspi.c if (!wait_for_completion_timeout(&f->c, msecs_to_jiffies(1000))) c 182 drivers/spi/spi-omap-100k.c unsigned int count, c; c 186 drivers/spi/spi-omap-100k.c c = count; c 196 drivers/spi/spi-omap-100k.c c -= 1; c 201 drivers/spi/spi-omap-100k.c } while (c); c 209 drivers/spi/spi-omap-100k.c c -= 2; c 214 drivers/spi/spi-omap-100k.c } while (c); c 222 drivers/spi/spi-omap-100k.c c -= 4; c 227 drivers/spi/spi-omap-100k.c } 
while (c); c 229 drivers/spi/spi-omap-100k.c return count - c; c 144 drivers/spi/spi-omap-uwire.c int c = 0; c 157 drivers/spi/spi-omap-uwire.c c++; c 158 drivers/spi/spi-omap-uwire.c if (might_not_catch && c > 64) c 693 drivers/spi/spi-omap2-mcspi.c unsigned int count, c; c 702 drivers/spi/spi-omap2-mcspi.c c = count; c 713 drivers/spi/spi-omap2-mcspi.c if (c < (word_len>>3)) c 724 drivers/spi/spi-omap2-mcspi.c c -= 1; c 742 drivers/spi/spi-omap2-mcspi.c if (c == 1 && tx == NULL && c 754 drivers/spi/spi-omap2-mcspi.c c = 0; c 755 drivers/spi/spi-omap2-mcspi.c } else if (c == 0 && tx == NULL) { c 763 drivers/spi/spi-omap2-mcspi.c } while (c); c 771 drivers/spi/spi-omap2-mcspi.c c -= 2; c 789 drivers/spi/spi-omap2-mcspi.c if (c == 2 && tx == NULL && c 801 drivers/spi/spi-omap2-mcspi.c c = 0; c 802 drivers/spi/spi-omap2-mcspi.c } else if (c == 0 && tx == NULL) { c 810 drivers/spi/spi-omap2-mcspi.c } while (c >= 2); c 818 drivers/spi/spi-omap2-mcspi.c c -= 4; c 836 drivers/spi/spi-omap2-mcspi.c if (c == 4 && tx == NULL && c 848 drivers/spi/spi-omap2-mcspi.c c = 0; c 849 drivers/spi/spi-omap2-mcspi.c } else if (c == 0 && tx == NULL) { c 857 drivers/spi/spi-omap2-mcspi.c } while (c >= 4); c 877 drivers/spi/spi-omap2-mcspi.c return count - c; c 40 drivers/spi/spi-pxa2xx-pci.c int (*setup)(struct pci_dev *pdev, struct pxa_spi_info *c); c 74 drivers/spi/spi-pxa2xx-pci.c static int lpss_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c) c 78 drivers/spi/spi-pxa2xx-pci.c c->num_chipselect = 1; c 79 drivers/spi/spi-pxa2xx-pci.c c->max_clk_rate = 50000000; c 83 drivers/spi/spi-pxa2xx-pci.c if (c->tx_param) { c 84 drivers/spi/spi-pxa2xx-pci.c struct dw_dma_slave *slave = c->tx_param; c 91 drivers/spi/spi-pxa2xx-pci.c if (c->rx_param) { c 92 drivers/spi/spi-pxa2xx-pci.c struct dw_dma_slave *slave = c->rx_param; c 99 drivers/spi/spi-pxa2xx-pci.c c->dma_filter = lpss_dma_filter; c 103 drivers/spi/spi-pxa2xx-pci.c static int mrfld_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c) c 110 drivers/spi/spi-pxa2xx-pci.c c->port_id = 3; c 111 drivers/spi/spi-pxa2xx-pci.c c->num_chipselect = 1; c 112 drivers/spi/spi-pxa2xx-pci.c c->tx_param = &mrfld3_tx_param; c 113 drivers/spi/spi-pxa2xx-pci.c c->rx_param = &mrfld3_rx_param; c 116 drivers/spi/spi-pxa2xx-pci.c c->port_id = 5; c 117 drivers/spi/spi-pxa2xx-pci.c c->num_chipselect = 4; c 118 drivers/spi/spi-pxa2xx-pci.c c->tx_param = &mrfld5_tx_param; c 119 drivers/spi/spi-pxa2xx-pci.c c->rx_param = &mrfld5_rx_param; c 122 drivers/spi/spi-pxa2xx-pci.c c->port_id = 6; c 123 drivers/spi/spi-pxa2xx-pci.c c->num_chipselect = 1; c 124 drivers/spi/spi-pxa2xx-pci.c c->tx_param = &mrfld6_tx_param; c 125 drivers/spi/spi-pxa2xx-pci.c c->rx_param = &mrfld6_rx_param; c 131 drivers/spi/spi-pxa2xx-pci.c tx = c->tx_param; c 134 drivers/spi/spi-pxa2xx-pci.c rx = c->rx_param; c 137 drivers/spi/spi-pxa2xx-pci.c c->dma_filter = lpss_dma_filter; c 138 drivers/spi/spi-pxa2xx-pci.c c->dma_burst_size = 8; c 205 drivers/spi/spi-pxa2xx-pci.c struct pxa_spi_info *c; c 216 drivers/spi/spi-pxa2xx-pci.c c = &spi_info_configs[ent->driver_data]; c 217 drivers/spi/spi-pxa2xx-pci.c if (c->setup) { c 218 drivers/spi/spi-pxa2xx-pci.c ret = c->setup(dev, c); c 224 drivers/spi/spi-pxa2xx-pci.c spi_pdata.num_chipselect = (c->num_chipselect > 0) ? 
c->num_chipselect : dev->devfn; c 225 drivers/spi/spi-pxa2xx-pci.c spi_pdata.dma_filter = c->dma_filter; c 226 drivers/spi/spi-pxa2xx-pci.c spi_pdata.tx_param = c->tx_param; c 227 drivers/spi/spi-pxa2xx-pci.c spi_pdata.rx_param = c->rx_param; c 228 drivers/spi/spi-pxa2xx-pci.c spi_pdata.enable_dma = c->rx_param && c->tx_param; c 229 drivers/spi/spi-pxa2xx-pci.c spi_pdata.dma_burst_size = c->dma_burst_size ? c->dma_burst_size : 1; c 234 drivers/spi/spi-pxa2xx-pci.c ssp->port_id = (c->port_id >= 0) ? c->port_id : dev->devfn; c 235 drivers/spi/spi-pxa2xx-pci.c ssp->type = c->type; c 246 drivers/spi/spi-pxa2xx-pci.c c->max_clk_rate); c 489 drivers/spi/spi-sprd.c struct dma_slave_config *c, c 498 drivers/spi/spi-sprd.c ret = dmaengine_slave_config(dma_chan, c); c 86 drivers/spi/spi-txx9.c static u32 txx9spi_rd(struct txx9spi *c, int reg) c 88 drivers/spi/spi-txx9.c return __raw_readl(c->membase + reg); c 90 drivers/spi/spi-txx9.c static void txx9spi_wr(struct txx9spi *c, u32 val, int reg) c 92 drivers/spi/spi-txx9.c __raw_writel(val, c->membase + reg); c 95 drivers/spi/spi-txx9.c static void txx9spi_cs_func(struct spi_device *spi, struct txx9spi *c, c 102 drivers/spi/spi-txx9.c if (c->last_chipselect >= 0) c 103 drivers/spi/spi-txx9.c gpio_set_value(c->last_chipselect, c 104 drivers/spi/spi-txx9.c !c->last_chipselect_val); c 105 drivers/spi/spi-txx9.c c->last_chipselect = spi->chip_select; c 106 drivers/spi/spi-txx9.c c->last_chipselect_val = val; c 108 drivers/spi/spi-txx9.c c->last_chipselect = -1; c 117 drivers/spi/spi-txx9.c struct txx9spi *c = spi_master_get_devdata(spi->master); c 129 drivers/spi/spi-txx9.c spin_lock(&c->lock); c 130 drivers/spi/spi-txx9.c txx9spi_cs_func(spi, c, 0, (NSEC_PER_SEC / 2) / spi->max_speed_hz); c 131 drivers/spi/spi-txx9.c spin_unlock(&c->lock); c 138 drivers/spi/spi-txx9.c struct txx9spi *c = dev_id; c 141 drivers/spi/spi-txx9.c txx9spi_wr(c, txx9spi_rd(c, TXx9_SPCR0) & ~TXx9_SPCR0_RBSIE, c 143 drivers/spi/spi-txx9.c wake_up(&c->waitq); c 147 drivers/spi/spi-txx9.c static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m) c 161 drivers/spi/spi-txx9.c mcr = txx9spi_rd(c, TXx9_SPMCR); c 170 drivers/spi/spi-txx9.c txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR); c 171 drivers/spi/spi-txx9.c txx9spi_wr(c, TXx9_SPCR0_SBOS c 190 drivers/spi/spi-txx9.c int n = DIV_ROUND_UP(c->baseclk, speed_hz) - 1; c 194 drivers/spi/spi-txx9.c txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, c 196 drivers/spi/spi-txx9.c txx9spi_wr(c, (n << 8) | bits_per_word, TXx9_SPCR1); c 198 drivers/spi/spi-txx9.c txx9spi_wr(c, mcr | TXx9_SPMCR_ACTIVE, TXx9_SPMCR); c 205 drivers/spi/spi-txx9.c txx9spi_cs_func(spi, c, 1, cs_delay); c 215 drivers/spi/spi-txx9.c while (!(txx9spi_rd(c, TXx9_SPSR) & TXx9_SPSR_SIDLE)) c 217 drivers/spi/spi-txx9.c cr0 = txx9spi_rd(c, TXx9_SPCR0); c 222 drivers/spi/spi-txx9.c txx9spi_wr(c, cr0, TXx9_SPCR0); c 229 drivers/spi/spi-txx9.c txx9spi_wr(c, data, TXx9_SPDR); c 232 drivers/spi/spi-txx9.c txx9spi_wr(c, 0, TXx9_SPDR); c 235 drivers/spi/spi-txx9.c wait_event(c->waitq, c 236 drivers/spi/spi-txx9.c txx9spi_rd(c, TXx9_SPSR) & TXx9_SPSR_RBSI); c 239 drivers/spi/spi-txx9.c data = txx9spi_rd(c, TXx9_SPDR); c 261 drivers/spi/spi-txx9.c txx9spi_cs_func(spi, c, 0, cs_delay); c 274 drivers/spi/spi-txx9.c txx9spi_cs_func(spi, c, 0, cs_delay); c 277 drivers/spi/spi-txx9.c txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR); c 282 drivers/spi/spi-txx9.c struct txx9spi *c = container_of(work, struct txx9spi, work); c 285 
drivers/spi/spi-txx9.c spin_lock_irqsave(&c->lock, flags); c 286 drivers/spi/spi-txx9.c while (!list_empty(&c->queue)) { c 289 drivers/spi/spi-txx9.c m = container_of(c->queue.next, struct spi_message, queue); c 291 drivers/spi/spi-txx9.c spin_unlock_irqrestore(&c->lock, flags); c 293 drivers/spi/spi-txx9.c txx9spi_work_one(c, m); c 295 drivers/spi/spi-txx9.c spin_lock_irqsave(&c->lock, flags); c 297 drivers/spi/spi-txx9.c spin_unlock_irqrestore(&c->lock, flags); c 303 drivers/spi/spi-txx9.c struct txx9spi *c = spi_master_get_devdata(master); c 315 drivers/spi/spi-txx9.c spin_lock_irqsave(&c->lock, flags); c 316 drivers/spi/spi-txx9.c list_add_tail(&m->queue, &c->queue); c 317 drivers/spi/spi-txx9.c schedule_work(&c->work); c 318 drivers/spi/spi-txx9.c spin_unlock_irqrestore(&c->lock, flags); c 326 drivers/spi/spi-txx9.c struct txx9spi *c; c 332 drivers/spi/spi-txx9.c master = spi_alloc_master(&dev->dev, sizeof(*c)); c 335 drivers/spi/spi-txx9.c c = spi_master_get_devdata(master); c 338 drivers/spi/spi-txx9.c INIT_WORK(&c->work, txx9spi_work); c 339 drivers/spi/spi-txx9.c spin_lock_init(&c->lock); c 340 drivers/spi/spi-txx9.c INIT_LIST_HEAD(&c->queue); c 341 drivers/spi/spi-txx9.c init_waitqueue_head(&c->waitq); c 343 drivers/spi/spi-txx9.c c->clk = devm_clk_get(&dev->dev, "spi-baseclk"); c 344 drivers/spi/spi-txx9.c if (IS_ERR(c->clk)) { c 345 drivers/spi/spi-txx9.c ret = PTR_ERR(c->clk); c 346 drivers/spi/spi-txx9.c c->clk = NULL; c 349 drivers/spi/spi-txx9.c ret = clk_prepare_enable(c->clk); c 351 drivers/spi/spi-txx9.c c->clk = NULL; c 354 drivers/spi/spi-txx9.c c->baseclk = clk_get_rate(c->clk); c 355 drivers/spi/spi-txx9.c master->min_speed_hz = DIV_ROUND_UP(c->baseclk, SPI_MAX_DIVIDER + 1); c 356 drivers/spi/spi-txx9.c master->max_speed_hz = c->baseclk / (SPI_MIN_DIVIDER + 1); c 359 drivers/spi/spi-txx9.c c->membase = devm_ioremap_resource(&dev->dev, res); c 360 drivers/spi/spi-txx9.c if (IS_ERR(c->membase)) c 364 drivers/spi/spi-txx9.c mcr = txx9spi_rd(c, TXx9_SPMCR); c 366 drivers/spi/spi-txx9.c txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR); c 372 drivers/spi/spi-txx9.c "spi_txx9", c); c 376 drivers/spi/spi-txx9.c c->last_chipselect = -1; c 380 drivers/spi/spi-txx9.c (c->baseclk + 500000) / 1000000); c 398 drivers/spi/spi-txx9.c clk_disable_unprepare(c->clk); c 406 drivers/spi/spi-txx9.c struct txx9spi *c = spi_master_get_devdata(master); c 408 drivers/spi/spi-txx9.c flush_work(&c->work); c 409 drivers/spi/spi-txx9.c clk_disable_unprepare(c->clk); c 39 drivers/ssb/sprom.c char c, tmp[5] = { 0 }; c 45 drivers/ssb/sprom.c c = dump[len - 1]; c 46 drivers/ssb/sprom.c if (!isspace(c) && c != '\0') c 21 drivers/staging/comedi/comedidev.h #define COMEDI_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c)) c 69 drivers/staging/comedi/drivers/adv_pci1710.c #define PCI171X_DAREF(c, r) (((r) & 0x3) << ((c) * 2)) c 70 drivers/staging/comedi/drivers/adv_pci1710.c #define PCI171X_DAREF_MASK(c) PCI171X_DAREF((c), 0x3) c 54 drivers/staging/comedi/drivers/adv_pci1720.c #define PCI1720_AO_RANGE(c, r) (((r) & 0x3) << ((c) * 2)) c 55 drivers/staging/comedi/drivers/adv_pci1720.c #define PCI1720_AO_RANGE_MASK(c) PCI1720_AO_RANGE((c), 0x3) c 142 drivers/staging/comedi/drivers/cb_pcidas.c #define PCIDAS_AO_CHAN_EN(c) BIT(5 + ((c) & 0x1)) c 145 drivers/staging/comedi/drivers/cb_pcidas.c #define PCIDAS_AO_RANGE(c, r) (((r) & 0x3) << (8 + 2 * ((c) & 0x1))) c 146 drivers/staging/comedi/drivers/cb_pcidas.c #define PCIDAS_AO_RANGE_MASK(c) PCIDAS_AO_RANGE((c), 0x3) c 926 
drivers/staging/comedi/drivers/me4000.c int c = 0; c 942 drivers/staging/comedi/drivers/me4000.c c = ME4000_AI_FIFO_COUNT; c 946 drivers/staging/comedi/drivers/me4000.c c = ME4000_AI_FIFO_COUNT / 2; c 950 drivers/staging/comedi/drivers/me4000.c c = 0; c 953 drivers/staging/comedi/drivers/me4000.c for (i = 0; i < c; i++) { c 1288 drivers/staging/comedi/drivers/usbdux.c char c = *buf; c 1290 drivers/staging/comedi/drivers/usbdux.c c &= ~pwm_mask; c 1292 drivers/staging/comedi/drivers/usbdux.c c |= pwm_mask; c 1294 drivers/staging/comedi/drivers/usbdux.c c &= ~sgn_mask; c 1296 drivers/staging/comedi/drivers/usbdux.c c |= sgn_mask; c 1297 drivers/staging/comedi/drivers/usbdux.c *buf++ = c; c 1131 drivers/staging/comedi/drivers/usbduxsigma.c char c = *buf; c 1133 drivers/staging/comedi/drivers/usbduxsigma.c c &= ~pwm_mask; c 1135 drivers/staging/comedi/drivers/usbduxsigma.c c |= pwm_mask; c 1137 drivers/staging/comedi/drivers/usbduxsigma.c c &= ~sgn_mask; c 1139 drivers/staging/comedi/drivers/usbduxsigma.c c |= sgn_mask; c 1140 drivers/staging/comedi/drivers/usbduxsigma.c *buf++ = c; c 2892 drivers/staging/exfat/exfat_core.c u8 *c = (u8 *)data; c 2894 drivers/staging/exfat/exfat_core.c for (i = 0; i < len; i++, c++) c 2895 drivers/staging/exfat/exfat_core.c chksum = (((chksum & 1) << 7) | ((chksum & 0xFE) >> 1)) + *c; c 2903 drivers/staging/exfat/exfat_core.c u8 *c = (u8 *)data; c 2907 drivers/staging/exfat/exfat_core.c for (i = 0; i < len; i++, c++) { c 2911 drivers/staging/exfat/exfat_core.c ((chksum & 0xFFFE) >> 1)) + (u16)*c; c 2916 drivers/staging/exfat/exfat_core.c for (i = 0; i < len; i++, c++) c 2918 drivers/staging/exfat/exfat_core.c ((chksum & 0xFFFE) >> 1)) + (u16)*c; c 2927 drivers/staging/exfat/exfat_core.c u8 *c = (u8 *)data; c 2931 drivers/staging/exfat/exfat_core.c for (i = 0; i < len; i++, c++) { c 2935 drivers/staging/exfat/exfat_core.c ((chksum & 0xFFFFFFFE) >> 1)) + (u32)*c; c 2940 drivers/staging/exfat/exfat_core.c for (i = 0; i < len; i++, c++) c 2942 drivers/staging/exfat/exfat_core.c ((chksum & 0xFFFFFFFE) >> 1)) + (u32)*c; c 176 drivers/staging/fbtft/fb_st7789v.c int c; /* curve index offset */ c 201 drivers/staging/fbtft/fb_st7789v.c c = i * par->gamma.num_values; c 203 drivers/staging/fbtft/fb_st7789v.c curves[c + j] &= gamma_par_mask[j]; c 205 drivers/staging/fbtft/fb_st7789v.c curves[c + 0], curves[c + 1], curves[c + 2], c 206 drivers/staging/fbtft/fb_st7789v.c curves[c + 3], curves[c + 4], curves[c + 5], c 207 drivers/staging/fbtft/fb_st7789v.c curves[c + 6], curves[c + 7], curves[c + 8], c 208 drivers/staging/fbtft/fb_st7789v.c curves[c + 9], curves[c + 10], curves[c + 11], c 209 drivers/staging/fbtft/fb_st7789v.c curves[c + 12], curves[c + 13]); c 93 drivers/staging/fbtft/fb_watterott.c static inline int rgb565_to_rgb332(u16 c) c 95 drivers/staging/fbtft/fb_watterott.c return ((c & 0xE000) >> 8) | ((c & 000700) >> 6) | ((c & 0x0018) >> 3); c 108 drivers/staging/fwserial/fwserial.c static inline void debug_short_write(struct fwtty_port *port, int c, int n) c 112 drivers/staging/fwserial/fwserial.c if (n < c) { c 117 drivers/staging/fwserial/fwserial.c avail, c, n); c 121 drivers/staging/fwserial/fwserial.c #define debug_short_write(port, c, n) c 504 drivers/staging/fwserial/fwserial.c int n, t, c, brk = 0; c 514 drivers/staging/fwserial/fwserial.c c = tty_insert_flip_string_fixed_flag(&port->port, buf, c 516 drivers/staging/fwserial/fwserial.c n -= c; c 517 drivers/staging/fwserial/fwserial.c brk += c; c 518 drivers/staging/fwserial/fwserial.c if (c < t) c 530 
drivers/staging/fwserial/fwserial.c int c, n = len; c 566 drivers/staging/fwserial/fwserial.c c = tty_insert_flip_string_fixed_flag(&port->port, data, TTY_NORMAL, n); c 567 drivers/staging/fwserial/fwserial.c if (c > 0) c 569 drivers/staging/fwserial/fwserial.c n -= c; c 1093 drivers/staging/fwserial/fwserial.c static int fwtty_write(struct tty_struct *tty, const unsigned char *buf, int c) c 1098 drivers/staging/fwserial/fwserial.c fwtty_dbg(port, "%d\n", c); c 1099 drivers/staging/fwserial/fwserial.c fwtty_profile_data(port->stats.writes, c); c 1102 drivers/staging/fwserial/fwserial.c n = dma_fifo_in(&port->tx_fifo, buf, c); c 1111 drivers/staging/fwserial/fwserial.c debug_short_write(port, c, n); c 99 drivers/staging/iio/adc/ad7280a.c #define AD7280A_CALC_VOLTAGE_CHAN_NUM(d, c) (((d) * AD7280A_CELLS_PER_DEV) + \ c 100 drivers/staging/iio/adc/ad7280a.c (c)) c 101 drivers/staging/iio/adc/ad7280a.c #define AD7280A_CALC_TEMP_CHAN_NUM(d, c) (((d) * AD7280A_CELLS_PER_DEV) + \ c 102 drivers/staging/iio/adc/ad7280a.c (c) - AD7280A_CELLS_PER_DEV) c 23 drivers/staging/isdn/gigaset/asyncdata.c static inline int muststuff(unsigned char c) c 25 drivers/staging/isdn/gigaset/asyncdata.c if (c < PPP_TRANS) return 1; c 26 drivers/staging/isdn/gigaset/asyncdata.c if (c == PPP_FLAG) return 1; c 27 drivers/staging/isdn/gigaset/asyncdata.c if (c == PPP_ESCAPE) return 1; c 52 drivers/staging/isdn/gigaset/asyncdata.c unsigned char c; c 55 drivers/staging/isdn/gigaset/asyncdata.c c = *src++; c 58 drivers/staging/isdn/gigaset/asyncdata.c switch (c) { c 80 drivers/staging/isdn/gigaset/asyncdata.c cs->respdata[0] = c; c 104 drivers/staging/isdn/gigaset/asyncdata.c cs->respdata[cbytes] = c; c 144 drivers/staging/isdn/gigaset/asyncdata.c unsigned char c; c 154 drivers/staging/isdn/gigaset/asyncdata.c c = *src++; c 156 drivers/staging/isdn/gigaset/asyncdata.c if (c == DLE_FLAG) { c 167 drivers/staging/isdn/gigaset/asyncdata.c if (c == PPP_ESCAPE) { c 175 drivers/staging/isdn/gigaset/asyncdata.c c = *src++; c 177 drivers/staging/isdn/gigaset/asyncdata.c if (c == DLE_FLAG) { c 189 drivers/staging/isdn/gigaset/asyncdata.c c ^= PPP_TRANS; c 191 drivers/staging/isdn/gigaset/asyncdata.c if (!muststuff(c)) c 192 drivers/staging/isdn/gigaset/asyncdata.c gig_dbg(DEBUG_HDLC, "byte stuffed: 0x%02x", c); c 194 drivers/staging/isdn/gigaset/asyncdata.c } else if (c == PPP_FLAG) { c 242 drivers/staging/isdn/gigaset/asyncdata.c } else if (muststuff(c)) { c 244 drivers/staging/isdn/gigaset/asyncdata.c gig_dbg(DEBUG_HDLC, "not byte stuffed: 0x%02x", c); c 264 drivers/staging/isdn/gigaset/asyncdata.c __skb_put_u8(skb, c); c 265 drivers/staging/isdn/gigaset/asyncdata.c fcs = crc_ccitt_byte(fcs, c); c 290 drivers/staging/isdn/gigaset/asyncdata.c unsigned char c; c 299 drivers/staging/isdn/gigaset/asyncdata.c c = *src++; c 302 drivers/staging/isdn/gigaset/asyncdata.c if (c == DLE_FLAG) { c 315 drivers/staging/isdn/gigaset/asyncdata.c __skb_put_u8(skb, bitrev8(c)); c 459 drivers/staging/isdn/gigaset/asyncdata.c unsigned char c; c 504 drivers/staging/isdn/gigaset/asyncdata.c c = (fcs & 0x00ff); /* least significant byte first */ c 505 drivers/staging/isdn/gigaset/asyncdata.c if (muststuff(c)) { c 507 drivers/staging/isdn/gigaset/asyncdata.c c ^= PPP_TRANS; c 509 drivers/staging/isdn/gigaset/asyncdata.c skb_put_u8(hdlc_skb, c); c 511 drivers/staging/isdn/gigaset/asyncdata.c c = ((fcs >> 8) & 0x00ff); c 512 drivers/staging/isdn/gigaset/asyncdata.c if (muststuff(c)) { c 514 drivers/staging/isdn/gigaset/asyncdata.c c ^= PPP_TRANS; c 516 
drivers/staging/isdn/gigaset/asyncdata.c skb_put_u8(hdlc_skb, c); c 536 drivers/staging/isdn/gigaset/asyncdata.c unsigned char c; c 559 drivers/staging/isdn/gigaset/asyncdata.c c = bitrev8(*cp++); c 560 drivers/staging/isdn/gigaset/asyncdata.c if (c == DLE_FLAG) c 561 drivers/staging/isdn/gigaset/asyncdata.c skb_put_u8(iraw_skb, c); c 562 drivers/staging/isdn/gigaset/asyncdata.c skb_put_u8(iraw_skb, c); c 52 drivers/staging/isdn/gigaset/common.c unsigned char c; c 58 drivers/staging/isdn/gigaset/common.c c = *buf++; c 59 drivers/staging/isdn/gigaset/common.c if (c == '~' || c == '^' || c == '\\') { c 64 drivers/staging/isdn/gigaset/common.c if (c & 0x80) { c 68 drivers/staging/isdn/gigaset/common.c c ^= 0x80; c 70 drivers/staging/isdn/gigaset/common.c if (c < 0x20 || c == 0x7f) { c 74 drivers/staging/isdn/gigaset/common.c c ^= 0x40; c 78 drivers/staging/isdn/gigaset/common.c *out++ = c; c 215 drivers/staging/isdn/gigaset/isocdata.c unsigned char c; c 228 drivers/staging/isdn/gigaset/isocdata.c c = *bytes++; c 231 drivers/staging/isdn/gigaset/isocdata.c dbgline[i++] = hex_asc_hi(c); c 232 drivers/staging/isdn/gigaset/isocdata.c dbgline[i++] = hex_asc_lo(c); c 400 drivers/staging/isdn/gigaset/isocdata.c unsigned char c; c 415 drivers/staging/isdn/gigaset/isocdata.c c = *in++; c 416 drivers/staging/isdn/gigaset/isocdata.c ones = hdlc_bitstuff_byte(iwb, c, ones); c 417 drivers/staging/isdn/gigaset/isocdata.c fcs = crc_ccitt_byte(fcs, c); c 451 drivers/staging/isdn/gigaset/isocdata.c unsigned char c; c 467 drivers/staging/isdn/gigaset/isocdata.c c = bitrev8(*in++); c 468 drivers/staging/isdn/gigaset/isocdata.c iwb->data[write++] = c; c 472 drivers/staging/isdn/gigaset/isocdata.c iwb->idle = c; c 498 drivers/staging/isdn/gigaset/isocdata.c static inline void hdlc_putbyte(unsigned char c, struct bc_state *bcs) c 500 drivers/staging/isdn/gigaset/isocdata.c bcs->rx_fcs = crc_ccitt_byte(bcs->rx_fcs, c); c 511 drivers/staging/isdn/gigaset/isocdata.c __skb_put_u8(bcs->rx_skb, c); c 656 drivers/staging/isdn/gigaset/isocdata.c unsigned char c = *src++; c 657 drivers/staging/isdn/gigaset/isocdata.c unsigned char tabentry = bitcounts[c]; c 664 drivers/staging/isdn/gigaset/isocdata.c if (c == PPP_FLAG) { c 672 drivers/staging/isdn/gigaset/isocdata.c inbyte = c >> (lead1 + 1); c 680 drivers/staging/isdn/gigaset/isocdata.c switch (c) { c 690 drivers/staging/isdn/gigaset/isocdata.c inbyte |= c << inbits; c 719 drivers/staging/isdn/gigaset/isocdata.c if (c == PPP_FLAG) { c 726 drivers/staging/isdn/gigaset/isocdata.c inbyte = c >> (lead1 + 1); c 734 drivers/staging/isdn/gigaset/isocdata.c switch (c) { c 748 drivers/staging/isdn/gigaset/isocdata.c if (c == PPP_FLAG) { c 770 drivers/staging/isdn/gigaset/isocdata.c c = (c & mask) | ((c & ~mask) >> 1); c 771 drivers/staging/isdn/gigaset/isocdata.c inbyte |= c << inbits; c 778 drivers/staging/isdn/gigaset/isocdata.c switch (c) { c 780 drivers/staging/isdn/gigaset/isocdata.c c = 0x7e; c 783 drivers/staging/isdn/gigaset/isocdata.c inbyte |= c << inbits; c 789 drivers/staging/isdn/gigaset/isocdata.c switch (c) { c 791 drivers/staging/isdn/gigaset/isocdata.c c = 0x3f; c 794 drivers/staging/isdn/gigaset/isocdata.c c = 0x3f; c 797 drivers/staging/isdn/gigaset/isocdata.c c = 0x1f; c 800 drivers/staging/isdn/gigaset/isocdata.c c = 0x3e; c 803 drivers/staging/isdn/gigaset/isocdata.c inbyte |= c << inbits; c 895 drivers/staging/isdn/gigaset/isocdata.c unsigned char c; c 898 drivers/staging/isdn/gigaset/isocdata.c c = *src++; c 899 drivers/staging/isdn/gigaset/isocdata.c 
switch (c) { c 921 drivers/staging/isdn/gigaset/isocdata.c cs->respdata[0] = c; c 926 drivers/staging/isdn/gigaset/isocdata.c cs->respdata[cbytes] = c; c 210 drivers/staging/kpc2000/kpc2000_spi.c unsigned int c = count; c 219 drivers/staging/kpc2000/kpc2000_spi.c for (i = 0 ; i < c ; i++) { c 232 drivers/staging/kpc2000/kpc2000_spi.c for (i = 0 ; i < c ; i++) { c 25 drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c struct kpc_dma_device *c; c 28 drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c list_for_each_entry(c, &kpc_dma_list, list) { c 29 drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c if (c->pldev->id == minor) { c 33 drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c c = NULL; // not-found case c 36 drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c return c; c 187 drivers/staging/ks7010/ks_wlan_net.c int c = 0; c 189 drivers/staging/ks7010/ks_wlan_net.c while ((c < 14) && (f != frequency_list[c])) c 190 drivers/staging/ks7010/ks_wlan_net.c c++; c 193 drivers/staging/ks7010/ks_wlan_net.c fwrq->freq.m = c + 1; c 2198 drivers/staging/media/ipu3/include/intel-ipu3.h __s32 c:12; c 2570 drivers/staging/media/ipu3/include/intel-ipu3.h __u16 c[IPU3_UAPI_ISP_VEC_ELEMS]; c 2266 drivers/staging/media/ipu3/ipu3-css-params.c acc->tcc.macc_table.entries[i].c = 0; c 2697 drivers/staging/media/ipu3/ipu3-css-params.c const enum imgu_abi_param_class c = IMGU_ABI_PARAM_CLASS_PARAM; c 2700 drivers/staging/media/ipu3/ipu3-css-params.c new_setting = imgu_css_fw_pipeline_params(css, pipe, c, m, par, c 2710 drivers/staging/media/ipu3/ipu3-css-params.c old_setting = imgu_css_fw_pipeline_params(css, pipe, c, m, par, c 2738 drivers/staging/media/ipu3/ipu3-css-params.c const enum imgu_abi_param_class c = IMGU_ABI_PARAM_CLASS_PARAM; c 2744 drivers/staging/media/ipu3/ipu3-css-params.c memset(vmem0, 0, bi->info.isp.sp.mem_initializers.params[c][m].size); c 2794 drivers/staging/media/ipu3/ipu3-css-params.c xnr_vmem->c[i] = imgu_css_xnr3_vmem_defaults.c c 2819 drivers/staging/media/ipu3/ipu3-css-params.c const enum imgu_abi_param_class c = IMGU_ABI_PARAM_CLASS_PARAM; c 2824 drivers/staging/media/ipu3/ipu3-css-params.c memset(dmem0, 0, bi->info.isp.sp.mem_initializers.params[c][m].size); c 9308 drivers/staging/media/ipu3/ipu3-tables.c .c = { c 37 drivers/staging/media/ipu3/ipu3-tables.h s16 c[IMGU_XNR3_VMEM_LUT_LEN]; c 51 drivers/staging/most/cdev/cdev.c static inline bool ch_has_mbo(struct comp_channel *c) c 53 drivers/staging/most/cdev/cdev.c return channel_has_mbo(c->iface, c->channel_id, &comp.cc) > 0; c 56 drivers/staging/most/cdev/cdev.c static inline struct mbo *ch_get_mbo(struct comp_channel *c, struct mbo **mbo) c 58 drivers/staging/most/cdev/cdev.c if (!kfifo_peek(&c->fifo, mbo)) { c 59 drivers/staging/most/cdev/cdev.c *mbo = most_get_mbo(c->iface, c->channel_id, &comp.cc); c 61 drivers/staging/most/cdev/cdev.c kfifo_in(&c->fifo, mbo, 1); c 68 drivers/staging/most/cdev/cdev.c struct comp_channel *c, *tmp; c 73 drivers/staging/most/cdev/cdev.c list_for_each_entry_safe(c, tmp, &channel_list, list) { c 74 drivers/staging/most/cdev/cdev.c if ((c->iface == iface) && (c->channel_id == id)) { c 82 drivers/staging/most/cdev/cdev.c return c; c 85 drivers/staging/most/cdev/cdev.c static void stop_channel(struct comp_channel *c) c 89 drivers/staging/most/cdev/cdev.c while (kfifo_out((struct kfifo *)&c->fifo, &mbo, 1)) c 91 drivers/staging/most/cdev/cdev.c most_stop_channel(c->iface, c->channel_id, &comp.cc); c 94 drivers/staging/most/cdev/cdev.c static void destroy_cdev(struct comp_channel *c) c 98 
drivers/staging/most/cdev/cdev.c device_destroy(comp.class, c->devno); c 99 drivers/staging/most/cdev/cdev.c cdev_del(&c->cdev); c 101 drivers/staging/most/cdev/cdev.c list_del(&c->list); c 105 drivers/staging/most/cdev/cdev.c static void destroy_channel(struct comp_channel *c) c 107 drivers/staging/most/cdev/cdev.c ida_simple_remove(&comp.minor_id, MINOR(c->devno)); c 108 drivers/staging/most/cdev/cdev.c kfifo_free(&c->fifo); c 109 drivers/staging/most/cdev/cdev.c kfree(c); c 122 drivers/staging/most/cdev/cdev.c struct comp_channel *c; c 125 drivers/staging/most/cdev/cdev.c c = to_channel(inode->i_cdev); c 126 drivers/staging/most/cdev/cdev.c filp->private_data = c; c 128 drivers/staging/most/cdev/cdev.c if (((c->cfg->direction == MOST_CH_RX) && c 130 drivers/staging/most/cdev/cdev.c ((c->cfg->direction == MOST_CH_TX) && c 136 drivers/staging/most/cdev/cdev.c mutex_lock(&c->io_mutex); c 137 drivers/staging/most/cdev/cdev.c if (!c->dev) { c 139 drivers/staging/most/cdev/cdev.c mutex_unlock(&c->io_mutex); c 143 drivers/staging/most/cdev/cdev.c if (c->access_ref) { c 145 drivers/staging/most/cdev/cdev.c mutex_unlock(&c->io_mutex); c 149 drivers/staging/most/cdev/cdev.c c->mbo_offs = 0; c 150 drivers/staging/most/cdev/cdev.c ret = most_start_channel(c->iface, c->channel_id, &comp.cc); c 152 drivers/staging/most/cdev/cdev.c c->access_ref = 1; c 153 drivers/staging/most/cdev/cdev.c mutex_unlock(&c->io_mutex); c 166 drivers/staging/most/cdev/cdev.c struct comp_channel *c = to_channel(inode->i_cdev); c 168 drivers/staging/most/cdev/cdev.c mutex_lock(&c->io_mutex); c 169 drivers/staging/most/cdev/cdev.c spin_lock(&c->unlink); c 170 drivers/staging/most/cdev/cdev.c c->access_ref = 0; c 171 drivers/staging/most/cdev/cdev.c spin_unlock(&c->unlink); c 172 drivers/staging/most/cdev/cdev.c if (c->dev) { c 173 drivers/staging/most/cdev/cdev.c stop_channel(c); c 174 drivers/staging/most/cdev/cdev.c mutex_unlock(&c->io_mutex); c 176 drivers/staging/most/cdev/cdev.c mutex_unlock(&c->io_mutex); c 177 drivers/staging/most/cdev/cdev.c destroy_channel(c); c 195 drivers/staging/most/cdev/cdev.c struct comp_channel *c = filp->private_data; c 197 drivers/staging/most/cdev/cdev.c mutex_lock(&c->io_mutex); c 198 drivers/staging/most/cdev/cdev.c while (c->dev && !ch_get_mbo(c, &mbo)) { c 199 drivers/staging/most/cdev/cdev.c mutex_unlock(&c->io_mutex); c 203 drivers/staging/most/cdev/cdev.c if (wait_event_interruptible(c->wq, ch_has_mbo(c) || !c->dev)) c 205 drivers/staging/most/cdev/cdev.c mutex_lock(&c->io_mutex); c 208 drivers/staging/most/cdev/cdev.c if (unlikely(!c->dev)) { c 213 drivers/staging/most/cdev/cdev.c to_copy = min(count, c->cfg->buffer_size - c->mbo_offs); c 214 drivers/staging/most/cdev/cdev.c left = copy_from_user(mbo->virt_address + c->mbo_offs, buf, to_copy); c 220 drivers/staging/most/cdev/cdev.c c->mbo_offs += to_copy - left; c 221 drivers/staging/most/cdev/cdev.c if (c->mbo_offs >= c->cfg->buffer_size || c 222 drivers/staging/most/cdev/cdev.c c->cfg->data_type == MOST_CH_CONTROL || c 223 drivers/staging/most/cdev/cdev.c c->cfg->data_type == MOST_CH_ASYNC) { c 224 drivers/staging/most/cdev/cdev.c kfifo_skip(&c->fifo); c 225 drivers/staging/most/cdev/cdev.c mbo->buffer_length = c->mbo_offs; c 226 drivers/staging/most/cdev/cdev.c c->mbo_offs = 0; c 232 drivers/staging/most/cdev/cdev.c mutex_unlock(&c->io_mutex); c 248 drivers/staging/most/cdev/cdev.c struct comp_channel *c = filp->private_data; c 250 drivers/staging/most/cdev/cdev.c mutex_lock(&c->io_mutex); c 251 drivers/staging/most/cdev/cdev.c 
while (c->dev && !kfifo_peek(&c->fifo, &mbo)) { c 252 drivers/staging/most/cdev/cdev.c mutex_unlock(&c->io_mutex); c 255 drivers/staging/most/cdev/cdev.c if (wait_event_interruptible(c->wq, c 256 drivers/staging/most/cdev/cdev.c (!kfifo_is_empty(&c->fifo) || c 257 drivers/staging/most/cdev/cdev.c (!c->dev)))) c 259 drivers/staging/most/cdev/cdev.c mutex_lock(&c->io_mutex); c 263 drivers/staging/most/cdev/cdev.c if (unlikely(!c->dev)) { c 264 drivers/staging/most/cdev/cdev.c mutex_unlock(&c->io_mutex); c 270 drivers/staging/most/cdev/cdev.c mbo->processed_length - c->mbo_offs); c 273 drivers/staging/most/cdev/cdev.c mbo->virt_address + c->mbo_offs, c 278 drivers/staging/most/cdev/cdev.c c->mbo_offs += copied; c 279 drivers/staging/most/cdev/cdev.c if (c->mbo_offs >= mbo->processed_length) { c 280 drivers/staging/most/cdev/cdev.c kfifo_skip(&c->fifo); c 282 drivers/staging/most/cdev/cdev.c c->mbo_offs = 0; c 284 drivers/staging/most/cdev/cdev.c mutex_unlock(&c->io_mutex); c 290 drivers/staging/most/cdev/cdev.c struct comp_channel *c = filp->private_data; c 293 drivers/staging/most/cdev/cdev.c poll_wait(filp, &c->wq, wait); c 295 drivers/staging/most/cdev/cdev.c mutex_lock(&c->io_mutex); c 296 drivers/staging/most/cdev/cdev.c if (c->cfg->direction == MOST_CH_RX) { c 297 drivers/staging/most/cdev/cdev.c if (!c->dev || !kfifo_is_empty(&c->fifo)) c 300 drivers/staging/most/cdev/cdev.c if (!c->dev || !kfifo_is_empty(&c->fifo) || ch_has_mbo(c)) c 303 drivers/staging/most/cdev/cdev.c mutex_unlock(&c->io_mutex); c 329 drivers/staging/most/cdev/cdev.c struct comp_channel *c; c 336 drivers/staging/most/cdev/cdev.c c = get_channel(iface, channel_id); c 337 drivers/staging/most/cdev/cdev.c if (!c) c 340 drivers/staging/most/cdev/cdev.c mutex_lock(&c->io_mutex); c 341 drivers/staging/most/cdev/cdev.c spin_lock(&c->unlink); c 342 drivers/staging/most/cdev/cdev.c c->dev = NULL; c 343 drivers/staging/most/cdev/cdev.c spin_unlock(&c->unlink); c 344 drivers/staging/most/cdev/cdev.c destroy_cdev(c); c 345 drivers/staging/most/cdev/cdev.c if (c->access_ref) { c 346 drivers/staging/most/cdev/cdev.c stop_channel(c); c 347 drivers/staging/most/cdev/cdev.c wake_up_interruptible(&c->wq); c 348 drivers/staging/most/cdev/cdev.c mutex_unlock(&c->io_mutex); c 350 drivers/staging/most/cdev/cdev.c mutex_unlock(&c->io_mutex); c 351 drivers/staging/most/cdev/cdev.c destroy_channel(c); c 365 drivers/staging/most/cdev/cdev.c struct comp_channel *c; c 370 drivers/staging/most/cdev/cdev.c c = get_channel(mbo->ifp, mbo->hdm_channel_id); c 371 drivers/staging/most/cdev/cdev.c if (!c) c 374 drivers/staging/most/cdev/cdev.c spin_lock(&c->unlink); c 375 drivers/staging/most/cdev/cdev.c if (!c->access_ref || !c->dev) { c 376 drivers/staging/most/cdev/cdev.c spin_unlock(&c->unlink); c 379 drivers/staging/most/cdev/cdev.c kfifo_in(&c->fifo, &mbo, 1); c 380 drivers/staging/most/cdev/cdev.c spin_unlock(&c->unlink); c 382 drivers/staging/most/cdev/cdev.c if (kfifo_is_full(&c->fifo)) c 385 drivers/staging/most/cdev/cdev.c wake_up_interruptible(&c->wq); c 398 drivers/staging/most/cdev/cdev.c struct comp_channel *c; c 409 drivers/staging/most/cdev/cdev.c c = get_channel(iface, channel_id); c 410 drivers/staging/most/cdev/cdev.c if (!c) c 412 drivers/staging/most/cdev/cdev.c wake_up_interruptible(&c->wq); c 430 drivers/staging/most/cdev/cdev.c struct comp_channel *c; c 439 drivers/staging/most/cdev/cdev.c c = get_channel(iface, channel_id); c 440 drivers/staging/most/cdev/cdev.c if (c) c 447 drivers/staging/most/cdev/cdev.c c = 
kzalloc(sizeof(*c), GFP_KERNEL); c 448 drivers/staging/most/cdev/cdev.c if (!c) { c 453 drivers/staging/most/cdev/cdev.c c->devno = MKDEV(comp.major, current_minor); c 454 drivers/staging/most/cdev/cdev.c cdev_init(&c->cdev, &channel_fops); c 455 drivers/staging/most/cdev/cdev.c c->cdev.owner = THIS_MODULE; c 456 drivers/staging/most/cdev/cdev.c retval = cdev_add(&c->cdev, c->devno, 1); c 459 drivers/staging/most/cdev/cdev.c c->iface = iface; c 460 drivers/staging/most/cdev/cdev.c c->cfg = cfg; c 461 drivers/staging/most/cdev/cdev.c c->channel_id = channel_id; c 462 drivers/staging/most/cdev/cdev.c c->access_ref = 0; c 463 drivers/staging/most/cdev/cdev.c spin_lock_init(&c->unlink); c 464 drivers/staging/most/cdev/cdev.c INIT_KFIFO(c->fifo); c 465 drivers/staging/most/cdev/cdev.c retval = kfifo_alloc(&c->fifo, cfg->num_buffers, GFP_KERNEL); c 468 drivers/staging/most/cdev/cdev.c init_waitqueue_head(&c->wq); c 469 drivers/staging/most/cdev/cdev.c mutex_init(&c->io_mutex); c 471 drivers/staging/most/cdev/cdev.c list_add_tail(&c->list, &channel_list); c 473 drivers/staging/most/cdev/cdev.c c->dev = device_create(comp.class, NULL, c->devno, NULL, "%s", name); c 475 drivers/staging/most/cdev/cdev.c if (IS_ERR(c->dev)) { c 476 drivers/staging/most/cdev/cdev.c retval = PTR_ERR(c->dev); c 480 drivers/staging/most/cdev/cdev.c kobject_uevent(&c->dev->kobj, KOBJ_ADD); c 484 drivers/staging/most/cdev/cdev.c kfifo_free(&c->fifo); c 485 drivers/staging/most/cdev/cdev.c list_del(&c->list); c 487 drivers/staging/most/cdev/cdev.c cdev_del(&c->cdev); c 489 drivers/staging/most/cdev/cdev.c kfree(c); c 545 drivers/staging/most/cdev/cdev.c struct comp_channel *c, *tmp; c 552 drivers/staging/most/cdev/cdev.c list_for_each_entry_safe(c, tmp, &channel_list, list) { c 553 drivers/staging/most/cdev/cdev.c destroy_cdev(c); c 554 drivers/staging/most/cdev/cdev.c destroy_channel(c); c 592 drivers/staging/most/configfs.c int most_register_configfs_subsys(struct core_component *c) c 596 drivers/staging/most/configfs.c if (!strcmp(c->name, "cdev")) c 598 drivers/staging/most/configfs.c else if (!strcmp(c->name, "net")) c 600 drivers/staging/most/configfs.c else if (!strcmp(c->name, "video")) c 602 drivers/staging/most/configfs.c else if (!strcmp(c->name, "sound")) c 609 drivers/staging/most/configfs.c ret, c->name); c 631 drivers/staging/most/configfs.c void most_deregister_configfs_subsys(struct core_component *c) c 633 drivers/staging/most/configfs.c if (!strcmp(c->name, "cdev")) c 635 drivers/staging/most/configfs.c else if (!strcmp(c->name, "net")) c 637 drivers/staging/most/configfs.c else if (!strcmp(c->name, "video")) c 639 drivers/staging/most/configfs.c else if (!strcmp(c->name, "sound")) c 111 drivers/staging/most/core.c struct most_channel *c = mbo->context; c 112 drivers/staging/most/core.c u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len; c 114 drivers/staging/most/core.c if (c->iface->dma_free) c 115 drivers/staging/most/core.c c->iface->dma_free(mbo, coherent_buf_size); c 119 drivers/staging/most/core.c if (atomic_sub_and_test(1, &c->mbo_ref)) c 120 drivers/staging/most/core.c complete(&c->cleanup); c 127 drivers/staging/most/core.c static void flush_channel_fifos(struct most_channel *c) c 132 drivers/staging/most/core.c if (list_empty(&c->fifo) && list_empty(&c->halt_fifo)) c 135 drivers/staging/most/core.c spin_lock_irqsave(&c->fifo_lock, flags); c 136 drivers/staging/most/core.c list_for_each_entry_safe(mbo, tmp, &c->fifo, list) { c 138 drivers/staging/most/core.c 
spin_unlock_irqrestore(&c->fifo_lock, flags); c 140 drivers/staging/most/core.c spin_lock_irqsave(&c->fifo_lock, flags); c 142 drivers/staging/most/core.c spin_unlock_irqrestore(&c->fifo_lock, flags); c 144 drivers/staging/most/core.c spin_lock_irqsave(&c->fifo_lock, hf_flags); c 145 drivers/staging/most/core.c list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) { c 147 drivers/staging/most/core.c spin_unlock_irqrestore(&c->fifo_lock, hf_flags); c 149 drivers/staging/most/core.c spin_lock_irqsave(&c->fifo_lock, hf_flags); c 151 drivers/staging/most/core.c spin_unlock_irqrestore(&c->fifo_lock, hf_flags); c 153 drivers/staging/most/core.c if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo)))) c 161 drivers/staging/most/core.c static int flush_trash_fifo(struct most_channel *c) c 166 drivers/staging/most/core.c spin_lock_irqsave(&c->fifo_lock, flags); c 167 drivers/staging/most/core.c list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) { c 169 drivers/staging/most/core.c spin_unlock_irqrestore(&c->fifo_lock, flags); c 171 drivers/staging/most/core.c spin_lock_irqsave(&c->fifo_lock, flags); c 173 drivers/staging/most/core.c spin_unlock_irqrestore(&c->fifo_lock, flags); c 181 drivers/staging/most/core.c struct most_channel *c = to_channel(dev); c 182 drivers/staging/most/core.c unsigned int i = c->channel_id; c 185 drivers/staging/most/core.c if (c->iface->channel_vector[i].direction & MOST_CH_RX) c 187 drivers/staging/most/core.c if (c->iface->channel_vector[i].direction & MOST_CH_TX) c 197 drivers/staging/most/core.c struct most_channel *c = to_channel(dev); c 198 drivers/staging/most/core.c unsigned int i = c->channel_id; c 201 drivers/staging/most/core.c if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL) c 203 drivers/staging/most/core.c if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC) c 205 drivers/staging/most/core.c if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC) c 207 drivers/staging/most/core.c if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC) c 217 drivers/staging/most/core.c struct most_channel *c = to_channel(dev); c 218 drivers/staging/most/core.c unsigned int i = c->channel_id; c 221 drivers/staging/most/core.c c->iface->channel_vector[i].num_buffers_packet); c 228 drivers/staging/most/core.c struct most_channel *c = to_channel(dev); c 229 drivers/staging/most/core.c unsigned int i = c->channel_id; c 232 drivers/staging/most/core.c c->iface->channel_vector[i].num_buffers_streaming); c 239 drivers/staging/most/core.c struct most_channel *c = to_channel(dev); c 240 drivers/staging/most/core.c unsigned int i = c->channel_id; c 243 drivers/staging/most/core.c c->iface->channel_vector[i].buffer_size_packet); c 250 drivers/staging/most/core.c struct most_channel *c = to_channel(dev); c 251 drivers/staging/most/core.c unsigned int i = c->channel_id; c 254 drivers/staging/most/core.c c->iface->channel_vector[i].buffer_size_streaming); c 261 drivers/staging/most/core.c struct most_channel *c = to_channel(dev); c 263 drivers/staging/most/core.c return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving); c 270 drivers/staging/most/core.c struct most_channel *c = to_channel(dev); c 272 drivers/staging/most/core.c return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers); c 279 drivers/staging/most/core.c struct most_channel *c = to_channel(dev); c 281 drivers/staging/most/core.c return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size); c 288 drivers/staging/most/core.c struct most_channel *c = to_channel(dev); c 290 
drivers/staging/most/core.c if (c->cfg.direction & MOST_CH_TX) c 292 drivers/staging/most/core.c else if (c->cfg.direction & MOST_CH_RX) c 302 drivers/staging/most/core.c struct most_channel *c = to_channel(dev); c 305 drivers/staging/most/core.c if (c->cfg.data_type & ch_data_type[i].most_ch_data_type) c 316 drivers/staging/most/core.c struct most_channel *c = to_channel(dev); c 318 drivers/staging/most/core.c return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size); c 325 drivers/staging/most/core.c struct most_channel *c = to_channel(dev); c 327 drivers/staging/most/core.c return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact); c 333 drivers/staging/most/core.c struct most_channel *c = to_channel(dev); c 335 drivers/staging/most/core.c return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.dbr_size); c 344 drivers/staging/most/core.c struct most_channel *c = to_channel(dev); c 347 drivers/staging/most/core.c (c->iface->interface != ITYPE_MEDIALB_DIM2)) c 350 drivers/staging/most/core.c (c->iface->interface != ITYPE_USB)) c 478 drivers/staging/most/core.c struct most_channel *c; c 481 drivers/staging/most/core.c list_for_each_entry(c, &iface->p->channel_list, list) { c 482 drivers/staging/most/core.c if (c->pipe0.comp) { c 486 drivers/staging/most/core.c c->pipe0.comp->name, c 488 drivers/staging/most/core.c dev_name(&c->dev)); c 490 drivers/staging/most/core.c if (c->pipe1.comp) { c 494 drivers/staging/most/core.c c->pipe1.comp->name, c 496 drivers/staging/most/core.c dev_name(&c->dev)); c 545 drivers/staging/most/core.c static int split_string(char *buf, char **a, char **b, char **c, char **d) c 555 drivers/staging/most/core.c *c = strsep(&buf, ":\n"); c 556 drivers/staging/most/core.c if (!*c) c 574 drivers/staging/most/core.c struct most_channel *c, *tmp; c 580 drivers/staging/most/core.c list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) { c 581 drivers/staging/most/core.c if (!strcmp(dev_name(&c->dev), mdev_ch)) c 582 drivers/staging/most/core.c return c; c 588 drivers/staging/most/core.c inline int link_channel_to_component(struct most_channel *c, c 596 drivers/staging/most/core.c if (!c->pipe0.comp) c 597 drivers/staging/most/core.c comp_ptr = &c->pipe0.comp; c 598 drivers/staging/most/core.c else if (!c->pipe1.comp) c 599 drivers/staging/most/core.c comp_ptr = &c->pipe1.comp; c 604 drivers/staging/most/core.c ret = comp->probe_channel(c->iface, c->channel_id, &c->cfg, name, c 615 drivers/staging/most/core.c struct most_channel *c = get_channel(mdev, mdev_ch); c 617 drivers/staging/most/core.c if (!c) c 619 drivers/staging/most/core.c c->cfg.buffer_size = val; c 625 drivers/staging/most/core.c struct most_channel *c = get_channel(mdev, mdev_ch); c 627 drivers/staging/most/core.c if (!c) c 629 drivers/staging/most/core.c c->cfg.subbuffer_size = val; c 635 drivers/staging/most/core.c struct most_channel *c = get_channel(mdev, mdev_ch); c 637 drivers/staging/most/core.c if (!c) c 639 drivers/staging/most/core.c c->cfg.dbr_size = val; c 645 drivers/staging/most/core.c struct most_channel *c = get_channel(mdev, mdev_ch); c 647 drivers/staging/most/core.c if (!c) c 649 drivers/staging/most/core.c c->cfg.num_buffers = val; c 656 drivers/staging/most/core.c struct most_channel *c = get_channel(mdev, mdev_ch); c 658 drivers/staging/most/core.c if (!c) c 662 drivers/staging/most/core.c c->cfg.data_type = ch_data_type[i].most_ch_data_type; c 674 drivers/staging/most/core.c struct most_channel *c = get_channel(mdev, mdev_ch); c 676 drivers/staging/most/core.c if (!c) c 
679 drivers/staging/most/core.c c->cfg.direction = MOST_CH_RX; c 681 drivers/staging/most/core.c c->cfg.direction = MOST_CH_RX; c 683 drivers/staging/most/core.c c->cfg.direction = MOST_CH_TX; c 685 drivers/staging/most/core.c c->cfg.direction = MOST_CH_TX; c 695 drivers/staging/most/core.c struct most_channel *c = get_channel(mdev, mdev_ch); c 697 drivers/staging/most/core.c if (!c) c 699 drivers/staging/most/core.c c->cfg.packets_per_xact = val; c 717 drivers/staging/most/core.c struct most_channel *c = get_channel(mdev, mdev_ch); c 720 drivers/staging/most/core.c if (!c || !comp) c 723 drivers/staging/most/core.c return link_channel_to_component(c, comp, link_name, comp_param); c 739 drivers/staging/most/core.c struct most_channel *c; c 755 drivers/staging/most/core.c c = get_channel(mdev, mdev_ch); c 756 drivers/staging/most/core.c if (!c) c 759 drivers/staging/most/core.c if (comp->disconnect_channel(c->iface, c->channel_id)) c 761 drivers/staging/most/core.c if (c->pipe0.comp == comp) c 762 drivers/staging/most/core.c c->pipe0.comp = NULL; c 763 drivers/staging/most/core.c if (c->pipe1.comp == comp) c 764 drivers/staging/most/core.c c->pipe1.comp = NULL; c 770 drivers/staging/most/core.c struct most_channel *c; c 776 drivers/staging/most/core.c c = get_channel(mdev, mdev_ch); c 777 drivers/staging/most/core.c if (!c) c 780 drivers/staging/most/core.c if (comp->disconnect_channel(c->iface, c->channel_id)) c 782 drivers/staging/most/core.c if (c->pipe0.comp == comp) c 783 drivers/staging/most/core.c c->pipe0.comp = NULL; c 784 drivers/staging/most/core.c if (c->pipe1.comp == comp) c 785 drivers/staging/most/core.c c->pipe1.comp = NULL; c 822 drivers/staging/most/core.c struct most_channel *c = mbo->context; c 824 drivers/staging/most/core.c spin_lock_irqsave(&c->fifo_lock, flags); c 825 drivers/staging/most/core.c list_add(&mbo->list, &c->trash_fifo); c 826 drivers/staging/most/core.c spin_unlock_irqrestore(&c->fifo_lock, flags); c 829 drivers/staging/most/core.c static bool hdm_mbo_ready(struct most_channel *c) c 833 drivers/staging/most/core.c if (c->enqueue_halt) c 836 drivers/staging/most/core.c spin_lock_irq(&c->fifo_lock); c 837 drivers/staging/most/core.c empty = list_empty(&c->halt_fifo); c 838 drivers/staging/most/core.c spin_unlock_irq(&c->fifo_lock); c 846 drivers/staging/most/core.c struct most_channel *c = mbo->context; c 848 drivers/staging/most/core.c spin_lock_irqsave(&c->fifo_lock, flags); c 849 drivers/staging/most/core.c list_add_tail(&mbo->list, &c->halt_fifo); c 850 drivers/staging/most/core.c spin_unlock_irqrestore(&c->fifo_lock, flags); c 851 drivers/staging/most/core.c wake_up_interruptible(&c->hdm_fifo_wq); c 856 drivers/staging/most/core.c struct most_channel *c = data; c 859 drivers/staging/most/core.c typeof(c->iface->enqueue) enqueue = c->iface->enqueue; c 862 drivers/staging/most/core.c wait_event_interruptible(c->hdm_fifo_wq, c 863 drivers/staging/most/core.c hdm_mbo_ready(c) || c 866 drivers/staging/most/core.c mutex_lock(&c->nq_mutex); c 867 drivers/staging/most/core.c spin_lock_irq(&c->fifo_lock); c 868 drivers/staging/most/core.c if (unlikely(c->enqueue_halt || list_empty(&c->halt_fifo))) { c 869 drivers/staging/most/core.c spin_unlock_irq(&c->fifo_lock); c 870 drivers/staging/most/core.c mutex_unlock(&c->nq_mutex); c 874 drivers/staging/most/core.c mbo = list_pop_mbo(&c->halt_fifo); c 875 drivers/staging/most/core.c spin_unlock_irq(&c->fifo_lock); c 877 drivers/staging/most/core.c if (c->cfg.direction == MOST_CH_RX) c 878 drivers/staging/most/core.c 
mbo->buffer_length = c->cfg.buffer_size; c 881 drivers/staging/most/core.c mutex_unlock(&c->nq_mutex); c 886 drivers/staging/most/core.c c->hdm_enqueue_task = NULL; c 894 drivers/staging/most/core.c static int run_enqueue_thread(struct most_channel *c, int channel_id) c 897 drivers/staging/most/core.c kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d", c 903 drivers/staging/most/core.c c->hdm_enqueue_task = task; c 921 drivers/staging/most/core.c struct most_channel *c; c 923 drivers/staging/most/core.c c = mbo->context; c 925 drivers/staging/most/core.c if (c->is_poisoned) { c 930 drivers/staging/most/core.c spin_lock_irqsave(&c->fifo_lock, flags); c 932 drivers/staging/most/core.c list_add_tail(&mbo->list, &c->fifo); c 933 drivers/staging/most/core.c spin_unlock_irqrestore(&c->fifo_lock, flags); c 935 drivers/staging/most/core.c if (c->pipe0.refs && c->pipe0.comp->tx_completion) c 936 drivers/staging/most/core.c c->pipe0.comp->tx_completion(c->iface, c->channel_id); c 938 drivers/staging/most/core.c if (c->pipe1.refs && c->pipe1.comp->tx_completion) c 939 drivers/staging/most/core.c c->pipe1.comp->tx_completion(c->iface, c->channel_id); c 955 drivers/staging/most/core.c static int arm_mbo_chain(struct most_channel *c, int dir, c 961 drivers/staging/most/core.c u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len; c 963 drivers/staging/most/core.c atomic_set(&c->mbo_nq_level, 0); c 965 drivers/staging/most/core.c for (i = 0; i < c->cfg.num_buffers; i++) { c 970 drivers/staging/most/core.c mbo->context = c; c 971 drivers/staging/most/core.c mbo->ifp = c->iface; c 972 drivers/staging/most/core.c mbo->hdm_channel_id = c->channel_id; c 973 drivers/staging/most/core.c if (c->iface->dma_alloc) { c 975 drivers/staging/most/core.c c->iface->dma_alloc(mbo, coherent_buf_size); c 987 drivers/staging/most/core.c atomic_inc(&c->mbo_nq_level); c 989 drivers/staging/most/core.c spin_lock_irqsave(&c->fifo_lock, flags); c 990 drivers/staging/most/core.c list_add_tail(&mbo->list, &c->fifo); c 991 drivers/staging/most/core.c spin_unlock_irqrestore(&c->fifo_lock, flags); c 994 drivers/staging/most/core.c return c->cfg.num_buffers; c 1000 drivers/staging/most/core.c flush_channel_fifos(c); c 1027 drivers/staging/most/core.c struct most_channel *c; c 1029 drivers/staging/most/core.c c = mbo->context; c 1032 drivers/staging/most/core.c if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) c 1041 drivers/staging/most/core.c struct most_channel *c = iface->p->channel[id]; c 1045 drivers/staging/most/core.c if (unlikely(!c)) c 1048 drivers/staging/most/core.c if (c->pipe0.refs && c->pipe1.refs && c 1049 drivers/staging/most/core.c ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) || c 1050 drivers/staging/most/core.c (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0))) c 1053 drivers/staging/most/core.c spin_lock_irqsave(&c->fifo_lock, flags); c 1054 drivers/staging/most/core.c empty = list_empty(&c->fifo); c 1055 drivers/staging/most/core.c spin_unlock_irqrestore(&c->fifo_lock, flags); c 1073 drivers/staging/most/core.c struct most_channel *c; c 1077 drivers/staging/most/core.c c = iface->p->channel[id]; c 1078 drivers/staging/most/core.c if (unlikely(!c)) c 1081 drivers/staging/most/core.c if (c->pipe0.refs && c->pipe1.refs && c 1082 drivers/staging/most/core.c ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) || c 1083 drivers/staging/most/core.c (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0))) c 1086 drivers/staging/most/core.c if (comp == c->pipe0.comp) c 1087 
drivers/staging/most/core.c num_buffers_ptr = &c->pipe0.num_buffers; c 1088 drivers/staging/most/core.c else if (comp == c->pipe1.comp) c 1089 drivers/staging/most/core.c num_buffers_ptr = &c->pipe1.num_buffers; c 1093 drivers/staging/most/core.c spin_lock_irqsave(&c->fifo_lock, flags); c 1094 drivers/staging/most/core.c if (list_empty(&c->fifo)) { c 1095 drivers/staging/most/core.c spin_unlock_irqrestore(&c->fifo_lock, flags); c 1098 drivers/staging/most/core.c mbo = list_pop_mbo(&c->fifo); c 1100 drivers/staging/most/core.c spin_unlock_irqrestore(&c->fifo_lock, flags); c 1103 drivers/staging/most/core.c mbo->buffer_length = c->cfg.buffer_size; c 1114 drivers/staging/most/core.c struct most_channel *c = mbo->context; c 1116 drivers/staging/most/core.c if (c->cfg.direction == MOST_CH_TX) { c 1121 drivers/staging/most/core.c atomic_inc(&c->mbo_nq_level); c 1137 drivers/staging/most/core.c struct most_channel *c = mbo->context; c 1139 drivers/staging/most/core.c if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) { c 1146 drivers/staging/most/core.c atomic_inc(&c->mbo_nq_level); c 1150 drivers/staging/most/core.c if (atomic_sub_and_test(1, &c->mbo_nq_level)) c 1151 drivers/staging/most/core.c c->is_starving = 1; c 1153 drivers/staging/most/core.c if (c->pipe0.refs && c->pipe0.comp->rx_completion && c 1154 drivers/staging/most/core.c c->pipe0.comp->rx_completion(mbo) == 0) c 1157 drivers/staging/most/core.c if (c->pipe1.refs && c->pipe1.comp->rx_completion && c 1158 drivers/staging/most/core.c c->pipe1.comp->rx_completion(mbo) == 0) c 1180 drivers/staging/most/core.c struct most_channel *c = iface->p->channel[id]; c 1182 drivers/staging/most/core.c if (unlikely(!c)) c 1185 drivers/staging/most/core.c mutex_lock(&c->start_mutex); c 1186 drivers/staging/most/core.c if (c->pipe0.refs + c->pipe1.refs > 0) c 1191 drivers/staging/most/core.c mutex_unlock(&c->start_mutex); c 1195 drivers/staging/most/core.c c->cfg.extra_len = 0; c 1196 drivers/staging/most/core.c if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) { c 1202 drivers/staging/most/core.c init_waitqueue_head(&c->hdm_fifo_wq); c 1204 drivers/staging/most/core.c if (c->cfg.direction == MOST_CH_RX) c 1205 drivers/staging/most/core.c num_buffer = arm_mbo_chain(c, c->cfg.direction, c 1208 drivers/staging/most/core.c num_buffer = arm_mbo_chain(c, c->cfg.direction, c 1215 drivers/staging/most/core.c ret = run_enqueue_thread(c, id); c 1219 drivers/staging/most/core.c c->is_starving = 0; c 1220 drivers/staging/most/core.c c->pipe0.num_buffers = c->cfg.num_buffers / 2; c 1221 drivers/staging/most/core.c c->pipe1.num_buffers = c->cfg.num_buffers - c->pipe0.num_buffers; c 1222 drivers/staging/most/core.c atomic_set(&c->mbo_ref, num_buffer); c 1225 drivers/staging/most/core.c if (comp == c->pipe0.comp) c 1226 drivers/staging/most/core.c c->pipe0.refs++; c 1227 drivers/staging/most/core.c if (comp == c->pipe1.comp) c 1228 drivers/staging/most/core.c c->pipe1.refs++; c 1229 drivers/staging/most/core.c mutex_unlock(&c->start_mutex); c 1234 drivers/staging/most/core.c mutex_unlock(&c->start_mutex); c 1248 drivers/staging/most/core.c struct most_channel *c; c 1254 drivers/staging/most/core.c c = iface->p->channel[id]; c 1255 drivers/staging/most/core.c if (unlikely(!c)) c 1258 drivers/staging/most/core.c mutex_lock(&c->start_mutex); c 1259 drivers/staging/most/core.c if (c->pipe0.refs + c->pipe1.refs >= 2) c 1262 drivers/staging/most/core.c if (c->hdm_enqueue_task) c 1263 drivers/staging/most/core.c kthread_stop(c->hdm_enqueue_task); c 
1264 drivers/staging/most/core.c c->hdm_enqueue_task = NULL; c 1269 drivers/staging/most/core.c c->is_poisoned = true; c 1270 drivers/staging/most/core.c if (c->iface->poison_channel(c->iface, c->channel_id)) { c 1271 drivers/staging/most/core.c pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id, c 1272 drivers/staging/most/core.c c->iface->description); c 1273 drivers/staging/most/core.c mutex_unlock(&c->start_mutex); c 1276 drivers/staging/most/core.c flush_trash_fifo(c); c 1277 drivers/staging/most/core.c flush_channel_fifos(c); c 1280 drivers/staging/most/core.c if (wait_for_completion_interruptible(&c->cleanup)) { c 1281 drivers/staging/most/core.c pr_info("Interrupted while clean up ch %d\n", c->channel_id); c 1282 drivers/staging/most/core.c mutex_unlock(&c->start_mutex); c 1286 drivers/staging/most/core.c wait_for_completion(&c->cleanup); c 1288 drivers/staging/most/core.c c->is_poisoned = false; c 1291 drivers/staging/most/core.c if (comp == c->pipe0.comp) c 1292 drivers/staging/most/core.c c->pipe0.refs--; c 1293 drivers/staging/most/core.c if (comp == c->pipe1.comp) c 1294 drivers/staging/most/core.c c->pipe1.refs--; c 1295 drivers/staging/most/core.c mutex_unlock(&c->start_mutex); c 1319 drivers/staging/most/core.c struct most_channel *c, *tmp; c 1323 drivers/staging/most/core.c list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) { c 1324 drivers/staging/most/core.c if (c->pipe0.comp == comp || c->pipe1.comp == comp) c 1325 drivers/staging/most/core.c comp->disconnect_channel(c->iface, c->channel_id); c 1326 drivers/staging/most/core.c if (c->pipe0.comp == comp) c 1327 drivers/staging/most/core.c c->pipe0.comp = NULL; c 1328 drivers/staging/most/core.c if (c->pipe1.comp == comp) c 1329 drivers/staging/most/core.c c->pipe1.comp = NULL; c 1373 drivers/staging/most/core.c struct most_channel *c; c 1411 drivers/staging/most/core.c c = kzalloc(sizeof(*c), GFP_KERNEL); c 1412 drivers/staging/most/core.c if (!c) c 1415 drivers/staging/most/core.c snprintf(c->name, STRING_SIZE, "ch%d", i); c 1417 drivers/staging/most/core.c snprintf(c->name, STRING_SIZE, "%s", name_suffix); c 1418 drivers/staging/most/core.c c->dev.init_name = c->name; c 1419 drivers/staging/most/core.c c->dev.parent = &iface->dev; c 1420 drivers/staging/most/core.c c->dev.groups = channel_attr_groups; c 1421 drivers/staging/most/core.c c->dev.release = release_channel; c 1422 drivers/staging/most/core.c iface->p->channel[i] = c; c 1423 drivers/staging/most/core.c c->is_starving = 0; c 1424 drivers/staging/most/core.c c->iface = iface; c 1425 drivers/staging/most/core.c c->channel_id = i; c 1426 drivers/staging/most/core.c c->keep_mbo = false; c 1427 drivers/staging/most/core.c c->enqueue_halt = false; c 1428 drivers/staging/most/core.c c->is_poisoned = false; c 1429 drivers/staging/most/core.c c->cfg.direction = 0; c 1430 drivers/staging/most/core.c c->cfg.data_type = 0; c 1431 drivers/staging/most/core.c c->cfg.num_buffers = 0; c 1432 drivers/staging/most/core.c c->cfg.buffer_size = 0; c 1433 drivers/staging/most/core.c c->cfg.subbuffer_size = 0; c 1434 drivers/staging/most/core.c c->cfg.packets_per_xact = 0; c 1435 drivers/staging/most/core.c spin_lock_init(&c->fifo_lock); c 1436 drivers/staging/most/core.c INIT_LIST_HEAD(&c->fifo); c 1437 drivers/staging/most/core.c INIT_LIST_HEAD(&c->trash_fifo); c 1438 drivers/staging/most/core.c INIT_LIST_HEAD(&c->halt_fifo); c 1439 drivers/staging/most/core.c init_completion(&c->cleanup); c 1440 drivers/staging/most/core.c atomic_set(&c->mbo_ref, 0); c 1441 
drivers/staging/most/core.c mutex_init(&c->start_mutex); c 1442 drivers/staging/most/core.c mutex_init(&c->nq_mutex); c 1443 drivers/staging/most/core.c list_add_tail(&c->list, &iface->p->channel_list); c 1444 drivers/staging/most/core.c if (device_register(&c->dev)) { c 1455 drivers/staging/most/core.c kfree(c); c 1459 drivers/staging/most/core.c c = iface->p->channel[--i]; c 1460 drivers/staging/most/core.c device_unregister(&c->dev); c 1461 drivers/staging/most/core.c kfree(c); c 1480 drivers/staging/most/core.c struct most_channel *c; c 1485 drivers/staging/most/core.c c = iface->p->channel[i]; c 1486 drivers/staging/most/core.c if (c->pipe0.comp) c 1487 drivers/staging/most/core.c c->pipe0.comp->disconnect_channel(c->iface, c 1488 drivers/staging/most/core.c c->channel_id); c 1489 drivers/staging/most/core.c if (c->pipe1.comp) c 1490 drivers/staging/most/core.c c->pipe1.comp->disconnect_channel(c->iface, c 1491 drivers/staging/most/core.c c->channel_id); c 1492 drivers/staging/most/core.c c->pipe0.comp = NULL; c 1493 drivers/staging/most/core.c c->pipe1.comp = NULL; c 1494 drivers/staging/most/core.c list_del(&c->list); c 1495 drivers/staging/most/core.c device_unregister(&c->dev); c 1496 drivers/staging/most/core.c kfree(c); c 1517 drivers/staging/most/core.c struct most_channel *c = iface->p->channel[id]; c 1519 drivers/staging/most/core.c if (!c) c 1522 drivers/staging/most/core.c mutex_lock(&c->nq_mutex); c 1523 drivers/staging/most/core.c c->enqueue_halt = true; c 1524 drivers/staging/most/core.c mutex_unlock(&c->nq_mutex); c 1538 drivers/staging/most/core.c struct most_channel *c = iface->p->channel[id]; c 1540 drivers/staging/most/core.c if (!c) c 1543 drivers/staging/most/core.c mutex_lock(&c->nq_mutex); c 1544 drivers/staging/most/core.c c->enqueue_halt = false; c 1545 drivers/staging/most/core.c mutex_unlock(&c->nq_mutex); c 1547 drivers/staging/most/core.c wake_up_interruptible(&c->hdm_fifo_wq); c 172 drivers/staging/mt7621-dma/mtk-hsdma.c static inline struct mtk_hsdma_chan *to_mtk_hsdma_chan(struct dma_chan *c) c 174 drivers/staging/mt7621-dma/mtk-hsdma.c return container_of(c, struct mtk_hsdma_chan, vchan.chan); c 284 drivers/staging/mt7621-dma/mtk-hsdma.c static int mtk_hsdma_terminate_all(struct dma_chan *c) c 286 drivers/staging/mt7621-dma/mtk-hsdma.c struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c); c 444 drivers/staging/mt7621-dma/mtk-hsdma.c static void mtk_hsdma_issue_pending(struct dma_chan *c) c 446 drivers/staging/mt7621-dma/mtk-hsdma.c struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c); c 462 drivers/staging/mt7621-dma/mtk-hsdma.c struct dma_chan *c, dma_addr_t dest, dma_addr_t src, c 465 drivers/staging/mt7621-dma/mtk-hsdma.c struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c); c 473 drivers/staging/mt7621-dma/mtk-hsdma.c dev_err(c->device->dev, "alloc memcpy decs error\n"); c 484 drivers/staging/mt7621-dma/mtk-hsdma.c static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c, c 488 drivers/staging/mt7621-dma/mtk-hsdma.c return dma_cookie_status(c, cookie, state); c 491 drivers/staging/mt7621-dma/mtk-hsdma.c static void mtk_hsdma_free_chan_resources(struct dma_chan *c) c 493 drivers/staging/mt7621-dma/mtk-hsdma.c vchan_free_chan_resources(to_virt_chan(c)); c 198 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c int i, j, c = 0; c 235 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c f[c] = &gpio_func; c 236 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c c++; c 241 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c f[c] = &p->groups[i].func[j]; c 242 
drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c f[c]->groups = devm_kzalloc(p->dev, sizeof(int), c 244 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c f[c]->groups[0] = i; c 245 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c f[c]->group_count = 1; c 246 drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c c++; c 387 drivers/staging/octeon-usb/octeon-hcd.c union _union c; \ c 389 drivers/staging/octeon-usb/octeon-hcd.c c.u32 = cvmx_usb_read_csr32(usb, address); \ c 390 drivers/staging/octeon-usb/octeon-hcd.c c.s.field = value; \ c 391 drivers/staging/octeon-usb/octeon-hcd.c cvmx_usb_write_csr32(usb, address, c.u32); \ c 580 drivers/staging/octeon-usb/octeon-hcd.c union cvmx_usbcx_grstctl c; c 583 drivers/staging/octeon-usb/octeon-hcd.c c.u32 = cvmx_usb_read_csr32(usb, address); c 584 drivers/staging/octeon-usb/octeon-hcd.c if (fflsh_type == 0 && c.s.txfflsh == 0) { c 587 drivers/staging/octeon-usb/octeon-hcd.c } else if (fflsh_type == 1 && c.s.rxfflsh == 0) { c 621 drivers/staging/qlge/qlge_ethtool.c static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) c 625 drivers/staging/qlge/qlge_ethtool.c c->rx_coalesce_usecs = qdev->rx_coalesce_usecs; c 626 drivers/staging/qlge/qlge_ethtool.c c->tx_coalesce_usecs = qdev->tx_coalesce_usecs; c 638 drivers/staging/qlge/qlge_ethtool.c c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames; c 639 drivers/staging/qlge/qlge_ethtool.c c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames; c 644 drivers/staging/qlge/qlge_ethtool.c static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c) c 649 drivers/staging/qlge/qlge_ethtool.c if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2) c 652 drivers/staging/qlge/qlge_ethtool.c if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT) c 654 drivers/staging/qlge/qlge_ethtool.c if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2) c 656 drivers/staging/qlge/qlge_ethtool.c if (c->tx_max_coalesced_frames > MAX_INTER_FRAME_WAIT) c 660 drivers/staging/qlge/qlge_ethtool.c if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs && c 661 drivers/staging/qlge/qlge_ethtool.c qdev->tx_coalesce_usecs == c->tx_coalesce_usecs && c 662 drivers/staging/qlge/qlge_ethtool.c qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames && c 663 drivers/staging/qlge/qlge_ethtool.c qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames) c 666 drivers/staging/qlge/qlge_ethtool.c qdev->rx_coalesce_usecs = c->rx_coalesce_usecs; c 667 drivers/staging/qlge/qlge_ethtool.c qdev->tx_coalesce_usecs = c->tx_coalesce_usecs; c 668 drivers/staging/qlge/qlge_ethtool.c qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames; c 669 drivers/staging/qlge/qlge_ethtool.c qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames; c 145 drivers/staging/ralink-gdma/ralink-gdma.c static struct gdma_dmaengine_chan *to_gdma_dma_chan(struct dma_chan *c) c 147 drivers/staging/ralink-gdma/ralink-gdma.c return container_of(c, struct gdma_dmaengine_chan, vchan.chan); c 181 drivers/staging/ralink-gdma/ralink-gdma.c static int gdma_dma_config(struct dma_chan *c, c 184 drivers/staging/ralink-gdma/ralink-gdma.c struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c); c 220 drivers/staging/ralink-gdma/ralink-gdma.c static int gdma_dma_terminate_all(struct dma_chan *c) c 222 drivers/staging/ralink-gdma/ralink-gdma.c struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c); c 494 drivers/staging/ralink-gdma/ralink-gdma.c static void gdma_dma_issue_pending(struct dma_chan *c) c 496 drivers/staging/ralink-gdma/ralink-gdma.c struct 
gdma_dmaengine_chan *chan = to_gdma_dma_chan(c); c 514 drivers/staging/ralink-gdma/ralink-gdma.c struct dma_chan *c, struct scatterlist *sgl, c 518 drivers/staging/ralink-gdma/ralink-gdma.c struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c); c 525 drivers/staging/ralink-gdma/ralink-gdma.c dev_err(c->device->dev, "alloc sg decs error\n"); c 536 drivers/staging/ralink-gdma/ralink-gdma.c dev_err(c->device->dev, "direction type %d error\n", c 542 drivers/staging/ralink-gdma/ralink-gdma.c dev_err(c->device->dev, "sg len too large %d\n", c 562 drivers/staging/ralink-gdma/ralink-gdma.c struct dma_chan *c, dma_addr_t dest, dma_addr_t src, c 565 drivers/staging/ralink-gdma/ralink-gdma.c struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c); c 580 drivers/staging/ralink-gdma/ralink-gdma.c dev_err(c->device->dev, "alloc memcpy decs error\n"); c 605 drivers/staging/ralink-gdma/ralink-gdma.c struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len, c 609 drivers/staging/ralink-gdma/ralink-gdma.c struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c); c 617 drivers/staging/ralink-gdma/ralink-gdma.c dev_err(c->device->dev, "cyclic len too large %d\n", c 625 drivers/staging/ralink-gdma/ralink-gdma.c dev_err(c->device->dev, "alloc cyclic decs error\n"); c 636 drivers/staging/ralink-gdma/ralink-gdma.c dev_err(c->device->dev, "direction type %d error\n", c 655 drivers/staging/ralink-gdma/ralink-gdma.c static enum dma_status gdma_dma_tx_status(struct dma_chan *c, c 659 drivers/staging/ralink-gdma/ralink-gdma.c struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c); c 665 drivers/staging/ralink-gdma/ralink-gdma.c status = dma_cookie_status(c, cookie, state); c 689 drivers/staging/ralink-gdma/ralink-gdma.c dev_dbg(c->device->dev, "tx residue %d bytes\n", state->residue); c 694 drivers/staging/ralink-gdma/ralink-gdma.c static void gdma_dma_free_chan_resources(struct dma_chan *c) c 696 drivers/staging/ralink-gdma/ralink-gdma.c vchan_free_chan_resources(to_virt_chan(c)); c 91 drivers/staging/rtl8188eu/core/rtw_security.c u32 c; c 92 drivers/staging/rtl8188eu/core/rtw_security.c u8 *p = (u8 *)&c, *p1; c 98 drivers/staging/rtl8188eu/core/rtw_security.c c = 0x12340000; c 102 drivers/staging/rtl8188eu/core/rtw_security.c for (c = ((u32)k) << 24, j = 8; j > 0; --j) c 103 drivers/staging/rtl8188eu/core/rtw_security.c c = c & 0x80000000 ? 
(c << 1) ^ CRC32_POLY : (c << 1); c 780 drivers/staging/rtl8188eu/core/rtw_security.c static void construct_ctr_preload(u8 *ctr_preload, int a4_exists, int qc_exists, u8 *mpdu, u8 *pn_vector, int c); c 1050 drivers/staging/rtl8188eu/core/rtw_security.c static void construct_ctr_preload(u8 *ctr_preload, int a4_exists, int qc_exists, u8 *mpdu, u8 *pn_vector, int c) c 1068 drivers/staging/rtl8188eu/core/rtw_security.c ctr_preload[14] = (unsigned char)(c / 256); /* Ctr */ c 1069 drivers/staging/rtl8188eu/core/rtw_security.c ctr_preload[15] = (unsigned char)(c % 256); c 229 drivers/staging/rtl8192e/rtl8192e/rtl_wx.c u8 c = *extra; c 236 drivers/staging/rtl8192e/rtl8192e/rtl_wx.c if (c > 0) c 237 drivers/staging/rtl8192e/rtl8192e/rtl_wx.c rt_global_debug_component |= (1 << c); c 1456 drivers/staging/rtl8192e/rtllib_softmac.c u8 *c; c 1468 drivers/staging/rtl8192e/rtllib_softmac.c c = skb_put(skb, chlen+2); c 1469 drivers/staging/rtl8192e/rtllib_softmac.c *(c++) = MFIE_TYPE_CHALLENGE; c 1470 drivers/staging/rtl8192e/rtllib_softmac.c *(c++) = chlen; c 1471 drivers/staging/rtl8192e/rtllib_softmac.c memcpy(c, challenge, chlen); c 47 drivers/staging/rtl8192e/rtllib_softmac_wx.c int c = 0; c 49 drivers/staging/rtl8192e/rtllib_softmac_wx.c while ((c < 14) && (f != rtllib_wlan_frequencies[c])) c 50 drivers/staging/rtl8192e/rtllib_softmac_wx.c c++; c 54 drivers/staging/rtl8192e/rtllib_softmac_wx.c fwrq->m = c + 1; c 1223 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c u8 *c; c 1235 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c c = skb_put(skb, chlen + 2); c 1236 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c *(c++) = MFIE_TYPE_CHALLENGE; c 1237 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c *(c++) = chlen; c 1238 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c memcpy(c, challenge, chlen); c 48 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c int c = 0; c 50 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c while ((c < 14) && (f != ieee80211_wlan_frequencies[c])) c 51 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c c++; c 55 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c fwrq->m = c + 1; c 671 drivers/staging/rtl8712/rtl871x_ioctl_linux.c int c = 0; c 673 drivers/staging/rtl8712/rtl871x_ioctl_linux.c while ((c < 14) && (f != frequency_list[c])) c 674 drivers/staging/rtl8712/rtl871x_ioctl_linux.c c++; c 676 drivers/staging/rtl8712/rtl871x_ioctl_linux.c fwrq->m = c + 1; c 115 drivers/staging/rtl8712/rtl871x_security.c u32 c; c 116 drivers/staging/rtl8712/rtl871x_security.c u8 *p = (u8 *)&c, *p1; c 124 drivers/staging/rtl8712/rtl871x_security.c for (c = ((u32)k) << 24, j = 8; j > 0; --j) c 125 drivers/staging/rtl8712/rtl871x_security.c c = c & 0x80000000 ? 
(c << 1) ^ CRC32_POLY_BE : (c << 1); c 982 drivers/staging/rtl8712/rtl871x_security.c u8 *mpdu, u8 *pn_vector, sint c) c 998 drivers/staging/rtl8712/rtl871x_security.c ctr_preload[14] = (unsigned char) (c / 256); /* Ctr */ c 999 drivers/staging/rtl8712/rtl871x_security.c ctr_preload[15] = (unsigned char) (c % 256); c 1297 drivers/staging/rtl8723bs/core/rtw_ieee80211.c u8 c; c 1308 drivers/staging/rtl8723bs/core/rtw_ieee80211.c c = frame_body[0]; c 1310 drivers/staging/rtl8723bs/core/rtw_ieee80211.c switch (c) { c 1318 drivers/staging/rtl8723bs/core/rtw_ieee80211.c *category = c; c 171 drivers/staging/rtl8723bs/core/rtw_security.c u32 c; c 172 drivers/staging/rtl8723bs/core/rtw_security.c u8 *p = (u8 *)&c, *p1; c 175 drivers/staging/rtl8723bs/core/rtw_security.c c = 0x12340000; c 179 drivers/staging/rtl8723bs/core/rtw_security.c for (c = ((u32)k) << 24, j = 8; j > 0; --j) { c 180 drivers/staging/rtl8723bs/core/rtw_security.c c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY_BE : (c << 1); c 944 drivers/staging/rtl8723bs/core/rtw_security.c sint c, c 1281 drivers/staging/rtl8723bs/core/rtw_security.c sint c, c 1310 drivers/staging/rtl8723bs/core/rtw_security.c ctr_preload[14] = (unsigned char) (c / 256); /* Ctr */ c 1311 drivers/staging/rtl8723bs/core/rtw_security.c ctr_preload[15] = (unsigned char) (c % 256); c 86 drivers/staging/rtl8723bs/hal/HalPhyRf.c TXPWRTRACK_CFG c; c 97 drivers/staging/rtl8723bs/hal/HalPhyRf.c ConfigureTxpowerTrack(pDM_Odm, &c); c 99 drivers/staging/rtl8723bs/hal/HalPhyRf.c (*c.GetDeltaSwingTable)( c 123 drivers/staging/rtl8723bs/hal/HalPhyRf.c ThermalValue = (u8)PHY_QueryRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, c.ThermalRegAddr, 0xfc00); /* 0x42: RF Reg[15:10] 88E */ c 144 drivers/staging/rtl8723bs/hal/HalPhyRf.c if (pDM_Odm->RFCalibrateInfo.ThermalValue_AVG_index == c.AverageThermalNum) /* Average times = c.AverageThermalNum */ c 147 drivers/staging/rtl8723bs/hal/HalPhyRf.c for (i = 0; i < c.AverageThermalNum; i++) { c 198 drivers/staging/rtl8723bs/hal/HalPhyRf.c if (delta_LCK >= c.Threshold_IQK) { c 206 drivers/staging/rtl8723bs/hal/HalPhyRf.c c.Threshold_IQK c 210 drivers/staging/rtl8723bs/hal/HalPhyRf.c if (c.PHY_LCCalibrate) c 211 drivers/staging/rtl8723bs/hal/HalPhyRf.c (*c.PHY_LCCalibrate)(pDM_Odm); c 256 drivers/staging/rtl8723bs/hal/HalPhyRf.c if (c.RfPathCount > 1) { c 317 drivers/staging/rtl8723bs/hal/HalPhyRf.c if (c.RfPathCount > 1) { c 350 drivers/staging/rtl8723bs/hal/HalPhyRf.c for (p = ODM_RF_PATH_A; p < c.RfPathCount; p++) { c 423 drivers/staging/rtl8723bs/hal/HalPhyRf.c if (pDM_Odm->RFCalibrateInfo.OFDM_index[p] > c.SwingTableSize_OFDM-1) c 424 drivers/staging/rtl8723bs/hal/HalPhyRf.c pDM_Odm->RFCalibrateInfo.OFDM_index[p] = c.SwingTableSize_OFDM-1; c 434 drivers/staging/rtl8723bs/hal/HalPhyRf.c if (pDM_Odm->RFCalibrateInfo.CCK_index > c.SwingTableSize_CCK-1) c 435 drivers/staging/rtl8723bs/hal/HalPhyRf.c pDM_Odm->RFCalibrateInfo.CCK_index = c.SwingTableSize_CCK-1; c 451 drivers/staging/rtl8723bs/hal/HalPhyRf.c for (p = ODM_RF_PATH_A; p < c.RfPathCount; p++) c 466 drivers/staging/rtl8723bs/hal/HalPhyRf.c for (p = ODM_RF_PATH_A; p < c.RfPathCount; p++) { c 508 drivers/staging/rtl8723bs/hal/HalPhyRf.c if (c.RfPathCount > 1) c 538 drivers/staging/rtl8723bs/hal/HalPhyRf.c if (c.RfPathCount > 1) c 573 drivers/staging/rtl8723bs/hal/HalPhyRf.c for (p = ODM_RF_PATH_A; p < c.RfPathCount; p++) c 574 drivers/staging/rtl8723bs/hal/HalPhyRf.c (*c.ODM_TxPwrTrackSetPwr)(pDM_Odm, MIX_MODE, p, 0); c 593 drivers/staging/rtl8723bs/hal/HalPhyRf.c for (p = ODM_RF_PATH_A; p < 
c.RfPathCount; p++) c 594 drivers/staging/rtl8723bs/hal/HalPhyRf.c (*c.ODM_TxPwrTrackSetPwr)(pDM_Odm, MIX_MODE, p, Indexforchannel); c 599 drivers/staging/rtl8723bs/hal/HalPhyRf.c for (p = ODM_RF_PATH_A; p < c.RfPathCount; p++) c 1523 drivers/staging/rtl8723bs/hal/hal_com.c char c = In[(*Start)++]; c 1525 drivers/staging/rtl8723bs/hal/hal_com.c if (c != LeftQualifier) c 1529 drivers/staging/rtl8723bs/hal/hal_com.c while ((c = In[(*Start)++]) != RightQualifier) c 466 drivers/staging/speakup/main.c u16 c; c 470 drivers/staging/speakup/main.c c = w & 0xff; c 474 drivers/staging/speakup/main.c c |= 0x100; c 477 drivers/staging/speakup/main.c ch = inverse_translate(vc, c, 1); c 2299 drivers/staging/speakup/main.c if (param->c == '\b') { c 2302 drivers/staging/speakup/main.c u16 d = param->c; c 115 drivers/staging/speakup/serialio.c int c; c 119 drivers/staging/speakup/serialio.c c = inb_p(speakup_info.port_tts + UART_RX); c 120 drivers/staging/speakup/serialio.c synth->read_buff_add((u_char)c); c 34 drivers/staging/speakup/speakup_dectlk.c static void read_buff_add(u_char c); c 166 drivers/staging/speakup/speakup_dectlk.c static void read_buff_add(u_char c) c 170 drivers/staging/speakup/speakup_dectlk.c if (c == 0x01) { c 177 drivers/staging/speakup/speakup_dectlk.c } else if (c == 0x13) { c 179 drivers/staging/speakup/speakup_dectlk.c } else if (c == 0x11) { c 181 drivers/staging/speakup/speakup_dectlk.c } else if (is_indnum(&c)) { c 183 drivers/staging/speakup/speakup_dectlk.c ind = c; c 185 drivers/staging/speakup/speakup_dectlk.c ind = ind * 10 + c; c 186 drivers/staging/speakup/speakup_dectlk.c } else if ((c > 31) && (c < 127)) { c 258 drivers/staging/speakup/speakup_soft.c u_char c = ch; c 260 drivers/staging/speakup/speakup_soft.c if (copy_to_user(cp, &c, 1)) c 301 drivers/staging/unisys/visornic/visornic_main.c int c; c 305 drivers/staging/unisys/visornic/visornic_main.c c = visor_copy_fragsinfo_from_skb(skbinlist, c 310 drivers/staging/unisys/visornic/visornic_main.c if (c < 0) c 311 drivers/staging/unisys/visornic/visornic_main.c return c; c 312 drivers/staging/unisys/visornic/visornic_main.c count += c; c 18 drivers/staging/uwb/allocator.c unsigned char c; c 30 drivers/staging/uwb/allocator.c c = UWB_RSV_MAS_SAFE; c 33 drivers/staging/uwb/allocator.c c = UWB_RSV_MAS_UNSAFE; c 37 drivers/staging/uwb/allocator.c bm[col * UWB_MAS_PER_ZONE + mas] = c; c 48 drivers/staging/uwb/allocator.c unsigned char c; c 51 drivers/staging/uwb/allocator.c c = UWB_RSV_MAS_SAFE; c 58 drivers/staging/uwb/allocator.c c = UWB_RSV_MAS_UNSAFE; c 63 drivers/staging/uwb/allocator.c bm[col * UWB_NUM_ZONES + mas] = c; c 64 drivers/staging/uwb/allocator.c if(c == UWB_RSV_MAS_SAFE) c 571 drivers/staging/uwb/drp.c struct uwb_cnflt_alien *c; c 583 drivers/staging/uwb/drp.c list_for_each_entry(c, &rc->cnflt_alien_list, rc_node) { c 585 drivers/staging/uwb/drp.c c->mas.bm, UWB_NUM_MAS); c 23 drivers/staging/uwb/include/whci.h # define UWBCAPINFO_TO_N_CAPS(c) (((c) >> 0) & 0xFull) c 25 drivers/staging/uwb/include/whci.h # define UWBCAPDATA_TO_VERSION(c) (((c) >> 32) & 0xFFFFull) c 26 drivers/staging/uwb/include/whci.h # define UWBCAPDATA_TO_OFFSET(c) (((c) >> 18) & 0x3FFFull) c 27 drivers/staging/uwb/include/whci.h # define UWBCAPDATA_TO_BAR(c) (((c) >> 16) & 0x3ull) c 28 drivers/staging/uwb/include/whci.h # define UWBCAPDATA_TO_SIZE(c) ((((c) >> 8) & 0xFFull) * sizeof(u32)) c 29 drivers/staging/uwb/include/whci.h # define UWBCAPDATA_TO_CAP_ID(c) (((c) >> 0) & 0xFFull) c 1164 
drivers/staging/vc04_services/bcm2835-camera/controls.c int c; c 1167 drivers/staging/vc04_services/bcm2835-camera/controls.c for (c = 0; c < V4L2_CTRL_COUNT; c++) { c 1168 drivers/staging/vc04_services/bcm2835-camera/controls.c if ((dev->ctrls[c]) && (v4l2_ctrls[c].setter)) { c 1169 drivers/staging/vc04_services/bcm2835-camera/controls.c ret = v4l2_ctrls[c].setter(dev, dev->ctrls[c], c 1170 drivers/staging/vc04_services/bcm2835-camera/controls.c &v4l2_ctrls[c]); c 1171 drivers/staging/vc04_services/bcm2835-camera/controls.c if (!v4l2_ctrls[c].ignore_errors && ret) { c 1174 drivers/staging/vc04_services/bcm2835-camera/controls.c c); c 1233 drivers/staging/vc04_services/bcm2835-camera/controls.c int c; c 1238 drivers/staging/vc04_services/bcm2835-camera/controls.c for (c = 0; c < V4L2_CTRL_COUNT; c++) { c 1239 drivers/staging/vc04_services/bcm2835-camera/controls.c ctrl = &v4l2_ctrls[c]; c 1243 drivers/staging/vc04_services/bcm2835-camera/controls.c dev->ctrls[c] = c 1272 drivers/staging/vc04_services/bcm2835-camera/controls.c dev->ctrls[c] = c 1281 drivers/staging/vc04_services/bcm2835-camera/controls.c dev->ctrls[c] = c 1296 drivers/staging/vc04_services/bcm2835-camera/controls.c dev->ctrls[c]->priv = (void *)ctrl; c 1300 drivers/staging/vc04_services/bcm2835-camera/controls.c pr_err("error adding control %d/%d id 0x%x\n", c, c 1305 drivers/staging/vc04_services/bcm2835-camera/controls.c for (c = 0; c < V4L2_CTRL_COUNT; c++) { c 1306 drivers/staging/vc04_services/bcm2835-camera/controls.c ctrl = &v4l2_ctrls[c]; c 1311 drivers/staging/vc04_services/bcm2835-camera/controls.c &dev->ctrls[c + 1], c 19 drivers/staging/vc04_services/bcm2835-camera/mmal-common.h #define MMAL_FOURCC(a, b, c, d) ((a) | (b << 8) | (c << 16) | (d << 24)) c 60 drivers/staging/wlan-ng/hfa384x.h #define HFA384x_FIRMWARE_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c)) c 407 drivers/staging/wlan-ng/prism2fw.c int c; c 419 drivers/staging/wlan-ng/prism2fw.c for (c = 0; c < nfchunks; c++) { c 420 drivers/staging/wlan-ng/prism2fw.c cstart = fchunk[c].addr; c 421 drivers/staging/wlan-ng/prism2fw.c cend = fchunk[c].addr + fchunk[c].len; c 433 drivers/staging/wlan-ng/prism2fw.c if (c >= nfchunks) { c 442 drivers/staging/wlan-ng/prism2fw.c dest = fchunk[c].data + chunkoff; c 680 drivers/staging/wlan-ng/prism2fw.c int c; /* chunk index */ c 728 drivers/staging/wlan-ng/prism2fw.c for (c = 0; c < nfchunks; c++) { c 729 drivers/staging/wlan-ng/prism2fw.c cstart = fchunk[c].addr; c 730 drivers/staging/wlan-ng/prism2fw.c cend = fchunk[c].addr + fchunk[c].len; c 734 drivers/staging/wlan-ng/prism2fw.c if (c >= nfchunks) { c 743 drivers/staging/wlan-ng/prism2fw.c dest = fchunk[c].data + chunkoff; c 746 drivers/staging/wlan-ng/prism2fw.c c, chunkoff); c 155 drivers/staging/wusbcore/host/whci/whci-hc.h #define QH_INFO2_MAX_COUNT(c) ((c) << 8) /* max isoc/int pkts per zone */ c 1023 drivers/target/iscsi/iscsi_target_nego.c char *c; c 1027 drivers/target/iscsi/iscsi_target_nego.c c = ¶m_buf[i]; c 1028 drivers/target/iscsi/iscsi_target_nego.c if (!isupper(*c)) c 1031 drivers/target/iscsi/iscsi_target_nego.c *c = tolower(*c); c 1265 drivers/target/iscsi/iscsi_target_util.c struct iscsi_data_count c; c 1270 drivers/target/iscsi/iscsi_target_util.c memset(&c, 0, sizeof(struct iscsi_data_count)); c 1271 drivers/target/iscsi/iscsi_target_util.c c.iov = iov; c 1272 drivers/target/iscsi/iscsi_target_util.c c.iov_count = iov_count; c 1273 drivers/target/iscsi/iscsi_target_util.c c.data_length = data; c 1274 drivers/target/iscsi/iscsi_target_util.c 
c.type = ISCSI_RX_DATA; c 1276 drivers/target/iscsi/iscsi_target_util.c return iscsit_do_rx_data(conn, &c); c 1915 drivers/target/sbp/sbp_target.c char c, nibble; c 1920 drivers/target/sbp/sbp_target.c c = *cp; c 1921 drivers/target/sbp/sbp_target.c if (c == '\n' && cp[1] == '\0') c 1923 drivers/target/sbp/sbp_target.c if (c == '\0') { c 1930 drivers/target/sbp/sbp_target.c if (isdigit(c)) c 1931 drivers/target/sbp/sbp_target.c nibble = c - '0'; c 1932 drivers/target/sbp/sbp_target.c else if (isxdigit(c)) c 1933 drivers/target/sbp/sbp_target.c nibble = tolower(c) - 'a' + 10; c 2790 drivers/target/target_core_configfs.c struct config_item *item, const char *p, size_t c) \ c 2817 drivers/target/target_core_configfs.c return c; \ c 52 drivers/target/tcm_fc/tfc_conf.c char c; c 60 drivers/target/tcm_fc/tfc_conf.c c = *cp; c 61 drivers/target/tcm_fc/tfc_conf.c if (c == '\n' && cp[1] == '\0') c 65 drivers/target/tcm_fc/tfc_conf.c if (c == ':') c 70 drivers/target/tcm_fc/tfc_conf.c if (c == '\0') { c 77 drivers/target/tcm_fc/tfc_conf.c val = hex_to_bin(c); c 78 drivers/target/tcm_fc/tfc_conf.c if (val < 0 || (strict && isupper(c))) c 19 drivers/tee/optee/call.c struct completion c; c 41 drivers/tee/optee/call.c init_completion(&w->c); c 50 drivers/tee/optee/call.c wait_for_completion(&w->c); c 56 drivers/tee/optee/call.c reinit_completion(&w->c); c 67 drivers/tee/optee/call.c if (!completion_done(&w->c)) { c 68 drivers/tee/optee/call.c complete(&w->c); c 96 drivers/tee/optee/call.c if (completion_done(&w->c)) c 237 drivers/tee/optee/call.c msg_arg->params[1].u.value.c = arg->clnt_login; c 61 drivers/tee/optee/core.c p->u.value.c = mp->u.value.c; c 187 drivers/tee/optee/core.c mp->u.value.c = p->u.value.c; c 140 drivers/tee/optee/optee_msg.h u64 c; c 17 drivers/tee/optee/rpc.c struct completion c; c 64 drivers/tee/optee/rpc.c init_completion(&w->c); c 78 drivers/tee/optee/rpc.c wait_for_completion(&w->c); c 91 drivers/tee/optee/rpc.c complete(&w->c); c 180 drivers/tee/optee/rpc.c param.u.value.c = 0; c 188 drivers/tee/optee/rpc.c shm = tee_shm_get_from_id(optee->supp.ctx, param.u.value.c); c 296 drivers/tee/optee/rpc.c param.u.value.c = 0; c 19 drivers/tee/optee/supp.c struct completion c; c 50 drivers/tee/optee/supp.c complete(&req->c); c 58 drivers/tee/optee/supp.c complete(&req->c); c 97 drivers/tee/optee/supp.c init_completion(&req->c); c 116 drivers/tee/optee/supp.c while (wait_for_completion_interruptible(&req->c)) { c 276 drivers/tee/optee/supp.c param->u.value.c = 0; c 366 drivers/tee/optee/supp.c p->u.value.c = param[n + num_meta].u.value.c; c 379 drivers/tee/optee/supp.c complete(&req->c); c 229 drivers/tee/tee_core.c params[n].u.value.c = ip.c; c 242 drivers/tee/tee_core.c shm = tee_shm_get_from_id(ctx, ip.c); c 283 drivers/tee/tee_core.c put_user(p->u.value.c, &up->c)) c 476 drivers/tee/tee_core.c ip.c = p->u.value.c; c 484 drivers/tee/tee_core.c ip.c = (u64)-1; /* invalid shm id */ c 488 drivers/tee/tee_core.c ip.c = p->u.memref.shm->id; c 493 drivers/tee/tee_core.c ip.c = 0; c 578 drivers/tee/tee_core.c p->u.value.c = ip.c; c 24 drivers/thermal/armada_thermal.c #define TO_MCELSIUS(c) ((c) * 1000) c 466 drivers/thermal/intel/x86_pkg_temp_thermal.c struct cpuinfo_x86 *c = &cpu_data(cpu); c 469 drivers/thermal/intel/x86_pkg_temp_thermal.c if (!cpu_has(c, X86_FEATURE_DTHERM) || !cpu_has(c, X86_FEATURE_PTS)) c 142 drivers/thermal/rcar_thermal.c #define rcar_thermal_common_read(c, r) \ c 143 drivers/thermal/rcar_thermal.c _rcar_thermal_common_read(c, COMMON_ ##r) c 150 
drivers/thermal/rcar_thermal.c #define rcar_thermal_common_write(c, r, d) \ c 151 drivers/thermal/rcar_thermal.c _rcar_thermal_common_write(c, COMMON_ ##r, d) c 158 drivers/thermal/rcar_thermal.c #define rcar_thermal_common_bset(c, r, m, d) \ c 159 drivers/thermal/rcar_thermal.c _rcar_thermal_common_bset(c, COMMON_ ##r, m, d) c 55 drivers/thermal/ti-soc-thermal/ti-thermal-common.c static inline int ti_thermal_hotspot_temperature(int t, int s, int c) c 57 drivers/thermal/ti-soc-thermal/ti-thermal-common.c int delta = t * s / 1000 + c; c 832 drivers/tty/amiserial.c int c, ret = 0; c 844 drivers/tty/amiserial.c c = CIRC_SPACE_TO_END(info->xmit.head, c 847 drivers/tty/amiserial.c if (count < c) c 848 drivers/tty/amiserial.c c = count; c 849 drivers/tty/amiserial.c if (c <= 0) { c 852 drivers/tty/amiserial.c memcpy(info->xmit.buf + info->xmit.head, buf, c); c 853 drivers/tty/amiserial.c info->xmit.head = ((info->xmit.head + c) & c 855 drivers/tty/amiserial.c buf += c; c 856 drivers/tty/amiserial.c count -= c; c 857 drivers/tty/amiserial.c ret += c; c 1747 drivers/tty/amiserial.c static void amiga_serial_putc(char c) c 1749 drivers/tty/amiserial.c custom.serdat = (unsigned char)c | 0x100; c 1776 drivers/tty/amiserial.c static struct tty_driver *serial_console_device(struct console *c, int *index) c 1768 drivers/tty/cyclades.c int c, ret = 0; c 1782 drivers/tty/cyclades.c c = min(count, (int)(SERIAL_XMIT_SIZE - info->xmit_cnt - 1)); c 1783 drivers/tty/cyclades.c c = min(c, (int)(SERIAL_XMIT_SIZE - info->xmit_head)); c 1785 drivers/tty/cyclades.c if (c <= 0) c 1788 drivers/tty/cyclades.c memcpy(info->port.xmit_buf + info->xmit_head, buf, c); c 1789 drivers/tty/cyclades.c info->xmit_head = (info->xmit_head + c) & c 1791 drivers/tty/cyclades.c info->xmit_cnt += c; c 1792 drivers/tty/cyclades.c buf += c; c 1793 drivers/tty/cyclades.c count -= c; c 1794 drivers/tty/cyclades.c ret += c; c 3403 drivers/tty/cyclades.c const struct zfile_config *c, *cs; c 3433 drivers/tty/cyclades.c for (c = cs; c < cs + h->n_config; c++) { c 3434 drivers/tty/cyclades.c for (a = 0; a < c->n_blocks; a++) c 3435 drivers/tty/cyclades.c if (c->block_list[a] > h->n_blocks) { c 3439 drivers/tty/cyclades.c if (c->mailbox == mailbox && c->function == 0) /* 0 is normal */ c 3454 drivers/tty/cyclades.c for (c = cs; c < cs + h->n_config; c++) c 3455 drivers/tty/cyclades.c if (c->mailbox == mailbox && c->function == 0) c 3458 drivers/tty/cyclades.c for (a = 0; a < c->n_blocks; a++) { c 3459 drivers/tty/cyclades.c b = &bs[c->block_list[a]]; c 144 drivers/tty/ehv_bytechan.c unsigned int c = *count; c 146 drivers/tty/ehv_bytechan.c if (c < sizeof(buffer)) { c 147 drivers/tty/ehv_bytechan.c memcpy(buffer, p, c); c 148 drivers/tty/ehv_bytechan.c memset(&buffer[c], 0, sizeof(buffer) - c); c 181 drivers/tty/ehv_bytechan.c static void ehv_bc_udbg_putc(char c) c 183 drivers/tty/ehv_bytechan.c if (c == '\n') c 186 drivers/tty/ehv_bytechan.c byte_channel_spin_send(c); c 263 drivers/tty/ehv_bytechan.c char c; c 266 drivers/tty/ehv_bytechan.c c = *s++; c 268 drivers/tty/ehv_bytechan.c if (c == '\n') c 271 drivers/tty/ehv_bytechan.c s2[j++] = c; c 214 drivers/tty/goldfish.c static struct tty_driver *goldfish_tty_console_device(struct console *c, c 217 drivers/tty/goldfish.c *index = c->index; c 154 drivers/tty/hvc/hvc_console.c char c[N_OUTBUF] __ALIGNED__; c 167 drivers/tty/hvc/hvc_console.c if (count > 0 && i < sizeof(c)) { c 169 drivers/tty/hvc/hvc_console.c c[i++] = '\r'; c 172 drivers/tty/hvc/hvc_console.c c[i++] = b[n++]; c 177 
drivers/tty/hvc/hvc_console.c r = cons_ops[index]->put_chars(vtermnos[index], c, i); c 190 drivers/tty/hvc/hvc_console.c memmove(c, c+r, i); c 197 drivers/tty/hvc/hvc_console.c static struct tty_driver *hvc_console_device(struct console *c, int *index) c 199 drivers/tty/hvc/hvc_console.c if (vtermnos[c->index] == -1) c 202 drivers/tty/hvc/hvc_console.c *index = c->index; c 269 drivers/tty/hvc/hvc_opal.c static void udbg_opal_putc(char c) c 274 drivers/tty/hvc/hvc_opal.c if (c == '\n') c 280 drivers/tty/hvc/hvc_opal.c count = opal_put_chars(termno, &c, 1); c 283 drivers/tty/hvc/hvc_opal.c count = hvc_opal_hvsi_put_chars(termno, &c, 1); c 298 drivers/tty/hvc/hvc_opal.c char c; c 302 drivers/tty/hvc/hvc_opal.c rc = opal_get_chars(termno, &c, 1); c 305 drivers/tty/hvc/hvc_opal.c rc = hvc_opal_hvsi_get_chars(termno, &c, 1); c 310 drivers/tty/hvc/hvc_opal.c return c; c 30 drivers/tty/hvc/hvc_riscv_sbi.c int i, c; c 33 drivers/tty/hvc/hvc_riscv_sbi.c c = sbi_console_getchar(); c 34 drivers/tty/hvc/hvc_riscv_sbi.c if (c < 0) c 36 drivers/tty/hvc/hvc_riscv_sbi.c buf[i] = c; c 49 drivers/tty/hvc/hvc_rtas.c int i, c; c 52 drivers/tty/hvc/hvc_rtas.c if (rtas_call(rtascons_get_char_token, 0, 2, &c)) c 55 drivers/tty/hvc/hvc_rtas.c buf[i] = c; c 34 drivers/tty/hvc/hvc_udbg.c int i, c; c 40 drivers/tty/hvc/hvc_udbg.c if ((c = udbg_getc_poll()) == -1) c 42 drivers/tty/hvc/hvc_udbg.c buf[i] = c; c 227 drivers/tty/hvc/hvc_vio.c static void udbg_hvc_putc(char c) c 235 drivers/tty/hvc/hvc_vio.c if (c == '\n') c 245 drivers/tty/hvc/hvc_vio.c bounce_buffer[0] = c; c 249 drivers/tty/hvc/hvc_vio.c count = hvterm_hvsi_put_chars(0, &c, 1); c 258 drivers/tty/hvc/hvc_vio.c char c; c 265 drivers/tty/hvc/hvc_vio.c rc = hvterm_raw_get_chars(0, &c, 1); c 268 drivers/tty/hvc/hvc_vio.c rc = hvterm_hvsi_get_chars(0, &c, 1); c 273 drivers/tty/hvc/hvc_vio.c return c; c 326 drivers/tty/hvc/hvsi.c char c = buf[i]; c 328 drivers/tty/hvc/hvsi.c if (c == '\0') { c 332 drivers/tty/hvc/hvsi.c handle_sysrq(c); c 337 drivers/tty/hvc/hvsi.c tty_insert_flip_char(&hp->port, c, 0); c 1087 drivers/tty/hvc/hvsi.c char c[HVSI_MAX_OUTGOING_DATA] __ALIGNED__; c 1100 drivers/tty/hvc/hvsi.c if (count > 0 && i < sizeof(c)) { c 1102 drivers/tty/hvc/hvsi.c c[i++] = '\r'; c 1105 drivers/tty/hvc/hvsi.c c[i++] = buf[n++]; c 1110 drivers/tty/hvc/hvsi.c ret = hvsi_put_chars(hp, c, i); c 141 drivers/tty/isicom.c #define isicom_paranoia_check(a, b, c) __isicom_paranoia_check((a), (b), (c)) c 143 drivers/tty/isicom.c #define isicom_paranoia_check(a, b, c) 0 c 300 drivers/tty/mips_ejtag_fdc.c static void mips_ejtag_fdc_console_write(struct console *c, const char *s, c 304 drivers/tty/mips_ejtag_fdc.c container_of(c, struct mips_ejtag_fdc_console, cons); c 349 drivers/tty/mips_ejtag_fdc.c __raw_writel(word.word, regs + REG_FDTX(c->index)); c 355 drivers/tty/mips_ejtag_fdc.c static struct tty_driver *mips_ejtag_fdc_console_device(struct console *c, c 359 drivers/tty/mips_ejtag_fdc.c container_of(c, struct mips_ejtag_fdc_console, cons); c 361 drivers/tty/mips_ejtag_fdc.c *index = c->index; c 366 drivers/tty/mips_ejtag_fdc.c static int __init mips_ejtag_fdc_console_init(struct mips_ejtag_fdc_console *c) c 372 drivers/tty/mips_ejtag_fdc.c raw_spin_lock_irqsave(&c->lock, flags); c 374 drivers/tty/mips_ejtag_fdc.c if (c->initialised) c 383 drivers/tty/mips_ejtag_fdc.c c->initialised = true; c 384 drivers/tty/mips_ejtag_fdc.c c->regs[smp_processor_id()] = regs; c 385 drivers/tty/mips_ejtag_fdc.c register_console(&c->cons); c 387 drivers/tty/mips_ejtag_fdc.c 
raw_spin_unlock_irqrestore(&c->lock, flags); c 1871 drivers/tty/moxa.c unsigned int c, total; c 1882 drivers/tty/moxa.c c = (head > tail) ? (head - tail - 1) : (head - tail + tx_mask); c 1883 drivers/tty/moxa.c if (c > len) c 1884 drivers/tty/moxa.c c = len; c 1885 drivers/tty/moxa.c moxaLog.txcnt[port->port.tty->index] += c; c 1886 drivers/tty/moxa.c total = c; c 1890 drivers/tty/moxa.c while (c > 0) { c 1895 drivers/tty/moxa.c len = (c > len) ? len : c; c 1900 drivers/tty/moxa.c c -= len; c 1905 drivers/tty/moxa.c while (c > 0) { c 1907 drivers/tty/moxa.c if (len > c) c 1908 drivers/tty/moxa.c len = c; c 1916 drivers/tty/moxa.c c -= len; c 1099 drivers/tty/mxser.c int c, total = 0; c 1107 drivers/tty/mxser.c c = min_t(int, count, min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1, c 1109 drivers/tty/mxser.c if (c <= 0) c 1112 drivers/tty/mxser.c memcpy(info->port.xmit_buf + info->xmit_head, buf, c); c 1114 drivers/tty/mxser.c info->xmit_head = (info->xmit_head + c) & c 1116 drivers/tty/mxser.c info->xmit_cnt += c; c 1119 drivers/tty/mxser.c buf += c; c 1120 drivers/tty/mxser.c count -= c; c 1121 drivers/tty/mxser.c total += c; c 370 drivers/tty/n_gsm.c static inline u8 gsm_fcs_add(u8 fcs, u8 c) c 372 drivers/tty/n_gsm.c return gsm_fcs8[fcs ^ c]; c 385 drivers/tty/n_gsm.c static inline u8 gsm_fcs_add_block(u8 fcs, u8 *c, int len) c 388 drivers/tty/n_gsm.c fcs = gsm_fcs8[fcs ^ *c++]; c 401 drivers/tty/n_gsm.c static int gsm_read_ea(unsigned int *val, u8 c) c 405 drivers/tty/n_gsm.c *val |= c >> 1; c 407 drivers/tty/n_gsm.c return c & EA; c 1855 drivers/tty/n_gsm.c static void gsm0_receive(struct gsm_mux *gsm, unsigned char c) c 1861 drivers/tty/n_gsm.c if (c == GSM0_SOF) { c 1869 drivers/tty/n_gsm.c gsm->fcs = gsm_fcs_add(gsm->fcs, c); c 1870 drivers/tty/n_gsm.c if (gsm_read_ea(&gsm->address, c)) c 1874 drivers/tty/n_gsm.c gsm->fcs = gsm_fcs_add(gsm->fcs, c); c 1875 drivers/tty/n_gsm.c gsm->control = c; c 1879 drivers/tty/n_gsm.c gsm->fcs = gsm_fcs_add(gsm->fcs, c); c 1880 drivers/tty/n_gsm.c if (gsm_read_ea(&gsm->len, c)) { c 1896 drivers/tty/n_gsm.c gsm->fcs = gsm_fcs_add(gsm->fcs, c); c 1897 drivers/tty/n_gsm.c len = c; c 1911 drivers/tty/n_gsm.c gsm->buf[gsm->count++] = c; c 1916 drivers/tty/n_gsm.c gsm->received_fcs = c; c 1921 drivers/tty/n_gsm.c if (c == GSM0_SOF) { c 1937 drivers/tty/n_gsm.c static void gsm1_receive(struct gsm_mux *gsm, unsigned char c) c 1939 drivers/tty/n_gsm.c if (c == GSM1_SOF) { c 1961 drivers/tty/n_gsm.c if (c == GSM1_ESCAPE) { c 1971 drivers/tty/n_gsm.c c ^= GSM1_ESCAPE_BITS; c 1981 drivers/tty/n_gsm.c gsm->fcs = gsm_fcs_add(gsm->fcs, c); c 1982 drivers/tty/n_gsm.c if (gsm_read_ea(&gsm->address, c)) c 1986 drivers/tty/n_gsm.c gsm->fcs = gsm_fcs_add(gsm->fcs, c); c 1987 drivers/tty/n_gsm.c gsm->control = c; c 1996 drivers/tty/n_gsm.c gsm->buf[gsm->count++] = c; c 2225 drivers/tty/n_gsm.c struct gsm_config *c) c 2227 drivers/tty/n_gsm.c memset(c, 0, sizeof(*c)); c 2228 drivers/tty/n_gsm.c c->adaption = gsm->adaption; c 2229 drivers/tty/n_gsm.c c->encapsulation = gsm->encoding; c 2230 drivers/tty/n_gsm.c c->initiator = gsm->initiator; c 2231 drivers/tty/n_gsm.c c->t1 = gsm->t1; c 2232 drivers/tty/n_gsm.c c->t2 = gsm->t2; c 2233 drivers/tty/n_gsm.c c->t3 = 0; /* Not supported */ c 2234 drivers/tty/n_gsm.c c->n2 = gsm->n2; c 2236 drivers/tty/n_gsm.c c->i = 1; c 2238 drivers/tty/n_gsm.c c->i = 2; c 2239 drivers/tty/n_gsm.c pr_debug("Ftype %d i %d\n", gsm->ftype, c->i); c 2240 drivers/tty/n_gsm.c c->mru = gsm->mru; c 2241 drivers/tty/n_gsm.c c->mtu = gsm->mtu; c 2242 
drivers/tty/n_gsm.c c->k = 0; c 2245 drivers/tty/n_gsm.c static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c) c 2251 drivers/tty/n_gsm.c if ((c->adaption != 1 && c->adaption != 2) || c->k) c 2254 drivers/tty/n_gsm.c if (c->mru > MAX_MRU || c->mtu > MAX_MTU || c->mru < 8 || c->mtu < 8) c 2256 drivers/tty/n_gsm.c if (c->n2 < 3) c 2258 drivers/tty/n_gsm.c if (c->encapsulation > 1) /* Basic, advanced, no I */ c 2260 drivers/tty/n_gsm.c if (c->initiator > 1) c 2262 drivers/tty/n_gsm.c if (c->i == 0 || c->i > 2) /* UIH and UI only */ c 2269 drivers/tty/n_gsm.c if (c->t1 != 0 && c->t1 != gsm->t1) c 2271 drivers/tty/n_gsm.c if (c->t2 != 0 && c->t2 != gsm->t2) c 2273 drivers/tty/n_gsm.c if (c->encapsulation != gsm->encoding) c 2275 drivers/tty/n_gsm.c if (c->adaption != gsm->adaption) c 2278 drivers/tty/n_gsm.c if (c->initiator != gsm->initiator) c 2280 drivers/tty/n_gsm.c if (c->mru != gsm->mru) c 2282 drivers/tty/n_gsm.c if (c->mtu != gsm->mtu) c 2301 drivers/tty/n_gsm.c gsm->initiator = c->initiator; c 2302 drivers/tty/n_gsm.c gsm->mru = c->mru; c 2303 drivers/tty/n_gsm.c gsm->mtu = c->mtu; c 2304 drivers/tty/n_gsm.c gsm->encoding = c->encapsulation; c 2305 drivers/tty/n_gsm.c gsm->adaption = c->adaption; c 2306 drivers/tty/n_gsm.c gsm->n2 = c->n2; c 2308 drivers/tty/n_gsm.c if (c->i == 1) c 2310 drivers/tty/n_gsm.c else if (c->i == 2) c 2313 drivers/tty/n_gsm.c if (c->t1) c 2314 drivers/tty/n_gsm.c gsm->t1 = c->t1; c 2315 drivers/tty/n_gsm.c if (c->t2) c 2316 drivers/tty/n_gsm.c gsm->t2 = c->t2; c 2614 drivers/tty/n_gsm.c struct gsm_config c; c 2620 drivers/tty/n_gsm.c gsm_copy_config_values(gsm, &c); c 2621 drivers/tty/n_gsm.c if (copy_to_user((void *)arg, &c, sizeof(c))) c 2625 drivers/tty/n_gsm.c if (copy_from_user(&c, (void *)arg, sizeof(c))) c 2627 drivers/tty/n_gsm.c return gsm_config(gsm, &c); c 116 drivers/tty/n_r3964.c static void receive_char(struct r3964_info *pInfo, const unsigned char c); c 557 drivers/tty/n_r3964.c static void receive_char(struct r3964_info *pInfo, const unsigned char c) c 561 drivers/tty/n_r3964.c if (c == DLE) { c 568 drivers/tty/n_r3964.c } else if (c == STX) { c 581 drivers/tty/n_r3964.c TRACE_PE("TX_REQUEST - char != DLE: %x", c); c 586 drivers/tty/n_r3964.c if (c == NAK) { c 597 drivers/tty/n_r3964.c if (c == DLE) { c 610 drivers/tty/n_r3964.c if (c == STX) { c 637 drivers/tty/n_r3964.c pInfo->bcc ^= c; c 639 drivers/tty/n_r3964.c if (c == DLE) { c 646 drivers/tty/n_r3964.c } else if ((c == ETX) && (pInfo->last_rx == DLE)) { c 655 drivers/tty/n_r3964.c pInfo->last_rx = c; c 657 drivers/tty/n_r3964.c pInfo->rx_buf[pInfo->rx_position++] = c; c 664 drivers/tty/n_r3964.c pInfo->last_rx = c; c 317 drivers/tty/n_tty.c static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata) c 319 drivers/tty/n_tty.c *read_buf_addr(ldata, ldata->read_head) = c; c 391 drivers/tty/n_tty.c static inline int is_utf8_continuation(unsigned char c) c 393 drivers/tty/n_tty.c return (c & 0xc0) == 0x80; c 404 drivers/tty/n_tty.c static inline int is_continuation(unsigned char c, struct tty_struct *tty) c 406 drivers/tty/n_tty.c return I_IUTF8(tty) && is_utf8_continuation(c); c 431 drivers/tty/n_tty.c static int do_output_char(unsigned char c, struct tty_struct *tty, int space) c 439 drivers/tty/n_tty.c switch (c) { c 456 drivers/tty/n_tty.c c = '\n'; c 479 drivers/tty/n_tty.c if (!iscntrl(c)) { c 481 drivers/tty/n_tty.c c = toupper(c); c 482 drivers/tty/n_tty.c if (!is_continuation(c, tty)) c 488 drivers/tty/n_tty.c tty_put_char(tty, c); c 506 
drivers/tty/n_tty.c static int process_output(unsigned char c, struct tty_struct *tty) c 514 drivers/tty/n_tty.c retval = do_output_char(c, tty, space); c 561 drivers/tty/n_tty.c unsigned char c = *cp; c 563 drivers/tty/n_tty.c switch (c) { c 585 drivers/tty/n_tty.c if (!iscntrl(c)) { c 588 drivers/tty/n_tty.c if (!is_continuation(c, tty)) c 631 drivers/tty/n_tty.c unsigned char c; c 637 drivers/tty/n_tty.c c = echo_buf(ldata, tail); c 638 drivers/tty/n_tty.c if (c == ECHO_OP_START) { c 739 drivers/tty/n_tty.c int retval = do_output_char(c, tty, space); c 746 drivers/tty/n_tty.c tty_put_char(tty, c); c 841 drivers/tty/n_tty.c static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata) c 843 drivers/tty/n_tty.c *echo_buf_addr(ldata, ldata->echo_head) = c; c 917 drivers/tty/n_tty.c static void echo_char_raw(unsigned char c, struct n_tty_data *ldata) c 919 drivers/tty/n_tty.c if (c == ECHO_OP_START) { c 923 drivers/tty/n_tty.c add_echo_byte(c, ldata); c 939 drivers/tty/n_tty.c static void echo_char(unsigned char c, struct tty_struct *tty) c 943 drivers/tty/n_tty.c if (c == ECHO_OP_START) { c 947 drivers/tty/n_tty.c if (L_ECHOCTL(tty) && iscntrl(c) && c != '\t') c 949 drivers/tty/n_tty.c add_echo_byte(c, ldata); c 979 drivers/tty/n_tty.c static void eraser(unsigned char c, struct tty_struct *tty) c 991 drivers/tty/n_tty.c if (c == ERASE_CHAR(tty)) c 993 drivers/tty/n_tty.c else if (c == WERASE_CHAR(tty)) c 1019 drivers/tty/n_tty.c c = read_buf(ldata, head); c 1020 drivers/tty/n_tty.c } while (is_continuation(c, tty) && c 1024 drivers/tty/n_tty.c if (is_continuation(c, tty)) c 1029 drivers/tty/n_tty.c if (isalnum(c) || c == '_') c 1043 drivers/tty/n_tty.c echo_char(c, tty); c 1051 drivers/tty/n_tty.c } else if (c == '\t') { c 1065 drivers/tty/n_tty.c c = read_buf(ldata, tail); c 1066 drivers/tty/n_tty.c if (c == '\t') { c 1069 drivers/tty/n_tty.c } else if (iscntrl(c)) { c 1072 drivers/tty/n_tty.c } else if (!is_continuation(c, tty)) { c 1078 drivers/tty/n_tty.c if (iscntrl(c) && L_ECHOCTL(tty)) { c 1083 drivers/tty/n_tty.c if (!iscntrl(c) || L_ECHOCTL(tty)) { c 1223 drivers/tty/n_tty.c static void n_tty_receive_parity_error(struct tty_struct *tty, unsigned char c) c 1233 drivers/tty/n_tty.c put_tty_queue(c, ldata); c 1237 drivers/tty/n_tty.c put_tty_queue(c, ldata); c 1241 drivers/tty/n_tty.c n_tty_receive_signal_char(struct tty_struct *tty, int signal, unsigned char c) c 1247 drivers/tty/n_tty.c echo_char(c, tty); c 1271 drivers/tty/n_tty.c n_tty_receive_char_special(struct tty_struct *tty, unsigned char c) c 1276 drivers/tty/n_tty.c if (c == START_CHAR(tty)) { c 1281 drivers/tty/n_tty.c if (c == STOP_CHAR(tty)) { c 1288 drivers/tty/n_tty.c if (c == INTR_CHAR(tty)) { c 1289 drivers/tty/n_tty.c n_tty_receive_signal_char(tty, SIGINT, c); c 1291 drivers/tty/n_tty.c } else if (c == QUIT_CHAR(tty)) { c 1292 drivers/tty/n_tty.c n_tty_receive_signal_char(tty, SIGQUIT, c); c 1294 drivers/tty/n_tty.c } else if (c == SUSP_CHAR(tty)) { c 1295 drivers/tty/n_tty.c n_tty_receive_signal_char(tty, SIGTSTP, c); c 1305 drivers/tty/n_tty.c if (c == '\r') { c 1309 drivers/tty/n_tty.c c = '\n'; c 1310 drivers/tty/n_tty.c } else if (c == '\n' && I_INLCR(tty)) c 1311 drivers/tty/n_tty.c c = '\r'; c 1314 drivers/tty/n_tty.c if (c == ERASE_CHAR(tty) || c == KILL_CHAR(tty) || c 1315 drivers/tty/n_tty.c (c == WERASE_CHAR(tty) && L_IEXTEN(tty))) { c 1316 drivers/tty/n_tty.c eraser(c, tty); c 1320 drivers/tty/n_tty.c if (c == LNEXT_CHAR(tty) && L_IEXTEN(tty)) { c 1332 drivers/tty/n_tty.c if (c == 
REPRINT_CHAR(tty) && L_ECHO(tty) && L_IEXTEN(tty)) { c 1336 drivers/tty/n_tty.c echo_char(c, tty); c 1345 drivers/tty/n_tty.c if (c == '\n') { c 1352 drivers/tty/n_tty.c if (c == EOF_CHAR(tty)) { c 1353 drivers/tty/n_tty.c c = __DISABLED_CHAR; c 1356 drivers/tty/n_tty.c if ((c == EOL_CHAR(tty)) || c 1357 drivers/tty/n_tty.c (c == EOL2_CHAR(tty) && L_IEXTEN(tty))) { c 1365 drivers/tty/n_tty.c echo_char(c, tty); c 1372 drivers/tty/n_tty.c if (c == (unsigned char) '\377' && I_PARMRK(tty)) c 1373 drivers/tty/n_tty.c put_tty_queue(c, ldata); c 1377 drivers/tty/n_tty.c put_tty_queue(c, ldata); c 1387 drivers/tty/n_tty.c if (c == '\n') c 1393 drivers/tty/n_tty.c echo_char(c, tty); c 1399 drivers/tty/n_tty.c if (c == (unsigned char) '\377' && I_PARMRK(tty)) c 1400 drivers/tty/n_tty.c put_tty_queue(c, ldata); c 1402 drivers/tty/n_tty.c put_tty_queue(c, ldata); c 1407 drivers/tty/n_tty.c n_tty_receive_char_inline(struct tty_struct *tty, unsigned char c) c 1420 drivers/tty/n_tty.c echo_char(c, tty); c 1424 drivers/tty/n_tty.c if (c == (unsigned char) '\377' && I_PARMRK(tty)) c 1425 drivers/tty/n_tty.c put_tty_queue(c, ldata); c 1426 drivers/tty/n_tty.c put_tty_queue(c, ldata); c 1429 drivers/tty/n_tty.c static void n_tty_receive_char(struct tty_struct *tty, unsigned char c) c 1431 drivers/tty/n_tty.c n_tty_receive_char_inline(tty, c); c 1435 drivers/tty/n_tty.c n_tty_receive_char_fast(struct tty_struct *tty, unsigned char c) c 1448 drivers/tty/n_tty.c echo_char(c, tty); c 1451 drivers/tty/n_tty.c put_tty_queue(c, ldata); c 1454 drivers/tty/n_tty.c static void n_tty_receive_char_closing(struct tty_struct *tty, unsigned char c) c 1457 drivers/tty/n_tty.c c &= 0x7f; c 1459 drivers/tty/n_tty.c c = tolower(c); c 1462 drivers/tty/n_tty.c if (c == STOP_CHAR(tty)) c 1464 drivers/tty/n_tty.c else if (c == START_CHAR(tty) || c 1466 drivers/tty/n_tty.c c != INTR_CHAR(tty) && c != QUIT_CHAR(tty) && c 1467 drivers/tty/n_tty.c c != SUSP_CHAR(tty))) { c 1475 drivers/tty/n_tty.c n_tty_receive_char_flagged(struct tty_struct *tty, unsigned char c, char flag) c 1483 drivers/tty/n_tty.c n_tty_receive_parity_error(tty, c); c 1495 drivers/tty/n_tty.c n_tty_receive_char_lnext(struct tty_struct *tty, unsigned char c, char flag) c 1502 drivers/tty/n_tty.c c &= 0x7f; c 1504 drivers/tty/n_tty.c c = tolower(c); c 1505 drivers/tty/n_tty.c n_tty_receive_char(tty, c); c 1507 drivers/tty/n_tty.c n_tty_receive_char_flagged(tty, c, flag); c 1572 drivers/tty/n_tty.c unsigned char c = *cp++; c 1575 drivers/tty/n_tty.c c &= 0x7f; c 1577 drivers/tty/n_tty.c c = tolower(c); c 1579 drivers/tty/n_tty.c put_tty_queue(c, ldata); c 1582 drivers/tty/n_tty.c if (!test_bit(c, ldata->char_map)) c 1583 drivers/tty/n_tty.c n_tty_receive_char_inline(tty, c); c 1584 drivers/tty/n_tty.c else if (n_tty_receive_char_special(tty, c) && count) { c 1606 drivers/tty/n_tty.c unsigned char c = *cp++; c 1608 drivers/tty/n_tty.c if (!test_bit(c, ldata->char_map)) c 1609 drivers/tty/n_tty.c n_tty_receive_char_fast(tty, c); c 1610 drivers/tty/n_tty.c else if (n_tty_receive_char_special(tty, c) && count) { c 2023 drivers/tty/n_tty.c size_t n, size, more, c; c 2052 drivers/tty/n_tty.c c = n + found; c 2055 drivers/tty/n_tty.c c = min(*nr, c); c 2056 drivers/tty/n_tty.c n = c; c 2060 drivers/tty/n_tty.c __func__, eol, found, n, c, tail, more); c 2070 drivers/tty/n_tty.c smp_store_release(&ldata->read_tail, ldata->read_tail + c); c 2138 drivers/tty/n_tty.c int c; c 2145 drivers/tty/n_tty.c c = job_control(tty, file); c 2146 drivers/tty/n_tty.c if (c < 0) c 2147 
drivers/tty/n_tty.c return c; c 2306 drivers/tty/n_tty.c int c; c 2344 drivers/tty/n_tty.c c = *b; c 2345 drivers/tty/n_tty.c if (process_output(c, tty) < 0) c 2356 drivers/tty/n_tty.c c = tty->ops->write(tty, b, nr); c 2358 drivers/tty/n_tty.c if (c < 0) { c 2359 drivers/tty/n_tty.c retval = c; c 2362 drivers/tty/n_tty.c if (!c) c 2364 drivers/tty/n_tty.c b += c; c 2365 drivers/tty/n_tty.c nr -= c; c 111 drivers/tty/pty.c static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c) c 119 drivers/tty/pty.c if (c > 0) { c 122 drivers/tty/pty.c c = tty_insert_flip_string(to->port, buf, c); c 124 drivers/tty/pty.c if (c) c 128 drivers/tty/pty.c return c; c 412 drivers/tty/rocket.c int c; c 437 drivers/tty/rocket.c c = min(info->xmit_fifo_room, info->xmit_cnt); c 438 drivers/tty/rocket.c c = min(c, XMIT_BUF_SIZE - info->xmit_tail); c 439 drivers/tty/rocket.c if (c <= 0 || info->xmit_fifo_room <= 0) c 441 drivers/tty/rocket.c sOutStrW(sGetTxRxDataIO(cp), (unsigned short *) (info->xmit_buf + info->xmit_tail), c / 2); c 442 drivers/tty/rocket.c if (c & 1) c 443 drivers/tty/rocket.c sOutB(sGetTxRxDataIO(cp), info->xmit_buf[info->xmit_tail + c - 1]); c 444 drivers/tty/rocket.c info->xmit_tail += c; c 446 drivers/tty/rocket.c info->xmit_cnt -= c; c 447 drivers/tty/rocket.c info->xmit_fifo_room -= c; c 449 drivers/tty/rocket.c printk(KERN_INFO "tx %d chars...\n", c); c 1558 drivers/tty/rocket.c int c, retval = 0; c 1580 drivers/tty/rocket.c c = min(count, info->xmit_fifo_room); c 1584 drivers/tty/rocket.c sOutStrW(sGetTxRxDataIO(cp), (unsigned short *) b, c / 2); c 1587 drivers/tty/rocket.c if (c & 1) c 1588 drivers/tty/rocket.c sOutB(sGetTxRxDataIO(cp), b[c - 1]); c 1590 drivers/tty/rocket.c retval += c; c 1591 drivers/tty/rocket.c buf += c; c 1592 drivers/tty/rocket.c count -= c; c 1595 drivers/tty/rocket.c info->xmit_fifo_room -= c; c 1608 drivers/tty/rocket.c c = min(count, XMIT_BUF_SIZE - info->xmit_cnt - 1); c 1609 drivers/tty/rocket.c c = min(c, XMIT_BUF_SIZE - info->xmit_head); c 1610 drivers/tty/rocket.c if (c <= 0) c 1614 drivers/tty/rocket.c memcpy(info->xmit_buf + info->xmit_head, b, c); c 1618 drivers/tty/rocket.c (info->xmit_head + c) & (XMIT_BUF_SIZE - 1); c 1619 drivers/tty/rocket.c info->xmit_cnt += c; c 1622 drivers/tty/rocket.c buf += c; c 1623 drivers/tty/rocket.c count -= c; c 1624 drivers/tty/rocket.c retval += c; c 89 drivers/tty/serial/8250/8250_early.c static void serial_putc(struct uart_port *port, int c) c 93 drivers/tty/serial/8250/8250_early.c serial8250_early_out(port, UART_TX, c); c 116 drivers/tty/serial/8250/8250_early.c unsigned char c; c 127 drivers/tty/serial/8250/8250_early.c c = serial8250_early_in(port, UART_LCR); c 128 drivers/tty/serial/8250/8250_early.c serial8250_early_out(port, UART_LCR, c | UART_LCR_DLAB); c 131 drivers/tty/serial/8250/8250_early.c serial8250_early_out(port, UART_LCR, c & ~UART_LCR_DLAB); c 55 drivers/tty/serial/8250/8250_ingenic.c static void ingenic_early_console_putc(struct uart_port *port, int c) c 63 drivers/tty/serial/8250/8250_ingenic.c early_out(port, UART_TX, c); c 808 drivers/tty/serial/8250/8250_pci.c unsigned int c = dev->class; c 812 drivers/tty/serial/8250/8250_pci.c pi = c & 0xff; c 2023 drivers/tty/serial/8250/8250_port.c unsigned char c) c 2042 drivers/tty/serial/8250/8250_port.c serial_port_out(port, UART_TX, c); c 303 drivers/tty/serial/altera_jtaguart.c static void altera_jtaguart_console_putc(struct uart_port *port, int c) c 319 drivers/tty/serial/altera_jtaguart.c writel(c, port->membase + 
ALTERA_JTAGUART_DATA_REG); c 323 drivers/tty/serial/altera_jtaguart.c static void altera_jtaguart_console_putc(struct uart_port *port, int c) c 334 drivers/tty/serial/altera_jtaguart.c writel(c, port->membase + ALTERA_JTAGUART_DATA_REG); c 404 drivers/tty/serial/altera_uart.c static void altera_uart_poll_put_char(struct uart_port *port, unsigned char c) c 410 drivers/tty/serial/altera_uart.c altera_uart_writel(port, c, ALTERA_UART_TXDATA_REG); c 443 drivers/tty/serial/altera_uart.c static void altera_uart_console_putc(struct uart_port *port, int c) c 449 drivers/tty/serial/altera_uart.c altera_uart_writel(port, c, ALTERA_UART_TXDATA_REG); c 1378 drivers/tty/serial/amba-pl011.c static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c, c 1385 drivers/tty/serial/amba-pl011.c pl011_write(c, uap, REG_DR); c 2414 drivers/tty/serial/amba-pl011.c static void qdf2400_e44_putc(struct uart_port *port, int c) c 2418 drivers/tty/serial/amba-pl011.c writel(c, port->membase + UART01x_DR); c 2430 drivers/tty/serial/amba-pl011.c static void pl011_putc(struct uart_port *port, int c) c 2435 drivers/tty/serial/amba-pl011.c writel(c, port->membase + UART01x_DR); c 2437 drivers/tty/serial/amba-pl011.c writeb(c, port->membase + UART01x_DR); c 738 drivers/tty/serial/atmel_serial.c struct atmel_uart_char *c; c 744 drivers/tty/serial/atmel_serial.c c = &((struct atmel_uart_char *)ring->buf)[ring->head]; c 745 drivers/tty/serial/atmel_serial.c c->status = status; c 746 drivers/tty/serial/atmel_serial.c c->ch = ch; c 1526 drivers/tty/serial/atmel_serial.c struct atmel_uart_char c; c 1531 drivers/tty/serial/atmel_serial.c c = ((struct atmel_uart_char *)ring->buf)[ring->tail]; c 1536 drivers/tty/serial/atmel_serial.c status = c.status; c 1571 drivers/tty/serial/atmel_serial.c if (uart_handle_sysrq_char(port, c.ch)) c 1574 drivers/tty/serial/atmel_serial.c uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg); c 241 drivers/tty/serial/bcm63xx_uart.c unsigned int iestat, c, cstat; c 264 drivers/tty/serial/bcm63xx_uart.c cstat = c = bcm_uart_readl(port, UART_FIFO_REG); c 267 drivers/tty/serial/bcm63xx_uart.c c &= 0xff; c 292 drivers/tty/serial/bcm63xx_uart.c if (uart_handle_sysrq_char(port, c)) c 297 drivers/tty/serial/bcm63xx_uart.c tty_insert_flip_char(tty_port, c, flag); c 336 drivers/tty/serial/bcm63xx_uart.c unsigned int c; c 338 drivers/tty/serial/bcm63xx_uart.c c = xmit->buf[xmit->tail]; c 339 drivers/tty/serial/bcm63xx_uart.c bcm_uart_writel(port, c, UART_FIFO_REG); c 368 drivers/tty/serial/clps711x.c static void uart_clps711x_console_write(struct console *co, const char *c, c 375 drivers/tty/serial/clps711x.c uart_console_write(port, c, n, uart_clps711x_console_putchar); c 1053 drivers/tty/serial/cpm_uart/cpm_uart_core.c u_char c, *cp; c 1069 drivers/tty/serial/cpm_uart/cpm_uart_core.c i = c = bdp->cbd_datlen; c 1073 drivers/tty/serial/cpm_uart/cpm_uart_core.c c = *cp; c 1083 drivers/tty/serial/cpm_uart/cpm_uart_core.c return (int)c; c 1108 drivers/tty/serial/cpm_uart/cpm_uart_core.c unsigned char c) c 1114 drivers/tty/serial/cpm_uart/cpm_uart_core.c ch[0] = (char)c; c 392 drivers/tty/serial/digicolor-usart.c static void digicolor_uart_console_write(struct console *co, const char *c, c 405 drivers/tty/serial/digicolor-usart.c uart_console_write(port, c, n, digicolor_uart_console_putchar); c 24 drivers/tty/serial/earlycon-arm-semihost.c static void smh_putc(struct uart_port *port, int c) c 30 drivers/tty/serial/earlycon-arm-semihost.c : : "r" (&c) : "x0", "x1", "memory"); c 35 
drivers/tty/serial/earlycon-arm-semihost.c : : "r" (&c) : "r0", "r1", "memory"); c 13 drivers/tty/serial/earlycon-riscv-sbi.c static void sbi_putc(struct uart_port *port, int c) c 15 drivers/tty/serial/earlycon-riscv-sbi.c sbi_console_putchar(c); c 168 drivers/tty/serial/fsl_linflexuart.c unsigned char c; c 172 drivers/tty/serial/fsl_linflexuart.c c = xmit->buf[xmit->tail]; c 173 drivers/tty/serial/fsl_linflexuart.c writeb(c, sport->membase + BDRL); c 612 drivers/tty/serial/fsl_lpuart.c static void lpuart_poll_put_char(struct uart_port *port, unsigned char c) c 616 drivers/tty/serial/fsl_lpuart.c writeb(c, port->membase + UARTDR); c 663 drivers/tty/serial/fsl_lpuart.c static void lpuart32_poll_put_char(struct uart_port *port, unsigned char c) c 666 drivers/tty/serial/fsl_lpuart.c lpuart32_write(port, UARTDATA, c); c 1832 drivers/tty/serial/imx.c static void imx_uart_poll_put_char(struct uart_port *port, unsigned char c) c 1843 drivers/tty/serial/imx.c imx_uart_writel(sport, c, URTX0); c 950 drivers/tty/serial/jsm/jsm_cls.c static void cls_send_immediate_char(struct jsm_channel *ch, unsigned char c) c 952 drivers/tty/serial/jsm/jsm_cls.c writeb(c, &ch->ch_cls_uart->txrx); c 1376 drivers/tty/serial/jsm/jsm_neo.c static void neo_send_immediate_char(struct jsm_channel *ch, unsigned char c) c 1381 drivers/tty/serial/jsm/jsm_neo.c writeb(c, &ch->ch_neo_uart->txrx); c 58 drivers/tty/serial/kgdb_nmi.c static void kgdb_nmi_console_write(struct console *co, const char *s, uint c) c 62 drivers/tty/serial/kgdb_nmi.c for (i = 0; i < c; i++) c 101 drivers/tty/serial/kgdb_nmi.c char c = ch; c 112 drivers/tty/serial/kgdb_nmi.c kfifo_in(&priv->fifo, &c, 1); c 118 drivers/tty/serial/kgdb_nmi.c int c = -1; c 123 drivers/tty/serial/kgdb_nmi.c c = dbg_io_ops->read_char(); c 124 drivers/tty/serial/kgdb_nmi.c if (c == NO_POLL_CHAR) c 125 drivers/tty/serial/kgdb_nmi.c return c; c 127 drivers/tty/serial/kgdb_nmi.c if (!kgdb_nmi_knock && (c == '\r' || c == '\n')) { c 129 drivers/tty/serial/kgdb_nmi.c } else if (c == magic[n]) { c 139 drivers/tty/serial/kgdb_nmi.c kgdb_tty_recv(c); c 144 drivers/tty/serial/kgdb_nmi.c kdb_printf("%c", c); c 307 drivers/tty/serial/kgdb_nmi.c static int kgdb_nmi_tty_write(struct tty_struct *tty, const unchar *buf, int c) c 311 drivers/tty/serial/kgdb_nmi.c for (i = 0; i < c; i++) c 313 drivers/tty/serial/kgdb_nmi.c return c; c 139 drivers/tty/serial/max3100.c static int max3100_do_parity(struct max3100_port *s, u16 c) c 149 drivers/tty/serial/max3100.c c &= 0x7f; c 151 drivers/tty/serial/max3100.c c &= 0xff; c 153 drivers/tty/serial/max3100.c parity = parity ^ (hweight8(c) & 1); c 157 drivers/tty/serial/max3100.c static int max3100_check_parity(struct max3100_port *s, u16 c) c 159 drivers/tty/serial/max3100.c return max3100_do_parity(s, c) == ((c >> 8) & 1); c 162 drivers/tty/serial/max3100.c static void max3100_calc_parity(struct max3100_port *s, u16 *c) c 165 drivers/tty/serial/max3100.c *c &= 0x7f; c 167 drivers/tty/serial/max3100.c *c &= 0xff; c 170 drivers/tty/serial/max3100.c *c |= max3100_do_parity(s, *c) << 8; c 504 drivers/tty/serial/max310x.c unsigned int mode = 0, div = 0, frac = 0, c = 0, F = 0; c 514 drivers/tty/serial/max310x.c c = 4; c 518 drivers/tty/serial/max310x.c c = 8; c 521 drivers/tty/serial/max310x.c c = 16; c 525 drivers/tty/serial/max310x.c div /= c; c 526 drivers/tty/serial/max310x.c F = c*baud; c 539 drivers/tty/serial/max310x.c return (16*port->uartclk) / (c*(16*div + frac)); c 516 drivers/tty/serial/mcf.c static void mcf_console_putc(struct console *co, 
const char c) c 525 drivers/tty/serial/mcf.c writeb(c, port->membase + MCFUART_UTB); c 67 drivers/tty/serial/meson_uart.c #define AML_UART_XMIT_IRQ(c) (((c) & 0xff) << 8) c 68 drivers/tty/serial/meson_uart.c #define AML_UART_RECV_IRQ(c) ((c) & 0xff) c 407 drivers/tty/serial/milbeaut_usio.c static void mlb_usio_console_putchar(struct uart_port *port, int c) c 412 drivers/tty/serial/milbeaut_usio.c writew(c, port->membase + MLB_USIO_REG_DR); c 100 drivers/tty/serial/mpc52xx_uart.c void (*write_char)(struct uart_port *port, unsigned char c); c 270 drivers/tty/serial/mpc52xx_uart.c static void mpc52xx_psc_write_char(struct uart_port *port, unsigned char c) c 272 drivers/tty/serial/mpc52xx_uart.c out_8(&PSC(port)->mpc52xx_psc_buffer_8, c); c 510 drivers/tty/serial/mpc52xx_uart.c static void mpc512x_psc_write_char(struct uart_port *port, unsigned char c) c 512 drivers/tty/serial/mpc52xx_uart.c out_8(&FIFO_512x(port)->txdata_8, c); c 851 drivers/tty/serial/mpc52xx_uart.c static void mpc5125_psc_write_char(struct uart_port *port, unsigned char c) c 853 drivers/tty/serial/mpc52xx_uart.c out_8(&FIFO_5125(port)->txdata_8, c); c 778 drivers/tty/serial/msm_serial.c unsigned int c; c 782 drivers/tty/serial/msm_serial.c c = msm_read(port, UART_RF); c 803 drivers/tty/serial/msm_serial.c sysrq = uart_handle_sysrq_char(port, c); c 806 drivers/tty/serial/msm_serial.c tty_insert_flip_char(tport, c, flag); c 1426 drivers/tty/serial/msm_serial.c int c; c 1433 drivers/tty/serial/msm_serial.c c = sp[sizeof(slop) - count]; c 1446 drivers/tty/serial/msm_serial.c c = sp[0]; c 1453 drivers/tty/serial/msm_serial.c c = NO_POLL_CHAR; c 1458 drivers/tty/serial/msm_serial.c c = sp[0]; c 1462 drivers/tty/serial/msm_serial.c return c; c 1468 drivers/tty/serial/msm_serial.c int c; c 1476 drivers/tty/serial/msm_serial.c c = msm_poll_get_char_dm(port); c 1478 drivers/tty/serial/msm_serial.c c = msm_poll_get_char_single(port); c 1483 drivers/tty/serial/msm_serial.c return c; c 1486 drivers/tty/serial/msm_serial.c static void msm_poll_put_char(struct uart_port *port, unsigned char c) c 1503 drivers/tty/serial/msm_serial.c msm_write(port, c, msm_port->is_uartdm ? 
UARTDM_TF : UART_TF); c 1618 drivers/tty/serial/msm_serial.c char c = *s; c 1620 drivers/tty/serial/msm_serial.c if (c == '\n' && !replaced) { c 1626 drivers/tty/serial/msm_serial.c buf[j] = c; c 67 drivers/tty/serial/mux.c #define UART_PUT_CHAR(p, c) __raw_writel((c), (p)->membase + IO_DATA_REG_OFFSET) c 557 drivers/tty/serial/mvebu-uart.c static void mvebu_uart_put_poll_char(struct uart_port *port, unsigned char c) c 570 drivers/tty/serial/mvebu-uart.c writel(c, port->membase + UART_TSH(port)); c 598 drivers/tty/serial/mvebu-uart.c static void mvebu_uart_putc(struct uart_port *port, int c) c 609 drivers/tty/serial/mvebu-uart.c writel(c, port->membase + UART_STD_TSH); c 651 drivers/tty/serial/mxs-auart.c u8 c; c 653 drivers/tty/serial/mxs-auart.c c = mxs_read(s, REG_DATA); c 684 drivers/tty/serial/mxs-auart.c if (uart_handle_sysrq_char(&s->port, c)) c 687 drivers/tty/serial/mxs-auart.c uart_insert_char(&s->port, stat, AUART_STAT_OERR, c, flag); c 1247 drivers/tty/serial/omap-serial.c static void omap_serial_early_putc(struct uart_port *port, int c) c 1257 drivers/tty/serial/omap-serial.c omap_serial_early_out(port, UART_TX, c); c 1561 drivers/tty/serial/pch_uart.c unsigned char c) c 1577 drivers/tty/serial/pch_uart.c iowrite8(c, priv->membase + PCH_UART_THR); c 217 drivers/tty/serial/pic32_uart.c u32 sta_reg, c; c 237 drivers/tty/serial/pic32_uart.c c = pic32_uart_readl(sport, PIC32_UART_RX); c 241 drivers/tty/serial/pic32_uart.c c &= 0xff; c 261 drivers/tty/serial/pic32_uart.c if (uart_handle_sysrq_char(port, c)) c 265 drivers/tty/serial/pic32_uart.c tty_insert_flip_char(tty, c, flag); c 309 drivers/tty/serial/pic32_uart.c unsigned int c = xmit->buf[xmit->tail]; c 311 drivers/tty/serial/pic32_uart.c pic32_uart_writel(sport, PIC32_UART_TX, c); c 69 drivers/tty/serial/pic32_uart.h #define to_pic32_sport(c) container_of(c, struct pic32_sport, port) c 1354 drivers/tty/serial/pmac_zilog.c static void pmz_poll_put_char(struct uart_port *port, unsigned char c) c 1362 drivers/tty/serial/pmac_zilog.c write_zsdata(uap, c); c 698 drivers/tty/serial/pxa.c unsigned char c) c 713 drivers/tty/serial/pxa.c serial_out(up, UART_TX, c); c 359 drivers/tty/serial/qcom_geni_serial.c unsigned char c) c 365 drivers/tty/serial/qcom_geni_serial.c writel(c, uport->membase + SE_GENI_TX_FIFOn); c 486 drivers/tty/serial/qcom_geni_serial.c int c; c 494 drivers/tty/serial/qcom_geni_serial.c for (c = 0; c < chunk; c++) { c 498 drivers/tty/serial/qcom_geni_serial.c if (port->brk && buf[c] == 0) { c 504 drivers/tty/serial/qcom_geni_serial.c sysrq = uart_prepare_sysrq_char(uport, buf[c]); c 507 drivers/tty/serial/qcom_geni_serial.c tty_insert_flip_char(tport, buf[c], TTY_NORMAL); c 727 drivers/tty/serial/qcom_geni_serial.c int c; c 732 drivers/tty/serial/qcom_geni_serial.c for (c = 0; c < tx_bytes ; c++) { c 733 drivers/tty/serial/qcom_geni_serial.c buf[c] = xmit->buf[tail++]; c 1491 drivers/tty/serial/samsung.c unsigned char c); c 2091 drivers/tty/serial/samsung.c unsigned char c) c 2102 drivers/tty/serial/samsung.c wr_regb(port, S3C2410_UTXH, c); c 2516 drivers/tty/serial/samsung.c static void samsung_early_putc(struct uart_port *port, int c) c 2523 drivers/tty/serial/samsung.c writeb(c, port->membase + S3C2410_UTXH); c 835 drivers/tty/serial/sccnxp.c static void sccnxp_console_putchar(struct uart_port *port, int c) c 841 drivers/tty/serial/sccnxp.c sccnxp_port_write(port, SCCNXP_THR_REG, c); c 848 drivers/tty/serial/sccnxp.c static void sccnxp_console_write(struct console *co, const char *c, unsigned n) c 855 
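The serial console entries around here (sccnxp_console_putchar/sccnxp_console_write, msm_poll_put_char, mvebu_uart_putc) share one polled-TX shape: spin until the transmitter has room, write the data register, and let uart_console_write() walk the string byte by byte. A minimal sketch of that shape follows; the EX_UART_* offsets and bits are invented for illustration and belong to no listed device, and the putchar callback takes an int as it did in the kernel versions these entries come from.

/* Illustrative sketch only: EX_UART_* names are assumptions, not taken
 * from any of the drivers listed above. */
#include <linux/bits.h>
#include <linux/console.h>
#include <linux/io.h>
#include <linux/processor.h>
#include <linux/serial_core.h>

#define EX_UART_STAT		0x00	/* hypothetical status register */
#define EX_UART_TX		0x04	/* hypothetical TX data register */
#define EX_UART_TX_EMPTY	BIT(0)	/* hypothetical "room in FIFO" bit */

static void ex_uart_console_putchar(struct uart_port *port, int c)
{
	/* Busy-wait until the transmitter can take another byte. */
	while (!(readl(port->membase + EX_UART_STAT) & EX_UART_TX_EMPTY))
		cpu_relax();

	writel(c, port->membase + EX_UART_TX);
}

static void ex_uart_console_write(struct console *co, const char *s,
				  unsigned int count)
{
	struct uart_port *port = co->data;	/* assumed to be stashed here */

	/* uart_console_write() inserts a CR before each LF and feeds every
	 * byte through the putchar callback above. */
	uart_console_write(port, s, count, ex_uart_console_putchar);
}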
drivers/tty/serial/sccnxp.c uart_console_write(port, c, n, sccnxp_console_putchar); c 545 drivers/tty/serial/serial_core.c static int uart_put_char(struct tty_struct *tty, unsigned char c) c 561 drivers/tty/serial/serial_core.c circ->buf[circ->head] = c; c 581 drivers/tty/serial/serial_core.c int c, ret = 0; c 600 drivers/tty/serial/serial_core.c c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE); c 601 drivers/tty/serial/serial_core.c if (count < c) c 602 drivers/tty/serial/serial_core.c c = count; c 603 drivers/tty/serial/serial_core.c if (c <= 0) c 605 drivers/tty/serial/serial_core.c memcpy(circ->buf + circ->head, buf, c); c 606 drivers/tty/serial/serial_core.c circ->head = (circ->head + c) & (UART_XMIT_SIZE - 1); c 607 drivers/tty/serial/serial_core.c buf += c; c 608 drivers/tty/serial/serial_core.c count -= c; c 609 drivers/tty/serial/serial_core.c ret += c; c 490 drivers/tty/serial/serial_txx9.c unsigned char c; c 502 drivers/tty/serial/serial_txx9.c c = sio_in(up, TXX9_SIRFIFO); c 510 drivers/tty/serial/serial_txx9.c return c; c 514 drivers/tty/serial/serial_txx9.c static void serial_txx9_put_poll_char(struct uart_port *port, unsigned char c) c 529 drivers/tty/serial/serial_txx9.c sio_out(up, TXX9_SITFIFO, c); c 667 drivers/tty/serial/sh-sci.c int c; c 681 drivers/tty/serial/sh-sci.c c = serial_port_in(port, SCxRDR); c 687 drivers/tty/serial/sh-sci.c return c; c 691 drivers/tty/serial/sh-sci.c static void sci_poll_put_char(struct uart_port *port, unsigned char c) c 699 drivers/tty/serial/sh-sci.c serial_port_out(port, SCxTDR, c); c 820 drivers/tty/serial/sh-sci.c unsigned char c; c 823 drivers/tty/serial/sh-sci.c c = port->x_char; c 826 drivers/tty/serial/sh-sci.c c = xmit->buf[xmit->tail]; c 832 drivers/tty/serial/sh-sci.c serial_port_out(port, SCxTDR, c); c 847 drivers/tty/serial/sh-sci.c #define STEPFN(c) ({int __c = (c); (((__c-1)|(__c)) == -1); }) c 869 drivers/tty/serial/sh-sci.c char c = serial_port_in(port, SCxRDR); c 870 drivers/tty/serial/sh-sci.c if (uart_handle_sysrq_char(port, c)) c 873 drivers/tty/serial/sh-sci.c tty_insert_flip_char(tport, c, TTY_NORMAL); c 876 drivers/tty/serial/sh-sci.c char c; c 881 drivers/tty/serial/sh-sci.c c = serial_port_in(port, SCxRDR); c 883 drivers/tty/serial/sh-sci.c c = serial_port_in(port, SCxRDR); c 886 drivers/tty/serial/sh-sci.c if (uart_handle_sysrq_char(port, c)) { c 903 drivers/tty/serial/sh-sci.c tty_insert_flip_char(tport, c, flag); c 2275 drivers/tty/serial/sh-sci.c unsigned int sr, br, prediv, scrate, c; c 2297 drivers/tty/serial/sh-sci.c for (c = 0; c <= 3; c++) { c 2299 drivers/tty/serial/sh-sci.c prediv = sr * (1 << (2 * c + 1)); c 2324 drivers/tty/serial/sh-sci.c *cks = c; c 439 drivers/tty/serial/sifive.c int c; c 441 drivers/tty/serial/sifive.c for (c = SIFIVE_RX_FIFO_DEPTH; c > 0; --c) { c 717 drivers/tty/serial/sifive.c static void early_sifive_serial_putc(struct uart_port *port, int c) c 723 drivers/tty/serial/sifive.c __ssp_early_writel(c, SIFIVE_SERIAL_TXDATA_OFFS, port); c 1032 drivers/tty/serial/sprd_serial.c static void sprd_putc(struct uart_port *port, int c) c 1040 drivers/tty/serial/sprd_serial.c writeb(c, port->membase + SPRD_TXD); c 247 drivers/tty/serial/st-asc.c unsigned char c; c 252 drivers/tty/serial/st-asc.c c = port->x_char; c 254 drivers/tty/serial/st-asc.c asc_out(port, ASC_TXBUF, c); c 277 drivers/tty/serial/st-asc.c c = xmit->buf[xmit->tail]; c 279 drivers/tty/serial/st-asc.c asc_out(port, ASC_TXBUF, c); c 295 drivers/tty/serial/st-asc.c unsigned long c = 0; c 312 
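The serial_core.c entries just above (uart_put_char, uart_write) show the power-of-two circular buffer used for the TX path: copy at most up to the physical end of the buffer, advance the head with a mask, and repeat. A stand-alone re-implementation of that arithmetic, assuming my own XMIT_SIZE and struct names; only the masking logic mirrors the kernel code.

/* Stand-alone illustration of the UART_XMIT_SIZE-style circular buffer
 * arithmetic; XMIT_SIZE and struct circ are invented for this sketch. */
#include <stdio.h>
#include <string.h>

#define XMIT_SIZE 16			/* must be a power of two */

struct circ {
	char buf[XMIT_SIZE];
	int head;			/* producer index */
	int tail;			/* consumer index */
};

/* Free space between head and either tail or the end of the buffer,
 * whichever comes first (same idea as the kernel's CIRC_SPACE_TO_END()). */
static int space_to_end(const struct circ *c)
{
	int end = XMIT_SIZE - 1 - c->head;
	int n = (end + c->tail) & (XMIT_SIZE - 1);

	return n <= end ? n : end + 1;
}

static int circ_write(struct circ *c, const char *buf, int count)
{
	int written = 0;

	while (count > 0) {
		int chunk = space_to_end(c);

		if (chunk > count)
			chunk = count;
		if (chunk <= 0)
			break;			/* buffer full */
		memcpy(c->buf + c->head, buf, chunk);
		c->head = (c->head + chunk) & (XMIT_SIZE - 1);
		buf += chunk;
		count -= chunk;
		written += chunk;
	}
	return written;
}

int main(void)
{
	struct circ c = { .head = 13, .tail = 13 };	/* exercises the wrap */
	int n = circ_write(&c, "hello, circular", 15);

	printf("queued %d bytes, head=%d tail=%d\n", n, c.head, c.tail);
	return 0;
}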
drivers/tty/serial/st-asc.c c = asc_in(port, ASC_RXBUF) | ASC_RXBUF_DUMMY_RX; c 316 drivers/tty/serial/st-asc.c if (status & ASC_STA_OE || c & ASC_RXBUF_FE || c 317 drivers/tty/serial/st-asc.c (c & ASC_RXBUF_PE && !ignore_pe)) { c 319 drivers/tty/serial/st-asc.c if (c & ASC_RXBUF_FE) { c 320 drivers/tty/serial/st-asc.c if (c == (ASC_RXBUF_FE | ASC_RXBUF_DUMMY_RX)) { c 324 drivers/tty/serial/st-asc.c c |= ASC_RXBUF_DUMMY_BE; c 328 drivers/tty/serial/st-asc.c } else if (c & ASC_RXBUF_PE) { c 337 drivers/tty/serial/st-asc.c c |= ASC_RXBUF_DUMMY_OE; c 340 drivers/tty/serial/st-asc.c c &= port->read_status_mask; c 342 drivers/tty/serial/st-asc.c if (c & ASC_RXBUF_DUMMY_BE) c 344 drivers/tty/serial/st-asc.c else if (c & ASC_RXBUF_PE) c 346 drivers/tty/serial/st-asc.c else if (c & ASC_RXBUF_FE) c 350 drivers/tty/serial/st-asc.c if (uart_handle_sysrq_char(port, c & 0xff)) c 353 drivers/tty/serial/st-asc.c uart_insert_char(port, c, ASC_RXBUF_DUMMY_OE, c & 0xff, flag); c 686 drivers/tty/serial/st-asc.c static void asc_put_poll_char(struct uart_port *port, unsigned char c) c 690 drivers/tty/serial/st-asc.c asc_out(port, ASC_TXBUF, c); c 200 drivers/tty/serial/stm32-usart.c unsigned long c; c 203 drivers/tty/serial/stm32-usart.c c = stm32_port->rx_buf[RX_BUF_L - (*last_res)--]; c 207 drivers/tty/serial/stm32-usart.c c = readl_relaxed(port->membase + ofs->rdr); c 209 drivers/tty/serial/stm32-usart.c c &= stm32_port->rdr_mask; c 212 drivers/tty/serial/stm32-usart.c return c; c 220 drivers/tty/serial/stm32-usart.c unsigned long c; c 246 drivers/tty/serial/stm32-usart.c c = stm32_get_char(port, &sr, &stm32_port->last_res); c 255 drivers/tty/serial/stm32-usart.c if (!c) { c 269 drivers/tty/serial/stm32-usart.c if (!c) c 276 drivers/tty/serial/stm32-usart.c if (uart_handle_sysrq_char(port, c)) c 278 drivers/tty/serial/stm32-usart.c uart_insert_char(port, sr, USART_SR_ORE, c, flag); c 103 drivers/tty/serial/suncore.c char c; c 105 drivers/tty/serial/suncore.c c = 'a'; c 107 drivers/tty/serial/suncore.c c = *of_console_options; c 109 drivers/tty/serial/suncore.c mode_prop[3] = c; c 82 drivers/tty/serial/sunhv.c long c = sun4v_con_getchar(&status); c 87 drivers/tty/serial/sunhv.c if (c == CON_BREAK) { c 91 drivers/tty/serial/sunhv.c c = 0; c 94 drivers/tty/serial/sunhv.c if (c == CON_HUP) { c 103 drivers/tty/serial/sunhv.c uart_handle_sysrq_char(port, c); c 109 drivers/tty/serial/sunhv.c if (uart_handle_sysrq_char(port, c)) c 112 drivers/tty/serial/sunhv.c tty_insert_flip_char(&port->state->port, c, TTY_NORMAL); c 482 drivers/tty/serial/sunhv.c static inline void sunhv_console_putchar(struct uart_port *port, char c) c 487 drivers/tty/serial/sunhv.c long status = sun4v_con_putchar(c); c 853 drivers/tty/serial/sunsab.c static void sunsab_console_putchar(struct uart_port *port, int c) c 859 drivers/tty/serial/sunsab.c writeb(c, &up->regs->w.tic); c 550 drivers/tty/serial/uartlite.c static void early_uartlite_putc(struct uart_port *port, int c) c 568 drivers/tty/serial/uartlite.c writel(c & 0xff, port->membase + 4); c 170 drivers/tty/serial/vt8500_serial.c unsigned int c; c 173 drivers/tty/serial/vt8500_serial.c c = readw(port->membase + VT8500_RXFIFO) & 0x3ff; c 176 drivers/tty/serial/vt8500_serial.c c &= ~port->read_status_mask; c 178 drivers/tty/serial/vt8500_serial.c if (c & FER) { c 181 drivers/tty/serial/vt8500_serial.c } else if (c & PER) { c 187 drivers/tty/serial/vt8500_serial.c if (!uart_handle_sysrq_char(port, c)) c 188 drivers/tty/serial/vt8500_serial.c tty_insert_flip_char(tport, c, flag); c 493 
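The st-asc and vt8500 receive paths listed above read an RX FIFO word that packs status bits next to the data byte, mask it against port->read_status_mask, and collapse the error bits into one TTY flag before tty_insert_flip_char(). A compact user-space sketch of that classification step; the bit layout and priority order here are assumptions for illustration, not the ASC or VT8500 register format.

/* Stand-alone sketch: RX_FE/RX_PE/RX_OE and the 8-bit data layout are
 * invented for this example. */
#include <stdio.h>

#define RX_DATA_MASK	0x0ffu
#define RX_PE		0x100u		/* parity error */
#define RX_FE		0x200u		/* framing error */
#define RX_OE		0x400u		/* overrun error */

enum rx_flag { FLAG_NORMAL, FLAG_PARITY, FLAG_FRAME, FLAG_OVERRUN };

static enum rx_flag classify(unsigned int word, unsigned int status_mask,
			     unsigned char *ch)
{
	*ch = word & RX_DATA_MASK;
	word &= status_mask;		/* drop conditions we do not report */

	if (word & RX_OE)
		return FLAG_OVERRUN;
	if (word & RX_PE)
		return FLAG_PARITY;
	if (word & RX_FE)
		return FLAG_FRAME;
	return FLAG_NORMAL;
}

int main(void)
{
	unsigned char ch;
	/* Framing error reported, parity errors masked out. */
	enum rx_flag f = classify(0x200u | 'A', RX_FE | RX_OE, &ch);

	printf("char=%c flag=%d\n", ch, f);
	return 0;
}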
drivers/tty/serial/vt8500_serial.c static void vt8500_console_putchar(struct uart_port *port, int c) c 496 drivers/tty/serial/vt8500_serial.c writeb(c, port->membase + VT8500_TXFIFO); c 571 drivers/tty/serial/vt8500_serial.c static void vt8500_put_poll_char(struct uart_port *port, unsigned char c) c 583 drivers/tty/serial/vt8500_serial.c vt8500_write(port, c, VT8500_TXFIFO); c 1048 drivers/tty/serial/xilinx_uartps.c int c; c 1055 drivers/tty/serial/xilinx_uartps.c c = NO_POLL_CHAR; c 1057 drivers/tty/serial/xilinx_uartps.c c = (unsigned char) readl(port->membase + CDNS_UART_FIFO); c 1061 drivers/tty/serial/xilinx_uartps.c return c; c 1064 drivers/tty/serial/xilinx_uartps.c static void cdns_uart_poll_put_char(struct uart_port *port, unsigned char c) c 1075 drivers/tty/serial/xilinx_uartps.c writel(c, port->membase + CDNS_UART_FIFO); c 2093 drivers/tty/synclink.c int c, ret = 0; c 2174 drivers/tty/synclink.c c = min_t(int, count, c 2177 drivers/tty/synclink.c if (c <= 0) { c 2181 drivers/tty/synclink.c memcpy(info->xmit_buf + info->xmit_head, buf, c); c 2182 drivers/tty/synclink.c info->xmit_head = ((info->xmit_head + c) & c 2184 drivers/tty/synclink.c info->xmit_cnt += c; c 2186 drivers/tty/synclink.c buf += c; c 2187 drivers/tty/synclink.c count -= c; c 2188 drivers/tty/synclink.c ret += c; c 911 drivers/tty/synclinkmp.c int c, ret = 0; c 944 drivers/tty/synclinkmp.c c = min_t(int, count, c 947 drivers/tty/synclinkmp.c if (c <= 0) c 950 drivers/tty/synclinkmp.c memcpy(info->tx_buf + info->tx_put, buf, c); c 953 drivers/tty/synclinkmp.c info->tx_put += c; c 956 drivers/tty/synclinkmp.c info->tx_count += c; c 959 drivers/tty/synclinkmp.c buf += c; c 960 drivers/tty/synclinkmp.c count -= c; c 961 drivers/tty/synclinkmp.c ret += c; c 1101 drivers/tty/sysrq.c char c; c 1103 drivers/tty/sysrq.c if (get_user(c, buf)) c 1105 drivers/tty/sysrq.c __handle_sysrq(c, false); c 3431 drivers/tty/tty_io.c struct console *c; c 3435 drivers/tty/tty_io.c for_each_console(c) { c 3436 drivers/tty/tty_io.c if (!c->device) c 3438 drivers/tty/tty_io.c if (!c->write) c 3440 drivers/tty/tty_io.c if ((c->flags & CON_ENABLED) == 0) c 3442 drivers/tty/tty_io.c cs[i++] = c; c 51 drivers/tty/ttynull.c static struct tty_driver *ttynull_device(struct console *c, int *index) c 793 drivers/tty/vt/consolemap.c u32 conv_8bit_to_uni(unsigned char c) c 795 drivers/tty/vt/consolemap.c unsigned short uni = translations[USER_MAP][c]; c 796 drivers/tty/vt/consolemap.c return uni == (0xf000 | c) ? 
c : uni; c 801 drivers/tty/vt/consolemap.c int c; c 802 drivers/tty/vt/consolemap.c for (c = 0; c < 0x100; c++) c 803 drivers/tty/vt/consolemap.c if (translations[USER_MAP][c] == uni || c 804 drivers/tty/vt/consolemap.c (translations[USER_MAP][c] == (c | 0xf000) && uni == c)) c 805 drivers/tty/vt/consolemap.c return c; c 340 drivers/tty/vt/keyboard.c static void to_utf8(struct vc_data *vc, uint c) c 342 drivers/tty/vt/keyboard.c if (c < 0x80) c 344 drivers/tty/vt/keyboard.c put_queue(vc, c); c 345 drivers/tty/vt/keyboard.c else if (c < 0x800) { c 347 drivers/tty/vt/keyboard.c put_queue(vc, 0xc0 | (c >> 6)); c 348 drivers/tty/vt/keyboard.c put_queue(vc, 0x80 | (c & 0x3f)); c 349 drivers/tty/vt/keyboard.c } else if (c < 0x10000) { c 350 drivers/tty/vt/keyboard.c if (c >= 0xD800 && c < 0xE000) c 352 drivers/tty/vt/keyboard.c if (c == 0xFFFF) c 355 drivers/tty/vt/keyboard.c put_queue(vc, 0xe0 | (c >> 12)); c 356 drivers/tty/vt/keyboard.c put_queue(vc, 0x80 | ((c >> 6) & 0x3f)); c 357 drivers/tty/vt/keyboard.c put_queue(vc, 0x80 | (c & 0x3f)); c 358 drivers/tty/vt/keyboard.c } else if (c < 0x110000) { c 360 drivers/tty/vt/keyboard.c put_queue(vc, 0xf0 | (c >> 18)); c 361 drivers/tty/vt/keyboard.c put_queue(vc, 0x80 | ((c >> 12) & 0x3f)); c 362 drivers/tty/vt/keyboard.c put_queue(vc, 0x80 | ((c >> 6) & 0x3f)); c 363 drivers/tty/vt/keyboard.c put_queue(vc, 0x80 | (c & 0x3f)); c 433 drivers/tty/vt/keyboard.c int c = conv_uni_to_8bit(d); c 434 drivers/tty/vt/keyboard.c if (c != -1) c 435 drivers/tty/vt/keyboard.c put_queue(vc, c); c 450 drivers/tty/vt/keyboard.c int c = conv_uni_to_8bit(diacr); c 451 drivers/tty/vt/keyboard.c if (c != -1) c 452 drivers/tty/vt/keyboard.c put_queue(vc, c); c 664 drivers/tty/vt/keyboard.c int c = conv_uni_to_8bit(value); c 665 drivers/tty/vt/keyboard.c if (c != -1) c 666 drivers/tty/vt/keyboard.c put_queue(vc, c); c 36 drivers/tty/vt/selection.c #define isspace(c) ((c) == ' ') c 107 drivers/tty/vt/selection.c static inline int inword(const u32 c) c 109 drivers/tty/vt/selection.c return c > 0x7f || (( inwordLut[c>>5] >> (c & 0x1F) ) & 1); c 135 drivers/tty/vt/selection.c static int store_utf8(u32 c, char *p) c 137 drivers/tty/vt/selection.c if (c < 0x80) { c 139 drivers/tty/vt/selection.c p[0] = c; c 141 drivers/tty/vt/selection.c } else if (c < 0x800) { c 143 drivers/tty/vt/selection.c p[0] = 0xc0 | (c >> 6); c 144 drivers/tty/vt/selection.c p[1] = 0x80 | (c & 0x3f); c 146 drivers/tty/vt/selection.c } else if (c < 0x10000) { c 148 drivers/tty/vt/selection.c p[0] = 0xe0 | (c >> 12); c 149 drivers/tty/vt/selection.c p[1] = 0x80 | ((c >> 6) & 0x3f); c 150 drivers/tty/vt/selection.c p[2] = 0x80 | (c & 0x3f); c 152 drivers/tty/vt/selection.c } else if (c < 0x110000) { c 154 drivers/tty/vt/selection.c p[0] = 0xf0 | (c >> 18); c 155 drivers/tty/vt/selection.c p[1] = 0x80 | ((c >> 12) & 0x3f); c 156 drivers/tty/vt/selection.c p[2] = 0x80 | ((c >> 6) & 0x3f); c 157 drivers/tty/vt/selection.c p[3] = 0x80 | (c & 0x3f); c 195 drivers/tty/vt/selection.c u32 c; c 326 drivers/tty/vt/selection.c c = sel_pos(i); c 328 drivers/tty/vt/selection.c bp += store_utf8(c, bp); c 330 drivers/tty/vt/selection.c *bp++ = c; c 331 drivers/tty/vt/selection.c if (!isspace(c)) c 544 drivers/tty/vt/vc_screen.c unsigned char c = *con_buf0++; c 548 drivers/tty/vt/vc_screen.c (vcs_scr_readw(vc, org) & 0xff00) | c, org); c 573 drivers/tty/vt/vc_screen.c char c; c 576 drivers/tty/vt/vc_screen.c c = *con_buf0++; c 578 drivers/tty/vt/vc_screen.c vcs_scr_writew(vc, c | c 581 drivers/tty/vt/vc_screen.c 
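to_utf8() in vt/keyboard.c and store_utf8() in vt/selection.c (both listed above) are the same UTF-8 encoder: one to four bytes depending on the code point, with surrogates and other invalid values replaced by U+FFFD. A stand-alone version of that encoding; utf8_encode() is my own name, not a kernel symbol.

/* Stand-alone UTF-8 encoder mirroring the 1/2/3/4-byte ranges used by the
 * vt code above. */
#include <stdio.h>

static int utf8_encode(unsigned int c, unsigned char *p)
{
	if (c < 0x80) {
		p[0] = c;
		return 1;
	} else if (c < 0x800) {
		p[0] = 0xc0 | (c >> 6);
		p[1] = 0x80 | (c & 0x3f);
		return 2;
	} else if (c < 0x10000) {
		if ((c >= 0xd800 && c < 0xe000) || c == 0xfffe || c == 0xffff)
			c = 0xfffd;	/* replacement character, like the vt code */
		p[0] = 0xe0 | (c >> 12);
		p[1] = 0x80 | ((c >> 6) & 0x3f);
		p[2] = 0x80 | (c & 0x3f);
		return 3;
	} else if (c < 0x110000) {
		p[0] = 0xf0 | (c >> 18);
		p[1] = 0x80 | ((c >> 12) & 0x3f);
		p[2] = 0x80 | ((c >> 6) & 0x3f);
		p[3] = 0x80 | (c & 0x3f);
		return 4;
	}
	return 0;			/* out of Unicode range: emit nothing */
}

int main(void)
{
	unsigned char buf[4];
	int i, n = utf8_encode(0x20ac, buf);	/* U+20AC EURO SIGN */

	for (i = 0; i < n; i++)
		printf("%02x ", buf[i]);
	printf("\n");				/* prints: e2 82 ac */
	return 0;
}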
vcs_scr_writew(vc, (c << 8) | c 608 drivers/tty/vt/vc_screen.c unsigned char c; c 610 drivers/tty/vt/vc_screen.c c = *con_buf0++; c 612 drivers/tty/vt/vc_screen.c vcs_scr_writew(vc, (vcs_scr_readw(vc, org) & 0xff) | (c << 8), org); c 614 drivers/tty/vt/vc_screen.c vcs_scr_writew(vc, (vcs_scr_readw(vc, org) & 0xff00) | c, org); c 271 drivers/tty/vt/vt.c struct vt_notifier_param param = { .vc = vc, .c = unicode }; c 1621 drivers/tty/vt/vt.c static void rgb_from_256(int i, struct rgb *c) c 1624 drivers/tty/vt/vt.c c->r = i&1 ? 0xaa : 0x00; c 1625 drivers/tty/vt/vt.c c->g = i&2 ? 0xaa : 0x00; c 1626 drivers/tty/vt/vt.c c->b = i&4 ? 0xaa : 0x00; c 1628 drivers/tty/vt/vt.c c->r = i&1 ? 0xff : 0x55; c 1629 drivers/tty/vt/vt.c c->g = i&2 ? 0xff : 0x55; c 1630 drivers/tty/vt/vt.c c->b = i&4 ? 0xff : 0x55; c 1632 drivers/tty/vt/vt.c c->r = (i - 16) / 36 * 85 / 2; c 1633 drivers/tty/vt/vt.c c->g = (i - 16) / 6 % 6 * 85 / 2; c 1634 drivers/tty/vt/vt.c c->b = (i - 16) % 6 * 85 / 2; c 1636 drivers/tty/vt/vt.c c->r = c->g = c->b = i * 10 - 2312; c 1639 drivers/tty/vt/vt.c static void rgb_foreground(struct vc_data *vc, const struct rgb *c) c 1641 drivers/tty/vt/vt.c u8 hue = 0, max = max3(c->r, c->g, c->b); c 1643 drivers/tty/vt/vt.c if (c->r > max / 2) c 1645 drivers/tty/vt/vt.c if (c->g > max / 2) c 1647 drivers/tty/vt/vt.c if (c->b > max / 2) c 1661 drivers/tty/vt/vt.c static void rgb_background(struct vc_data *vc, const struct rgb *c) c 1665 drivers/tty/vt/vt.c | (c->r&0x80) >> 1 | (c->g&0x80) >> 2 | (c->b&0x80) >> 3; c 1678 drivers/tty/vt/vt.c void(*set_color)(struct vc_data *vc, const struct rgb *c)) c 1680 drivers/tty/vt/vt.c struct rgb c; c 1689 drivers/tty/vt/vt.c rgb_from_256(vc->vc_par[i], &c); c 1692 drivers/tty/vt/vt.c c.r = vc->vc_par[i + 1]; c 1693 drivers/tty/vt/vt.c c.g = vc->vc_par[i + 2]; c 1694 drivers/tty/vt/vt.c c.b = vc->vc_par[i + 3]; c 1699 drivers/tty/vt/vt.c set_color(vc, &c); c 2120 drivers/tty/vt/vt.c static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) c 2126 drivers/tty/vt/vt.c if (vc->vc_state == ESosc && c>=8 && c<=13) /* ... 
except for OSC */ c 2128 drivers/tty/vt/vt.c switch (c) { c 2184 drivers/tty/vt/vt.c switch (c) { c 2237 drivers/tty/vt/vt.c if (c=='P') { /* palette escape sequence */ c 2243 drivers/tty/vt/vt.c } else if (c=='R') { /* reset palette */ c 2246 drivers/tty/vt/vt.c } else if (c>='0' && c<='9') c 2252 drivers/tty/vt/vt.c if (isxdigit(c)) { c 2253 drivers/tty/vt/vt.c vc->vc_par[vc->vc_npar++] = hex_to_bin(c); c 2273 drivers/tty/vt/vt.c if (c == '[') { /* Function key */ c 2277 drivers/tty/vt/vt.c switch (c) { c 2294 drivers/tty/vt/vt.c if (c == ';' && vc->vc_npar < NPAR - 1) { c 2297 drivers/tty/vt/vt.c } else if (c>='0' && c<='9') { c 2299 drivers/tty/vt/vt.c vc->vc_par[vc->vc_npar] += c - '0'; c 2302 drivers/tty/vt/vt.c if (c >= 0x20 && c <= 0x3f) { /* 0x2x, 0x3a and 0x3c - 0x3f */ c 2307 drivers/tty/vt/vt.c switch(c) { c 2348 drivers/tty/vt/vt.c switch(c) { c 2469 drivers/tty/vt/vt.c if (c >= 20 && c <= 0x3f) c 2475 drivers/tty/vt/vt.c switch (c) { c 2490 drivers/tty/vt/vt.c if (c == '8') { c 2501 drivers/tty/vt/vt.c if (c == '0') c 2503 drivers/tty/vt/vt.c else if (c == 'B') c 2505 drivers/tty/vt/vt.c else if (c == 'U') c 2507 drivers/tty/vt/vt.c else if (c == 'K') c 2514 drivers/tty/vt/vt.c if (c == '0') c 2516 drivers/tty/vt/vt.c else if (c == 'B') c 2518 drivers/tty/vt/vt.c else if (c == 'U') c 2520 drivers/tty/vt/vt.c else if (c == 'K') c 2584 drivers/tty/vt/vt.c int c, next_c, tc, ok, n = 0, draw_x = -1; c 2627 drivers/tty/vt/vt.c c = orig; c 2637 drivers/tty/vt/vt.c tc = c; c 2646 drivers/tty/vt/vt.c if ((c & 0xc0) == 0x80) { c 2650 drivers/tty/vt/vt.c vc->vc_utf_char = (vc->vc_utf_char << 6) | (c & 0x3f); c 2657 drivers/tty/vt/vt.c c = vc->vc_utf_char; c 2659 drivers/tty/vt/vt.c if (c <= utf8_length_changes[vc->vc_npar - 1] || c 2660 drivers/tty/vt/vt.c c > utf8_length_changes[vc->vc_npar]) c 2661 drivers/tty/vt/vt.c c = 0xfffd; c 2665 drivers/tty/vt/vt.c c = 0xfffd; c 2673 drivers/tty/vt/vt.c c = 0xfffd; c 2674 drivers/tty/vt/vt.c } else if (c > 0x7f) { c 2677 drivers/tty/vt/vt.c if ((c & 0xe0) == 0xc0) { c 2679 drivers/tty/vt/vt.c vc->vc_utf_char = (c & 0x1f); c 2680 drivers/tty/vt/vt.c } else if ((c & 0xf0) == 0xe0) { c 2682 drivers/tty/vt/vt.c vc->vc_utf_char = (c & 0x0f); c 2683 drivers/tty/vt/vt.c } else if ((c & 0xf8) == 0xf0) { c 2685 drivers/tty/vt/vt.c vc->vc_utf_char = (c & 0x07); c 2686 drivers/tty/vt/vt.c } else if ((c & 0xfc) == 0xf8) { c 2688 drivers/tty/vt/vt.c vc->vc_utf_char = (c & 0x03); c 2689 drivers/tty/vt/vt.c } else if ((c & 0xfe) == 0xfc) { c 2691 drivers/tty/vt/vt.c vc->vc_utf_char = (c & 0x01); c 2694 drivers/tty/vt/vt.c c = 0xfffd; c 2706 drivers/tty/vt/vt.c if ((c >= 0xd800 && c <= 0xdfff) || c == 0xfffe || c == 0xffff) c 2707 drivers/tty/vt/vt.c c = 0xfffd; c 2708 drivers/tty/vt/vt.c tc = c; c 2710 drivers/tty/vt/vt.c tc = vc_translate(vc, c); c 2713 drivers/tty/vt/vt.c param.c = tc; c 2728 drivers/tty/vt/vt.c ok = tc && (c >= 32 || c 2729 drivers/tty/vt/vt.c !(vc->vc_disp_ctrl ? 
(CTRL_ALWAYS >> c) & 1 : c 2730 drivers/tty/vt/vt.c vc->vc_utf || ((CTRL_ACTION >> c) & 1))) c 2731 drivers/tty/vt/vt.c && (c != 127 || vc->vc_disp_ctrl) c 2732 drivers/tty/vt/vt.c && (c != 128+27); c 2736 drivers/tty/vt/vt.c if (is_double_width(c)) c 2746 drivers/tty/vt/vt.c if ((!(vc->vc_utf && !vc->vc_disp_ctrl) || c < 128) && !(c & ~charmask)) { c 2752 drivers/tty/vt/vt.c tc = c; c 2778 drivers/tty/vt/vt.c next_c = c; c 2812 drivers/tty/vt/vt.c notify_write(vc, c); c 2821 drivers/tty/vt/vt.c c = orig; c 2949 drivers/tty/vt/vt.c unsigned char c; c 2982 drivers/tty/vt/vt.c c = *b++; c 2983 drivers/tty/vt/vt.c if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) { c 2987 drivers/tty/vt/vt.c if (c == 8) { /* backspace */ c 2993 drivers/tty/vt/vt.c if (c != 13) c 2998 drivers/tty/vt/vt.c if (c == 10 || c == 13) c 3001 drivers/tty/vt/vt.c vc_uniscr_putc(vc, c); c 3002 drivers/tty/vt/vt.c scr_writew((vc->vc_attr << 8) + c, (unsigned short *)vc->vc_pos); c 3003 drivers/tty/vt/vt.c notify_write(vc, c); c 3021 drivers/tty/vt/vt.c static struct tty_driver *vt_console_device(struct console *c, int *index) c 3023 drivers/tty/vt/vt.c *index = c->index ? c->index-1 : fg_console; c 4470 drivers/tty/vt/vt.c int c; c 4491 drivers/tty/vt/vt.c c = (font.width+7)/8 * 32 * font.charcount; c 4512 drivers/tty/vt/vt.c if (op->data && copy_to_user(op->data, font.data, c)) c 4659 drivers/tty/vt/vt.c u16 c = w & 0xff; c 4662 drivers/tty/vt/vt.c c |= 0x100; c 4663 drivers/tty/vt/vt.c return c; c 4719 drivers/tty/vt/vt.c void vc_scrolldelta_helper(struct vc_data *c, int lines, c 4723 drivers/tty/vt/vt.c ptrdiff_t scr_end = (void *)c->vc_scr_end - base; c 4724 drivers/tty/vt/vt.c ptrdiff_t vorigin = (void *)c->vc_visible_origin - base; c 4725 drivers/tty/vt/vt.c ptrdiff_t origin = (void *)c->vc_origin - base; c 4726 drivers/tty/vt/vt.c int margin = c->vc_size_row * 4; c 4731 drivers/tty/vt/vt.c c->vc_visible_origin = c->vc_origin; c 4738 drivers/tty/vt/vt.c wrap = rolled_over + c->vc_size_row; c 4744 drivers/tty/vt/vt.c from_off = (vorigin - from + wrap) % wrap + lines * c->vc_size_row; c 4755 drivers/tty/vt/vt.c c->vc_visible_origin = ubase + (from + from_off) % wrap; c 418 drivers/usb/atm/ueagle-atm.c #define E1_MAKESA(a, b, c, d) \ c 419 drivers/usb/atm/ueagle-atm.c (((c) & 0xff) << 24 | \ c 1661 drivers/usb/cdns3/gadget.c char c[2] = {ep->name[2], '\0'}; c 1663 drivers/usb/cdns3/gadget.c ret = kstrtoul(c, 10, &num); c 831 drivers/usb/core/config.c int c, i; c 844 drivers/usb/core/config.c for (c = 0; c < dev->descriptor.bNumConfigurations; c++) { c 845 drivers/usb/core/config.c struct usb_host_config *cf = &dev->config[c]; c 61 drivers/usb/core/generic.c struct usb_host_config *c, *best; c 67 drivers/usb/core/generic.c c = udev->config; c 69 drivers/usb/core/generic.c for (i = 0; i < num_configs; (i++, c++)) { c 73 drivers/usb/core/generic.c if (c->desc.bNumInterfaces > 0) c 74 drivers/usb/core/generic.c desc = &c->intf_cache[0]->altsetting->desc; c 98 drivers/usb/core/generic.c if (bus_powered && (c->desc.bmAttributes & c 116 drivers/usb/core/generic.c if (usb_get_max_power(udev, c) > udev->bus_mA) { c 130 drivers/usb/core/generic.c best = c; c 136 drivers/usb/core/generic.c best = c; c 156 drivers/usb/core/generic.c best = c; c 168 drivers/usb/core/generic.c best = c; c 175 drivers/usb/core/generic.c best = c; c 200 drivers/usb/core/generic.c int err, c; c 208 drivers/usb/core/generic.c c = usb_choose_configuration(udev); c 209 drivers/usb/core/generic.c if (c >= 0) { c 210 drivers/usb/core/generic.c err = 
usb_set_configuration(udev, c); c 213 drivers/usb/core/generic.c c, err); c 2623 drivers/usb/core/hub.c int result = 0, c; c 2649 drivers/usb/core/hub.c c = usb_choose_configuration(usb_dev); c 2650 drivers/usb/core/hub.c if (c >= 0) { c 2651 drivers/usb/core/hub.c result = usb_set_configuration(usb_dev, c); c 2654 drivers/usb/core/hub.c "can't set config #%d, error %d\n", c, result); c 55 drivers/usb/core/usb.h struct usb_host_config *c) c 60 drivers/usb/core/usb.h return c->desc.bMaxPower * mul; c 145 drivers/usb/dwc3/dwc3-keystone.c static int kdwc3_remove_core(struct device *dev, void *c) c 217 drivers/usb/early/xhci-dbc.c static inline void xdbc_put_utf16(u16 *s, const char *c, size_t size) c 222 drivers/usb/early/xhci-dbc.c s[i] = cpu_to_le16(c[i]); c 309 drivers/usb/gadget/composite.c void usb_remove_function(struct usb_configuration *c, struct usb_function *f) c 317 drivers/usb/gadget/composite.c f->unbind(c, f); c 430 drivers/usb/gadget/composite.c struct usb_configuration *c) c 434 drivers/usb/gadget/composite.c if (c->MaxPower) c 435 drivers/usb/gadget/composite.c val = c->MaxPower; c 453 drivers/usb/gadget/composite.c struct usb_config_descriptor *c = buf; c 461 drivers/usb/gadget/composite.c c = buf; c 462 drivers/usb/gadget/composite.c c->bLength = USB_DT_CONFIG_SIZE; c 463 drivers/usb/gadget/composite.c c->bDescriptorType = type; c 465 drivers/usb/gadget/composite.c c->bNumInterfaces = config->next_interface_id; c 466 drivers/usb/gadget/composite.c c->bConfigurationValue = config->bConfigurationValue; c 467 drivers/usb/gadget/composite.c c->iConfiguration = config->iConfiguration; c 468 drivers/usb/gadget/composite.c c->bmAttributes = USB_CONFIG_ATT_ONE | config->bmAttributes; c 469 drivers/usb/gadget/composite.c c->bMaxPower = encode_bMaxPower(speed, config); c 497 drivers/usb/gadget/composite.c c->wTotalLength = cpu_to_le16(len); c 504 drivers/usb/gadget/composite.c struct usb_configuration *c; c 526 drivers/usb/gadget/composite.c c = cdev->os_desc_config; c 527 drivers/usb/gadget/composite.c if (c) c 531 drivers/usb/gadget/composite.c c = list_entry(pos, typeof(*c), list); c 534 drivers/usb/gadget/composite.c if (c == cdev->os_desc_config) c 541 drivers/usb/gadget/composite.c if (!c->superspeed_plus) c 545 drivers/usb/gadget/composite.c if (!c->superspeed) c 549 drivers/usb/gadget/composite.c if (!c->highspeed) c 553 drivers/usb/gadget/composite.c if (!c->fullspeed) c 558 drivers/usb/gadget/composite.c return config_buf(c, speed, cdev->req->buf, type); c 567 drivers/usb/gadget/composite.c struct usb_configuration *c; c 583 drivers/usb/gadget/composite.c list_for_each_entry(c, &cdev->configs, list) { c 586 drivers/usb/gadget/composite.c if (!c->superspeed_plus) c 589 drivers/usb/gadget/composite.c if (!c->superspeed) c 592 drivers/usb/gadget/composite.c if (!c->highspeed) c 595 drivers/usb/gadget/composite.c if (!c->fullspeed) c 772 drivers/usb/gadget/composite.c struct usb_configuration *c = NULL; c 778 drivers/usb/gadget/composite.c list_for_each_entry(c, &cdev->configs, list) { c 779 drivers/usb/gadget/composite.c if (c->bConfigurationValue == number) { c 801 drivers/usb/gadget/composite.c number, c ? c->label : "unconfigured"); c 803 drivers/usb/gadget/composite.c if (!c) c 807 drivers/usb/gadget/composite.c cdev->config = c; c 811 drivers/usb/gadget/composite.c struct usb_function *f = c->interface[tmp]; c 858 drivers/usb/gadget/composite.c power = c->MaxPower ? 
c->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW; c 878 drivers/usb/gadget/composite.c struct usb_configuration *c; c 884 drivers/usb/gadget/composite.c list_for_each_entry(c, &cdev->configs, list) { c 885 drivers/usb/gadget/composite.c if (c->bConfigurationValue == config->bConfigurationValue) c 1080 drivers/usb/gadget/composite.c struct usb_configuration *c; c 1101 drivers/usb/gadget/composite.c list_for_each_entry(c, &cdev->configs, list) { c 1102 drivers/usb/gadget/composite.c sp = c->strings; c 1106 drivers/usb/gadget/composite.c list_for_each_entry(f, &c->functions, list) { c 1159 drivers/usb/gadget/composite.c list_for_each_entry(c, &cdev->configs, list) { c 1160 drivers/usb/gadget/composite.c if (c->strings) { c 1161 drivers/usb/gadget/composite.c len = lookup_string(c->strings, buf, language, id); c 1165 drivers/usb/gadget/composite.c list_for_each_entry(f, &c->functions, list) { c 1367 drivers/usb/gadget/composite.c int usb_string_ids_n(struct usb_composite_dev *c, unsigned n) c 1369 drivers/usb/gadget/composite.c unsigned next = c->next_string_id; c 1372 drivers/usb/gadget/composite.c c->next_string_id += n; c 1426 drivers/usb/gadget/composite.c static int count_ext_compat(struct usb_configuration *c) c 1431 drivers/usb/gadget/composite.c for (i = 0; i < c->next_interface_id; ++i) { c 1435 drivers/usb/gadget/composite.c f = c->interface[i]; c 1450 drivers/usb/gadget/composite.c static int fill_ext_compat(struct usb_configuration *c, u8 *buf) c 1456 drivers/usb/gadget/composite.c for (i = 0; i < c->next_interface_id; ++i) { c 1460 drivers/usb/gadget/composite.c f = c->interface[i]; c 1486 drivers/usb/gadget/composite.c static int count_ext_prop(struct usb_configuration *c, int interface) c 1491 drivers/usb/gadget/composite.c f = c->interface[interface]; c 1504 drivers/usb/gadget/composite.c static int len_ext_prop(struct usb_configuration *c, int interface) c 1511 drivers/usb/gadget/composite.c f = c->interface[interface]; c 1522 drivers/usb/gadget/composite.c static int fill_ext_prop(struct usb_configuration *c, int interface, u8 *buf) c 1529 drivers/usb/gadget/composite.c f = c->interface[interface]; c 1916 drivers/usb/gadget/composite.c struct usb_configuration *c; c 1917 drivers/usb/gadget/composite.c list_for_each_entry(c, &cdev->configs, list) c 1918 drivers/usb/gadget/composite.c list_for_each_entry(f, &c->functions, list) c 1948 drivers/usb/gadget/composite.c struct usb_configuration *c; c 1950 drivers/usb/gadget/composite.c c = cdev->config; c 1951 drivers/usb/gadget/composite.c if (!c) c 1955 drivers/usb/gadget/composite.c if (c->setup) { c 1956 drivers/usb/gadget/composite.c value = c->setup(c, ctrl); c 1961 drivers/usb/gadget/composite.c if (!list_is_singular(&c->functions)) c 1963 drivers/usb/gadget/composite.c f = list_first_entry(&c->functions, struct usb_function, c 2038 drivers/usb/gadget/composite.c struct usb_configuration *c; c 2039 drivers/usb/gadget/composite.c c = list_first_entry(&cdev->configs, c 2041 drivers/usb/gadget/composite.c remove_config(cdev, c); c 77 drivers/usb/gadget/configfs.c struct usb_configuration c; c 361 drivers/usb/gadget/configfs.c WARN_ON(!list_empty(&cfg->c.functions)); c 362 drivers/usb/gadget/configfs.c list_del(&cfg->c.list); c 363 drivers/usb/gadget/configfs.c kfree(cfg->c.label); c 372 drivers/usb/gadget/configfs.c struct usb_composite_dev *cdev = cfg->c.cdev; c 423 drivers/usb/gadget/configfs.c struct usb_composite_dev *cdev = cfg->c.cdev; c 464 drivers/usb/gadget/configfs.c return sprintf(page, "%u\n", 
to_config_usb_cfg(item)->c.MaxPower); c 477 drivers/usb/gadget/configfs.c to_config_usb_cfg(item)->c.MaxPower = val; c 485 drivers/usb/gadget/configfs.c to_config_usb_cfg(item)->c.bmAttributes); c 501 drivers/usb/gadget/configfs.c to_config_usb_cfg(item)->c.bmAttributes = val; c 662 drivers/usb/gadget/configfs.c cfg->c.label = kstrdup(buf, GFP_KERNEL); c 663 drivers/usb/gadget/configfs.c if (!cfg->c.label) { c 667 drivers/usb/gadget/configfs.c cfg->c.bConfigurationValue = num; c 668 drivers/usb/gadget/configfs.c cfg->c.MaxPower = CONFIG_USB_GADGET_VBUS_DRAW; c 669 drivers/usb/gadget/configfs.c cfg->c.bmAttributes = USB_CONFIG_ATT_ONE; c 680 drivers/usb/gadget/configfs.c ret = usb_add_config_only(&gi->cdev, &cfg->c); c 686 drivers/usb/gadget/configfs.c kfree(cfg->c.label); c 852 drivers/usb/gadget/configfs.c struct usb_configuration *c; c 856 drivers/usb/gadget/configfs.c list_for_each_entry(c, &cdev->configs, list) { c 857 drivers/usb/gadget/configfs.c if (c == &c_target->c) c 860 drivers/usb/gadget/configfs.c if (c != &c_target->c) { c 870 drivers/usb/gadget/configfs.c cdev->os_desc_config = &c_target->c; c 1212 drivers/usb/gadget/configfs.c struct usb_configuration *c; c 1214 drivers/usb/gadget/configfs.c list_for_each_entry(c, &gi->cdev.configs, list) { c 1218 drivers/usb/gadget/configfs.c cfg = container_of(c, struct config_usb_cfg, c); c 1220 drivers/usb/gadget/configfs.c list_for_each_entry_safe(f, tmp, &c->functions, list) { c 1227 drivers/usb/gadget/configfs.c f->unbind(c, f); c 1230 drivers/usb/gadget/configfs.c c->next_interface_id = 0; c 1231 drivers/usb/gadget/configfs.c memset(c->interface, 0, sizeof(c->interface)); c 1232 drivers/usb/gadget/configfs.c c->superspeed_plus = 0; c 1233 drivers/usb/gadget/configfs.c c->superspeed = 0; c 1234 drivers/usb/gadget/configfs.c c->highspeed = 0; c 1235 drivers/usb/gadget/configfs.c c->fullspeed = 0; c 1246 drivers/usb/gadget/configfs.c struct usb_configuration *c; c 1268 drivers/usb/gadget/configfs.c list_for_each_entry(c, &gi->cdev.configs, list) { c 1271 drivers/usb/gadget/configfs.c cfg = container_of(c, struct config_usb_cfg, c); c 1274 drivers/usb/gadget/configfs.c c->label, c->bConfigurationValue, c 1328 drivers/usb/gadget/configfs.c list_for_each_entry(c, &gi->cdev.configs, list) { c 1335 drivers/usb/gadget/configfs.c c->descriptors = otg_desc; c 1337 drivers/usb/gadget/configfs.c cfg = container_of(c, struct config_usb_cfg, c); c 1352 drivers/usb/gadget/configfs.c c->iConfiguration = s[0].id; c 1357 drivers/usb/gadget/configfs.c ret = usb_add_function(c, f); c 606 drivers/usb/gadget/function/f_acm.c acm_bind(struct usb_configuration *c, struct usb_function *f) c 608 drivers/usb/gadget/function/f_acm.c struct usb_composite_dev *cdev = c->cdev; c 628 drivers/usb/gadget/function/f_acm.c status = usb_interface_id(c, f); c 637 drivers/usb/gadget/function/f_acm.c status = usb_interface_id(c, f); c 694 drivers/usb/gadget/function/f_acm.c gadget_is_superspeed(c->cdev->gadget) ? "super" : c 695 drivers/usb/gadget/function/f_acm.c gadget_is_dualspeed(c->cdev->gadget) ? 
"dual" : "full", c 709 drivers/usb/gadget/function/f_acm.c static void acm_unbind(struct usb_configuration *c, struct usb_function *f) c 683 drivers/usb/gadget/function/f_ecm.c ecm_bind(struct usb_configuration *c, struct usb_function *f) c 685 drivers/usb/gadget/function/f_ecm.c struct usb_composite_dev *cdev = c->cdev; c 727 drivers/usb/gadget/function/f_ecm.c status = usb_interface_id(c, f); c 736 drivers/usb/gadget/function/f_ecm.c status = usb_interface_id(c, f); c 807 drivers/usb/gadget/function/f_ecm.c gadget_is_superspeed(c->cdev->gadget) ? "super" : c 808 drivers/usb/gadget/function/f_ecm.c gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", c 905 drivers/usb/gadget/function/f_ecm.c static void ecm_unbind(struct usb_configuration *c, struct usb_function *f) c 909 drivers/usb/gadget/function/f_ecm.c DBG(c->cdev, "ecm unbind\n"); c 240 drivers/usb/gadget/function/f_eem.c static int eem_bind(struct usb_configuration *c, struct usb_function *f) c 242 drivers/usb/gadget/function/f_eem.c struct usb_composite_dev *cdev = c->cdev; c 275 drivers/usb/gadget/function/f_eem.c status = usb_interface_id(c, f); c 312 drivers/usb/gadget/function/f_eem.c gadget_is_superspeed(c->cdev->gadget) ? "super" : c 313 drivers/usb/gadget/function/f_eem.c gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", c 605 drivers/usb/gadget/function/f_eem.c static void eem_unbind(struct usb_configuration *c, struct usb_function *f) c 607 drivers/usb/gadget/function/f_eem.c DBG(c->cdev, "eem unbind\n"); c 3044 drivers/usb/gadget/function/f_fs.c struct usb_configuration *c) c 3069 drivers/usb/gadget/function/f_fs.c func->conf = c; c 3070 drivers/usb/gadget/function/f_fs.c func->gadget = c->cdev->gadget; c 3080 drivers/usb/gadget/function/f_fs.c ret = functionfs_bind(func->ffs, c->cdev); c 3090 drivers/usb/gadget/function/f_fs.c static int _ffs_func_bind(struct usb_configuration *c, c 3114 drivers/usb/gadget/function/f_fs.c c->cdev->use_os_string ? ffs->interfaces_count : 0); c 3116 drivers/usb/gadget/function/f_fs.c c->cdev->use_os_string ? ffs->interfaces_count : 0); c 3118 drivers/usb/gadget/function/f_fs.c c->cdev->use_os_string ? ffs->interfaces_count : 0); c 3221 drivers/usb/gadget/function/f_fs.c if (c->cdev->use_os_string) { c 3242 drivers/usb/gadget/function/f_fs.c c->cdev->use_os_string ? 
ffs->interfaces_count : 0; c 3253 drivers/usb/gadget/function/f_fs.c static int ffs_func_bind(struct usb_configuration *c, c 3256 drivers/usb/gadget/function/f_fs.c struct f_fs_opts *ffs_opts = ffs_do_functionfs_bind(f, c); c 3263 drivers/usb/gadget/function/f_fs.c ret = _ffs_func_bind(c, f); c 3566 drivers/usb/gadget/function/f_fs.c static void ffs_func_unbind(struct usb_configuration *c, c 744 drivers/usb/gadget/function/f_hid.c static int hidg_bind(struct usb_configuration *c, struct usb_function *f) c 754 drivers/usb/gadget/function/f_hid.c us = usb_gstrings_attach(c->cdev, ct_func_strings, c 761 drivers/usb/gadget/function/f_hid.c status = usb_interface_id(c, f); c 768 drivers/usb/gadget/function/f_hid.c ep = usb_ep_autoconfig(c->cdev->gadget, &hidg_fs_in_ep_desc); c 773 drivers/usb/gadget/function/f_hid.c ep = usb_ep_autoconfig(c->cdev->gadget, &hidg_fs_out_ep_desc); c 1075 drivers/usb/gadget/function/f_hid.c static void hidg_unbind(struct usb_configuration *c, struct usb_function *f) c 166 drivers/usb/gadget/function/f_loopback.c static int loopback_bind(struct usb_configuration *c, struct usb_function *f) c 168 drivers/usb/gadget/function/f_loopback.c struct usb_composite_dev *cdev = c->cdev; c 174 drivers/usb/gadget/function/f_loopback.c id = usb_interface_id(c, f); c 215 drivers/usb/gadget/function/f_loopback.c (gadget_is_superspeed(c->cdev->gadget) ? "super" : c 216 drivers/usb/gadget/function/f_loopback.c (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")), c 2894 drivers/usb/gadget/function/f_mass_storage.c static int fsg_bind(struct usb_configuration *c, struct usb_function *f) c 2898 drivers/usb/gadget/function/f_mass_storage.c struct usb_gadget *gadget = c->cdev->gadget; c 2914 drivers/usb/gadget/function/f_mass_storage.c ret = fsg_common_set_cdev(fsg->common, c->cdev, c 2939 drivers/usb/gadget/function/f_mass_storage.c i = usb_interface_id(c, f); c 2994 drivers/usb/gadget/function/f_mass_storage.c static void fsg_unbind(struct usb_configuration *c, struct usb_function *f) c 865 drivers/usb/gadget/function/f_midi.c static int f_midi_bind(struct usb_configuration *c, struct usb_function *f) c 872 drivers/usb/gadget/function/f_midi.c struct usb_composite_dev *cdev = c->cdev; c 884 drivers/usb/gadget/function/f_midi.c us = usb_gstrings_attach(c->cdev, midi_strings, c 893 drivers/usb/gadget/function/f_midi.c status = usb_interface_id(c, f); c 898 drivers/usb/gadget/function/f_midi.c status = usb_interface_id(c, f); c 1024 drivers/usb/gadget/function/f_midi.c if (gadget_is_dualspeed(c->cdev->gadget)) { c 1032 drivers/usb/gadget/function/f_midi.c if (gadget_is_superspeed(c->cdev->gadget)) { c 1158 drivers/usb/gadget/function/f_midi.c char *c; c 1166 drivers/usb/gadget/function/f_midi.c c = kstrndup(page, len, GFP_KERNEL); c 1167 drivers/usb/gadget/function/f_midi.c if (!c) { c 1173 drivers/usb/gadget/function/f_midi.c opts->id = c; c 1269 drivers/usb/gadget/function/f_midi.c static void f_midi_unbind(struct usb_configuration *c, struct usb_function *f) c 1386 drivers/usb/gadget/function/f_ncm.c static int ncm_bind(struct usb_configuration *c, struct usb_function *f) c 1388 drivers/usb/gadget/function/f_ncm.c struct usb_composite_dev *cdev = c->cdev; c 1438 drivers/usb/gadget/function/f_ncm.c status = usb_interface_id(c, f); c 1451 drivers/usb/gadget/function/f_ncm.c status = usb_interface_id(c, f); c 1523 drivers/usb/gadget/function/f_ncm.c gadget_is_superspeed(c->cdev->gadget) ? "super" : c 1524 drivers/usb/gadget/function/f_ncm.c gadget_is_dualspeed(c->cdev->gadget) ? 
"dual" : "full", c 1642 drivers/usb/gadget/function/f_ncm.c static void ncm_unbind(struct usb_configuration *c, struct usb_function *f) c 1646 drivers/usb/gadget/function/f_ncm.c DBG(c->cdev, "ncm unbind\n"); c 288 drivers/usb/gadget/function/f_obex.c static inline bool can_support_obex(struct usb_configuration *c) c 295 drivers/usb/gadget/function/f_obex.c if (!gadget_is_altset_supported(c->cdev->gadget)) c 302 drivers/usb/gadget/function/f_obex.c static int obex_bind(struct usb_configuration *c, struct usb_function *f) c 304 drivers/usb/gadget/function/f_obex.c struct usb_composite_dev *cdev = c->cdev; c 310 drivers/usb/gadget/function/f_obex.c if (!can_support_obex(c)) c 323 drivers/usb/gadget/function/f_obex.c status = usb_interface_id(c, f); c 331 drivers/usb/gadget/function/f_obex.c status = usb_interface_id(c, f); c 370 drivers/usb/gadget/function/f_obex.c gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", c 454 drivers/usb/gadget/function/f_obex.c static void obex_unbind(struct usb_configuration *c, struct usb_function *f) c 478 drivers/usb/gadget/function/f_phonet.c static int pn_bind(struct usb_configuration *c, struct usb_function *f) c 480 drivers/usb/gadget/function/f_phonet.c struct usb_composite_dev *cdev = c->cdev; c 506 drivers/usb/gadget/function/f_phonet.c status = usb_interface_id(c, f); c 512 drivers/usb/gadget/function/f_phonet.c status = usb_interface_id(c, f); c 649 drivers/usb/gadget/function/f_phonet.c static void pn_unbind(struct usb_configuration *c, struct usb_function *f) c 1013 drivers/usb/gadget/function/f_printer.c static int printer_func_bind(struct usb_configuration *c, c 1016 drivers/usb/gadget/function/f_printer.c struct usb_gadget *gadget = c->cdev->gadget; c 1019 drivers/usb/gadget/function/f_printer.c struct usb_composite_dev *cdev = c->cdev; c 1028 drivers/usb/gadget/function/f_printer.c id = usb_interface_id(c, f); c 1359 drivers/usb/gadget/function/f_printer.c static void printer_func_unbind(struct usb_configuration *c, c 658 drivers/usb/gadget/function/f_rndis.c static inline bool can_support_rndis(struct usb_configuration *c) c 667 drivers/usb/gadget/function/f_rndis.c rndis_bind(struct usb_configuration *c, struct usb_function *f) c 669 drivers/usb/gadget/function/f_rndis.c struct usb_composite_dev *cdev = c->cdev; c 677 drivers/usb/gadget/function/f_rndis.c if (!can_support_rndis(c)) c 721 drivers/usb/gadget/function/f_rndis.c status = usb_interface_id(c, f); c 734 drivers/usb/gadget/function/f_rndis.c status = usb_interface_id(c, f); c 813 drivers/usb/gadget/function/f_rndis.c gadget_is_superspeed(c->cdev->gadget) ? "super" : c 814 drivers/usb/gadget/function/f_rndis.c gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", c 968 drivers/usb/gadget/function/f_rndis.c static void rndis_unbind(struct usb_configuration *c, struct usb_function *f) c 186 drivers/usb/gadget/function/f_serial.c static int gser_bind(struct usb_configuration *c, struct usb_function *f) c 188 drivers/usb/gadget/function/f_serial.c struct usb_composite_dev *cdev = c->cdev; c 199 drivers/usb/gadget/function/f_serial.c status = usb_string_id(c->cdev); c 206 drivers/usb/gadget/function/f_serial.c status = usb_interface_id(c, f); c 241 drivers/usb/gadget/function/f_serial.c gadget_is_superspeed(c->cdev->gadget) ? "super" : c 242 drivers/usb/gadget/function/f_serial.c gadget_is_dualspeed(c->cdev->gadget) ? 
"dual" : "full", c 325 drivers/usb/gadget/function/f_serial.c static void gser_unbind(struct usb_configuration *c, struct usb_function *f) c 317 drivers/usb/gadget/function/f_sourcesink.c sourcesink_bind(struct usb_configuration *c, struct usb_function *f) c 319 drivers/usb/gadget/function/f_sourcesink.c struct usb_composite_dev *cdev = c->cdev; c 325 drivers/usb/gadget/function/f_sourcesink.c id = usb_interface_id(c, f); c 439 drivers/usb/gadget/function/f_sourcesink.c (gadget_is_superspeed(c->cdev->gadget) ? "super" : c 440 drivers/usb/gadget/function/f_sourcesink.c (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")), c 764 drivers/usb/gadget/function/f_sourcesink.c struct usb_configuration *c = f->config; c 765 drivers/usb/gadget/function/f_sourcesink.c struct usb_request *req = c->cdev->req; c 810 drivers/usb/gadget/function/f_sourcesink.c VDBG(c->cdev, c 818 drivers/usb/gadget/function/f_sourcesink.c VDBG(c->cdev, "source/sink req%02x.%02x v%04x i%04x l%d\n", c 823 drivers/usb/gadget/function/f_sourcesink.c value = usb_ep_queue(c->cdev->gadget->ep0, req, GFP_ATOMIC); c 825 drivers/usb/gadget/function/f_sourcesink.c ERROR(c->cdev, "source/sink response, err %d\n", c 292 drivers/usb/gadget/function/f_subset.c geth_bind(struct usb_configuration *c, struct usb_function *f) c 294 drivers/usb/gadget/function/f_subset.c struct usb_composite_dev *cdev = c->cdev; c 330 drivers/usb/gadget/function/f_subset.c status = usb_interface_id(c, f); c 371 drivers/usb/gadget/function/f_subset.c gadget_is_superspeed(c->cdev->gadget) ? "super" : c 372 drivers/usb/gadget/function/f_subset.c gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", c 459 drivers/usb/gadget/function/f_subset.c static void geth_unbind(struct usb_configuration *c, struct usb_function *f) c 1987 drivers/usb/gadget/function/f_tcm.c static int tcm_bind(struct usb_configuration *c, struct usb_function *f) c 1991 drivers/usb/gadget/function/f_tcm.c struct usb_gadget *gadget = c->cdev->gadget; c 2005 drivers/usb/gadget/function/f_tcm.c us = usb_gstrings_attach(c->cdev, tcm_strings, c 2012 drivers/usb/gadget/function/f_tcm.c iface = usb_interface_id(c, f); c 2274 drivers/usb/gadget/function/f_tcm.c static void tcm_unbind(struct usb_configuration *c, struct usb_function *f) c 508 drivers/usb/gadget/function/f_uac1.c static int f_audio_bind(struct usb_configuration *c, struct usb_function *f) c 510 drivers/usb/gadget/function/f_uac1.c struct usb_composite_dev *cdev = c->cdev; c 559 drivers/usb/gadget/function/f_uac1.c status = usb_interface_id(c, f); c 566 drivers/usb/gadget/function/f_uac1.c status = usb_interface_id(c, f); c 575 drivers/usb/gadget/function/f_uac1.c status = usb_interface_id(c, f); c 761 drivers/usb/gadget/function/f_uac1.c static void f_audio_unbind(struct usb_configuration *c, struct usb_function *f) c 698 drivers/usb/gadget/function/f_uac1_legacy.c f_audio_bind(struct usb_configuration *c, struct usb_function *f) c 700 drivers/usb/gadget/function/f_uac1_legacy.c struct usb_composite_dev *cdev = c->cdev; c 708 drivers/usb/gadget/function/f_uac1_legacy.c audio->card.gadget = c->cdev->gadget; c 731 drivers/usb/gadget/function/f_uac1_legacy.c status = usb_interface_id(c, f); c 738 drivers/usb/gadget/function/f_uac1_legacy.c status = usb_interface_id(c, f); c 977 drivers/usb/gadget/function/f_uac1_legacy.c static void f_audio_unbind(struct usb_configuration *c, struct usb_function *f) c 830 drivers/usb/gadget/function/f_uac2.c struct cntrl_cur_lay3 c; c 831 drivers/usb/gadget/function/f_uac2.c memset(&c, 0, 
sizeof(struct cntrl_cur_lay3)); c 834 drivers/usb/gadget/function/f_uac2.c c.dCUR = cpu_to_le32(p_srate); c 836 drivers/usb/gadget/function/f_uac2.c c.dCUR = cpu_to_le32(c_srate); c 838 drivers/usb/gadget/function/f_uac2.c value = min_t(unsigned, w_length, sizeof c); c 839 drivers/usb/gadget/function/f_uac2.c memcpy(req->buf, &c, value); c 1101 drivers/usb/gadget/function/f_uac2.c static void afunc_unbind(struct usb_configuration *c, struct usb_function *f) c 585 drivers/usb/gadget/function/f_uvc.c uvc_function_bind(struct usb_configuration *c, struct usb_function *f) c 587 drivers/usb/gadget/function/f_uvc.c struct usb_composite_dev *cdev = c->cdev; c 654 drivers/usb/gadget/function/f_uvc.c if (gadget_is_superspeed(c->cdev->gadget)) c 685 drivers/usb/gadget/function/f_uvc.c if ((ret = usb_interface_id(c, f)) < 0) c 692 drivers/usb/gadget/function/f_uvc.c if ((ret = usb_interface_id(c, f)) < 0) c 714 drivers/usb/gadget/function/f_uvc.c if (gadget_is_superspeed(c->cdev->gadget)) { c 880 drivers/usb/gadget/function/f_uvc.c static void uvc_unbind(struct usb_configuration *c, struct usb_function *f) c 882 drivers/usb/gadget/function/f_uvc.c struct usb_composite_dev *cdev = c->cdev; c 1125 drivers/usb/gadget/function/rndis.c char c; c 1126 drivers/usb/gadget/function/rndis.c if (get_user(c, buffer)) c 1128 drivers/usb/gadget/function/rndis.c switch (c) { c 1140 drivers/usb/gadget/function/rndis.c speed = speed * 10 + c - '0'; c 1152 drivers/usb/gadget/function/rndis.c else pr_debug("%c is not valid\n", c); c 23 drivers/usb/gadget/function/u_phonet.h int phonet_bind_config(struct usb_configuration *c, struct net_device *dev); c 65 drivers/usb/gadget/function/u_serial.h int gser_bind_config(struct usb_configuration *c, u8 port_num); c 66 drivers/usb/gadget/function/u_serial.h int obex_bind_config(struct usb_configuration *c, u8 port_num); c 106 drivers/usb/gadget/legacy/acm_ms.c static int acm_ms_do_config(struct usb_configuration *c) c 111 drivers/usb/gadget/legacy/acm_ms.c if (gadget_is_otg(c->cdev->gadget)) { c 112 drivers/usb/gadget/legacy/acm_ms.c c->descriptors = otg_desc; c 113 drivers/usb/gadget/legacy/acm_ms.c c->bmAttributes |= USB_CONFIG_ATT_WAKEUP; c 128 drivers/usb/gadget/legacy/acm_ms.c status = usb_add_function(c, f_acm); c 132 drivers/usb/gadget/legacy/acm_ms.c status = usb_add_function(c, f_msg); c 138 drivers/usb/gadget/legacy/acm_ms.c usb_remove_function(c, f_acm); c 187 drivers/usb/gadget/legacy/audio.c static int audio_do_config(struct usb_configuration *c) c 193 drivers/usb/gadget/legacy/audio.c if (gadget_is_otg(c->cdev->gadget)) { c 194 drivers/usb/gadget/legacy/audio.c c->descriptors = otg_desc; c 195 drivers/usb/gadget/legacy/audio.c c->bmAttributes |= USB_CONFIG_ATT_WAKEUP; c 205 drivers/usb/gadget/legacy/audio.c status = usb_add_function(c, f_uac1); c 217 drivers/usb/gadget/legacy/audio.c status = usb_add_function(c, f_uac2); c 89 drivers/usb/gadget/legacy/cdc2.c static int cdc_do_config(struct usb_configuration *c) c 93 drivers/usb/gadget/legacy/cdc2.c if (gadget_is_otg(c->cdev->gadget)) { c 94 drivers/usb/gadget/legacy/cdc2.c c->descriptors = otg_desc; c 95 drivers/usb/gadget/legacy/cdc2.c c->bmAttributes |= USB_CONFIG_ATT_WAKEUP; c 104 drivers/usb/gadget/legacy/cdc2.c status = usb_add_function(c, f_ecm); c 114 drivers/usb/gadget/legacy/cdc2.c status = usb_add_function(c, f_acm); c 122 drivers/usb/gadget/legacy/cdc2.c usb_remove_function(c, f_ecm); c 68 drivers/usb/gadget/legacy/dbgp.c char c; c 73 drivers/usb/gadget/legacy/dbgp.c c = buf[len-1]; c 74 
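The legacy gadget drivers listed around here (acm_ms.c, cdc2.c, multi.c, ncm.c, ...) implement the same configuration-bind shape: optionally attach the OTG descriptor, then usb_add_function() each function instance and unwind with usb_remove_function() on failure. A hedged sketch of that shape; the f_foo/f_bar instances and the otg_desc array are placeholders assumed to be set up elsewhere, not taken from any listed driver.

/* Sketch of a composite-gadget config bind callback. */
#include <linux/usb/composite.h>
#include <linux/usb/gadget.h>

static struct usb_function *f_foo;	/* placeholder function instances */
static struct usb_function *f_bar;
static struct usb_descriptor_header *otg_desc[2];

static int example_do_config(struct usb_configuration *c)
{
	int status;

	if (gadget_is_otg(c->cdev->gadget)) {
		c->descriptors = otg_desc;
		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
	}

	status = usb_add_function(c, f_foo);
	if (status < 0)
		return status;

	status = usb_add_function(c, f_bar);
	if (status < 0)
		usb_remove_function(c, f_foo);	/* unwind the first function */

	return status;
}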
drivers/usb/gadget/legacy/dbgp.c if (c != 0) c 77 drivers/usb/gadget/legacy/dbgp.c printk(KERN_NOTICE "%s%c", buf, c); c 208 drivers/usb/gadget/legacy/ether.c static int rndis_do_config(struct usb_configuration *c) c 214 drivers/usb/gadget/legacy/ether.c if (gadget_is_otg(c->cdev->gadget)) { c 215 drivers/usb/gadget/legacy/ether.c c->descriptors = otg_desc; c 216 drivers/usb/gadget/legacy/ether.c c->bmAttributes |= USB_CONFIG_ATT_WAKEUP; c 223 drivers/usb/gadget/legacy/ether.c status = usb_add_function(c, f_rndis); c 250 drivers/usb/gadget/legacy/ether.c static int eth_do_config(struct usb_configuration *c) c 256 drivers/usb/gadget/legacy/ether.c if (gadget_is_otg(c->cdev->gadget)) { c 257 drivers/usb/gadget/legacy/ether.c c->descriptors = otg_desc; c 258 drivers/usb/gadget/legacy/ether.c c->bmAttributes |= USB_CONFIG_ATT_WAKEUP; c 266 drivers/usb/gadget/legacy/ether.c status = usb_add_function(c, f_eem); c 271 drivers/usb/gadget/legacy/ether.c } else if (can_support_ecm(c->cdev->gadget)) { c 276 drivers/usb/gadget/legacy/ether.c status = usb_add_function(c, f_ecm); c 286 drivers/usb/gadget/legacy/ether.c status = usb_add_function(c, f_geth); c 34 drivers/usb/gadget/legacy/g_ffs.c static int eth_bind_config(struct usb_configuration *c); c 41 drivers/usb/gadget/legacy/g_ffs.c static int bind_rndis_config(struct usb_configuration *c); c 115 drivers/usb/gadget/legacy/g_ffs.c struct usb_configuration c; c 116 drivers/usb/gadget/legacy/g_ffs.c int (*eth)(struct usb_configuration *c); c 145 drivers/usb/gadget/legacy/g_ffs.c static int gfs_do_config(struct usb_configuration *c); c 406 drivers/usb/gadget/legacy/g_ffs.c struct gfs_configuration *c = gfs_configurations + i; c 409 drivers/usb/gadget/legacy/g_ffs.c c->c.label = gfs_strings[sid].s; c 410 drivers/usb/gadget/legacy/g_ffs.c c->c.iConfiguration = gfs_strings[sid].id; c 411 drivers/usb/gadget/legacy/g_ffs.c c->c.bConfigurationValue = 1 + i; c 412 drivers/usb/gadget/legacy/g_ffs.c c->c.bmAttributes = USB_CONFIG_ATT_SELFPOWER; c 414 drivers/usb/gadget/legacy/g_ffs.c c->num = i; c 416 drivers/usb/gadget/legacy/g_ffs.c ret = usb_add_config(cdev, &c->c, gfs_do_config); c 478 drivers/usb/gadget/legacy/g_ffs.c static int gfs_do_config(struct usb_configuration *c) c 481 drivers/usb/gadget/legacy/g_ffs.c container_of(c, struct gfs_configuration, c); c 488 drivers/usb/gadget/legacy/g_ffs.c if (gadget_is_otg(c->cdev->gadget)) { c 489 drivers/usb/gadget/legacy/g_ffs.c c->descriptors = gfs_otg_desc; c 490 drivers/usb/gadget/legacy/g_ffs.c c->bmAttributes |= USB_CONFIG_ATT_WAKEUP; c 494 drivers/usb/gadget/legacy/g_ffs.c ret = gc->eth(c); c 505 drivers/usb/gadget/legacy/g_ffs.c ret = usb_add_function(c, f_ffs[gc->num][i]); c 522 drivers/usb/gadget/legacy/g_ffs.c if (c->next_interface_id < ARRAY_SIZE(c->interface)) c 523 drivers/usb/gadget/legacy/g_ffs.c c->interface[c->next_interface_id] = NULL; c 529 drivers/usb/gadget/legacy/g_ffs.c usb_remove_function(c, f_ffs[gc->num][i]); c 537 drivers/usb/gadget/legacy/g_ffs.c static int eth_bind_config(struct usb_configuration *c) c 541 drivers/usb/gadget/legacy/g_ffs.c if (can_support_ecm(c->cdev->gadget)) { c 546 drivers/usb/gadget/legacy/g_ffs.c status = usb_add_function(c, f_ecm); c 555 drivers/usb/gadget/legacy/g_ffs.c status = usb_add_function(c, f_geth); c 566 drivers/usb/gadget/legacy/g_ffs.c static int bind_rndis_config(struct usb_configuration *c) c 574 drivers/usb/gadget/legacy/g_ffs.c status = usb_add_function(c, f_rndis); c 124 drivers/usb/gadget/legacy/gmidi.c static int midi_bind_config(struct 
usb_configuration *c) c 132 drivers/usb/gadget/legacy/gmidi.c status = usb_add_function(c, f_midi); c 90 drivers/usb/gadget/legacy/hid.c static int do_config(struct usb_configuration *c) c 95 drivers/usb/gadget/legacy/hid.c if (gadget_is_otg(c->cdev->gadget)) { c 96 drivers/usb/gadget/legacy/hid.c c->descriptors = otg_desc; c 97 drivers/usb/gadget/legacy/hid.c c->bmAttributes |= USB_CONFIG_ATT_WAKEUP; c 104 drivers/usb/gadget/legacy/hid.c status = usb_add_function(c, e->f); c 116 drivers/usb/gadget/legacy/hid.c usb_remove_function(c, n->f); c 106 drivers/usb/gadget/legacy/mass_storage.c static int msg_do_config(struct usb_configuration *c) c 111 drivers/usb/gadget/legacy/mass_storage.c if (gadget_is_otg(c->cdev->gadget)) { c 112 drivers/usb/gadget/legacy/mass_storage.c c->descriptors = otg_desc; c 113 drivers/usb/gadget/legacy/mass_storage.c c->bmAttributes |= USB_CONFIG_ATT_WAKEUP; c 122 drivers/usb/gadget/legacy/mass_storage.c ret = usb_add_function(c, f_msg); c 134 drivers/usb/gadget/legacy/multi.c static int rndis_do_config(struct usb_configuration *c) c 138 drivers/usb/gadget/legacy/multi.c if (gadget_is_otg(c->cdev->gadget)) { c 139 drivers/usb/gadget/legacy/multi.c c->descriptors = otg_desc; c 140 drivers/usb/gadget/legacy/multi.c c->bmAttributes |= USB_CONFIG_ATT_WAKEUP; c 147 drivers/usb/gadget/legacy/multi.c ret = usb_add_function(c, f_rndis); c 157 drivers/usb/gadget/legacy/multi.c ret = usb_add_function(c, f_acm_rndis); c 167 drivers/usb/gadget/legacy/multi.c ret = usb_add_function(c, f_msg_rndis); c 175 drivers/usb/gadget/legacy/multi.c usb_remove_function(c, f_acm_rndis); c 179 drivers/usb/gadget/legacy/multi.c usb_remove_function(c, f_rndis); c 216 drivers/usb/gadget/legacy/multi.c static int cdc_do_config(struct usb_configuration *c) c 220 drivers/usb/gadget/legacy/multi.c if (gadget_is_otg(c->cdev->gadget)) { c 221 drivers/usb/gadget/legacy/multi.c c->descriptors = otg_desc; c 222 drivers/usb/gadget/legacy/multi.c c->bmAttributes |= USB_CONFIG_ATT_WAKEUP; c 229 drivers/usb/gadget/legacy/multi.c ret = usb_add_function(c, f_ecm); c 240 drivers/usb/gadget/legacy/multi.c ret = usb_add_function(c, f_acm_multi); c 250 drivers/usb/gadget/legacy/multi.c ret = usb_add_function(c, f_msg_multi); c 258 drivers/usb/gadget/legacy/multi.c usb_remove_function(c, f_acm_multi); c 262 drivers/usb/gadget/legacy/multi.c usb_remove_function(c, f_ecm); c 93 drivers/usb/gadget/legacy/ncm.c static int ncm_do_config(struct usb_configuration *c) c 99 drivers/usb/gadget/legacy/ncm.c if (gadget_is_otg(c->cdev->gadget)) { c 100 drivers/usb/gadget/legacy/ncm.c c->descriptors = otg_desc; c 101 drivers/usb/gadget/legacy/ncm.c c->bmAttributes |= USB_CONFIG_ATT_WAKEUP; c 108 drivers/usb/gadget/legacy/ncm.c status = usb_add_function(c, f_ncm); c 144 drivers/usb/gadget/legacy/nokia.c static int nokia_bind_config(struct usb_configuration *c) c 194 drivers/usb/gadget/legacy/nokia.c phonet_stat = usb_add_function(c, f_phonet); c 200 drivers/usb/gadget/legacy/nokia.c obex1_stat = usb_add_function(c, f_obex1); c 206 drivers/usb/gadget/legacy/nokia.c obex2_stat = usb_add_function(c, f_obex2); c 211 drivers/usb/gadget/legacy/nokia.c status = usb_add_function(c, f_acm); c 215 drivers/usb/gadget/legacy/nokia.c status = usb_add_function(c, f_ecm); c 221 drivers/usb/gadget/legacy/nokia.c status = usb_add_function(c, f_msg); c 225 drivers/usb/gadget/legacy/nokia.c if (c == &nokia_config_500ma_driver) { c 243 drivers/usb/gadget/legacy/nokia.c usb_remove_function(c, f_ecm); c 245 drivers/usb/gadget/legacy/nokia.c 
usb_remove_function(c, f_acm); c 248 drivers/usb/gadget/legacy/nokia.c usb_remove_function(c, f_obex2); c 250 drivers/usb/gadget/legacy/nokia.c usb_remove_function(c, f_obex1); c 252 drivers/usb/gadget/legacy/nokia.c usb_remove_function(c, f_phonet); c 114 drivers/usb/gadget/legacy/printer.c static int printer_do_config(struct usb_configuration *c) c 116 drivers/usb/gadget/legacy/printer.c struct usb_gadget *gadget = c->cdev->gadget; c 132 drivers/usb/gadget/legacy/printer.c status = usb_add_function(c, f_printer); c 113 drivers/usb/gadget/legacy/serial.c struct usb_configuration *c, const char *f_name) c 118 drivers/usb/gadget/legacy/serial.c ret = usb_add_config_only(cdev, c); c 136 drivers/usb/gadget/legacy/serial.c ret = usb_add_function(c, f_serial[i]); c 151 drivers/usb/gadget/legacy/serial.c usb_remove_function(c, f_serial[i]); c 72 drivers/usb/gadget/legacy/tcm_usb_gadget.c static int tcm_do_config(struct usb_configuration *c) c 80 drivers/usb/gadget/legacy/tcm_usb_gadget.c status = usb_add_function(c, f_tcm); c 331 drivers/usb/gadget/legacy/webcam.c webcam_config_bind(struct usb_configuration *c) c 339 drivers/usb/gadget/legacy/webcam.c status = usb_add_function(c, f_uvc); c 212 drivers/usb/gadget/legacy/zero.c static int ss_config_setup(struct usb_configuration *c, c 312 drivers/usb/gadget/udc/lpc32xx_udc.c #define USBD_CMD_CODE(c) ((c) << 16) c 281 drivers/usb/gadget/udc/pxa27x_udc.h #define PXA_EP_IN_BULK(i, adr, c, f, a) PXA_EP_BULK(i, adr, 1, c, f, a) c 282 drivers/usb/gadget/udc/pxa27x_udc.h #define PXA_EP_OUT_BULK(i, adr, c, f, a) PXA_EP_BULK(i, adr, 0, c, f, a) c 283 drivers/usb/gadget/udc/pxa27x_udc.h #define PXA_EP_IN_ISO(i, adr, c, f, a) PXA_EP_ISO(i, adr, 1, c, f, a) c 284 drivers/usb/gadget/udc/pxa27x_udc.h #define PXA_EP_OUT_ISO(i, adr, c, f, a) PXA_EP_ISO(i, adr, 0, c, f, a) c 285 drivers/usb/gadget/udc/pxa27x_udc.h #define PXA_EP_IN_INT(i, adr, c, f, a) PXA_EP_INT(i, adr, 1, c, f, a) c 238 drivers/usb/host/fotg210.h #define CMD_PARK_CNT(c) (((c)>>8)&3) /* how many transfers to park for */ c 126 drivers/usb/host/oxu210hp-hcd.c #define CMD_PARK_CNT(c) (((c)>>8)&3) /* how many transfers to park for */ c 799 drivers/usb/image/mdc800.c unsigned char c; c 806 drivers/usb/image/mdc800.c if(get_user(c, buf+i)) c 813 drivers/usb/image/mdc800.c if (c == 0x55) c 824 drivers/usb/image/mdc800.c mdc800->in[mdc800->in_count] = c; c 801 drivers/usb/misc/ftdi-elan.c u8 *c = buf; c 806 drivers/usb/misc/ftdi-elan.c d += sprintf(d, " %02X", *c++); c 927 drivers/usb/misc/ftdi-elan.c u8 *c = ftdi->bulk_in_buffer; c 932 drivers/usb/misc/ftdi-elan.c d += sprintf(d, " %02X", *c++); c 973 drivers/usb/misc/ftdi-elan.c u8 c = ftdi->bulk_in_buffer[++ftdi->bulk_in_last]; c 976 drivers/usb/misc/ftdi-elan.c if (ftdi->received == 0 && c == 0xFF) { c 979 drivers/usb/misc/ftdi-elan.c *b++ = c; c 991 drivers/usb/misc/ftdi-elan.c u8 *c = 4 + ftdi->response; c 996 drivers/usb/misc/ftdi-elan.c d += sprintf(d, " %02X", *c++); c 1823 drivers/usb/misc/ftdi-elan.c char c = *b++; c 1826 drivers/usb/misc/ftdi-elan.c 0x000000FF & c); c 1994 drivers/usb/misc/ftdi-elan.c unsigned char c = 0; c 1997 drivers/usb/misc/ftdi-elan.c c = *b++; c 1999 drivers/usb/misc/ftdi-elan.c d += sprintf(d, " %02X", c); c 2006 drivers/usb/misc/ftdi-elan.c if (c == 0x7E) { c 2009 drivers/usb/misc/ftdi-elan.c if (c == 0x55) { c 2091 drivers/usb/misc/ftdi-elan.c char c = *b++; c 2094 drivers/usb/misc/ftdi-elan.c 0x000000FF & c); c 80 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_memsetw(u16 *s, u16 c, unsigned int count) c 82 
drivers/usb/misc/sisusbvga/sisusb_con.c memset16(s, c, count / 2); c 99 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_set_start_address(struct sisusb_usb_data *sisusb, struct vc_data *c) c 101 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb->cur_start_addr = (c->vc_visible_origin - sisusb->scrbuf) / 2; c 180 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb_is_inactive(struct vc_data *c, struct sisusb_usb_data *sisusb) c 184 drivers/usb/misc/sisusbvga/sisusb_con.c c->vc_mode != KD_TEXT) c 199 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_init(struct vc_data *c, int init) c 210 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb = sisusb_get_sisusb(c->vc_num); c 221 drivers/usb/misc/sisusbvga/sisusb_con.c c->vc_can_do_color = 1; c 223 drivers/usb/misc/sisusbvga/sisusb_con.c c->vc_complement_mask = 0x7700; c 225 drivers/usb/misc/sisusbvga/sisusb_con.c c->vc_hi_font_mask = sisusb->current_font_512 ? 0x0800 : 0; c 229 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb->havethisconsole[c->vc_num] = 1; c 232 drivers/usb/misc/sisusbvga/sisusb_con.c c->vc_scan_lines = 400; c 234 drivers/usb/misc/sisusbvga/sisusb_con.c c->vc_font.height = sisusb->current_font_height; c 238 drivers/usb/misc/sisusbvga/sisusb_con.c rows = c->vc_scan_lines / c->vc_font.height; c 251 drivers/usb/misc/sisusbvga/sisusb_con.c if (!*c->vc_uni_pagedir_loc) c 252 drivers/usb/misc/sisusbvga/sisusb_con.c con_set_default_unimap(c); c 257 drivers/usb/misc/sisusbvga/sisusb_con.c c->vc_cols = cols; c 258 drivers/usb/misc/sisusbvga/sisusb_con.c c->vc_rows = rows; c 260 drivers/usb/misc/sisusbvga/sisusb_con.c vc_resize(c, cols, rows); c 265 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_deinit(struct vc_data *c) c 274 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb = sisusb_get_sisusb(c->vc_num); c 281 drivers/usb/misc/sisusbvga/sisusb_con.c mysisusbs[c->vc_num] = NULL; c 283 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb->havethisconsole[c->vc_num] = 0; c 288 drivers/usb/misc/sisusbvga/sisusb_con.c if (sisusb->havethisconsole[c->vc_num]) c 305 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_build_attr(struct vc_data *c, u8 color, u8 intensity, c 311 drivers/usb/misc/sisusbvga/sisusb_con.c attr = (attr & 0xf0) | c->vc_ulcolor; c 313 drivers/usb/misc/sisusbvga/sisusb_con.c attr = (attr & 0xf0) | c->vc_halfcolor; c 349 drivers/usb/misc/sisusbvga/sisusb_con.c const struct vc_data *c, unsigned int x, unsigned int y) c 351 drivers/usb/misc/sisusbvga/sisusb_con.c return (u16 *)c->vc_origin + y * sisusb->sisusb_num_columns + x; c 355 drivers/usb/misc/sisusbvga/sisusb_con.c const struct vc_data *c, unsigned int x, unsigned int y) c 357 drivers/usb/misc/sisusbvga/sisusb_con.c unsigned long offset = c->vc_origin - sisusb->scrbuf; c 367 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_putc(struct vc_data *c, int ch, int y, int x) c 371 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); c 376 drivers/usb/misc/sisusbvga/sisusb_con.c if (sisusb_is_inactive(c, sisusb)) { c 381 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb_copy_memory(sisusb, sisusb_vaddr(sisusb, c, x, y), c 382 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb_haddr(sisusb, c, x, y), 2); c 389 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_putcs(struct vc_data *c, const unsigned short *s, c 394 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); c 404 drivers/usb/misc/sisusbvga/sisusb_con.c memcpy(sisusb_vaddr(sisusb, c, x, y), s, count * 2); c 406 drivers/usb/misc/sisusbvga/sisusb_con.c if 
(sisusb_is_inactive(c, sisusb)) { c 411 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb_copy_memory(sisusb, sisusb_vaddr(sisusb, c, x, y), c 412 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb_haddr(sisusb, c, x, y), count * 2); c 419 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_clear(struct vc_data *c, int y, int x, int height, int width) c 422 drivers/usb/misc/sisusbvga/sisusb_con.c u16 eattr = c->vc_video_erase_char; c 429 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); c 439 drivers/usb/misc/sisusbvga/sisusb_con.c dest = sisusb_vaddr(sisusb, c, x, y); c 446 drivers/usb/misc/sisusbvga/sisusb_con.c if (x == 0 && width >= c->vc_cols) { c 457 drivers/usb/misc/sisusbvga/sisusb_con.c if (sisusb_is_inactive(c, sisusb)) { c 465 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb_copy_memory(sisusb, sisusb_vaddr(sisusb, c, x, y), c 466 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb_haddr(sisusb, c, x, y), length); c 473 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_switch(struct vc_data *c) c 483 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); c 490 drivers/usb/misc/sisusbvga/sisusb_con.c if (sisusb_is_inactive(c, sisusb)) { c 499 drivers/usb/misc/sisusbvga/sisusb_con.c if (c->vc_origin == (unsigned long)c->vc_screenbuf) { c 506 drivers/usb/misc/sisusbvga/sisusb_con.c length = min((int)c->vc_screenbuf_size, c 507 drivers/usb/misc/sisusbvga/sisusb_con.c (int)(sisusb->scrbuf + sisusb->scrbuf_size - c->vc_origin)); c 510 drivers/usb/misc/sisusbvga/sisusb_con.c memcpy((u16 *)c->vc_origin, (u16 *)c->vc_screenbuf, length); c 512 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb_copy_memory(sisusb, (char *)c->vc_origin, c 513 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb_haddr(sisusb, c, 0, 0), length); c 522 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_save_screen(struct vc_data *c) c 531 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); c 537 drivers/usb/misc/sisusbvga/sisusb_con.c if (sisusb_is_inactive(c, sisusb)) { c 543 drivers/usb/misc/sisusbvga/sisusb_con.c length = min((int)c->vc_screenbuf_size, c 544 drivers/usb/misc/sisusbvga/sisusb_con.c (int)(sisusb->scrbuf + sisusb->scrbuf_size - c->vc_origin)); c 547 drivers/usb/misc/sisusbvga/sisusb_con.c memcpy((u16 *)c->vc_screenbuf, (u16 *)c->vc_origin, length); c 554 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_set_palette(struct vc_data *c, const unsigned char *table) c 561 drivers/usb/misc/sisusbvga/sisusb_con.c if (!con_is_visible(c)) c 564 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); c 570 drivers/usb/misc/sisusbvga/sisusb_con.c if (sisusb_is_inactive(c, sisusb)) { c 578 drivers/usb/misc/sisusbvga/sisusb_con.c if (sisusb_setreg(sisusb, SISCOLDATA, c->vc_palette[j++] >> 2)) c 580 drivers/usb/misc/sisusbvga/sisusb_con.c if (sisusb_setreg(sisusb, SISCOLDATA, c->vc_palette[j++] >> 2)) c 582 drivers/usb/misc/sisusbvga/sisusb_con.c if (sisusb_setreg(sisusb, SISCOLDATA, c->vc_palette[j++] >> 2)) c 591 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_blank(struct vc_data *c, int blank, int mode_switch) c 597 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); c 606 drivers/usb/misc/sisusbvga/sisusb_con.c if (sisusb_is_inactive(c, sisusb)) { c 615 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_memsetw((u16 *)c->vc_origin, c 616 drivers/usb/misc/sisusbvga/sisusb_con.c c->vc_video_erase_char, c 617 
drivers/usb/misc/sisusbvga/sisusb_con.c c->vc_screenbuf_size); c 618 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb_copy_memory(sisusb, (char *)c->vc_origin, c 619 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb_haddr(sisusb, c, 0, 0), c 620 drivers/usb/misc/sisusbvga/sisusb_con.c c->vc_screenbuf_size); c 672 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_scrolldelta(struct vc_data *c, int lines) c 676 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); c 682 drivers/usb/misc/sisusbvga/sisusb_con.c if (sisusb_is_inactive(c, sisusb)) { c 687 drivers/usb/misc/sisusbvga/sisusb_con.c vc_scrolldelta_helper(c, lines, sisusb->con_rolled_over, c 690 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_set_start_address(sisusb, c); c 697 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_cursor(struct vc_data *c, int mode) c 702 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); c 708 drivers/usb/misc/sisusbvga/sisusb_con.c if (sisusb_is_inactive(c, sisusb)) { c 713 drivers/usb/misc/sisusbvga/sisusb_con.c if (c->vc_origin != c->vc_visible_origin) { c 714 drivers/usb/misc/sisusbvga/sisusb_con.c c->vc_visible_origin = c->vc_origin; c 715 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_set_start_address(sisusb, c); c 725 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb_set_cursor(sisusb, (c->vc_pos - sisusb->scrbuf) / 2); c 727 drivers/usb/misc/sisusbvga/sisusb_con.c baseline = c->vc_font.height - (c->vc_font.height < 10 ? 1 : 2); c 729 drivers/usb/misc/sisusbvga/sisusb_con.c switch (c->vc_cursor_type & 0x0f) { c 731 drivers/usb/misc/sisusbvga/sisusb_con.c to = c->vc_font.height; c 733 drivers/usb/misc/sisusbvga/sisusb_con.c case CUR_TWO_THIRDS: from = c->vc_font.height / 3; c 736 drivers/usb/misc/sisusbvga/sisusb_con.c case CUR_LOWER_HALF: from = c->vc_font.height / 2; c 739 drivers/usb/misc/sisusbvga/sisusb_con.c case CUR_LOWER_THIRD: from = (c->vc_font.height * 2) / 3; c 765 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_scroll_area(struct vc_data *c, struct sisusb_usb_data *sisusb, c 771 drivers/usb/misc/sisusbvga/sisusb_con.c u16 eattr = c->vc_video_erase_char; c 784 drivers/usb/misc/sisusbvga/sisusb_con.c memmove(sisusb_vaddr(sisusb, c, 0, t), c 785 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb_vaddr(sisusb, c, 0, t + lines), c 787 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_memsetw(sisusb_vaddr(sisusb, c, 0, b - lines), c 792 drivers/usb/misc/sisusbvga/sisusb_con.c memmove(sisusb_vaddr(sisusb, c, 0, t + lines), c 793 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb_vaddr(sisusb, c, 0, t), c 795 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_memsetw(sisusb_vaddr(sisusb, c, 0, t), eattr, c 800 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb_copy_memory(sisusb, sisusb_vaddr(sisusb, c, 0, t), c 801 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb_haddr(sisusb, c, 0, t), length); c 810 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_scroll(struct vc_data *c, unsigned int t, unsigned int b, c 814 drivers/usb/misc/sisusbvga/sisusb_con.c u16 eattr = c->vc_video_erase_char; c 817 drivers/usb/misc/sisusbvga/sisusb_con.c unsigned int delta = lines * c->vc_size_row; c 829 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); c 835 drivers/usb/misc/sisusbvga/sisusb_con.c if (sisusb_is_inactive(c, sisusb)) { c 841 drivers/usb/misc/sisusbvga/sisusb_con.c if (t || b != c->vc_rows) c 842 drivers/usb/misc/sisusbvga/sisusb_con.c return sisusbcon_scroll_area(c, sisusb, t, b, dir, 
lines); c 844 drivers/usb/misc/sisusbvga/sisusb_con.c if (c->vc_origin != c->vc_visible_origin) { c 845 drivers/usb/misc/sisusbvga/sisusb_con.c c->vc_visible_origin = c->vc_origin; c 846 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_set_start_address(sisusb, c); c 850 drivers/usb/misc/sisusbvga/sisusb_con.c if (lines > c->vc_rows) c 851 drivers/usb/misc/sisusbvga/sisusb_con.c lines = c->vc_rows; c 853 drivers/usb/misc/sisusbvga/sisusb_con.c oldorigin = c->vc_origin; c 859 drivers/usb/misc/sisusbvga/sisusb_con.c if (c->vc_scr_end + delta >= c 863 drivers/usb/misc/sisusbvga/sisusb_con.c c->vc_screenbuf_size - delta); c 864 drivers/usb/misc/sisusbvga/sisusb_con.c c->vc_origin = sisusb->scrbuf; c 868 drivers/usb/misc/sisusbvga/sisusb_con.c c->vc_origin += delta; c 871 drivers/usb/misc/sisusbvga/sisusb_con.c (u16 *)(c->vc_origin + c->vc_screenbuf_size - delta), c 880 drivers/usb/misc/sisusbvga/sisusb_con.c c->vc_screenbuf_size + delta, c 882 drivers/usb/misc/sisusbvga/sisusb_con.c c->vc_screenbuf_size - delta); c 883 drivers/usb/misc/sisusbvga/sisusb_con.c c->vc_origin = sisusb->scrbuf + c 885 drivers/usb/misc/sisusbvga/sisusb_con.c c->vc_screenbuf_size; c 889 drivers/usb/misc/sisusbvga/sisusb_con.c c->vc_origin -= delta; c 891 drivers/usb/misc/sisusbvga/sisusb_con.c c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size; c 893 drivers/usb/misc/sisusbvga/sisusb_con.c scr_memsetw((u16 *)(c->vc_origin), eattr, delta); c 900 drivers/usb/misc/sisusbvga/sisusb_con.c (char *)c->vc_origin, c 901 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb_haddr(sisusb, c, 0, 0), c 902 drivers/usb/misc/sisusbvga/sisusb_con.c c->vc_screenbuf_size); c 905 drivers/usb/misc/sisusbvga/sisusb_con.c (char *)c->vc_origin + c->vc_screenbuf_size - delta, c 906 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb_haddr(sisusb, c, 0, 0) + c 907 drivers/usb/misc/sisusbvga/sisusb_con.c c->vc_screenbuf_size - delta, c 911 drivers/usb/misc/sisusbvga/sisusb_con.c (char *)c->vc_origin, c 912 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb_haddr(sisusb, c, 0, 0), c 915 drivers/usb/misc/sisusbvga/sisusb_con.c c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size; c 916 drivers/usb/misc/sisusbvga/sisusb_con.c c->vc_visible_origin = c->vc_origin; c 918 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_set_start_address(sisusb, c); c 920 drivers/usb/misc/sisusbvga/sisusb_con.c c->vc_pos = c->vc_pos - oldorigin + c->vc_origin; c 929 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_set_origin(struct vc_data *c) c 938 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); c 944 drivers/usb/misc/sisusbvga/sisusb_con.c if (sisusb_is_inactive(c, sisusb) || sisusb->con_blanked) { c 949 drivers/usb/misc/sisusbvga/sisusb_con.c c->vc_origin = c->vc_visible_origin = sisusb->scrbuf; c 951 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_set_start_address(sisusb, c); c 962 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_resize(struct vc_data *c, unsigned int newcols, unsigned int newrows, c 968 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); c 983 drivers/usb/misc/sisusbvga/sisusb_con.c if (newcols != 80 || c->vc_scan_lines / fh != newrows) c 992 drivers/usb/misc/sisusbvga/sisusb_con.c struct vc_data *c, int fh, int uplock) c 1137 drivers/usb/misc/sisusbvga/sisusb_con.c if (c) { c 1140 drivers/usb/misc/sisusbvga/sisusb_con.c rows = c->vc_scan_lines / fh; c 1170 drivers/usb/misc/sisusbvga/sisusb_con.c if (dorecalc && c) { c 1171 drivers/usb/misc/sisusbvga/sisusb_con.c int 
rows = c->vc_scan_lines / fh; c 1199 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_font_set(struct vc_data *c, struct console_font *font, c 1208 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); c 1240 drivers/usb/misc/sisusbvga/sisusb_con.c c, font->height, 1); c 1245 drivers/usb/misc/sisusbvga/sisusb_con.c sisusbcon_font_get(struct vc_data *c, struct console_font *font) c 1249 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num); c 1256 drivers/usb/misc/sisusbvga/sisusb_con.c font->height = c->vc_font.height; c 1324 drivers/usb/misc/sisusbvga/sisusb_con.c static void sisusbdummycon_putc(struct vc_data *vc, int c, int ypos, c 836 drivers/usb/misc/sisusbvga/sisusb_init.h struct vc_data *c, int fh, int uplock); c 434 drivers/usb/misc/yurex.c unsigned long long c, c2 = 0; c 478 drivers/usb/misc/yurex.c c = c2 = simple_strtoull(data, NULL, 0); c 481 drivers/usb/misc/yurex.c dev->cntl_buffer[i] = (c>>32) & 0xff; c 482 drivers/usb/misc/yurex.c c <<= 8; c 61 drivers/usb/musb/cppi_dma.c static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c) c 63 drivers/usb/musb/cppi_dma.c struct cppi_descriptor *bd = c->freelist; c 66 drivers/usb/musb/cppi_dma.c c->freelist = bd->next; c 71 drivers/usb/musb/cppi_dma.c cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd) c 75 drivers/usb/musb/cppi_dma.c bd->next = c->freelist; c 76 drivers/usb/musb/cppi_dma.c c->freelist = bd; c 110 drivers/usb/musb/cppi_dma.c static void cppi_pool_init(struct cppi *cppi, struct cppi_channel *c) c 115 drivers/usb/musb/cppi_dma.c c->head = NULL; c 116 drivers/usb/musb/cppi_dma.c c->tail = NULL; c 117 drivers/usb/musb/cppi_dma.c c->last_processed = NULL; c 118 drivers/usb/musb/cppi_dma.c c->channel.status = MUSB_DMA_STATUS_UNKNOWN; c 119 drivers/usb/musb/cppi_dma.c c->controller = cppi; c 120 drivers/usb/musb/cppi_dma.c c->is_rndis = 0; c 121 drivers/usb/musb/cppi_dma.c c->freelist = NULL; c 130 drivers/usb/musb/cppi_dma.c cppi_bd_free(c, bd); c 136 drivers/usb/musb/cppi_dma.c static void cppi_pool_free(struct cppi_channel *c) c 138 drivers/usb/musb/cppi_dma.c struct cppi *cppi = c->controller; c 141 drivers/usb/musb/cppi_dma.c (void) cppi_channel_abort(&c->channel); c 142 drivers/usb/musb/cppi_dma.c c->channel.status = MUSB_DMA_STATUS_UNKNOWN; c 143 drivers/usb/musb/cppi_dma.c c->controller = NULL; c 146 drivers/usb/musb/cppi_dma.c bd = c->last_processed; c 150 drivers/usb/musb/cppi_dma.c bd = cppi_bd_alloc(c); c 152 drivers/usb/musb/cppi_dma.c c->last_processed = NULL; c 281 drivers/usb/musb/cppi_dma.c cppi_channel_allocate(struct dma_controller *c, c 290 drivers/usb/musb/cppi_dma.c controller = container_of(c, struct cppi, controller); c 292 drivers/usb/musb/cppi_dma.c musb = c->musb; c 332 drivers/usb/musb/cppi_dma.c struct cppi_channel *c; c 337 drivers/usb/musb/cppi_dma.c c = container_of(channel, struct cppi_channel, channel); c 338 drivers/usb/musb/cppi_dma.c tibase = c->controller->tibase; c 339 drivers/usb/musb/cppi_dma.c if (!c->hw_ep) c 340 drivers/usb/musb/cppi_dma.c musb_dbg(c->controller->controller.musb, c 341 drivers/usb/musb/cppi_dma.c "releasing idle DMA channel %p", c); c 342 drivers/usb/musb/cppi_dma.c else if (!c->transmit) c 343 drivers/usb/musb/cppi_dma.c core_rxirq_enable(tibase, c->index + 1); c 346 drivers/usb/musb/cppi_dma.c c->hw_ep = NULL; c 352 drivers/usb/musb/cppi_dma.c cppi_dump_rx(int level, struct cppi_channel *c, const char *tag) c 354 drivers/usb/musb/cppi_dma.c void __iomem 
*base = c->controller->mregs; c 355 drivers/usb/musb/cppi_dma.c struct cppi_rx_stateram __iomem *rx = c->state_ram; c 357 drivers/usb/musb/cppi_dma.c musb_ep_select(base, c->index + 1); c 359 drivers/usb/musb/cppi_dma.c musb_dbg(c->controller->controller.musb, c 363 drivers/usb/musb/cppi_dma.c c->index, tag, c 364 drivers/usb/musb/cppi_dma.c musb_readl(c->controller->tibase, c 365 drivers/usb/musb/cppi_dma.c DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index), c 366 drivers/usb/musb/cppi_dma.c musb_readw(c->hw_ep->regs, MUSB_RXCSR), c 382 drivers/usb/musb/cppi_dma.c cppi_dump_tx(int level, struct cppi_channel *c, const char *tag) c 384 drivers/usb/musb/cppi_dma.c void __iomem *base = c->controller->mregs; c 385 drivers/usb/musb/cppi_dma.c struct cppi_tx_stateram __iomem *tx = c->state_ram; c 387 drivers/usb/musb/cppi_dma.c musb_ep_select(base, c->index + 1); c 389 drivers/usb/musb/cppi_dma.c musb_dbg(c->controller->controller.musb, c 393 drivers/usb/musb/cppi_dma.c c->index, tag, c 394 drivers/usb/musb/cppi_dma.c musb_readw(c->hw_ep->regs, MUSB_TXCSR), c 410 drivers/usb/musb/cppi_dma.c cppi_rndis_update(struct cppi_channel *c, int is_rx, c 414 drivers/usb/musb/cppi_dma.c if (c->is_rndis != is_rndis) { c 416 drivers/usb/musb/cppi_dma.c u32 temp = 1 << (c->index); c 425 drivers/usb/musb/cppi_dma.c c->is_rndis = is_rndis; c 1351 drivers/usb/musb/cppi_dma.c void cppi_dma_controller_destroy(struct dma_controller *c) c 1355 drivers/usb/musb/cppi_dma.c cppi = container_of(c, struct cppi, controller); c 445 drivers/usb/musb/da8xx.c static void da8xx_dma_controller_callback(struct dma_controller *c) c 447 drivers/usb/musb/da8xx.c struct musb *musb = c->musb; c 400 drivers/usb/musb/musb_core.c void (*musb_dma_controller_destroy)(struct dma_controller *c); c 172 drivers/usb/musb/musb_core.h void (*dma_exit)(struct dma_controller *c); c 482 drivers/usb/musb/musb_cppi41.c static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c, c 485 drivers/usb/musb/musb_cppi41.c struct cppi41_dma_controller *controller = container_of(c, c 736 drivers/usb/musb/musb_cppi41.c void cppi41_dma_controller_destroy(struct dma_controller *c) c 738 drivers/usb/musb/musb_cppi41.c struct cppi41_dma_controller *controller = container_of(c, c 133 drivers/usb/musb/musb_dma.h dma_channel_status(struct dma_channel *c) c 135 drivers/usb/musb/musb_dma.h return (is_dma_capable() && c) ? 
c->status : MUSB_DMA_STATUS_UNKNOWN; c 193 drivers/usb/musb/musb_dma.h extern void musbhs_dma_controller_destroy(struct dma_controller *c); c 197 drivers/usb/musb/musb_dma.h extern void tusb_dma_controller_destroy(struct dma_controller *c); c 201 drivers/usb/musb/musb_dma.h extern void cppi_dma_controller_destroy(struct dma_controller *c); c 205 drivers/usb/musb/musb_dma.h extern void cppi41_dma_controller_destroy(struct dma_controller *c); c 209 drivers/usb/musb/musb_dma.h extern void ux500_dma_controller_destroy(struct dma_controller *c); c 641 drivers/usb/musb/musb_dsps.c static void dsps_dma_controller_callback(struct dma_controller *c) c 643 drivers/usb/musb/musb_dsps.c struct musb *musb = c->musb; c 167 drivers/usb/musb/musb_gadget.c struct dma_controller *c = ep->musb->dma_controller; c 187 drivers/usb/musb/musb_gadget.c value = c->channel_abort(ep->dma); c 189 drivers/usb/musb/musb_gadget.c c->channel_release(ep->dma); c 272 drivers/usb/musb/musb_gadget.c struct dma_controller *c = musb->dma_controller; c 289 drivers/usb/musb/musb_gadget.c use_dma = use_dma && c->channel_program( c 357 drivers/usb/musb/musb_gadget.c use_dma = use_dma && c->channel_program( c 363 drivers/usb/musb/musb_gadget.c c->channel_release(musb_ep->dma); c 370 drivers/usb/musb/musb_gadget.c use_dma = use_dma && c->channel_program( c 557 drivers/usb/musb/musb_gadget.c struct dma_controller *c = musb->dma_controller; c 565 drivers/usb/musb/musb_gadget.c if (c->channel_program(channel, c 602 drivers/usb/musb/musb_gadget.c struct dma_controller *c; c 607 drivers/usb/musb/musb_gadget.c c = musb->dma_controller; c 664 drivers/usb/musb/musb_gadget.c use_dma = c->channel_program( c 679 drivers/usb/musb/musb_gadget.c struct dma_controller *c; c 683 drivers/usb/musb/musb_gadget.c c = musb->dma_controller; c 715 drivers/usb/musb/musb_gadget.c if (c->channel_program(channel, c 734 drivers/usb/musb/musb_gadget.c struct dma_controller *c = musb->dma_controller; c 739 drivers/usb/musb/musb_gadget.c ret = c->channel_program(channel, c 1052 drivers/usb/musb/musb_gadget.c struct dma_controller *c = musb->dma_controller; c 1054 drivers/usb/musb/musb_gadget.c musb_ep->dma = c->channel_alloc(c, hw_ep, c 1297 drivers/usb/musb/musb_gadget.c struct dma_controller *c = musb->dma_controller; c 1300 drivers/usb/musb/musb_gadget.c if (c->channel_abort) c 1301 drivers/usb/musb/musb_gadget.c status = c->channel_abort(musb_ep->dma); c 1759 drivers/usb/musb/musb_host.c struct dma_controller *c = musb->dma_controller; c 1907 drivers/usb/musb/musb_host.c done = musb_rx_dma_inventra_cppi41(c, hw_ep, qh, urb, xfer_len); c 1944 drivers/usb/musb/musb_host.c if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb, c 103 drivers/usb/musb/musbhsdma.c static struct dma_channel *dma_channel_allocate(struct dma_controller *c, c 106 drivers/usb/musb/musbhsdma.c struct musb_dma_controller *controller = container_of(c, c 387 drivers/usb/musb/musbhsdma.c void musbhs_dma_controller_destroy(struct dma_controller *c) c 389 drivers/usb/musb/musbhsdma.c struct musb_dma_controller *controller = container_of(c, c 395 drivers/usb/musb/musbhsdma.c free_irq(controller->irq, c); c 320 drivers/usb/musb/sunxi.c static void sunxi_musb_dma_controller_destroy(struct dma_controller *c) c 20 drivers/usb/musb/tusb6010_omap.c #define to_chdat(c) ((struct tusb_omap_dma_ch *)(c)->private_data) c 422 drivers/usb/musb/tusb6010_omap.c tusb_omap_dma_allocate(struct dma_controller *c, c 433 drivers/usb/musb/tusb6010_omap.c tusb_dma = container_of(c, struct tusb_omap_dma, controller); c 
514 drivers/usb/musb/tusb6010_omap.c void tusb_dma_controller_destroy(struct dma_controller *c) c 519 drivers/usb/musb/tusb6010_omap.c tusb_dma = container_of(c, struct tusb_omap_dma, controller); c 128 drivers/usb/musb/ux500_dma.c static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c, c 131 drivers/usb/musb/ux500_dma.c struct ux500_dma_controller *controller = container_of(c, c 347 drivers/usb/musb/ux500_dma.c void ux500_dma_controller_destroy(struct dma_controller *c) c 349 drivers/usb/musb/ux500_dma.c struct ux500_dma_controller *controller = container_of(c, c 108 drivers/usb/serial/belkin_sa.c #define BSA_USB_CMD(c, v) usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), \ c 109 drivers/usb/serial/belkin_sa.c (c), BELKIN_SA_SET_REQUEST_TYPE, \ c 2457 drivers/usb/serial/ftdi_sio.c int i, len, c; c 2463 drivers/usb/serial/ftdi_sio.c c = kfifo_out(&port->write_fifo, &buffer[i + 1], len); c 2464 drivers/usb/serial/ftdi_sio.c if (!c) c 2466 drivers/usb/serial/ftdi_sio.c port->icount.tx += c; c 2467 drivers/usb/serial/ftdi_sio.c buffer[i] = (c << 2) + 1; c 2468 drivers/usb/serial/ftdi_sio.c count += c + 1; c 672 drivers/usb/serial/garmin_gps.c __u8 c = *src++; c 673 drivers/usb/serial/garmin_gps.c *dst++ = c; c 674 drivers/usb/serial/garmin_gps.c cksum += c; c 675 drivers/usb/serial/garmin_gps.c if (c == DLE) c 639 drivers/usb/serial/generic.c int i, c = 0, r; c 650 drivers/usb/serial/generic.c c++; c 656 drivers/usb/serial/generic.c c++; c 660 drivers/usb/serial/generic.c return c ? -EIO : 0; c 176 drivers/usb/serial/io_ti.h #define TIUMP_GET_PORT_FROM_CODE(c) (((c) >> 6) & 0x01) c 177 drivers/usb/serial/io_ti.h #define TIUMP_GET_FUNC_FROM_CODE(c) ((c) & 0x0f) c 970 drivers/usb/serial/iuu_phoenix.c #define SOUP(a, b, c, d) do { \ c 973 drivers/usb/serial/iuu_phoenix.c b, a, c, d, NULL, 0, 1000); \ c 974 drivers/usb/serial/iuu_phoenix.c dev_dbg(dev, "0x%x:0x%x:0x%x:0x%x %d\n", a, b, c, d, result); } while (0) c 246 drivers/usb/serial/keyspan_usa26msg.h c, c 240 drivers/usb/serial/keyspan_usa67msg.h c, c 1261 drivers/usb/serial/mxuport.c int c = 0; c 1270 drivers/usb/serial/mxuport.c c++; c 1280 drivers/usb/serial/mxuport.c c++; c 1283 drivers/usb/serial/mxuport.c return c ? 
-EIO : 0; c 168 drivers/usb/serial/safe_serial.c #define CRC10_FCS(fcs, c) ((((fcs) << 8) & 0x3ff) ^ crc10_table[((fcs) >> 2) & 0xff] ^ (c)) c 44 drivers/usb/typec/tps6598x.c #define TPS_SYSCONF_PORTINFO(c) ((c) & 7) c 700 drivers/usb/typec/ucsi/ucsi.c struct ucsi_control c; c 707 drivers/usb/typec/ucsi/ucsi.c UCSI_CMD_SET_NTFY_ENABLE(c, UCSI_ENABLE_NTFY_ALL); c 708 drivers/usb/typec/ucsi/ucsi.c ucsi_send_command(con->ucsi, &c, NULL, 0); c 1113 drivers/usb/usbip/vhci_hcd.c char *c; c 1117 drivers/usb/usbip/vhci_hcd.c c = strchr(name, '.'); c 1118 drivers/usb/usbip/vhci_hcd.c if (c == NULL) c 1121 drivers/usb/usbip/vhci_hcd.c ret = kstrtol(c+1, 10, &val); c 132 drivers/usb/usbip/vhci_sysfs.c char *c; c 136 drivers/usb/usbip/vhci_sysfs.c c = strchr(name, '.'); c 137 drivers/usb/usbip/vhci_sysfs.c if (c == NULL) c 140 drivers/usb/usbip/vhci_sysfs.c ret = kstrtol(c+1, 10, &val); c 921 drivers/vhost/scsi.c int ret, prot_bytes, c = 0; c 1121 drivers/vhost/scsi.c } while (likely(!vhost_exceeds_weight(vq, ++c, 0))); c 1180 drivers/vhost/scsi.c int ret, c = 0; c 1273 drivers/vhost/scsi.c } while (likely(!vhost_exceeds_weight(vq, ++c, 0))); c 272 drivers/video/backlight/pm8941-wled.c u32 c; c 337 drivers/video/backlight/pm8941-wled.c c = UINT_MAX; c 338 drivers/video/backlight/pm8941-wled.c for (j = 0; c != val; j++) { c 339 drivers/video/backlight/pm8941-wled.c c = pm8941_wled_values(u32_opts[i].cfg, j); c 340 drivers/video/backlight/pm8941-wled.c if (c == UINT_MAX) { c 347 drivers/video/backlight/pm8941-wled.c dev_dbg(dev, "'%s' = %u\n", u32_opts[i].name, c); c 52 drivers/video/console/dummycon.c static void dummycon_putc(struct vc_data *vc, int c, int ypos, int xpos) c 86 drivers/video/console/dummycon.c static void dummycon_putc(struct vc_data *vc, int c, int ypos, int xpos) { } c 355 drivers/video/console/mdacon.c static void mdacon_init(struct vc_data *c, int init) c 357 drivers/video/console/mdacon.c c->vc_complement_mask = 0x0800; /* reverse video */ c 358 drivers/video/console/mdacon.c c->vc_display_fg = &mda_display_fg; c 361 drivers/video/console/mdacon.c c->vc_cols = mda_num_columns; c 362 drivers/video/console/mdacon.c c->vc_rows = mda_num_lines; c 364 drivers/video/console/mdacon.c vc_resize(c, mda_num_columns, mda_num_lines); c 369 drivers/video/console/mdacon.c mda_display_fg = c; c 372 drivers/video/console/mdacon.c static void mdacon_deinit(struct vc_data *c) c 376 drivers/video/console/mdacon.c if (mda_display_fg == c) c 397 drivers/video/console/mdacon.c static u8 mdacon_build_attr(struct vc_data *c, u8 color, u8 intensity, c 415 drivers/video/console/mdacon.c static void mdacon_invert_region(struct vc_data *c, u16 *p, int count) c 428 drivers/video/console/mdacon.c static void mdacon_putc(struct vc_data *c, int ch, int y, int x) c 433 drivers/video/console/mdacon.c static void mdacon_putcs(struct vc_data *c, const unsigned short *s, c 443 drivers/video/console/mdacon.c static void mdacon_clear(struct vc_data *c, int y, int x, c 447 drivers/video/console/mdacon.c u16 eattr = mda_convert_attr(c->vc_video_erase_char); c 460 drivers/video/console/mdacon.c static int mdacon_switch(struct vc_data *c) c 465 drivers/video/console/mdacon.c static int mdacon_blank(struct vc_data *c, int blank, int mode_switch) c 470 drivers/video/console/mdacon.c mda_convert_attr(c->vc_video_erase_char), c 471 drivers/video/console/mdacon.c c->vc_screenbuf_size); c 484 drivers/video/console/mdacon.c static void mdacon_cursor(struct vc_data *c, int mode) c 491 drivers/video/console/mdacon.c 
mda_set_cursor(c->vc_y*mda_num_columns*2 + c->vc_x*2); c 493 drivers/video/console/mdacon.c switch (c->vc_cursor_type & 0x0f) { c 504 drivers/video/console/mdacon.c static bool mdacon_scroll(struct vc_data *c, unsigned int t, unsigned int b, c 507 drivers/video/console/mdacon.c u16 eattr = mda_convert_attr(c->vc_video_erase_char); c 512 drivers/video/console/mdacon.c if (lines > c->vc_rows) /* maximum realistic size */ c 513 drivers/video/console/mdacon.c lines = c->vc_rows; c 56 drivers/video/console/newport_con.c #define BMASK(c) (c << 24) c 343 drivers/video/console/newport_con.c static void newport_deinit(struct vc_data *c) c 484 drivers/video/console/newport_con.c static int newport_blank(struct vc_data *c, int blank, int mode_switch) c 82 drivers/video/console/sticon.c static void sticon_putc(struct vc_data *conp, int c, int ypos, int xpos) c 98 drivers/video/console/sticon.c sti_putc(sticon_sti, c, ypos, xpos); c 181 drivers/video/console/sticon.c static void sticon_init(struct vc_data *c, int init) c 189 drivers/video/console/sticon.c c->vc_can_do_color = 1; c 192 drivers/video/console/sticon.c c->vc_cols = vc_cols; c 193 drivers/video/console/sticon.c c->vc_rows = vc_rows; c 197 drivers/video/console/sticon.c vc_resize(c, vc_cols, vc_rows); c 202 drivers/video/console/sticon.c static void sticon_deinit(struct vc_data *c) c 225 drivers/video/console/sticon.c static int sticon_blank(struct vc_data *c, int blank, int mode_switch) c 232 drivers/video/console/sticon.c sticon_set_origin(c); c 233 drivers/video/console/sticon.c sti_clear(sticon_sti, 0,0, c->vc_rows, c->vc_cols, BLANK); c 61 drivers/video/console/sticore.c #define c_fg(sti, c) col_trans[((c>> 8) & 7)] c 62 drivers/video/console/sticore.c #define c_bg(sti, c) col_trans[((c>>11) & 7)] c 63 drivers/video/console/sticore.c #define c_index(sti, c) ((c) & 0xff) c 136 drivers/video/console/sticore.c sti_putc(struct sti_struct *sti, int c, int y, int x) c 141 drivers/video/console/sticore.c .index = c_index(sti, c), c 142 drivers/video/console/sticore.c .fg_color = c_fg(sti, c), c 143 drivers/video/console/sticore.c .bg_color = c_bg(sti, c), c 196 drivers/video/console/sticore.c int height, int width, int c) c 200 drivers/video/console/sticore.c .fg_color = c_fg(sti, c), c 201 drivers/video/console/sticore.c .bg_color = c_bg(sti, c), c 69 drivers/video/console/vgacon.c static void vgacon_init(struct vc_data *c, int init); c 70 drivers/video/console/vgacon.c static void vgacon_deinit(struct vc_data *c); c 71 drivers/video/console/vgacon.c static void vgacon_cursor(struct vc_data *c, int mode); c 72 drivers/video/console/vgacon.c static int vgacon_switch(struct vc_data *c); c 73 drivers/video/console/vgacon.c static int vgacon_blank(struct vc_data *c, int blank, int mode_switch); c 74 drivers/video/console/vgacon.c static void vgacon_scrolldelta(struct vc_data *c, int lines); c 75 drivers/video/console/vgacon.c static int vgacon_set_origin(struct vc_data *c); c 76 drivers/video/console/vgacon.c static void vgacon_save_screen(struct vc_data *c); c 77 drivers/video/console/vgacon.c static void vgacon_invert_region(struct vc_data *c, u16 * p, int count); c 163 drivers/video/console/vgacon.c static inline void vga_set_mem_top(struct vc_data *c) c 165 drivers/video/console/vgacon.c write_vga(12, (c->vc_visible_origin - vga_vram_base) / 2); c 243 drivers/video/console/vgacon.c static void vgacon_scrollback_update(struct vc_data *c, int t, int count) c 248 drivers/video/console/vgacon.c c->vc_num != fg_console) c 251 
drivers/video/console/vgacon.c p = (void *) (c->vc_origin + t * c->vc_size_row); c 256 drivers/video/console/vgacon.c p, c->vc_size_row); c 259 drivers/video/console/vgacon.c p += c->vc_size_row; c 260 drivers/video/console/vgacon.c vgacon_scrollback_cur->tail += c->vc_size_row; c 272 drivers/video/console/vgacon.c static void vgacon_restore_screen(struct vc_data *c) c 274 drivers/video/console/vgacon.c c->vc_origin = c->vc_visible_origin; c 278 drivers/video/console/vgacon.c scr_memcpyw((u16 *) c->vc_origin, (u16 *) c->vc_screenbuf, c 279 drivers/video/console/vgacon.c c->vc_screenbuf_size > vga_vram_size ? c 280 drivers/video/console/vgacon.c vga_vram_size : c->vc_screenbuf_size); c 286 drivers/video/console/vgacon.c static void vgacon_scrolldelta(struct vc_data *c, int lines) c 291 drivers/video/console/vgacon.c vgacon_restore_screen(c); c 299 drivers/video/console/vgacon.c vgacon_cursor(c, CM_ERASE); c 300 drivers/video/console/vgacon.c vgacon_save_screen(c); c 301 drivers/video/console/vgacon.c c->vc_origin = (unsigned long)c->vc_screenbuf; c 324 drivers/video/console/vgacon.c ((vgacon_scrollback_cur->cnt - end) * c->vc_size_row); c 325 drivers/video/console/vgacon.c soff -= count * c->vc_size_row; c 332 drivers/video/console/vgacon.c if (count > c->vc_rows) c 333 drivers/video/console/vgacon.c count = c->vc_rows; c 338 drivers/video/console/vgacon.c int diff = c->vc_rows - count; c 339 drivers/video/console/vgacon.c void *d = (void *) c->vc_visible_origin; c 340 drivers/video/console/vgacon.c void *s = (void *) c->vc_screenbuf; c 342 drivers/video/console/vgacon.c count *= c->vc_size_row; c 355 drivers/video/console/vgacon.c scr_memcpyw(d, s, diff * c->vc_size_row); c 357 drivers/video/console/vgacon.c vgacon_cursor(c, CM_MOVE); c 360 drivers/video/console/vgacon.c static void vgacon_flush_scrollback(struct vc_data *c) c 364 drivers/video/console/vgacon.c vgacon_scrollback_reset(c->vc_num, size); c 372 drivers/video/console/vgacon.c static void vgacon_restore_screen(struct vc_data *c) c 374 drivers/video/console/vgacon.c if (c->vc_origin != c->vc_visible_origin) c 375 drivers/video/console/vgacon.c vgacon_scrolldelta(c, 0); c 378 drivers/video/console/vgacon.c static void vgacon_scrolldelta(struct vc_data *c, int lines) c 380 drivers/video/console/vgacon.c vc_scrolldelta_helper(c, lines, vga_rolled_over, (void *)vga_vram_base, c 382 drivers/video/console/vgacon.c vga_set_mem_top(c); c 385 drivers/video/console/vgacon.c static void vgacon_flush_scrollback(struct vc_data *c) c 580 drivers/video/console/vgacon.c static void vgacon_init(struct vc_data *c, int init) c 589 drivers/video/console/vgacon.c c->vc_can_do_color = vga_can_do_color; c 593 drivers/video/console/vgacon.c c->vc_cols = vga_video_num_columns; c 594 drivers/video/console/vgacon.c c->vc_rows = vga_video_num_lines; c 596 drivers/video/console/vgacon.c vc_resize(c, vga_video_num_columns, vga_video_num_lines); c 598 drivers/video/console/vgacon.c c->vc_scan_lines = vga_scan_lines; c 599 drivers/video/console/vgacon.c c->vc_font.height = vga_video_font_height; c 600 drivers/video/console/vgacon.c c->vc_complement_mask = 0x7700; c 602 drivers/video/console/vgacon.c c->vc_hi_font_mask = 0x0800; c 603 drivers/video/console/vgacon.c p = *c->vc_uni_pagedir_loc; c 604 drivers/video/console/vgacon.c if (c->vc_uni_pagedir_loc != &vgacon_uni_pagedir) { c 605 drivers/video/console/vgacon.c con_free_unimap(c); c 606 drivers/video/console/vgacon.c c->vc_uni_pagedir_loc = &vgacon_uni_pagedir; c 610 drivers/video/console/vgacon.c 
con_set_default_unimap(c); c 618 drivers/video/console/vgacon.c static void vgacon_deinit(struct vc_data *c) c 621 drivers/video/console/vgacon.c if (con_is_visible(c)) { c 622 drivers/video/console/vgacon.c c->vc_visible_origin = vga_vram_base; c 623 drivers/video/console/vgacon.c vga_set_mem_top(c); c 627 drivers/video/console/vgacon.c con_free_unimap(c); c 628 drivers/video/console/vgacon.c c->vc_uni_pagedir_loc = &c->vc_uni_pagedir; c 629 drivers/video/console/vgacon.c con_set_default_unimap(c); c 632 drivers/video/console/vgacon.c static u8 vgacon_build_attr(struct vc_data *c, u8 color, u8 intensity, c 639 drivers/video/console/vgacon.c attr = (attr & 0xF0) | c->vc_itcolor; c 641 drivers/video/console/vgacon.c attr = (attr & 0xf0) | c->vc_ulcolor; c 643 drivers/video/console/vgacon.c attr = (attr & 0xf0) | c->vc_halfcolor; c 664 drivers/video/console/vgacon.c static void vgacon_invert_region(struct vc_data *c, u16 * p, int count) c 710 drivers/video/console/vgacon.c static void vgacon_cursor(struct vc_data *c, int mode) c 712 drivers/video/console/vgacon.c if (c->vc_mode != KD_TEXT) c 715 drivers/video/console/vgacon.c vgacon_restore_screen(c); c 719 drivers/video/console/vgacon.c write_vga(14, (c->vc_pos - vga_vram_base) / 2); c 721 drivers/video/console/vgacon.c vgacon_set_cursor_size(c->vc_x, 31, 30); c 723 drivers/video/console/vgacon.c vgacon_set_cursor_size(c->vc_x, 31, 31); c 728 drivers/video/console/vgacon.c write_vga(14, (c->vc_pos - vga_vram_base) / 2); c 729 drivers/video/console/vgacon.c switch (c->vc_cursor_type & 0x0f) { c 731 drivers/video/console/vgacon.c vgacon_set_cursor_size(c->vc_x, c 732 drivers/video/console/vgacon.c c->vc_font.height - c 733 drivers/video/console/vgacon.c (c->vc_font.height < c 735 drivers/video/console/vgacon.c c->vc_font.height - c 736 drivers/video/console/vgacon.c (c->vc_font.height < c 740 drivers/video/console/vgacon.c vgacon_set_cursor_size(c->vc_x, c 741 drivers/video/console/vgacon.c c->vc_font.height / 3, c 742 drivers/video/console/vgacon.c c->vc_font.height - c 743 drivers/video/console/vgacon.c (c->vc_font.height < c 747 drivers/video/console/vgacon.c vgacon_set_cursor_size(c->vc_x, c 748 drivers/video/console/vgacon.c (c->vc_font.height * 2) / 3, c 749 drivers/video/console/vgacon.c c->vc_font.height - c 750 drivers/video/console/vgacon.c (c->vc_font.height < c 754 drivers/video/console/vgacon.c vgacon_set_cursor_size(c->vc_x, c 755 drivers/video/console/vgacon.c c->vc_font.height / 2, c 756 drivers/video/console/vgacon.c c->vc_font.height - c 757 drivers/video/console/vgacon.c (c->vc_font.height < c 762 drivers/video/console/vgacon.c vgacon_set_cursor_size(c->vc_x, 31, 30); c 764 drivers/video/console/vgacon.c vgacon_set_cursor_size(c->vc_x, 31, 31); c 767 drivers/video/console/vgacon.c vgacon_set_cursor_size(c->vc_x, 1, c 768 drivers/video/console/vgacon.c c->vc_font.height); c 775 drivers/video/console/vgacon.c static int vgacon_doresize(struct vc_data *c, c 779 drivers/video/console/vgacon.c unsigned int scanlines = height * c->vc_font.height; c 785 drivers/video/console/vgacon.c vgacon_yres = height * c->vc_font.height; c 837 drivers/video/console/vgacon.c static int vgacon_switch(struct vc_data *c) c 839 drivers/video/console/vgacon.c int x = c->vc_cols * VGA_FONTWIDTH; c 840 drivers/video/console/vgacon.c int y = c->vc_rows * c->vc_font.height; c 842 drivers/video/console/vgacon.c c->vc_font.height; c 848 drivers/video/console/vgacon.c vga_video_num_columns = c->vc_cols; c 849 drivers/video/console/vgacon.c 
vga_video_num_lines = c->vc_rows; c 855 drivers/video/console/vgacon.c scr_memcpyw((u16 *) c->vc_origin, (u16 *) c->vc_screenbuf, c 856 drivers/video/console/vgacon.c c->vc_screenbuf_size > vga_vram_size ? c 857 drivers/video/console/vgacon.c vga_vram_size : c->vc_screenbuf_size); c 863 drivers/video/console/vgacon.c vgacon_doresize(c, c->vc_cols, c->vc_rows); c 866 drivers/video/console/vgacon.c vgacon_scrollback_switch(c->vc_num); c 1021 drivers/video/console/vgacon.c static int vgacon_blank(struct vc_data *c, int blank, int mode_switch) c 1030 drivers/video/console/vgacon.c vga_set_palette(c, color_table); c 1044 drivers/video/console/vgacon.c vgacon_set_origin(c); c 1046 drivers/video/console/vgacon.c c->vc_screenbuf_size); c 1208 drivers/video/console/vgacon.c struct vc_data *c = vc_cons[i].d; c 1209 drivers/video/console/vgacon.c if (c && c->vc_sw == &vga_con) { c 1212 drivers/video/console/vgacon.c c->vc_hi_font_mask = 0x00; c 1213 drivers/video/console/vgacon.c clear_buffer_attributes(c); c 1214 drivers/video/console/vgacon.c c->vc_hi_font_mask = ch512 ? 0x0800 : 0; c 1265 drivers/video/console/vgacon.c struct vc_data *c = vc_cons[i].d; c 1267 drivers/video/console/vgacon.c if (c && c->vc_sw == &vga_con) { c 1268 drivers/video/console/vgacon.c if (con_is_visible(c)) { c 1272 drivers/video/console/vgacon.c c->vc_sw->con_cursor(c, CM_DRAW); c 1274 drivers/video/console/vgacon.c c->vc_font.height = fontheight; c 1275 drivers/video/console/vgacon.c vc_resize(c, 0, rows); /* Adjust console size */ c 1281 drivers/video/console/vgacon.c static int vgacon_font_set(struct vc_data *c, struct console_font *font, c 1299 drivers/video/console/vgacon.c rc = vgacon_adjust_height(c, font->height); c 1303 drivers/video/console/vgacon.c static int vgacon_font_get(struct vc_data *c, struct console_font *font) c 1309 drivers/video/console/vgacon.c font->height = c->vc_font.height; c 1316 drivers/video/console/vgacon.c static int vgacon_resize(struct vc_data *c, unsigned int width, c 1324 drivers/video/console/vgacon.c c->vc_font.height) c 1329 drivers/video/console/vgacon.c if (con_is_visible(c) && !vga_is_gfx) /* who knows */ c 1330 drivers/video/console/vgacon.c vgacon_doresize(c, width, height); c 1334 drivers/video/console/vgacon.c static int vgacon_set_origin(struct vc_data *c) c 1339 drivers/video/console/vgacon.c c->vc_origin = c->vc_visible_origin = vga_vram_base; c 1340 drivers/video/console/vgacon.c vga_set_mem_top(c); c 1345 drivers/video/console/vgacon.c static void vgacon_save_screen(struct vc_data *c) c 1355 drivers/video/console/vgacon.c c->vc_x = screen_info.orig_x; c 1356 drivers/video/console/vgacon.c c->vc_y = screen_info.orig_y; c 1363 drivers/video/console/vgacon.c scr_memcpyw((u16 *) c->vc_screenbuf, (u16 *) c->vc_origin, c 1364 drivers/video/console/vgacon.c c->vc_screenbuf_size > vga_vram_size ? 
vga_vram_size : c->vc_screenbuf_size); c 1367 drivers/video/console/vgacon.c static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b, c 1373 drivers/video/console/vgacon.c if (t || b != c->vc_rows || vga_is_gfx || c->vc_mode != KD_TEXT) c 1376 drivers/video/console/vgacon.c if (!vga_hardscroll_enabled || lines >= c->vc_rows / 2) c 1379 drivers/video/console/vgacon.c vgacon_restore_screen(c); c 1380 drivers/video/console/vgacon.c oldo = c->vc_origin; c 1381 drivers/video/console/vgacon.c delta = lines * c->vc_size_row; c 1383 drivers/video/console/vgacon.c vgacon_scrollback_update(c, t, lines); c 1384 drivers/video/console/vgacon.c if (c->vc_scr_end + delta >= vga_vram_end) { c 1387 drivers/video/console/vgacon.c c->vc_screenbuf_size - delta); c 1388 drivers/video/console/vgacon.c c->vc_origin = vga_vram_base; c 1391 drivers/video/console/vgacon.c c->vc_origin += delta; c 1392 drivers/video/console/vgacon.c scr_memsetw((u16 *) (c->vc_origin + c->vc_screenbuf_size - c 1393 drivers/video/console/vgacon.c delta), c->vc_video_erase_char, c 1398 drivers/video/console/vgacon.c c->vc_screenbuf_size + c 1400 drivers/video/console/vgacon.c c->vc_screenbuf_size - delta); c 1401 drivers/video/console/vgacon.c c->vc_origin = vga_vram_end - c->vc_screenbuf_size; c 1404 drivers/video/console/vgacon.c c->vc_origin -= delta; c 1405 drivers/video/console/vgacon.c c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size; c 1406 drivers/video/console/vgacon.c scr_memsetw((u16 *) (c->vc_origin), c->vc_video_erase_char, c 1409 drivers/video/console/vgacon.c c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size; c 1410 drivers/video/console/vgacon.c c->vc_visible_origin = c->vc_origin; c 1411 drivers/video/console/vgacon.c vga_set_mem_top(c); c 1412 drivers/video/console/vgacon.c c->vc_pos = (c->vc_pos - oldo) + c->vc_origin; c 1422 drivers/video/console/vgacon.c static void vgacon_putc(struct vc_data *vc, int c, int ypos, int xpos) { } c 123 drivers/video/fbdev/arkfb.c int i, c; c 133 drivers/video/fbdev/arkfb.c for (c = 0; c < map->length; c++) { c 140 drivers/video/fbdev/arkfb.c if ((c % 8) == 7) c 168 drivers/video/fbdev/arkfb.c static inline u32 expand_color(u32 c) c 170 drivers/video/fbdev/arkfb.c return ((c & 1) | ((c & 2) << 7) | ((c & 4) << 14) | ((c & 8) << 21)) * 0xFF; c 225 drivers/video/fbdev/arkfb.c static inline u32 expand_pixel(u32 c) c 227 drivers/video/fbdev/arkfb.c return (((c & 1) << 24) | ((c & 2) << 27) | ((c & 4) << 14) | ((c & 8) << 17) | c 228 drivers/video/fbdev/arkfb.c ((c & 16) << 4) | ((c & 32) << 7) | ((c & 64) >> 6) | ((c & 128) >> 3)) * 0xF; c 261 drivers/video/fbdev/atafb_utils.h static inline void expand8_col2mask(u8 c, u32 m[]) c 263 drivers/video/fbdev/atafb_utils.h m[0] = four2long[c & 15]; c 265 drivers/video/fbdev/atafb_utils.h m[1] = four2long[c >> 4]; c 331 drivers/video/fbdev/atafb_utils.h static inline void expand16_col2mask(u8 c, u32 m[]) c 333 drivers/video/fbdev/atafb_utils.h m[0] = two2word[c & 3]; c 335 drivers/video/fbdev/atafb_utils.h m[1] = two2word[(c >> 2) & 3]; c 338 drivers/video/fbdev/atafb_utils.h m[2] = two2word[(c >> 4) & 3]; c 339 drivers/video/fbdev/atafb_utils.h m[3] = two2word[c >> 6]; c 1369 drivers/video/fbdev/aty/aty128fb.c const struct aty128_constants c = par->constants; c 1379 drivers/video/fbdev/aty/aty128fb.c if (vclk > c.ppll_max) c 1380 drivers/video/fbdev/aty/aty128fb.c vclk = c.ppll_max; c 1381 drivers/video/fbdev/aty/aty128fb.c if (vclk * 12 < c.ppll_min) c 1382 drivers/video/fbdev/aty/aty128fb.c vclk = c.ppll_min/12; c 1387 
drivers/video/fbdev/aty/aty128fb.c if (output_freq >= c.ppll_min && output_freq <= c.ppll_max) { c 1397 drivers/video/fbdev/aty/aty128fb.c n = c.ref_divider * output_freq; c 1398 drivers/video/fbdev/aty/aty128fb.c d = c.ref_clk; c 1406 drivers/video/fbdev/aty/aty128fb.c c.ref_divider, period_in_ps); c 413 drivers/video/fbdev/au1100fb.c struct clk *c; c 450 drivers/video/fbdev/au1100fb.c c = clk_get(NULL, "lcd_intclk"); c 451 drivers/video/fbdev/au1100fb.c if (!IS_ERR(c)) { c 452 drivers/video/fbdev/au1100fb.c fbdev->lcdclk = c; c 453 drivers/video/fbdev/au1100fb.c clk_set_rate(c, 48000000); c 454 drivers/video/fbdev/au1100fb.c clk_prepare_enable(c); c 823 drivers/video/fbdev/au1200fb.c struct clk *c = clk_get(NULL, "lcd_intclk"); c 826 drivers/video/fbdev/au1200fb.c if (!IS_ERR(c)) { c 827 drivers/video/fbdev/au1200fb.c r = clk_round_rate(c, pc); c 829 drivers/video/fbdev/au1200fb.c clk_set_rate(c, r); c 830 drivers/video/fbdev/au1200fb.c clk_prepare_enable(c); c 832 drivers/video/fbdev/au1200fb.c clk_put(c); c 93 drivers/video/fbdev/c2p_iplan2.c const u8 *c; c 103 drivers/video/fbdev/c2p_iplan2.c c = src; c 110 drivers/video/fbdev/c2p_iplan2.c memcpy(d.pixels+dst_idx, c, width); c 111 drivers/video/fbdev/c2p_iplan2.c c += width; c 122 drivers/video/fbdev/c2p_iplan2.c memcpy(d.pixels+dst_idx, c, w); c 123 drivers/video/fbdev/c2p_iplan2.c c += w; c 131 drivers/video/fbdev/c2p_iplan2.c memcpy(d.pixels, c, 16); c 132 drivers/video/fbdev/c2p_iplan2.c c += 16; c 141 drivers/video/fbdev/c2p_iplan2.c memcpy(d.pixels, c, w); c 95 drivers/video/fbdev/c2p_planar.c const u8 *c; c 103 drivers/video/fbdev/c2p_planar.c c = src; c 110 drivers/video/fbdev/c2p_planar.c memcpy(d.pixels+dst_idx, c, width); c 111 drivers/video/fbdev/c2p_planar.c c += width; c 123 drivers/video/fbdev/c2p_planar.c memcpy(d.pixels+dst_idx, c, w); c 124 drivers/video/fbdev/c2p_planar.c c += w; c 133 drivers/video/fbdev/c2p_planar.c memcpy(d.pixels, c, 32); c 134 drivers/video/fbdev/c2p_planar.c c += 32; c 143 drivers/video/fbdev/c2p_planar.c memcpy(d.pixels, c, w); c 31 drivers/video/fbdev/core/bitblit.c u8 c; c 35 drivers/video/fbdev/core/bitblit.c c = src[i]; c 37 drivers/video/fbdev/core/bitblit.c c = 0xff; c 39 drivers/video/fbdev/core/bitblit.c c |= c >> 1; c 41 drivers/video/fbdev/core/bitblit.c c = ~c; c 42 drivers/video/fbdev/core/bitblit.c dst[i] = c; c 242 drivers/video/fbdev/core/bitblit.c int w = DIV_ROUND_UP(vc->vc_font.width, 8), c; c 259 drivers/video/fbdev/core/bitblit.c c = scr_readw((u16 *) vc->vc_pos); c 260 drivers/video/fbdev/core/bitblit.c attribute = get_attribute(info, c); c 261 drivers/video/fbdev/core/bitblit.c src = vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height)); c 189 drivers/video/fbdev/core/fbcon.c static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos); c 310 drivers/video/fbdev/core/fbcon.c u16 c, int is_fg) c 318 drivers/video/fbdev/core/fbcon.c c = vc->vc_video_erase_char & charmask; c 322 drivers/video/fbdev/core/fbcon.c color = (is_fg) ? attr_fgcol((vc->vc_hi_font_mask) ? 9 : 8, c) c 323 drivers/video/fbdev/core/fbcon.c : attr_bgcol((vc->vc_hi_font_mask) ? 
13 : 12, c); c 393 drivers/video/fbdev/core/fbcon.c int c; c 414 drivers/video/fbdev/core/fbcon.c c = scr_readw((u16 *) vc->vc_pos); c 417 drivers/video/fbdev/core/fbcon.c ops->cursor(vc, info, mode, softback_lines, get_color(vc, info, c, 1), c 418 drivers/video/fbdev/core/fbcon.c get_color(vc, info, c, 0)); c 1361 drivers/video/fbdev/core/fbcon.c static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos) c 1365 drivers/video/fbdev/core/fbcon.c scr_writew(c, &chr); c 1383 drivers/video/fbdev/core/fbcon.c int c = scr_readw((u16 *) vc->vc_pos); c 1405 drivers/video/fbdev/core/fbcon.c ops->cursor(vc, info, mode, y, get_color(vc, info, c, 1), c 1406 drivers/video/fbdev/core/fbcon.c get_color(vc, info, c, 0)); c 1663 drivers/video/fbdev/core/fbcon.c unsigned short c; c 1670 drivers/video/fbdev/core/fbcon.c c = scr_readw(s); c 1671 drivers/video/fbdev/core/fbcon.c if (attr != (c & 0xff00)) { c 1672 drivers/video/fbdev/core/fbcon.c attr = c & 0xff00; c 1680 drivers/video/fbdev/core/fbcon.c if (c == scr_readw(d)) { c 1717 drivers/video/fbdev/core/fbcon.c unsigned short c; c 1722 drivers/video/fbdev/core/fbcon.c c = scr_readw(s); c 1723 drivers/video/fbdev/core/fbcon.c if (attr != (c & 0xff00)) { c 1724 drivers/video/fbdev/core/fbcon.c attr = c & 0xff00; c 1754 drivers/video/fbdev/core/fbcon.c unsigned short c; c 1758 drivers/video/fbdev/core/fbcon.c c = scr_readw(s); c 1760 drivers/video/fbdev/core/fbcon.c if (c == scr_readw(d)) { c 1772 drivers/video/fbdev/core/fbcon.c scr_writew(c, d); c 1802 drivers/video/fbdev/core/fbcon.c unsigned short c; c 1807 drivers/video/fbdev/core/fbcon.c c = scr_readw(s); c 1808 drivers/video/fbdev/core/fbcon.c if (attr != (c & 0xff00)) { c 1809 drivers/video/fbdev/core/fbcon.c attr = c & 0xff00; c 1817 drivers/video/fbdev/core/fbcon.c if (c == scr_readw(d)) { c 1828 drivers/video/fbdev/core/fbcon.c scr_writew(c, d); c 2519 drivers/video/fbdev/core/fbcon.c unsigned short c; c 2521 drivers/video/fbdev/core/fbcon.c c = scr_readw(cp); c 2522 drivers/video/fbdev/core/fbcon.c scr_writew(((c & 0xfe00) >> 1) | c 2523 drivers/video/fbdev/core/fbcon.c (c & 0xff), cp); c 2525 drivers/video/fbdev/core/fbcon.c c = vc->vc_video_erase_char; c 2527 drivers/video/fbdev/core/fbcon.c ((c & 0xfe00) >> 1) | (c & 0xff); c 2542 drivers/video/fbdev/core/fbcon.c unsigned short c; c 2545 drivers/video/fbdev/core/fbcon.c c = scr_readw(cp); c 2548 drivers/video/fbdev/core/fbcon.c ((c & 0xff00) << 1) | (c & c 2551 drivers/video/fbdev/core/fbcon.c newc = c & ~0x100; c 2554 drivers/video/fbdev/core/fbcon.c c = vc->vc_video_erase_char; c 2557 drivers/video/fbdev/core/fbcon.c ((c & 0xff00) << 1) | (c & 0xff); c 2560 drivers/video/fbdev/core/fbcon.c vc->vc_video_erase_char = c & ~0x100; c 237 drivers/video/fbdev/core/fbcon.h static inline int get_attribute(struct fb_info *info, u16 c) c 242 drivers/video/fbdev/core/fbcon.h if (attr_underline(c)) c 244 drivers/video/fbdev/core/fbcon.h if (attr_reverse(c)) c 246 drivers/video/fbdev/core/fbcon.h if (attr_bold(c)) c 31 drivers/video/fbdev/core/fbcon_ccw.c u8 c, msk = ~(0xff << offset), msk1 = 0; c 41 drivers/video/fbdev/core/fbcon_ccw.c c = *src; c 45 drivers/video/fbdev/core/fbcon_ccw.c c |= msk; c 48 drivers/video/fbdev/core/fbcon_ccw.c c |= msk1; c 52 drivers/video/fbdev/core/fbcon_ccw.c *(dst - width) |= c; c 55 drivers/video/fbdev/core/fbcon_ccw.c c = ~c; c 57 drivers/video/fbdev/core/fbcon_ccw.c *dst++ = c; c 227 drivers/video/fbdev/core/fbcon_ccw.c int w = (vc->vc_font.height + 7) >> 3, c; c 248 drivers/video/fbdev/core/fbcon_ccw.c c = 
scr_readw((u16 *) vc->vc_pos); c 249 drivers/video/fbdev/core/fbcon_ccw.c attribute = get_attribute(info, c); c 250 drivers/video/fbdev/core/fbcon_ccw.c src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width)); c 30 drivers/video/fbdev/core/fbcon_cw.c u8 c, msk = ~(0xff >> offset); c 34 drivers/video/fbdev/core/fbcon_cw.c c = *src; c 36 drivers/video/fbdev/core/fbcon_cw.c c |= msk; c 38 drivers/video/fbdev/core/fbcon_cw.c c |= *(src-width); c 40 drivers/video/fbdev/core/fbcon_cw.c c = ~c; c 42 drivers/video/fbdev/core/fbcon_cw.c *dst++ = c; c 210 drivers/video/fbdev/core/fbcon_cw.c int w = (vc->vc_font.height + 7) >> 3, c; c 231 drivers/video/fbdev/core/fbcon_cw.c c = scr_readw((u16 *) vc->vc_pos); c 232 drivers/video/fbdev/core/fbcon_cw.c attribute = get_attribute(info, c); c 233 drivers/video/fbdev/core/fbcon_cw.c src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width)); c 31 drivers/video/fbdev/core/fbcon_ud.c u8 c; c 36 drivers/video/fbdev/core/fbcon_ud.c c = src[i]; c 38 drivers/video/fbdev/core/fbcon_ud.c c = 0xff; c 40 drivers/video/fbdev/core/fbcon_ud.c c |= c << 1; c 42 drivers/video/fbdev/core/fbcon_ud.c c = ~c; c 43 drivers/video/fbdev/core/fbcon_ud.c dst[i] = c; c 257 drivers/video/fbdev/core/fbcon_ud.c int w = (vc->vc_font.width + 7) >> 3, c; c 279 drivers/video/fbdev/core/fbcon_ud.c c = scr_readw((u16 *) vc->vc_pos); c 280 drivers/video/fbdev/core/fbcon_ud.c attribute = get_attribute(info, c); c 281 drivers/video/fbdev/core/fbcon_ud.c src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.height)); c 764 drivers/video/fbdev/core/fbmem.c int c, cnt = 0, err = 0; c 801 drivers/video/fbdev/core/fbmem.c c = (count > PAGE_SIZE) ? PAGE_SIZE : count; c 803 drivers/video/fbdev/core/fbmem.c fb_memcpy_fromfb(dst, src, c); c 804 drivers/video/fbdev/core/fbmem.c dst += c; c 805 drivers/video/fbdev/core/fbmem.c src += c; c 807 drivers/video/fbdev/core/fbmem.c if (copy_to_user(buf, buffer, c)) { c 811 drivers/video/fbdev/core/fbmem.c *ppos += c; c 812 drivers/video/fbdev/core/fbmem.c buf += c; c 813 drivers/video/fbdev/core/fbmem.c cnt += c; c 814 drivers/video/fbdev/core/fbmem.c count -= c; c 829 drivers/video/fbdev/core/fbmem.c int c, cnt = 0, err = 0; c 872 drivers/video/fbdev/core/fbmem.c c = (count > PAGE_SIZE) ? 
PAGE_SIZE : count; c 875 drivers/video/fbdev/core/fbmem.c if (copy_from_user(src, buf, c)) { c 880 drivers/video/fbdev/core/fbmem.c fb_memcpy_tofb(dst, src, c); c 881 drivers/video/fbdev/core/fbmem.c dst += c; c 882 drivers/video/fbdev/core/fbmem.c src += c; c 883 drivers/video/fbdev/core/fbmem.c *ppos += c; c 884 drivers/video/fbdev/core/fbmem.c buf += c; c 885 drivers/video/fbdev/core/fbmem.c cnt += c; c 886 drivers/video/fbdev/core/fbmem.c count -= c; c 86 drivers/video/fbdev/core/fbmon.c static void copy_string(unsigned char *c, unsigned char *s) c 89 drivers/video/fbdev/core/fbmon.c c = c + 5; c 90 drivers/video/fbdev/core/fbmon.c for (i = 0; (i < 13 && *c != 0x0A); i++) c 91 drivers/video/fbdev/core/fbmon.c *(s++) = *(c++); c 412 drivers/video/fbdev/core/fbmon.c unsigned char c; c 414 drivers/video/fbdev/core/fbmon.c c = block[0]; c 415 drivers/video/fbdev/core/fbmon.c if (c&0x80) { c 420 drivers/video/fbdev/core/fbmon.c if (c&0x40) { c 425 drivers/video/fbdev/core/fbmon.c if (c&0x20) { c 429 drivers/video/fbdev/core/fbmon.c if (c&0x10) { c 434 drivers/video/fbdev/core/fbmon.c if (c&0x08) { c 438 drivers/video/fbdev/core/fbmon.c if (c&0x04) { c 442 drivers/video/fbdev/core/fbmon.c if (c&0x02) { c 446 drivers/video/fbdev/core/fbmon.c if (c&0x01) { c 451 drivers/video/fbdev/core/fbmon.c c = block[1]; c 452 drivers/video/fbdev/core/fbmon.c if (c&0x80) { c 456 drivers/video/fbdev/core/fbmon.c if (c&0x40) { c 460 drivers/video/fbdev/core/fbmon.c if (c&0x20) { c 465 drivers/video/fbdev/core/fbmon.c if (c&0x10) { c 469 drivers/video/fbdev/core/fbmon.c if (c&0x08) { c 473 drivers/video/fbdev/core/fbmon.c if (c&0x04) { c 477 drivers/video/fbdev/core/fbmon.c if (c&0x02) { c 481 drivers/video/fbdev/core/fbmon.c if (c&0x01) { c 485 drivers/video/fbdev/core/fbmon.c c = block[2]; c 486 drivers/video/fbdev/core/fbmon.c if (c&0x80) { c 490 drivers/video/fbdev/core/fbmon.c DPRINTK(" Manufacturer's mask: %x\n",c&0x7F); c 777 drivers/video/fbdev/core/fbmon.c unsigned char c, *block; c 783 drivers/video/fbdev/core/fbmon.c c = block[0] & 0x80; c 785 drivers/video/fbdev/core/fbmon.c if (c) { c 810 drivers/video/fbdev/core/fbmon.c c = block[0] & 0x10; c 811 drivers/video/fbdev/core/fbmon.c if (c) c 813 drivers/video/fbdev/core/fbmon.c c = block[0] & 0x0f; c 815 drivers/video/fbdev/core/fbmon.c if (c & 0x10) { c 819 drivers/video/fbdev/core/fbmon.c if (c & 0x08) { c 823 drivers/video/fbdev/core/fbmon.c if (c & 0x04) { c 827 drivers/video/fbdev/core/fbmon.c if (c & 0x02) { c 831 drivers/video/fbdev/core/fbmon.c if (c & 0x01) { c 849 drivers/video/fbdev/core/fbmon.c c = block[3]; c 850 drivers/video/fbdev/core/fbmon.c specs->gamma = c+100; c 878 drivers/video/fbdev/core/fbmon.c c = block[4] & 0x7; c 879 drivers/video/fbdev/core/fbmon.c if (c & 0x04) { c 883 drivers/video/fbdev/core/fbmon.c if (c & 0x02) { c 887 drivers/video/fbdev/core/fbmon.c if (c & 0x01) { c 197 drivers/video/fbdev/core/svgalib.c int i, c; c 207 drivers/video/fbdev/core/svgalib.c for (c = 0; c < map->length; c++) { c 1600 drivers/video/fbdev/matrox/matroxfb_base.c char c = *ptr++; c 1602 drivers/video/fbdev/matrox/matroxfb_base.c if (c == 0) { c 1605 drivers/video/fbdev/matrox/matroxfb_base.c if (c == '0') { c 1607 drivers/video/fbdev/matrox/matroxfb_base.c } else if (c == '1') { c 1609 drivers/video/fbdev/matrox/matroxfb_base.c } else if (c == '2' && minfo->devflags.crtc2) { c 136 drivers/video/fbdev/matrox/matroxfb_g450.c const int c = minfo->altout.tvo_params.contrast; c 138 drivers/video/fbdev/matrox/matroxfb_g450.c *bl = max(b - c, 
BLMIN); c 139 drivers/video/fbdev/matrox/matroxfb_g450.c *wl = min(b + c, WLMAX); c 139 drivers/video/fbdev/matrox/matroxfb_maven.c static int maven_get_reg(struct i2c_client* c, char reg) { c 143 drivers/video/fbdev/matrox/matroxfb_maven.c .addr = c->addr, c 149 drivers/video/fbdev/matrox/matroxfb_maven.c .addr = c->addr, c 157 drivers/video/fbdev/matrox/matroxfb_maven.c err = i2c_transfer(c->adapter, msgs, 2); c 163 drivers/video/fbdev/matrox/matroxfb_maven.c static int maven_set_reg(struct i2c_client* c, int reg, int val) { c 166 drivers/video/fbdev/matrox/matroxfb_maven.c err = i2c_smbus_write_byte_data(c, reg, val); c 172 drivers/video/fbdev/matrox/matroxfb_maven.c static int maven_set_reg_pair(struct i2c_client* c, int reg, int val) { c 175 drivers/video/fbdev/matrox/matroxfb_maven.c err = i2c_smbus_write_word_data(c, reg, val); c 361 drivers/video/fbdev/matrox/matroxfb_maven.c const int c = md->primary_head->altout.tvo_params.contrast; c 363 drivers/video/fbdev/matrox/matroxfb_maven.c *bl = max(b - c, BLMIN); c 364 drivers/video/fbdev/matrox/matroxfb_maven.c *wl = min(b + c, WLMAX); c 523 drivers/video/fbdev/matrox/matroxfb_maven.c #define LR(x) maven_set_reg(c, (x), m->regs[(x)]) c 524 drivers/video/fbdev/matrox/matroxfb_maven.c #define LRP(x) maven_set_reg_pair(c, (x), m->regs[(x)] | (m->regs[(x)+1] << 8)) c 525 drivers/video/fbdev/matrox/matroxfb_maven.c static void maven_init_TV(struct i2c_client* c, const struct mavenregs* m) { c 529 drivers/video/fbdev/matrox/matroxfb_maven.c maven_set_reg(c, 0x3E, 0x01); c 530 drivers/video/fbdev/matrox/matroxfb_maven.c maven_get_reg(c, 0x82); /* fetch oscillator state? */ c 531 drivers/video/fbdev/matrox/matroxfb_maven.c maven_set_reg(c, 0x8C, 0x00); c 532 drivers/video/fbdev/matrox/matroxfb_maven.c maven_get_reg(c, 0x94); /* get 0x82 */ c 533 drivers/video/fbdev/matrox/matroxfb_maven.c maven_set_reg(c, 0x94, 0xA2); c 536 drivers/video/fbdev/matrox/matroxfb_maven.c maven_set_reg_pair(c, 0x8E, 0x1EFF); c 537 drivers/video/fbdev/matrox/matroxfb_maven.c maven_set_reg(c, 0xC6, 0x01); c 541 drivers/video/fbdev/matrox/matroxfb_maven.c maven_get_reg(c, 0x06); c 542 drivers/video/fbdev/matrox/matroxfb_maven.c maven_set_reg(c, 0x06, 0xF9); /* or read |= 0xF0 ? */ c 562 drivers/video/fbdev/matrox/matroxfb_maven.c maven_set_reg(c, 0x35, 0x10); /* ... */ c 564 drivers/video/fbdev/matrox/matroxfb_maven.c maven_set_reg(c, 0x35, 0x0F); /* ... */ c 600 drivers/video/fbdev/matrox/matroxfb_maven.c maven_set_reg(c, 0x35, 0x1D); /* ... */ c 602 drivers/video/fbdev/matrox/matroxfb_maven.c maven_set_reg(c, 0x35, 0x1C); c 607 drivers/video/fbdev/matrox/matroxfb_maven.c maven_set_reg(c, 0xB3, 0x01); c 609 drivers/video/fbdev/matrox/matroxfb_maven.c maven_get_reg(c, 0xB0); /* read 0x80 */ c 610 drivers/video/fbdev/matrox/matroxfb_maven.c maven_set_reg(c, 0xB0, 0x08); /* ugh... 
*/ c 611 drivers/video/fbdev/matrox/matroxfb_maven.c maven_get_reg(c, 0xB9); /* read 0x7C */ c 612 drivers/video/fbdev/matrox/matroxfb_maven.c maven_set_reg(c, 0xB9, 0x78); c 613 drivers/video/fbdev/matrox/matroxfb_maven.c maven_get_reg(c, 0xBF); /* read 0x00 */ c 614 drivers/video/fbdev/matrox/matroxfb_maven.c maven_set_reg(c, 0xBF, 0x02); c 615 drivers/video/fbdev/matrox/matroxfb_maven.c maven_get_reg(c, 0x94); /* read 0x82 */ c 616 drivers/video/fbdev/matrox/matroxfb_maven.c maven_set_reg(c, 0x94, 0xB3); c 622 drivers/video/fbdev/matrox/matroxfb_maven.c maven_set_reg(c, 0x8C, 0x20); c 623 drivers/video/fbdev/matrox/matroxfb_maven.c maven_get_reg(c, 0x8D); c 624 drivers/video/fbdev/matrox/matroxfb_maven.c maven_set_reg(c, 0x8D, 0x10); c 647 drivers/video/fbdev/matrox/matroxfb_maven.c maven_get_reg(c, 0x8D); c 648 drivers/video/fbdev/matrox/matroxfb_maven.c maven_set_reg(c, 0x8D, 0x04); c 672 drivers/video/fbdev/matrox/matroxfb_maven.c val = maven_get_reg(c, 0x8D); c 674 drivers/video/fbdev/matrox/matroxfb_maven.c maven_set_reg(c, 0x8D, val); c 700 drivers/video/fbdev/matrox/matroxfb_maven.c maven_set_reg(c, 0x35, 0x1D); c 702 drivers/video/fbdev/matrox/matroxfb_maven.c maven_set_reg(c, 0x35, 0x1C); c 707 drivers/video/fbdev/matrox/matroxfb_maven.c maven_get_reg(c, 0xB0); c 719 drivers/video/fbdev/matrox/matroxfb_maven.c maven_set_reg(c, 0x3E, 0x00); c 720 drivers/video/fbdev/matrox/matroxfb_maven.c maven_set_reg(c, 0x95, 0x20); c 734 drivers/video/fbdev/matrox/matroxfb_maven.c unsigned int c; c 739 drivers/video/fbdev/matrox/matroxfb_maven.c if (!matroxfb_mavenclock((m->mode == MATROXFB_OUTPUT_MODE_PAL) ? &maven_PAL : &maven_NTSC, h, vt, &a, &b, &c, &h2)) { c 746 drivers/video/fbdev/matrox/matroxfb_maven.c m->regs[0x82] = c | 0x80; c 759 drivers/video/fbdev/matrox/matroxfb_maven.c unsigned int a, bv, c; c 922 drivers/video/fbdev/matrox/matroxfb_maven.c DAC1064_calcclock(mt->pixclock, 450000, &a, &bv, &c); c 925 drivers/video/fbdev/matrox/matroxfb_maven.c m->regs[0x82] = c | 0x80; c 991 drivers/video/fbdev/matrox/matroxfb_maven.c struct i2c_client *c = md->client; c 1022 drivers/video/fbdev/matrox/matroxfb_maven.c maven_init_TV(c, m); c 1028 drivers/video/fbdev/matrox/matroxfb_maven.c struct i2c_client *c = md->client; c 1029 drivers/video/fbdev/matrox/matroxfb_maven.c maven_set_reg(c, 0x95, 0x20); /* start whole thing */ c 191 drivers/video/fbdev/mmp/hw/mmp_ctrl.h #define SPU_DMA_PITCH_C(c) ((c)<<16) c 43 drivers/video/fbdev/nvidia/nv_proto.h #define nvidia_probe_i2c_connector(p, c, edid) (-1) c 415 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c unsigned c, m, r; c 432 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c c = (ptr[1] >> 6) & 0x3; c 451 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c (c << 6) | (m << 4) | (r << 0)); c 124 drivers/video/fbdev/omap2/omapfb/omapfb-main.c unsigned c; c 140 drivers/video/fbdev/omap2/omapfb/omapfb-main.c c = (r << 16) | (g << 8) | (b << 0); c 141 drivers/video/fbdev/omap2/omapfb/omapfb-main.c draw_pixel(fbi, x, y, c); c 18 drivers/video/fbdev/pxa168fb.h #define SPU_DMA_PITCH_C(c) ((c) << 16) c 550 drivers/video/fbdev/riva/riva_hw.h int RivaGetConfig(RIVA_HW_INST *chip, struct pci_dev *pdev, unsigned int c); c 294 drivers/video/fbdev/s3fb.c int i, c; c 305 drivers/video/fbdev/s3fb.c for (c = 0; c < map->length; c++) { c 306 drivers/video/fbdev/s3fb.c fb_writeb(font[c * map->height + i], fb + c * 4); c 341 drivers/video/fbdev/s3fb.c static inline u32 expand_color(u32 c) c 343 drivers/video/fbdev/s3fb.c return ((c & 1) | ((c & 2) << 7) | ((c & 4) 
<< 14) | ((c & 8) << 21)) * 0xFF; c 397 drivers/video/fbdev/s3fb.c static inline u32 expand_pixel(u32 c) c 399 drivers/video/fbdev/s3fb.c return (((c & 1) << 24) | ((c & 2) << 27) | ((c & 4) << 14) | ((c & 8) << 17) | c 400 drivers/video/fbdev/s3fb.c ((c & 16) << 4) | ((c & 32) << 7) | ((c & 64) >> 6) | ((c & 128) >> 3)) * 0xF; c 118 drivers/video/fbdev/sbuslib.c struct fbcmap __user *c = (struct fbcmap __user *) arg; c 127 drivers/video/fbdev/sbuslib.c if (get_user(index, &c->index) || c 128 drivers/video/fbdev/sbuslib.c get_user(count, &c->count) || c 129 drivers/video/fbdev/sbuslib.c get_user(ured, &c->red) || c 130 drivers/video/fbdev/sbuslib.c get_user(ugreen, &c->green) || c 131 drivers/video/fbdev/sbuslib.c get_user(ublue, &c->blue)) c 159 drivers/video/fbdev/sbuslib.c struct fbcmap __user *c = (struct fbcmap __user *) arg; c 167 drivers/video/fbdev/sbuslib.c if (get_user(index, &c->index) || c 168 drivers/video/fbdev/sbuslib.c get_user(count, &c->count) || c 169 drivers/video/fbdev/sbuslib.c get_user(ured, &c->red) || c 170 drivers/video/fbdev/sbuslib.c get_user(ugreen, &c->green) || c 171 drivers/video/fbdev/sbuslib.c get_user(ublue, &c->blue)) c 49 drivers/video/fbdev/sh7760fb.c struct completion *c = data; c 51 drivers/video/fbdev/sh7760fb.c complete(c); c 1027 drivers/video/fbdev/sm712fb.c int c, i, cnt = 0, err = 0; c 1060 drivers/video/fbdev/sm712fb.c c = (count > PAGE_SIZE) ? PAGE_SIZE : count; c 1062 drivers/video/fbdev/sm712fb.c for (i = c >> 2; i--;) { c 1067 drivers/video/fbdev/sm712fb.c if (c & 3) { c 1071 drivers/video/fbdev/sm712fb.c for (i = c & 3; i--;) { c 1082 drivers/video/fbdev/sm712fb.c if (copy_to_user(buf, buffer, c)) { c 1086 drivers/video/fbdev/sm712fb.c *ppos += c; c 1087 drivers/video/fbdev/sm712fb.c buf += c; c 1088 drivers/video/fbdev/sm712fb.c cnt += c; c 1089 drivers/video/fbdev/sm712fb.c count -= c; c 1104 drivers/video/fbdev/sm712fb.c int c, i, cnt = 0, err = 0; c 1143 drivers/video/fbdev/sm712fb.c c = (count > PAGE_SIZE) ? 
PAGE_SIZE : count; c 1146 drivers/video/fbdev/sm712fb.c if (copy_from_user(src, buf, c)) { c 1151 drivers/video/fbdev/sm712fb.c for (i = c >> 2; i--;) { c 1155 drivers/video/fbdev/sm712fb.c if (c & 3) { c 1159 drivers/video/fbdev/sm712fb.c for (i = c & 3; i--;) { c 1170 drivers/video/fbdev/sm712fb.c *ppos += c; c 1171 drivers/video/fbdev/sm712fb.c buf += c; c 1172 drivers/video/fbdev/sm712fb.c cnt += c; c 1173 drivers/video/fbdev/sm712fb.c count -= c; c 394 drivers/video/fbdev/sticore.h void sti_putc(struct sti_struct *sti, int c, int y, int x); c 398 drivers/video/fbdev/sticore.h int height, int width, int c); c 280 drivers/video/fbdev/tdfxfb.c static inline void do_setpalentry(struct tdfx_par *par, unsigned regno, u32 c) c 286 drivers/video/fbdev/tdfxfb.c tdfx_outl(par, DACDATA, c); c 329 drivers/video/fbdev/tridentfb.c u32 x, u32 y, u32 w, u32 h, u32 c, u32 rop) c 331 drivers/video/fbdev/tridentfb.c writemmr(par, COLOR, c); c 340 drivers/video/fbdev/tridentfb.c u32 x, u32 y, u32 w, u32 h, u32 c, u32 b) c 344 drivers/video/fbdev/tridentfb.c writemmr(par, COLOR, c); c 431 drivers/video/fbdev/tridentfb.c u32 x, u32 y, u32 w, u32 h, u32 c, u32 rop) c 434 drivers/video/fbdev/tridentfb.c writemmr(par, 0x2158, c); c 503 drivers/video/fbdev/tridentfb.c u32 x, u32 y, u32 w, u32 h, u32 c, u32 rop) c 508 drivers/video/fbdev/tridentfb.c writemmr(par, 0x2144, c); c 571 drivers/video/fbdev/tridentfb.c u32 x, u32 y, u32 w, u32 h, u32 c, u32 rop) c 574 drivers/video/fbdev/tridentfb.c writemmr(par, OLDCLR, c); c 135 drivers/video/fbdev/vt8623fb.c static inline u32 expand_color(u32 c) c 137 drivers/video/fbdev/vt8623fb.c return ((c & 1) | ((c & 2) << 7) | ((c & 4) << 14) | ((c & 8) << 21)) * 0xFF; c 190 drivers/video/fbdev/vt8623fb.c static inline u32 expand_pixel(u32 c) c 192 drivers/video/fbdev/vt8623fb.c return (((c & 1) << 24) | ((c & 2) << 27) | ((c & 4) << 14) | ((c & 8) << 17) | c 193 drivers/video/fbdev/vt8623fb.c ((c & 16) << 4) | ((c & 32) << 7) | ((c & 64) >> 6) | ((c & 128) >> 3)) * 0xF; c 503 drivers/video/fbdev/xen-fbfront.c struct console *c; c 509 drivers/video/fbdev/xen-fbfront.c for_each_console(c) { c 510 drivers/video/fbdev/xen-fbfront.c if (!strcmp(c->name, "tty") && c->index == 0) c 514 drivers/video/fbdev/xen-fbfront.c if (c) { c 515 drivers/video/fbdev/xen-fbfront.c unregister_console(c); c 516 drivers/video/fbdev/xen-fbfront.c c->flags |= CON_CONSDEV; c 517 drivers/video/fbdev/xen-fbfront.c c->flags &= ~CON_PRINTBUFFER; /* don't print again */ c 518 drivers/video/fbdev/xen-fbfront.c register_console(c); c 1103 drivers/virtio/virtio_ring.c unsigned int i, n, c, descs_used, err_idx; c 1145 drivers/virtio/virtio_ring.c c = 0; c 1154 drivers/virtio/virtio_ring.c (++c == total_sg ? 
0 : VRING_DESC_F_NEXT) | c 40 drivers/w1/slaves/w1_ds2423.c ssize_t c; c 45 drivers/w1/slaves/w1_ds2423.c c = PAGE_SIZE; c 65 drivers/w1/slaves/w1_ds2423.c c -= snprintf(out_buf + PAGE_SIZE - c, c 66 drivers/w1/slaves/w1_ds2423.c c, "%02x ", c 74 drivers/w1/slaves/w1_ds2423.c c -= snprintf(out_buf + PAGE_SIZE - c, c 75 drivers/w1/slaves/w1_ds2423.c c, "crc=NO\n"); c 97 drivers/w1/slaves/w1_ds2423.c c -= snprintf(out_buf + PAGE_SIZE - c, c 98 drivers/w1/slaves/w1_ds2423.c c, "crc=YES c=%d\n", result); c 100 drivers/w1/slaves/w1_ds2423.c c -= snprintf(out_buf + PAGE_SIZE - c, c 101 drivers/w1/slaves/w1_ds2423.c c, "crc=NO\n"); c 106 drivers/w1/slaves/w1_ds2423.c c -= snprintf(out_buf + PAGE_SIZE - c, c, "Connection error"); c 109 drivers/w1/slaves/w1_ds2423.c return PAGE_SIZE - c; c 574 drivers/w1/slaves/w1_therm.c ssize_t c = PAGE_SIZE; c 582 drivers/w1/slaves/w1_therm.c c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ", info.rom[i]); c 583 drivers/w1/slaves/w1_therm.c c -= snprintf(buf + PAGE_SIZE - c, c, ": crc=%02x %s\n", c 591 drivers/w1/slaves/w1_therm.c c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ", c 594 drivers/w1/slaves/w1_therm.c c -= snprintf(buf + PAGE_SIZE - c, c, "t=%d\n", c 596 drivers/w1/slaves/w1_therm.c ret = PAGE_SIZE - c; c 646 drivers/w1/slaves/w1_therm.c ssize_t c = PAGE_SIZE; c 708 drivers/w1/slaves/w1_therm.c c -= snprintf(buf + PAGE_SIZE - c, c, "%d\n", seq); c 709 drivers/w1/slaves/w1_therm.c return PAGE_SIZE - c; c 370 drivers/w1/w1.c int c = PAGE_SIZE; c 379 drivers/w1/w1.c c -= snprintf(buf + PAGE_SIZE - c, c, "%s\n", sl->name); c 382 drivers/w1/w1.c c -= snprintf(buf + PAGE_SIZE - c, c, "not found.\n"); c 386 drivers/w1/w1.c return PAGE_SIZE - c; c 392 drivers/w1/w1.c int c = PAGE_SIZE; c 393 drivers/w1/w1.c c -= snprintf(buf+PAGE_SIZE - c, c, c 395 drivers/w1/w1.c return PAGE_SIZE - c; c 489 drivers/w1/w1.c int c = PAGE_SIZE; c 490 drivers/w1/w1.c c -= snprintf(buf+PAGE_SIZE - c, c, c 492 drivers/w1/w1.c return PAGE_SIZE - c; c 130 drivers/watchdog/acquirewdt.c char c; c 131 drivers/watchdog/acquirewdt.c if (get_user(c, buf + i)) c 133 drivers/watchdog/acquirewdt.c if (c == 'V') c 119 drivers/watchdog/advantechwdt.c char c; c 120 drivers/watchdog/advantechwdt.c if (get_user(c, buf + i)) c 122 drivers/watchdog/advantechwdt.c if (c == 'V') c 152 drivers/watchdog/alim1535_wdt.c char c; c 153 drivers/watchdog/alim1535_wdt.c if (get_user(c, data + i)) c 155 drivers/watchdog/alim1535_wdt.c if (c == 'V') c 198 drivers/watchdog/alim7101_wdt.c char c; c 199 drivers/watchdog/alim7101_wdt.c if (get_user(c, buf + ofs)) c 201 drivers/watchdog/alim7101_wdt.c if (c == 'V') c 192 drivers/watchdog/ar7_wdt.c char c; c 193 drivers/watchdog/ar7_wdt.c if (get_user(c, data + i)) c 195 drivers/watchdog/ar7_wdt.c if (c == 'V') c 159 drivers/watchdog/ath79_wdt.c char c; c 161 drivers/watchdog/ath79_wdt.c if (get_user(c, data + i)) c 164 drivers/watchdog/ath79_wdt.c if (c == 'V') c 146 drivers/watchdog/bcm63xx_wdt.c char c; c 147 drivers/watchdog/bcm63xx_wdt.c if (get_user(c, data + i)) c 149 drivers/watchdog/bcm63xx_wdt.c if (c == 'V') c 208 drivers/watchdog/eurotechwdt.c char c; c 209 drivers/watchdog/eurotechwdt.c if (get_user(c, buf + i)) c 211 drivers/watchdog/eurotechwdt.c if (c == 'V') c 567 drivers/watchdog/f71808e_wdt.c char c; c 568 drivers/watchdog/f71808e_wdt.c if (get_user(c, buf + i)) c 570 drivers/watchdog/f71808e_wdt.c if (c == 'V') c 145 drivers/watchdog/gef_wdt.c char c; c 146 drivers/watchdog/gef_wdt.c if (get_user(c, data + i)) c 148 drivers/watchdog/gef_wdt.c if (c == 
'V') c 120 drivers/watchdog/geodewdt.c char c; c 122 drivers/watchdog/geodewdt.c if (get_user(c, data + i)) c 125 drivers/watchdog/geodewdt.c if (c == 'V') c 155 drivers/watchdog/ib700wdt.c char c; c 156 drivers/watchdog/ib700wdt.c if (get_user(c, buf + i)) c 158 drivers/watchdog/ib700wdt.c if (c == 'V') c 258 drivers/watchdog/ibmasr.c char c; c 259 drivers/watchdog/ibmasr.c if (get_user(c, buf + i)) c 261 drivers/watchdog/ibmasr.c if (c == 'V') c 105 drivers/watchdog/iop_wdt.c char c; c 107 drivers/watchdog/iop_wdt.c if (get_user(c, data + i)) c 109 drivers/watchdog/iop_wdt.c if (c == 'V') c 243 drivers/watchdog/it8712f_wdt.c char c; c 244 drivers/watchdog/it8712f_wdt.c if (get_user(c, data + i)) c 246 drivers/watchdog/it8712f_wdt.c if (c == 'V') c 82 drivers/watchdog/ixp4xx_wdt.c char c; c 84 drivers/watchdog/ixp4xx_wdt.c if (get_user(c, data + i)) c 86 drivers/watchdog/ixp4xx_wdt.c if (c == 'V') c 101 drivers/watchdog/m54xx_wdt.c char c; c 103 drivers/watchdog/m54xx_wdt.c if (get_user(c, data + i)) c 105 drivers/watchdog/m54xx_wdt.c if (c == 'V') c 283 drivers/watchdog/machzwd.c char c; c 284 drivers/watchdog/machzwd.c if (get_user(c, buf + ofs)) c 286 drivers/watchdog/machzwd.c if (c == 'V') { c 32 drivers/watchdog/meson_wdt.c #define MESON_SEC_TO_TC(s, c) ((s) * (c)) c 180 drivers/watchdog/mixcomwd.c char c; c 181 drivers/watchdog/mixcomwd.c if (get_user(c, data + i)) c 183 drivers/watchdog/mixcomwd.c if (c == 'V') c 162 drivers/watchdog/mv64x60_wdt.c char c; c 163 drivers/watchdog/mv64x60_wdt.c if (get_user(c, data + i)) c 165 drivers/watchdog/mv64x60_wdt.c if (c == 'V') c 196 drivers/watchdog/nv_tco.c char c; c 197 drivers/watchdog/nv_tco.c if (get_user(c, data + i)) c 199 drivers/watchdog/nv_tco.c if (c == 'V') c 153 drivers/watchdog/octeon-wdt-main.c extern int prom_putchar(char c); c 353 drivers/watchdog/pc87413_wdt.c char c; c 354 drivers/watchdog/pc87413_wdt.c if (get_user(c, data + i)) c 356 drivers/watchdog/pc87413_wdt.c if (c == 'V') c 677 drivers/watchdog/pcwd.c char c; c 679 drivers/watchdog/pcwd.c if (get_user(c, buf + i)) c 681 drivers/watchdog/pcwd.c if (c == 'V') c 449 drivers/watchdog/pcwd_pci.c char c; c 450 drivers/watchdog/pcwd_pci.c if (get_user(c, data + i)) c 452 drivers/watchdog/pcwd_pci.c if (c == 'V') c 374 drivers/watchdog/pcwd_usb.c char c; c 375 drivers/watchdog/pcwd_usb.c if (get_user(c, data + i)) c 377 drivers/watchdog/pcwd_usb.c if (c == 'V') c 155 drivers/watchdog/pika_wdt.c char c; c 156 drivers/watchdog/pika_wdt.c if (get_user(c, data + i)) c 158 drivers/watchdog/pika_wdt.c if (c == 'V') { c 175 drivers/watchdog/rc32434_wdt.c char c; c 176 drivers/watchdog/rc32434_wdt.c if (get_user(c, data + i)) c 178 drivers/watchdog/rc32434_wdt.c if (c == 'V') c 156 drivers/watchdog/sb_wdog.c char c; c 158 drivers/watchdog/sb_wdog.c if (get_user(c, data + i)) c 160 drivers/watchdog/sb_wdog.c if (c == 'V') c 179 drivers/watchdog/sbc60xxwdt.c char c; c 180 drivers/watchdog/sbc60xxwdt.c if (get_user(c, buf + ofs)) c 182 drivers/watchdog/sbc60xxwdt.c if (c == 'V') c 98 drivers/watchdog/sbc7240_wdt.c char c; c 107 drivers/watchdog/sbc7240_wdt.c if (get_user(c, buf + i)) c 109 drivers/watchdog/sbc7240_wdt.c if (c == SBC7240_MAGIC_CHAR) { c 248 drivers/watchdog/sbc8360.c char c; c 249 drivers/watchdog/sbc8360.c if (get_user(c, buf + i)) c 251 drivers/watchdog/sbc8360.c if (c == 'V') c 97 drivers/watchdog/sbc_fitpc2_wdt.c char c; c 99 drivers/watchdog/sbc_fitpc2_wdt.c if (get_user(c, data + i)) c 102 drivers/watchdog/sbc_fitpc2_wdt.c if (c == 'V') c 274 
drivers/watchdog/sc1200wdt.c char c; c 276 drivers/watchdog/sc1200wdt.c if (get_user(c, data + i)) c 278 drivers/watchdog/sc1200wdt.c if (c == 'V') c 232 drivers/watchdog/sc520_wdt.c char c; c 233 drivers/watchdog/sc520_wdt.c if (get_user(c, buf + ofs)) c 235 drivers/watchdog/sc520_wdt.c if (c == 'V') c 231 drivers/watchdog/sch311x_wdt.c char c; c 232 drivers/watchdog/sch311x_wdt.c if (get_user(c, buf + i)) c 234 drivers/watchdog/sch311x_wdt.c if (c == 'V') c 142 drivers/watchdog/scx200_wdt.c char c; c 143 drivers/watchdog/scx200_wdt.c if (get_user(c, data + i)) c 145 drivers/watchdog/scx200_wdt.c if (c == 'V') c 403 drivers/watchdog/smsc37b787_wdt.c char c; c 404 drivers/watchdog/smsc37b787_wdt.c if (get_user(c, data + i)) c 406 drivers/watchdog/smsc37b787_wdt.c if (c == 'V') c 201 drivers/watchdog/w83877f_wdt.c char c; c 202 drivers/watchdog/w83877f_wdt.c if (get_user(c, buf + ofs)) c 204 drivers/watchdog/w83877f_wdt.c if (c == 'V') c 342 drivers/watchdog/w83977f_wdt.c char c; c 343 drivers/watchdog/w83977f_wdt.c if (get_user(c, buf + ofs)) c 345 drivers/watchdog/w83977f_wdt.c if (c == 'V') c 108 drivers/watchdog/wafer5823wdt.c char c; c 109 drivers/watchdog/wafer5823wdt.c if (get_user(c, buf + i)) c 111 drivers/watchdog/wafer5823wdt.c if (c == 'V') c 648 drivers/watchdog/watchdog_dev.c char c; c 661 drivers/watchdog/watchdog_dev.c if (get_user(c, data + i)) c 663 drivers/watchdog/watchdog_dev.c if (c == 'V') c 238 drivers/watchdog/wdrtas.c char c; c 247 drivers/watchdog/wdrtas.c if (get_user(c, buf + i)) c 250 drivers/watchdog/wdrtas.c if (c == 'V') c 240 drivers/watchdog/wdt.c unsigned short c; c 244 drivers/watchdog/wdt.c c = inb_p(WDT_RT); c 246 drivers/watchdog/wdt.c return (c * 11 / 15) + 7; c 328 drivers/watchdog/wdt.c char c; c 329 drivers/watchdog/wdt.c if (get_user(c, buf + i)) c 331 drivers/watchdog/wdt.c if (c == 'V') c 315 drivers/watchdog/wdt977.c char c; c 316 drivers/watchdog/wdt977.c if (get_user(c, buf + i)) c 318 drivers/watchdog/wdt977.c if (c == 'V') c 276 drivers/watchdog/wdt_pci.c unsigned short c; c 279 drivers/watchdog/wdt_pci.c c = inb(WDT_RT); c 282 drivers/watchdog/wdt_pci.c *temperature = (c * 11 / 15) + 7; c 364 drivers/watchdog/wdt_pci.c char c; c 365 drivers/watchdog/wdt_pci.c if (get_user(c, buf + i)) c 367 drivers/watchdog/wdt_pci.c if (c == 'V') c 194 drivers/xen/evtchn.c unsigned int c, p, bytes1 = 0, bytes2 = 0; c 213 drivers/xen/evtchn.c c = u->ring_cons; c 215 drivers/xen/evtchn.c if (c != p) c 230 drivers/xen/evtchn.c if (((c ^ p) & u->ring_size) != 0) { c 231 drivers/xen/evtchn.c bytes1 = (u->ring_size - evtchn_ring_offset(u, c)) * c 235 drivers/xen/evtchn.c bytes1 = (p - c) * sizeof(evtchn_port_t); c 249 drivers/xen/evtchn.c if (copy_to_user(buf, evtchn_ring_entry(u, c), bytes1) || c 533 drivers/xen/xenbus/xenbus_probe.c static unsigned int char_count(const char *str, char c) c 538 drivers/xen/xenbus/xenbus_probe.c if (str[i] == c) c 543 drivers/xen/xenbus/xenbus_probe.c static int strsep_len(const char *str, char c, unsigned int len) c 548 drivers/xen/xenbus/xenbus_probe.c if (str[i] == c) { c 18 drivers/zorro/gen-devlist.c pq(FILE *f, const char *c) c 20 drivers/zorro/gen-devlist.c while (*c) { c 21 drivers/zorro/gen-devlist.c if (*c == '"') c 24 drivers/zorro/gen-devlist.c fputc(*c, f); c 25 drivers/zorro/gen-devlist.c c++; c 32 drivers/zorro/gen-devlist.c char line[1024], *c, *bra, manuf[8]; c 47 drivers/zorro/gen-devlist.c if ((c = strchr(line, '\n'))) c 48 drivers/zorro/gen-devlist.c *c = 0; c 55 drivers/zorro/gen-devlist.c c = line + 5; c 
56 drivers/zorro/gen-devlist.c while (*c == ' ') c 57 drivers/zorro/gen-devlist.c *c++ = 0; c 58 drivers/zorro/gen-devlist.c if (manuf_len + strlen(c) + 1 > MAX_NAME_SIZE) { c 60 drivers/zorro/gen-devlist.c bra = strchr(c, '['); c 61 drivers/zorro/gen-devlist.c if (bra && bra > c && bra[-1] == ' ') c 63 drivers/zorro/gen-devlist.c if (manuf_len + strlen(c) + 1 > MAX_NAME_SIZE) { c 69 drivers/zorro/gen-devlist.c pq(devf, c); c 77 drivers/zorro/gen-devlist.c c = line + 4; c 78 drivers/zorro/gen-devlist.c while (*c == ' ') c 79 drivers/zorro/gen-devlist.c *c++ = 0; c 84 drivers/zorro/gen-devlist.c manuf_len = strlen(c); c 90 drivers/zorro/gen-devlist.c pq(devf, c); c 138 fs/adfs/dir.c static unsigned char adfs_tolower(unsigned char c) c 140 fs/adfs/dir.c if (c >= 'A' && c <= 'Z') c 141 fs/adfs/dir.c c += 'a' - 'A'; c 142 fs/adfs/dir.c return c; c 145 fs/adfs/dir_fplus.c char *c = (char *)to; c 149 fs/adfs/dir_fplus.c memcpy(c, c 153 fs/adfs/dir_fplus.c memcpy(c + partial, c 321 fs/affs/namei.c char c, lc; c 353 fs/affs/namei.c while (i < maxlen && (c = *symname++)) { c 354 fs/affs/namei.c if (c == '.' && lc == '/' && *symname == '.' && symname[1] == '/') { c 359 fs/affs/namei.c } else if (c == '.' && lc == '/' && *symname == '/') { c 363 fs/affs/namei.c *p++ = c; c 364 fs/affs/namei.c lc = c; c 21 fs/affs/symlink.c char c; c 39 fs/affs/symlink.c while (i < 1023 && (c = pf[i])) c 40 fs/affs/symlink.c link[i++] = c; c 49 fs/affs/symlink.c while (i < 1023 && (c = lf->symname[j])) { c 50 fs/affs/symlink.c if (c == '/' && lc == '/' && i < 1020) { /* parent dir */ c 54 fs/affs/symlink.c link[i++] = c; c 55 fs/affs/symlink.c lc = c; c 92 fs/binfmt_aout.c char c; c 95 fs/binfmt_aout.c get_user(c,p++); c 96 fs/binfmt_aout.c } while (c); c 101 fs/binfmt_aout.c char c; c 104 fs/binfmt_aout.c get_user(c,p++); c 105 fs/binfmt_aout.c } while (c); c 262 fs/binfmt_misc.c char c; c 264 fs/binfmt_misc.c while ((c = *s++) != del) { c 265 fs/binfmt_misc.c if (c == '\\' && *s == 'x') { c 18 fs/binfmt_script.c static inline bool spacetab(char c) { return c == ' ' || c == '\t'; } c 3392 fs/btrfs/ctree.c struct extent_buffer *c; c 3406 fs/btrfs/ctree.c c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level, c 3408 fs/btrfs/ctree.c if (IS_ERR(c)) c 3409 fs/btrfs/ctree.c return PTR_ERR(c); c 3413 fs/btrfs/ctree.c btrfs_set_header_nritems(c, 1); c 3414 fs/btrfs/ctree.c btrfs_set_node_key(c, &lower_key, 0); c 3415 fs/btrfs/ctree.c btrfs_set_node_blockptr(c, 0, lower->start); c 3419 fs/btrfs/ctree.c btrfs_set_node_ptr_generation(c, 0, lower_gen); c 3421 fs/btrfs/ctree.c btrfs_mark_buffer_dirty(c); c 3424 fs/btrfs/ctree.c ret = tree_mod_log_insert_root(root->node, c, 0); c 3426 fs/btrfs/ctree.c rcu_assign_pointer(root->node, c); c 3432 fs/btrfs/ctree.c extent_buffer_get(c); c 3433 fs/btrfs/ctree.c path->nodes[level] = c; c 3499 fs/btrfs/ctree.c struct extent_buffer *c; c 3506 fs/btrfs/ctree.c c = path->nodes[level]; c 3507 fs/btrfs/ctree.c WARN_ON(btrfs_header_generation(c) != trans->transid); c 3508 fs/btrfs/ctree.c if (c == root->node) { c 3524 fs/btrfs/ctree.c c = path->nodes[level]; c 3525 fs/btrfs/ctree.c if (!ret && btrfs_header_nritems(c) < c 3532 fs/btrfs/ctree.c c_nritems = btrfs_header_nritems(c); c 3534 fs/btrfs/ctree.c btrfs_node_key(c, &disk_key, mid); c 3537 fs/btrfs/ctree.c c->start, 0); c 3542 fs/btrfs/ctree.c ASSERT(btrfs_header_level(c) == level); c 3544 fs/btrfs/ctree.c ret = tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid); c 3549 fs/btrfs/ctree.c copy_extent_buffer(split, c, c 3554 
fs/btrfs/ctree.c btrfs_set_header_nritems(c, mid); c 3557 fs/btrfs/ctree.c btrfs_mark_buffer_dirty(c); c 3565 fs/btrfs/ctree.c btrfs_tree_unlock(c); c 3566 fs/btrfs/ctree.c free_extent_buffer(c); c 5274 fs/btrfs/ctree.c struct extent_buffer *c; c 5282 fs/btrfs/ctree.c c = path->nodes[level]; c 5284 fs/btrfs/ctree.c if (slot >= btrfs_header_nritems(c)) { c 5297 fs/btrfs/ctree.c slot = btrfs_header_nritems(c) - 1; c 5299 fs/btrfs/ctree.c btrfs_item_key_to_cpu(c, &cur_key, slot); c 5301 fs/btrfs/ctree.c btrfs_node_key_to_cpu(c, &cur_key, slot); c 5312 fs/btrfs/ctree.c c = path->nodes[level]; c 5320 fs/btrfs/ctree.c btrfs_item_key_to_cpu(c, key, slot); c 5322 fs/btrfs/ctree.c u64 gen = btrfs_node_ptr_generation(c, slot); c 5328 fs/btrfs/ctree.c btrfs_node_key_to_cpu(c, key, slot); c 5350 fs/btrfs/ctree.c struct extent_buffer *c; c 5420 fs/btrfs/ctree.c c = path->nodes[level]; c 5421 fs/btrfs/ctree.c if (slot >= btrfs_header_nritems(c)) { c 5435 fs/btrfs/ctree.c next = c; c 5473 fs/btrfs/ctree.c c = path->nodes[level]; c 5475 fs/btrfs/ctree.c btrfs_tree_unlock_rw(c, path->locks[level]); c 5477 fs/btrfs/ctree.c free_extent_buffer(c); c 1493 fs/btrfs/ctree.h static inline struct btrfs_stripe *btrfs_stripe_nr(struct btrfs_chunk *c, c 1496 fs/btrfs/ctree.h unsigned long offset = (unsigned long)c; c 1502 fs/btrfs/ctree.h static inline char *btrfs_stripe_dev_uuid_nr(struct btrfs_chunk *c, int nr) c 1504 fs/btrfs/ctree.h return btrfs_stripe_dev_uuid(btrfs_stripe_nr(c, nr)); c 1508 fs/btrfs/ctree.h struct btrfs_chunk *c, int nr) c 1510 fs/btrfs/ctree.h return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr)); c 1514 fs/btrfs/ctree.h struct btrfs_chunk *c, int nr) c 1516 fs/btrfs/ctree.h return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr)); c 109 fs/btrfs/extent_io.c #define btrfs_debug_check_extent_io_range(c, s, e) do {} while (0) c 4068 fs/btrfs/ioctl.c int i, c; c 4093 fs/btrfs/ioctl.c for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) { c 4094 fs/btrfs/ioctl.c if (!list_empty(&info->block_groups[c])) c 4148 fs/btrfs/ioctl.c for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) { c 4149 fs/btrfs/ioctl.c if (!list_empty(&info->block_groups[c])) { c 4150 fs/btrfs/ioctl.c get_block_group_info(&info->block_groups[c], c 4442 fs/btrfs/ioctl.c const size_t c = 3 * sizeof(u64); c 4444 fs/btrfs/ioctl.c if (inodes->bytes_left >= c) { c 4445 fs/btrfs/ioctl.c inodes->bytes_left -= c; c 4451 fs/btrfs/ioctl.c inodes->bytes_missing += c - inodes->bytes_left; c 324 fs/btrfs/print-tree.c void btrfs_print_tree(struct extent_buffer *c, bool follow) c 331 fs/btrfs/print-tree.c if (!c) c 333 fs/btrfs/print-tree.c fs_info = c->fs_info; c 334 fs/btrfs/print-tree.c nr = btrfs_header_nritems(c); c 335 fs/btrfs/print-tree.c level = btrfs_header_level(c); c 337 fs/btrfs/print-tree.c btrfs_print_leaf(c); c 342 fs/btrfs/print-tree.c btrfs_header_bytenr(c), level, btrfs_header_generation(c), c 344 fs/btrfs/print-tree.c btrfs_header_owner(c)); c 345 fs/btrfs/print-tree.c print_eb_refs_lock(c); c 347 fs/btrfs/print-tree.c btrfs_node_key_to_cpu(c, &key, i); c 350 fs/btrfs/print-tree.c btrfs_node_blockptr(c, i), c 351 fs/btrfs/print-tree.c btrfs_node_ptr_generation(c, i)); c 359 fs/btrfs/print-tree.c btrfs_node_key_to_cpu(c, &first_key, i); c 360 fs/btrfs/print-tree.c next = read_tree_block(fs_info, btrfs_node_blockptr(c, i), c 361 fs/btrfs/print-tree.c btrfs_node_ptr_generation(c, i), c 10 fs/btrfs/print-tree.h void btrfs_print_tree(struct extent_buffer *c, bool follow); c 59 fs/ceph/caps.c static char *gcap_string(char *s, int c) c 61 fs/ceph/caps.c if 
(c & CEPH_CAP_GSHARED) c 63 fs/ceph/caps.c if (c & CEPH_CAP_GEXCL) c 65 fs/ceph/caps.c if (c & CEPH_CAP_GCACHE) c 67 fs/ceph/caps.c if (c & CEPH_CAP_GRD) c 69 fs/ceph/caps.c if (c & CEPH_CAP_GWR) c 71 fs/ceph/caps.c if (c & CEPH_CAP_GBUFFER) c 73 fs/ceph/caps.c if (c & CEPH_CAP_GWREXTEND) c 75 fs/ceph/caps.c if (c & CEPH_CAP_GLAZYIO) c 84 fs/ceph/caps.c int c; c 97 fs/ceph/caps.c c = (caps >> CEPH_CAP_SAUTH) & 3; c 98 fs/ceph/caps.c if (c) { c 100 fs/ceph/caps.c s = gcap_string(s, c); c 103 fs/ceph/caps.c c = (caps >> CEPH_CAP_SLINK) & 3; c 104 fs/ceph/caps.c if (c) { c 106 fs/ceph/caps.c s = gcap_string(s, c); c 109 fs/ceph/caps.c c = (caps >> CEPH_CAP_SXATTR) & 3; c 110 fs/ceph/caps.c if (c) { c 112 fs/ceph/caps.c s = gcap_string(s, c); c 115 fs/ceph/caps.c c = caps >> CEPH_CAP_SFILE; c 116 fs/ceph/caps.c if (c) { c 118 fs/ceph/caps.c s = gcap_string(s, c); c 121 fs/ceph/inode.c int c; c 127 fs/ceph/inode.c c = ceph_frag_compare(f, frag->frag); c 128 fs/ceph/inode.c if (c < 0) c 130 fs/ceph/inode.c else if (c > 0) c 163 fs/ceph/inode.c int c = ceph_frag_compare(f, frag->frag); c 164 fs/ceph/inode.c if (c < 0) c 166 fs/ceph/inode.c else if (c > 0) c 237 fs/ceph/super.c static int parse_fsopt_token(char *c, void *private) c 243 fs/ceph/super.c token = match_token((char *)c, fsopt_tokens, argstr); c 250 fs/ceph/super.c pr_err("bad option arg (not int) at '%s'\n", c); c 1033 fs/ceph/super.h extern const char *ceph_cap_string(int c); c 442 fs/ceph/xattr.c int c; c 449 fs/ceph/xattr.c c = strncmp(name, xattr->name, min(name_len, xattr->name_len)); c 450 fs/ceph/xattr.c if (c < 0) c 452 fs/ceph/xattr.c else if (c > 0) c 539 fs/ceph/xattr.c int c; c 545 fs/ceph/xattr.c c = strncmp(name, xattr->name, xattr->name_len); c 546 fs/ceph/xattr.c if (c == 0 && name_len > xattr->name_len) c 547 fs/ceph/xattr.c c = 1; c 548 fs/ceph/xattr.c if (c < 0) c 550 fs/ceph/xattr.c else if (c > 0) c 728 fs/cifs/cifs_debug.c char c[2] = { '\0' }; c 732 fs/cifs/cifs_debug.c rc = get_user(c[0], buffer); c 735 fs/cifs/cifs_debug.c if (strtobool(c, &bv) == 0) c 737 fs/cifs/cifs_debug.c else if ((c[0] > '1') && (c[0] <= '9')) c 738 fs/cifs/cifs_debug.c cifsFYI = (int) (c[0] - '0'); /* see cifs_debug.h for meanings */ c 197 fs/cifs/dfs_cache.c char c; c 200 fs/cifs/dfs_cache.c rc = get_user(c, buffer); c 204 fs/cifs/dfs_cache.c if (c != '0') c 811 fs/cifs/dir.c wchar_t c; c 816 fs/cifs/dir.c charlen = codepage->char2uni(&q->name[i], q->len - i, &c); c 820 fs/cifs/dir.c hash = partial_name_hash(cifs_toupper(c), hash); c 50 fs/d_path.c char c = *dname++; c 51 fs/d_path.c if (!c) c 53 fs/d_path.c *p++ = c; c 235 fs/dlm/lowcomms.c static void foreach_conn(void (*conn_func)(struct connection *c)) c 1634 fs/exec.c #define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e)) c 91 fs/ext4/balloc.c int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c; c 138 fs/ext4/balloc.c c = EXT4_B2C(sbi, itbl_blk + i - start); c 139 fs/ext4/balloc.c if ((c < num_clusters) || (c == inode_cluster) || c 140 fs/ext4/balloc.c (c == block_cluster) || (c == itbl_cluster)) c 142 fs/ext4/balloc.c if (c == num_clusters) { c 147 fs/ext4/balloc.c itbl_cluster = c; c 20 fs/ext4/hash.c __u32 a = in[0], b = in[1], c = in[2], d = in[3]; c 26 fs/ext4/hash.c b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d); c 44 fs/ext4/hash.c #define ROUND(f, a, b, c, d, x, s) \ c 45 fs/ext4/hash.c (a += f(b, c, d) + x, a = rol32(a, s)) c 55 fs/ext4/hash.c __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3]; c 58 fs/ext4/hash.c ROUND(F, a, b, c, 
d, in[0] + K1, 3); c 59 fs/ext4/hash.c ROUND(F, d, a, b, c, in[1] + K1, 7); c 60 fs/ext4/hash.c ROUND(F, c, d, a, b, in[2] + K1, 11); c 61 fs/ext4/hash.c ROUND(F, b, c, d, a, in[3] + K1, 19); c 62 fs/ext4/hash.c ROUND(F, a, b, c, d, in[4] + K1, 3); c 63 fs/ext4/hash.c ROUND(F, d, a, b, c, in[5] + K1, 7); c 64 fs/ext4/hash.c ROUND(F, c, d, a, b, in[6] + K1, 11); c 65 fs/ext4/hash.c ROUND(F, b, c, d, a, in[7] + K1, 19); c 68 fs/ext4/hash.c ROUND(G, a, b, c, d, in[1] + K2, 3); c 69 fs/ext4/hash.c ROUND(G, d, a, b, c, in[3] + K2, 5); c 70 fs/ext4/hash.c ROUND(G, c, d, a, b, in[5] + K2, 9); c 71 fs/ext4/hash.c ROUND(G, b, c, d, a, in[7] + K2, 13); c 72 fs/ext4/hash.c ROUND(G, a, b, c, d, in[0] + K2, 3); c 73 fs/ext4/hash.c ROUND(G, d, a, b, c, in[2] + K2, 5); c 74 fs/ext4/hash.c ROUND(G, c, d, a, b, in[4] + K2, 9); c 75 fs/ext4/hash.c ROUND(G, b, c, d, a, in[6] + K2, 13); c 78 fs/ext4/hash.c ROUND(H, a, b, c, d, in[3] + K3, 3); c 79 fs/ext4/hash.c ROUND(H, d, a, b, c, in[7] + K3, 9); c 80 fs/ext4/hash.c ROUND(H, c, d, a, b, in[2] + K3, 11); c 81 fs/ext4/hash.c ROUND(H, b, c, d, a, in[6] + K3, 15); c 82 fs/ext4/hash.c ROUND(H, a, b, c, d, in[1] + K3, 3); c 83 fs/ext4/hash.c ROUND(H, d, a, b, c, in[5] + K3, 9); c 84 fs/ext4/hash.c ROUND(H, c, d, a, b, in[0] + K3, 11); c 85 fs/ext4/hash.c ROUND(H, b, c, d, a, in[4] + K3, 15); c 89 fs/ext4/hash.c buf[2] += c; c 456 fs/ext4/namei.c struct dx_countlimit *c; c 463 fs/ext4/namei.c c = get_dx_countlimit(inode, dirent, &count_offset); c 464 fs/ext4/namei.c if (!c) { c 468 fs/ext4/namei.c limit = le16_to_cpu(c->limit); c 469 fs/ext4/namei.c count = le16_to_cpu(c->count); c 475 fs/ext4/namei.c t = (struct dx_tail *)(((struct dx_entry *)c) + limit); c 485 fs/ext4/namei.c struct dx_countlimit *c; c 492 fs/ext4/namei.c c = get_dx_countlimit(inode, dirent, &count_offset); c 493 fs/ext4/namei.c if (!c) { c 497 fs/ext4/namei.c limit = le16_to_cpu(c->limit); c 498 fs/ext4/namei.c count = le16_to_cpu(c->count); c 504 fs/ext4/namei.c t = (struct dx_tail *)(((struct dx_entry *)c) + limit); c 3492 fs/ext4/super.c int c = EXT4_B2C(sbi, b - first_block); c 3493 fs/ext4/super.c ext4_set_bit(c, buf); c 30 fs/f2fs/hash.c __u32 a = in[0], b = in[1], c = in[2], d = in[3]; c 36 fs/f2fs/hash.c b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d); c 36 fs/fat/dir.c static inline unsigned char fat_tolower(unsigned char c) c 38 fs/fat/dir.c return ((c >= 'A') && (c <= 'Z')) ? c+32 : c; c 193 fs/fat/dir.c fat_short2uni(struct nls_table *t, unsigned char *c, int clen, wchar_t *uni) c 197 fs/fat/dir.c charlen = t->char2uni(c, clen, uni); c 206 fs/fat/dir.c fat_short2lower_uni(struct nls_table *t, unsigned char *c, c 212 fs/fat/dir.c charlen = t->char2uni(c, clen, &wc); c 217 fs/fat/dir.c unsigned char nc = t->charset2lower[*c]; c 220 fs/fat/dir.c nc = *c; c 359 fs/fat/dir.c unsigned char c, work[MSDOS_NAME]; c 379 fs/fat/dir.c c = work[i]; c 380 fs/fat/dir.c if (!c) c 387 fs/fat/dir.c ptname[i] = nocase ? c : fat_tolower(c); c 389 fs/fat/dir.c if (c != ' ') { c 415 fs/fat/dir.c c = work[k]; c 416 fs/fat/dir.c if (!c) c 424 fs/fat/dir.c ptname[i] = nocase ? 
c : fat_tolower(c); c 426 fs/fat/dir.c if (c != ' ') { c 29 fs/fat/namei_msdos.c unsigned char c; c 44 fs/fat/namei_msdos.c c = 0; c 46 fs/fat/namei_msdos.c c = *name++; c 48 fs/fat/namei_msdos.c if (opts->name_check != 'r' && strchr(bad_chars, c)) c 50 fs/fat/namei_msdos.c if (opts->name_check == 's' && strchr(bad_if_strict, c)) c 52 fs/fat/namei_msdos.c if (c >= 'A' && c <= 'Z' && opts->name_check == 's') c 54 fs/fat/namei_msdos.c if (c < ' ' || c == ':' || c == '\\') c 64 fs/fat/namei_msdos.c if ((res == walk) && (c == 0xE5)) c 65 fs/fat/namei_msdos.c c = 0x05; c 66 fs/fat/namei_msdos.c if (c == '.') c 68 fs/fat/namei_msdos.c space = (c == ' '); c 69 fs/fat/namei_msdos.c *walk = (!opts->nocase && c >= 'a' && c <= 'z') ? c - 32 : c; c 73 fs/fat/namei_msdos.c if (opts->name_check == 's' && len && c != '.') { c 74 fs/fat/namei_msdos.c c = *name++; c 76 fs/fat/namei_msdos.c if (c != '.') c 79 fs/fat/namei_msdos.c while (c != '.' && len--) c 80 fs/fat/namei_msdos.c c = *name++; c 81 fs/fat/namei_msdos.c if (c == '.') { c 85 fs/fat/namei_msdos.c c = *name++; c 87 fs/fat/namei_msdos.c if (opts->name_check != 'r' && strchr(bad_chars, c)) c 90 fs/fat/namei_msdos.c strchr(bad_if_strict, c)) c 92 fs/fat/namei_msdos.c if (c < ' ' || c == ':' || c == '\\') c 94 fs/fat/namei_msdos.c if (c == '.') { c 99 fs/fat/namei_msdos.c if (c >= 'A' && c <= 'Z' && opts->name_check == 's') c 101 fs/fat/namei_msdos.c space = c == ' '; c 102 fs/fat/namei_msdos.c if (!opts->nocase && c >= 'a' && c <= 'z') c 103 fs/fat/namei_msdos.c *walk++ = c - 32; c 105 fs/fat/namei_msdos.c *walk++ = c; c 331 fs/fs_parser.c int c = strcmp(tbl[i-1].name, tbl[i].name); c 333 fs/fs_parser.c if (c == 0) { c 338 fs/fs_parser.c if (c > 0) { c 2725 fs/fuse/file.c size_t in_size, out_size, c; c 2812 fs/fuse/file.c c = copy_page_from_iter(ap.pages[i], 0, PAGE_SIZE, &ii); c 2813 fs/fuse/file.c if (c != PAGE_SIZE && iov_iter_count(&ii)) c 2881 fs/fuse/file.c c = copy_page_to_iter(ap.pages[i], 0, PAGE_SIZE, &ii); c 2882 fs/fuse/file.c if (c != PAGE_SIZE && iov_iter_count(&ii)) c 241 fs/gfs2/lock_dlm.c static void gfs2_reverse_hex(char *c, u64 value) c 243 fs/gfs2/lock_dlm.c *c = '0'; c 245 fs/gfs2/lock_dlm.c *c-- = hex_asc[value & 0x0f]; c 19 fs/hfsplus/unicode.c static inline u16 case_fold(u16 c) c 23 fs/hfsplus/unicode.c tmp = hfsplus_case_fold_table[c >> 8]; c 25 fs/hfsplus/unicode.c tmp = hfsplus_case_fold_table[tmp + (c & 0xff)]; c 27 fs/hfsplus/unicode.c tmp = c; c 350 fs/hfsplus/unicode.c wchar_t c; c 355 fs/hfsplus/unicode.c size = asc2unichar(sb, astr, len, &c); c 358 fs/hfsplus/unicode.c dstr = decompose_unichar(c, &dsize, dhangul); c 368 fs/hfsplus/unicode.c ustr->unicode[outlen++] = cpu_to_be16(c); c 391 fs/hfsplus/unicode.c wchar_t c; c 402 fs/hfsplus/unicode.c size = asc2unichar(sb, astr, len, &c); c 407 fs/hfsplus/unicode.c dstr = decompose_unichar(c, &dsize, dhangul); c 419 fs/hfsplus/unicode.c c2 = c; c 445 fs/hfsplus/unicode.c wchar_t c; c 459 fs/hfsplus/unicode.c size = asc2unichar(sb, astr1, len1, &c); c 464 fs/hfsplus/unicode.c dstr1 = decompose_unichar(c, &dsize1, c 467 fs/hfsplus/unicode.c c1 = c; c 474 fs/hfsplus/unicode.c size = asc2unichar(sb, astr2, len2, &c); c 479 fs/hfsplus/unicode.c dstr2 = decompose_unichar(c, &dsize2, c 482 fs/hfsplus/unicode.c c2 = c; c 96 fs/hpfs/dnode.c static void hpfs_pos_ins(loff_t *p, loff_t d, loff_t c) c 99 fs/hpfs/dnode.c int n = (*p & 0x3f) + c; c 102 fs/hpfs/dnode.c __func__, (int)*p, (int)c >> 8); c 108 fs/hpfs/dnode.c static void hpfs_pos_del(loff_t *p, loff_t d, loff_t c) c 111 
fs/hpfs/dnode.c int n = (*p & 0x3f) - c; c 114 fs/hpfs/dnode.c __func__, (int)*p, (int)c >> 8); c 181 fs/hpfs/dnode.c int c = hpfs_compare_names(s, name, namelen, de->name, de->namelen, de->last); c 182 fs/hpfs/dnode.c if (!c) { c 186 fs/hpfs/dnode.c if (c < 0) break; c 396 fs/hpfs/dnode.c int c; c 405 fs/hpfs/dnode.c if (!(c = hpfs_compare_names(i->i_sb, name, namelen, de->name, de->namelen, de->last))) { c 409 fs/hpfs/dnode.c if (c < 0) { c 420 fs/hpfs/dnode.c c = 1; c 423 fs/hpfs/dnode.c c = hpfs_add_to_dnode(i, dno, name, namelen, new_de, 0); c 425 fs/hpfs/dnode.c return c; c 848 fs/hpfs/dnode.c unsigned c; c 886 fs/hpfs/dnode.c c = 0; c 889 fs/hpfs/dnode.c if (!(++c & 077)) hpfs_error(inode->i_sb, c 892 fs/hpfs/dnode.c *posp = ((loff_t) le32_to_cpu(dnode->up) << 4) + c; c 1009 fs/hpfs/dnode.c int c; c 1061 fs/hpfs/dnode.c c = hpfs_compare_names(s, name1, name1len, de->name, de->namelen, de->last); c 1062 fs/hpfs/dnode.c if (c < 0 && de->down) { c 1077 fs/hpfs/dnode.c c = hpfs_compare_names(s, name2, name2len, de->name, de->namelen, de->last); c 1078 fs/hpfs/dnode.c if (c < 0 && !de->last) goto not_found; c 12 fs/hpfs/name.c static inline int not_allowed_char(unsigned char c) c 14 fs/hpfs/name.c return c<' ' || c=='"' || c=='*' || c=='/' || c==':' || c=='<' || c 15 fs/hpfs/name.c c=='>' || c=='?' || c=='\\' || c=='|'; c 18 fs/hpfs/name.c static inline int no_dos_char(unsigned char c) c 20 fs/hpfs/name.c return c=='+' || c==',' || c==';' || c=='=' || c=='[' || c==']'; c 156 fs/hpfs/super.c unsigned c; c 158 fs/hpfs/super.c c = hpfs_count_one_bitmap(s, le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[n])); c 159 fs/hpfs/super.c if (c != (unsigned)-1) c 160 fs/hpfs/super.c count += c; c 169 fs/hpfs/super.c unsigned c = hpfs_count_one_bitmap(s, sbi->sb_dmap); c 170 fs/hpfs/super.c if (c == (unsigned)-1) c 172 fs/hpfs/super.c sbi->sb_n_free_dnodes = c; c 24 fs/isofs/dir.c unsigned char c = old[i]; c 25 fs/isofs/dir.c if (!c) c 28 fs/isofs/dir.c if (c >= 'A' && c <= 'Z') c 29 fs/isofs/dir.c c |= 0x20; /* lower case */ c 32 fs/isofs/dir.c if (c == '.' 
&& i == len - 3 && old[i + 1] == ';' && old[i + 2] == '1') c 36 fs/isofs/dir.c if (c == ';' && i == len - 2 && old[i + 1] == '1') c 41 fs/isofs/dir.c if (c == ';' || c == '/') c 42 fs/isofs/dir.c c = '.'; c 44 fs/isofs/dir.c new[i] = c; c 180 fs/isofs/inode.c char c; c 192 fs/isofs/inode.c c = tolower(*name++); c 193 fs/isofs/inode.c hash = partial_name_hash(c, hash); c 27 fs/jffs2/background.c void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c) c 29 fs/jffs2/background.c assert_spin_locked(&c->erase_completion_lock); c 30 fs/jffs2/background.c if (c->gc_task && jffs2_thread_should_wake(c)) c 31 fs/jffs2/background.c send_sig(SIGHUP, c->gc_task, 1); c 35 fs/jffs2/background.c int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c) c 40 fs/jffs2/background.c BUG_ON(c->gc_task); c 42 fs/jffs2/background.c init_completion(&c->gc_thread_start); c 43 fs/jffs2/background.c init_completion(&c->gc_thread_exit); c 45 fs/jffs2/background.c tsk = kthread_run(jffs2_garbage_collect_thread, c, "jffs2_gcd_mtd%d", c->mtd->index); c 49 fs/jffs2/background.c complete(&c->gc_thread_exit); c 54 fs/jffs2/background.c wait_for_completion(&c->gc_thread_start); c 61 fs/jffs2/background.c void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c) c 64 fs/jffs2/background.c spin_lock(&c->erase_completion_lock); c 65 fs/jffs2/background.c if (c->gc_task) { c 66 fs/jffs2/background.c jffs2_dbg(1, "Killing GC task %d\n", c->gc_task->pid); c 67 fs/jffs2/background.c send_sig(SIGKILL, c->gc_task, 1); c 70 fs/jffs2/background.c spin_unlock(&c->erase_completion_lock); c 72 fs/jffs2/background.c wait_for_completion(&c->gc_thread_exit); c 77 fs/jffs2/background.c struct jffs2_sb_info *c = _c; c 85 fs/jffs2/background.c c->gc_task = current; c 86 fs/jffs2/background.c complete(&c->gc_thread_start); c 94 fs/jffs2/background.c spin_lock(&c->erase_completion_lock); c 95 fs/jffs2/background.c if (!jffs2_thread_should_wake(c)) { c 97 fs/jffs2/background.c spin_unlock(&c->erase_completion_lock); c 101 fs/jffs2/background.c spin_unlock(&c->erase_completion_lock); c 155 fs/jffs2/background.c if (jffs2_garbage_collect_pass(c) == -ENOSPC) { c 161 fs/jffs2/background.c spin_lock(&c->erase_completion_lock); c 162 fs/jffs2/background.c c->gc_task = NULL; c 163 fs/jffs2/background.c spin_unlock(&c->erase_completion_lock); c 164 fs/jffs2/background.c complete_and_exit(&c->gc_thread_exit, 0); c 27 fs/jffs2/build.c first_inode_chain(int *i, struct jffs2_sb_info *c) c 29 fs/jffs2/build.c for (; *i < c->inocache_hashsize; (*i)++) { c 30 fs/jffs2/build.c if (c->inocache_list[*i]) c 31 fs/jffs2/build.c return c->inocache_list[*i]; c 37 fs/jffs2/build.c next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c) c 43 fs/jffs2/build.c return first_inode_chain(i, c); c 46 fs/jffs2/build.c #define for_each_inode(i, c, ic) \ c 47 fs/jffs2/build.c for (i = 0, ic = first_inode_chain(&i, (c)); \ c 49 fs/jffs2/build.c ic = next_inode(&i, ic, (c))) c 52 fs/jffs2/build.c static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, c 68 fs/jffs2/build.c child_ic = jffs2_get_ino_cache(c, fd->ino); c 72 fs/jffs2/build.c jffs2_mark_node_obsolete(c, fd->raw); c 99 fs/jffs2/build.c static int jffs2_build_filesystem(struct jffs2_sb_info *c) c 111 fs/jffs2/build.c c->flags |= JFFS2_SB_FLAG_SCANNING; c 112 fs/jffs2/build.c ret = jffs2_scan_medium(c); c 113 fs/jffs2/build.c c->flags &= ~JFFS2_SB_FLAG_SCANNING; c 118 fs/jffs2/build.c jffs2_dbg_dump_block_lists_nolock(c); c 121 fs/jffs2/build.c c->flags |= JFFS2_SB_FLAG_BUILDING; c 
123 fs/jffs2/build.c for_each_inode(i, c, ic) { c 125 fs/jffs2/build.c jffs2_build_inode_pass1(c, ic, &dir_hardlinks); c 139 fs/jffs2/build.c for_each_inode(i, c, ic) { c 143 fs/jffs2/build.c jffs2_build_remove_unlinked_inode(c, ic, &dead_fds); c 153 fs/jffs2/build.c ic = jffs2_get_ino_cache(c, fd->ino); c 156 fs/jffs2/build.c jffs2_build_remove_unlinked_inode(c, ic, &dead_fds); c 170 fs/jffs2/build.c for_each_inode(i, c, ic) { c 178 fs/jffs2/build.c for_each_inode(i, c, ic) { c 214 fs/jffs2/build.c jffs2_build_xattr_subsystem(c); c 215 fs/jffs2/build.c c->flags &= ~JFFS2_SB_FLAG_BUILDING; c 220 fs/jffs2/build.c jffs2_rotate_lists(c); c 226 fs/jffs2/build.c for_each_inode(i, c, ic) { c 233 fs/jffs2/build.c jffs2_clear_xattr_subsystem(c); c 239 fs/jffs2/build.c static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, c 252 fs/jffs2/build.c jffs2_mark_node_obsolete(c, raw); c 277 fs/jffs2/build.c child_ic = jffs2_get_ino_cache(c, fd->ino); c 308 fs/jffs2/build.c static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c) c 315 fs/jffs2/build.c c->resv_blocks_deletion = 2; c 322 fs/jffs2/build.c size = c->flash_size / 50; /* 2% of flash size */ c 323 fs/jffs2/build.c size += c->nr_blocks * 100; /* And 100 bytes per eraseblock */ c 324 fs/jffs2/build.c size += c->sector_size - 1; /* ... and round up */ c 326 fs/jffs2/build.c c->resv_blocks_write = c->resv_blocks_deletion + (size / c->sector_size); c 330 fs/jffs2/build.c c->resv_blocks_gctrigger = c->resv_blocks_write + 1; c 334 fs/jffs2/build.c c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1; c 338 fs/jffs2/build.c c->resv_blocks_gcbad = 0;//c->resv_blocks_deletion + 2; c 344 fs/jffs2/build.c c->vdirty_blocks_gctrigger = c->resv_blocks_gctrigger; c 345 fs/jffs2/build.c if (jffs2_can_mark_obsolete(c)) c 346 fs/jffs2/build.c c->vdirty_blocks_gctrigger *= 10; c 350 fs/jffs2/build.c c->nospc_dirty_size = c->sector_size + (c->flash_size / 100); c 353 fs/jffs2/build.c c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks); c 355 fs/jffs2/build.c c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024); c 357 fs/jffs2/build.c c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024); c 359 fs/jffs2/build.c c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024); c 361 fs/jffs2/build.c c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024); c 363 fs/jffs2/build.c c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024); c 365 fs/jffs2/build.c c->nospc_dirty_size); c 367 fs/jffs2/build.c c->vdirty_blocks_gctrigger); c 370 fs/jffs2/build.c int jffs2_do_mount_fs(struct jffs2_sb_info *c) c 376 fs/jffs2/build.c c->free_size = c->flash_size; c 377 fs/jffs2/build.c c->nr_blocks = c->flash_size / c->sector_size; c 378 fs/jffs2/build.c size = sizeof(struct jffs2_eraseblock) * c->nr_blocks; c 380 fs/jffs2/build.c if (jffs2_blocks_use_vmalloc(c)) c 381 fs/jffs2/build.c c->blocks = vzalloc(size); c 384 fs/jffs2/build.c c->blocks = kzalloc(size, GFP_KERNEL); c 385 fs/jffs2/build.c if (!c->blocks) c 388 fs/jffs2/build.c for (i=0; i<c->nr_blocks; i++) { c 389 fs/jffs2/build.c INIT_LIST_HEAD(&c->blocks[i].list); c 390 fs/jffs2/build.c c->blocks[i].offset = i * c->sector_size; c 391 fs/jffs2/build.c c->blocks[i].free_size = c->sector_size; c 394 fs/jffs2/build.c INIT_LIST_HEAD(&c->clean_list); c 395 fs/jffs2/build.c INIT_LIST_HEAD(&c->very_dirty_list); c 396 fs/jffs2/build.c INIT_LIST_HEAD(&c->dirty_list); c 397 fs/jffs2/build.c INIT_LIST_HEAD(&c->erasable_list); c 398 fs/jffs2/build.c 
INIT_LIST_HEAD(&c->erasing_list); c 399 fs/jffs2/build.c INIT_LIST_HEAD(&c->erase_checking_list); c 400 fs/jffs2/build.c INIT_LIST_HEAD(&c->erase_pending_list); c 401 fs/jffs2/build.c INIT_LIST_HEAD(&c->erasable_pending_wbuf_list); c 402 fs/jffs2/build.c INIT_LIST_HEAD(&c->erase_complete_list); c 403 fs/jffs2/build.c INIT_LIST_HEAD(&c->free_list); c 404 fs/jffs2/build.c INIT_LIST_HEAD(&c->bad_list); c 405 fs/jffs2/build.c INIT_LIST_HEAD(&c->bad_used_list); c 406 fs/jffs2/build.c c->highest_ino = 1; c 407 fs/jffs2/build.c c->summary = NULL; c 409 fs/jffs2/build.c ret = jffs2_sum_init(c); c 413 fs/jffs2/build.c if (jffs2_build_filesystem(c)) { c 415 fs/jffs2/build.c jffs2_free_ino_caches(c); c 416 fs/jffs2/build.c jffs2_free_raw_node_refs(c); c 421 fs/jffs2/build.c jffs2_calc_trigger_levels(c); c 426 fs/jffs2/build.c kvfree(c->blocks); c 148 fs/jffs2/compr.c uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, c 159 fs/jffs2/compr.c if (c->mount_opts.override_compr) c 160 fs/jffs2/compr.c mode = c->mount_opts.compr; c 253 fs/jffs2/compr.c int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, c 73 fs/jffs2/compr.h uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, c 77 fs/jffs2/compr.h int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, c 90 fs/jffs2/compr_rubin.c int c; c 97 fs/jffs2/compr_rubin.c for (c=0; c<8; c++) c 98 fs/jffs2/compr_rubin.c rs->bits[c] = bits[c]; c 168 fs/jffs2/compr_rubin.c int c, bits = 0; c 192 fs/jffs2/compr_rubin.c c = pullbit(&rs->pp); c 195 fs/jffs2/compr_rubin.c rec_q += c; c 398 fs/jffs2/compr_rubin.c int c; c 400 fs/jffs2/compr_rubin.c for (c=0; c<8; c++) c 401 fs/jffs2/compr_rubin.c bits[c] = data_in[c]; c 28 fs/jffs2/debug.c __jffs2_dbg_acct_sanity_check_nolock(struct jffs2_sb_info *c, c 33 fs/jffs2/debug.c jeb->unchecked_size != c->sector_size)) { c 37 fs/jffs2/debug.c jeb->wasted_size, jeb->unchecked_size, c->sector_size); c 41 fs/jffs2/debug.c if (unlikely(c->used_size + c->dirty_size + c->free_size + c->erasing_size + c->bad_size c 42 fs/jffs2/debug.c + c->wasted_size + c->unchecked_size != c->flash_size)) { c 45 fs/jffs2/debug.c c->free_size, c->dirty_size, c->used_size, c->erasing_size, c->bad_size, c 46 fs/jffs2/debug.c c->wasted_size, c->unchecked_size, c->flash_size); c 52 fs/jffs2/debug.c __jffs2_dbg_acct_sanity_check(struct jffs2_sb_info *c, c 55 fs/jffs2/debug.c spin_lock(&c->erase_completion_lock); c 56 fs/jffs2/debug.c jffs2_dbg_acct_sanity_check_nolock(c, jeb); c 57 fs/jffs2/debug.c spin_unlock(&c->erase_completion_lock); c 125 fs/jffs2/debug.c __jffs2_dbg_prewrite_paranoia_check(struct jffs2_sb_info *c, c 136 fs/jffs2/debug.c ret = jffs2_flash_read(c, ofs, len, &retlen, buf); c 160 fs/jffs2/debug.c void __jffs2_dbg_superblock_counts(struct jffs2_sb_info *c) c 168 fs/jffs2/debug.c if (c->gcblock) { c 170 fs/jffs2/debug.c free += c->gcblock->free_size; c 171 fs/jffs2/debug.c dirty += c->gcblock->dirty_size; c 172 fs/jffs2/debug.c used += c->gcblock->used_size; c 173 fs/jffs2/debug.c wasted += c->gcblock->wasted_size; c 174 fs/jffs2/debug.c unchecked += c->gcblock->unchecked_size; c 176 fs/jffs2/debug.c if (c->nextblock) { c 178 fs/jffs2/debug.c free += c->nextblock->free_size; c 179 fs/jffs2/debug.c dirty += c->nextblock->dirty_size; c 180 fs/jffs2/debug.c used += c->nextblock->used_size; c 181 fs/jffs2/debug.c wasted += c->nextblock->wasted_size; c 182 fs/jffs2/debug.c unchecked += c->nextblock->unchecked_size; c 184 fs/jffs2/debug.c 
list_for_each_entry(jeb, &c->clean_list, list) { c 192 fs/jffs2/debug.c list_for_each_entry(jeb, &c->very_dirty_list, list) { c 200 fs/jffs2/debug.c list_for_each_entry(jeb, &c->dirty_list, list) { c 208 fs/jffs2/debug.c list_for_each_entry(jeb, &c->erasable_list, list) { c 216 fs/jffs2/debug.c list_for_each_entry(jeb, &c->erasable_pending_wbuf_list, list) { c 224 fs/jffs2/debug.c list_for_each_entry(jeb, &c->erase_pending_list, list) { c 232 fs/jffs2/debug.c list_for_each_entry(jeb, &c->free_list, list) { c 240 fs/jffs2/debug.c list_for_each_entry(jeb, &c->bad_used_list, list) { c 249 fs/jffs2/debug.c list_for_each_entry(jeb, &c->erasing_list, list) { c 251 fs/jffs2/debug.c erasing += c->sector_size; c 253 fs/jffs2/debug.c list_for_each_entry(jeb, &c->erase_checking_list, list) { c 255 fs/jffs2/debug.c erasing += c->sector_size; c 257 fs/jffs2/debug.c list_for_each_entry(jeb, &c->erase_complete_list, list) { c 259 fs/jffs2/debug.c erasing += c->sector_size; c 261 fs/jffs2/debug.c list_for_each_entry(jeb, &c->bad_list, list) { c 263 fs/jffs2/debug.c bad += c->sector_size; c 268 fs/jffs2/debug.c if (sz != c->sz##_size) { \ c 270 fs/jffs2/debug.c #sz, sz, #sz, c->sz##_size); \ c 285 fs/jffs2/debug.c if (nr_counted != c->nr_blocks) { c 287 fs/jffs2/debug.c __func__, nr_counted, c->nr_blocks); c 292 fs/jffs2/debug.c __jffs2_dbg_dump_block_lists_nolock(c); c 301 fs/jffs2/debug.c __jffs2_dbg_acct_paranoia_check(struct jffs2_sb_info *c, c 304 fs/jffs2/debug.c spin_lock(&c->erase_completion_lock); c 305 fs/jffs2/debug.c __jffs2_dbg_acct_paranoia_check_nolock(c, jeb); c 306 fs/jffs2/debug.c spin_unlock(&c->erase_completion_lock); c 310 fs/jffs2/debug.c __jffs2_dbg_acct_paranoia_check_nolock(struct jffs2_sb_info *c, c 319 fs/jffs2/debug.c uint32_t totlen = ref_totlen(c, jeb, ref2); c 322 fs/jffs2/debug.c ref_offset(ref2) > jeb->offset + c->sector_size) { c 365 fs/jffs2/debug.c && my_used_size + my_unchecked_size + my_dirty_size != c->sector_size) { c 368 fs/jffs2/debug.c c->sector_size); c 373 fs/jffs2/debug.c if (!(c->flags & (JFFS2_SB_FLAG_BUILDING|JFFS2_SB_FLAG_SCANNING))) c 374 fs/jffs2/debug.c __jffs2_dbg_superblock_counts(c); c 379 fs/jffs2/debug.c __jffs2_dbg_dump_node_refs_nolock(c, jeb); c 381 fs/jffs2/debug.c __jffs2_dbg_dump_block_lists_nolock(c); c 392 fs/jffs2/debug.c __jffs2_dbg_dump_node_refs(struct jffs2_sb_info *c, c 395 fs/jffs2/debug.c spin_lock(&c->erase_completion_lock); c 396 fs/jffs2/debug.c __jffs2_dbg_dump_node_refs_nolock(c, jeb); c 397 fs/jffs2/debug.c spin_unlock(&c->erase_completion_lock); c 401 fs/jffs2/debug.c __jffs2_dbg_dump_node_refs_nolock(struct jffs2_sb_info *c, c 435 fs/jffs2/debug.c __jffs2_dbg_dump_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) c 437 fs/jffs2/debug.c spin_lock(&c->erase_completion_lock); c 439 fs/jffs2/debug.c spin_unlock(&c->erase_completion_lock); c 459 fs/jffs2/debug.c __jffs2_dbg_dump_block_lists(struct jffs2_sb_info *c) c 461 fs/jffs2/debug.c spin_lock(&c->erase_completion_lock); c 462 fs/jffs2/debug.c __jffs2_dbg_dump_block_lists_nolock(c); c 463 fs/jffs2/debug.c spin_unlock(&c->erase_completion_lock); c 467 fs/jffs2/debug.c __jffs2_dbg_dump_block_lists_nolock(struct jffs2_sb_info *c) c 471 fs/jffs2/debug.c printk(JFFS2_DBG "flash_size: %#08x\n", c->flash_size); c 472 fs/jffs2/debug.c printk(JFFS2_DBG "used_size: %#08x\n", c->used_size); c 473 fs/jffs2/debug.c printk(JFFS2_DBG "dirty_size: %#08x\n", c->dirty_size); c 474 fs/jffs2/debug.c printk(JFFS2_DBG "wasted_size: %#08x\n", c->wasted_size); c 475 fs/jffs2/debug.c 
printk(JFFS2_DBG "unchecked_size: %#08x\n", c->unchecked_size); c 476 fs/jffs2/debug.c printk(JFFS2_DBG "free_size: %#08x\n", c->free_size); c 477 fs/jffs2/debug.c printk(JFFS2_DBG "erasing_size: %#08x\n", c->erasing_size); c 478 fs/jffs2/debug.c printk(JFFS2_DBG "bad_size: %#08x\n", c->bad_size); c 479 fs/jffs2/debug.c printk(JFFS2_DBG "sector_size: %#08x\n", c->sector_size); c 481 fs/jffs2/debug.c c->sector_size * c->resv_blocks_write); c 483 fs/jffs2/debug.c if (c->nextblock) c 485 fs/jffs2/debug.c c->nextblock->offset, c->nextblock->used_size, c 486 fs/jffs2/debug.c c->nextblock->dirty_size, c->nextblock->wasted_size, c 487 fs/jffs2/debug.c c->nextblock->unchecked_size, c->nextblock->free_size); c 491 fs/jffs2/debug.c if (c->gcblock) c 493 fs/jffs2/debug.c c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size, c 494 fs/jffs2/debug.c c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size); c 498 fs/jffs2/debug.c if (list_empty(&c->clean_list)) { c 505 fs/jffs2/debug.c list_for_each(this, &c->clean_list) { c 520 fs/jffs2/debug.c if (list_empty(&c->very_dirty_list)) { c 527 fs/jffs2/debug.c list_for_each(this, &c->very_dirty_list) { c 543 fs/jffs2/debug.c if (list_empty(&c->dirty_list)) { c 550 fs/jffs2/debug.c list_for_each(this, &c->dirty_list) { c 566 fs/jffs2/debug.c if (list_empty(&c->erasable_list)) { c 571 fs/jffs2/debug.c list_for_each(this, &c->erasable_list) { c 582 fs/jffs2/debug.c if (list_empty(&c->erasing_list)) { c 587 fs/jffs2/debug.c list_for_each(this, &c->erasing_list) { c 597 fs/jffs2/debug.c if (list_empty(&c->erase_checking_list)) { c 602 fs/jffs2/debug.c list_for_each(this, &c->erase_checking_list) { c 613 fs/jffs2/debug.c if (list_empty(&c->erase_pending_list)) { c 618 fs/jffs2/debug.c list_for_each(this, &c->erase_pending_list) { c 629 fs/jffs2/debug.c if (list_empty(&c->erasable_pending_wbuf_list)) { c 634 fs/jffs2/debug.c list_for_each(this, &c->erasable_pending_wbuf_list) { c 645 fs/jffs2/debug.c if (list_empty(&c->free_list)) { c 650 fs/jffs2/debug.c list_for_each(this, &c->free_list) { c 661 fs/jffs2/debug.c if (list_empty(&c->bad_list)) { c 666 fs/jffs2/debug.c list_for_each(this, &c->bad_list) { c 677 fs/jffs2/debug.c if (list_empty(&c->bad_used_list)) { c 682 fs/jffs2/debug.c list_for_each(this, &c->bad_used_list) { c 773 fs/jffs2/debug.c __jffs2_dbg_dump_node(struct jffs2_sb_info *c, uint32_t ofs) c 783 fs/jffs2/debug.c ret = jffs2_flash_read(c, ofs, len, &retlen, (unsigned char *)&node); c 173 fs/jffs2/debug.h __jffs2_dbg_acct_sanity_check_nolock(struct jffs2_sb_info *c, c 176 fs/jffs2/debug.h __jffs2_dbg_acct_sanity_check(struct jffs2_sb_info *c, c 185 fs/jffs2/debug.h __jffs2_dbg_acct_paranoia_check(struct jffs2_sb_info *c, c 188 fs/jffs2/debug.h __jffs2_dbg_acct_paranoia_check_nolock(struct jffs2_sb_info *c, c 191 fs/jffs2/debug.h __jffs2_dbg_prewrite_paranoia_check(struct jffs2_sb_info *c, c 196 fs/jffs2/debug.h __jffs2_dbg_dump_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); c 200 fs/jffs2/debug.h __jffs2_dbg_dump_block_lists(struct jffs2_sb_info *c); c 202 fs/jffs2/debug.h __jffs2_dbg_dump_block_lists_nolock(struct jffs2_sb_info *c); c 204 fs/jffs2/debug.h __jffs2_dbg_dump_node_refs(struct jffs2_sb_info *c, c 207 fs/jffs2/debug.h __jffs2_dbg_dump_node_refs_nolock(struct jffs2_sb_info *c, c 216 fs/jffs2/debug.h __jffs2_dbg_dump_node(struct jffs2_sb_info *c, uint32_t ofs); c 223 fs/jffs2/debug.h #define jffs2_dbg_acct_paranoia_check(c, jeb) \ c 224 fs/jffs2/debug.h __jffs2_dbg_acct_paranoia_check(c,jeb) c 
225 fs/jffs2/debug.h #define jffs2_dbg_acct_paranoia_check_nolock(c, jeb) \ c 226 fs/jffs2/debug.h __jffs2_dbg_acct_paranoia_check_nolock(c,jeb) c 227 fs/jffs2/debug.h #define jffs2_dbg_prewrite_paranoia_check(c, ofs, len) \ c 228 fs/jffs2/debug.h __jffs2_dbg_prewrite_paranoia_check(c, ofs, len) c 232 fs/jffs2/debug.h #define jffs2_dbg_acct_paranoia_check(c, jeb) c 233 fs/jffs2/debug.h #define jffs2_dbg_acct_paranoia_check_nolock(c, jeb) c 234 fs/jffs2/debug.h #define jffs2_dbg_prewrite_paranoia_check(c, ofs, len) c 238 fs/jffs2/debug.h #define jffs2_dbg_dump_jeb(c, jeb) \ c 239 fs/jffs2/debug.h __jffs2_dbg_dump_jeb(c, jeb); c 242 fs/jffs2/debug.h #define jffs2_dbg_dump_block_lists(c) \ c 243 fs/jffs2/debug.h __jffs2_dbg_dump_block_lists(c) c 244 fs/jffs2/debug.h #define jffs2_dbg_dump_block_lists_nolock(c) \ c 245 fs/jffs2/debug.h __jffs2_dbg_dump_block_lists_nolock(c) c 252 fs/jffs2/debug.h #define jffs2_dbg_dump_node(c, ofs) \ c 253 fs/jffs2/debug.h __jffs2_dbg_dump_node(c, ofs); c 255 fs/jffs2/debug.h #define jffs2_dbg_dump_jeb(c, jeb) c 257 fs/jffs2/debug.h #define jffs2_dbg_dump_block_lists(c) c 258 fs/jffs2/debug.h #define jffs2_dbg_dump_block_lists_nolock(c) c 262 fs/jffs2/debug.h #define jffs2_dbg_dump_node(c, ofs) c 266 fs/jffs2/debug.h #define jffs2_dbg_acct_sanity_check(c, jeb) \ c 267 fs/jffs2/debug.h __jffs2_dbg_acct_sanity_check(c, jeb) c 268 fs/jffs2/debug.h #define jffs2_dbg_acct_sanity_check_nolock(c, jeb) \ c 269 fs/jffs2/debug.h __jffs2_dbg_acct_sanity_check_nolock(c, jeb) c 271 fs/jffs2/debug.h #define jffs2_dbg_acct_sanity_check(c, jeb) c 272 fs/jffs2/debug.h #define jffs2_dbg_acct_sanity_check_nolock(c, jeb) c 165 fs/jffs2/dir.c struct jffs2_sb_info *c; c 173 fs/jffs2/dir.c c = JFFS2_SB_INFO(dir_i->i_sb); c 200 fs/jffs2/dir.c ret = jffs2_do_create(c, dir_f, f, ri, &dentry->d_name); c 226 fs/jffs2/dir.c struct jffs2_sb_info *c = JFFS2_SB_INFO(dir_i->i_sb); c 232 fs/jffs2/dir.c ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name, c 245 fs/jffs2/dir.c struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dentry->d_sb); c 264 fs/jffs2/dir.c ret = jffs2_do_link(c, dir_f, f->inocache->ino, type, dentry->d_name.name, dentry->d_name.len, now); c 282 fs/jffs2/dir.c struct jffs2_sb_info *c; c 302 fs/jffs2/dir.c c = JFFS2_SB_INFO(dir_i->i_sb); c 308 fs/jffs2/dir.c ret = jffs2_reserve_space(c, sizeof(*ri) + targetlen, &alloclen, c 320 fs/jffs2/dir.c jffs2_complete_reservation(c); c 337 fs/jffs2/dir.c fn = jffs2_write_dnode(c, f, ri, target, targetlen, ALLOC_NORMAL); c 344 fs/jffs2/dir.c jffs2_complete_reservation(c); c 354 fs/jffs2/dir.c jffs2_complete_reservation(c); c 369 fs/jffs2/dir.c jffs2_complete_reservation(c); c 379 fs/jffs2/dir.c ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen, c 387 fs/jffs2/dir.c jffs2_complete_reservation(c); c 409 fs/jffs2/dir.c fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, ALLOC_NORMAL); c 414 fs/jffs2/dir.c jffs2_complete_reservation(c); c 427 fs/jffs2/dir.c jffs2_add_fd_to_list(c, fd, &dir_f->dents); c 430 fs/jffs2/dir.c jffs2_complete_reservation(c); c 444 fs/jffs2/dir.c struct jffs2_sb_info *c; c 460 fs/jffs2/dir.c c = JFFS2_SB_INFO(dir_i->i_sb); c 466 fs/jffs2/dir.c ret = jffs2_reserve_space(c, sizeof(*ri), &alloclen, ALLOC_NORMAL, c 478 fs/jffs2/dir.c jffs2_complete_reservation(c); c 495 fs/jffs2/dir.c fn = jffs2_write_dnode(c, f, ri, NULL, 0, ALLOC_NORMAL); c 502 fs/jffs2/dir.c jffs2_complete_reservation(c); c 512 fs/jffs2/dir.c jffs2_complete_reservation(c); c 522 fs/jffs2/dir.c ret = jffs2_reserve_space(c, 
sizeof(*rd)+namelen, &alloclen, c 530 fs/jffs2/dir.c jffs2_complete_reservation(c); c 552 fs/jffs2/dir.c fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, ALLOC_NORMAL); c 557 fs/jffs2/dir.c jffs2_complete_reservation(c); c 571 fs/jffs2/dir.c jffs2_add_fd_to_list(c, fd, &dir_f->dents); c 574 fs/jffs2/dir.c jffs2_complete_reservation(c); c 586 fs/jffs2/dir.c struct jffs2_sb_info *c = JFFS2_SB_INFO(dir_i->i_sb); c 598 fs/jffs2/dir.c ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name, c 611 fs/jffs2/dir.c struct jffs2_sb_info *c; c 627 fs/jffs2/dir.c c = JFFS2_SB_INFO(dir_i->i_sb); c 636 fs/jffs2/dir.c ret = jffs2_reserve_space(c, sizeof(*ri) + devlen, &alloclen, c 648 fs/jffs2/dir.c jffs2_complete_reservation(c); c 664 fs/jffs2/dir.c fn = jffs2_write_dnode(c, f, ri, (char *)&dev, devlen, ALLOC_NORMAL); c 671 fs/jffs2/dir.c jffs2_complete_reservation(c); c 681 fs/jffs2/dir.c jffs2_complete_reservation(c); c 691 fs/jffs2/dir.c ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen, c 699 fs/jffs2/dir.c jffs2_complete_reservation(c); c 724 fs/jffs2/dir.c fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, ALLOC_NORMAL); c 729 fs/jffs2/dir.c jffs2_complete_reservation(c); c 742 fs/jffs2/dir.c jffs2_add_fd_to_list(c, fd, &dir_f->dents); c 745 fs/jffs2/dir.c jffs2_complete_reservation(c); c 760 fs/jffs2/dir.c struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dir_i->i_sb); c 801 fs/jffs2/dir.c ret = jffs2_do_link(c, JFFS2_INODE_INFO(new_dir_i), c 832 fs/jffs2/dir.c ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i), c 24 fs/jffs2/erase.c static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset); c 25 fs/jffs2/erase.c static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); c 26 fs/jffs2/erase.c static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); c 28 fs/jffs2/erase.c static void jffs2_erase_block(struct jffs2_sb_info *c, c 34 fs/jffs2/erase.c ret = jffs2_flash_erase(c, jeb); c 36 fs/jffs2/erase.c jffs2_erase_succeeded(c, jeb); c 45 fs/jffs2/erase.c jeb->offset, jeb->offset, jeb->offset + c->sector_size); c 49 fs/jffs2/erase.c mutex_lock(&c->erase_free_sem); c 50 fs/jffs2/erase.c spin_lock(&c->erase_completion_lock); c 51 fs/jffs2/erase.c list_move(&jeb->list, &c->erase_pending_list); c 52 fs/jffs2/erase.c c->erasing_size -= c->sector_size; c 53 fs/jffs2/erase.c c->dirty_size += c->sector_size; c 54 fs/jffs2/erase.c jeb->dirty_size = c->sector_size; c 55 fs/jffs2/erase.c spin_unlock(&c->erase_completion_lock); c 56 fs/jffs2/erase.c mutex_unlock(&c->erase_free_sem); c 63 fs/jffs2/erase.c instr->len = c->sector_size; c 65 fs/jffs2/erase.c ret = mtd_erase(c->mtd, instr); c 67 fs/jffs2/erase.c jffs2_erase_succeeded(c, jeb); c 80 fs/jffs2/erase.c mutex_lock(&c->erase_free_sem); c 81 fs/jffs2/erase.c spin_lock(&c->erase_completion_lock); c 82 fs/jffs2/erase.c list_move(&jeb->list, &c->erase_pending_list); c 83 fs/jffs2/erase.c c->erasing_size -= c->sector_size; c 84 fs/jffs2/erase.c c->dirty_size += c->sector_size; c 85 fs/jffs2/erase.c jeb->dirty_size = c->sector_size; c 86 fs/jffs2/erase.c spin_unlock(&c->erase_completion_lock); c 87 fs/jffs2/erase.c mutex_unlock(&c->erase_free_sem); c 98 fs/jffs2/erase.c jffs2_erase_failed(c, jeb, bad_offset); c 101 fs/jffs2/erase.c int jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count) c 106 fs/jffs2/erase.c mutex_lock(&c->erase_free_sem); c 108 fs/jffs2/erase.c spin_lock(&c->erase_completion_lock); c 
110 fs/jffs2/erase.c while (!list_empty(&c->erase_complete_list) || c 111 fs/jffs2/erase.c !list_empty(&c->erase_pending_list)) { c 113 fs/jffs2/erase.c if (!list_empty(&c->erase_complete_list)) { c 114 fs/jffs2/erase.c jeb = list_entry(c->erase_complete_list.next, struct jffs2_eraseblock, list); c 115 fs/jffs2/erase.c list_move(&jeb->list, &c->erase_checking_list); c 116 fs/jffs2/erase.c spin_unlock(&c->erase_completion_lock); c 117 fs/jffs2/erase.c mutex_unlock(&c->erase_free_sem); c 118 fs/jffs2/erase.c jffs2_mark_erased_block(c, jeb); c 126 fs/jffs2/erase.c } else if (!list_empty(&c->erase_pending_list)) { c 127 fs/jffs2/erase.c jeb = list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list); c 131 fs/jffs2/erase.c c->erasing_size += c->sector_size; c 132 fs/jffs2/erase.c c->wasted_size -= jeb->wasted_size; c 133 fs/jffs2/erase.c c->free_size -= jeb->free_size; c 134 fs/jffs2/erase.c c->used_size -= jeb->used_size; c 135 fs/jffs2/erase.c c->dirty_size -= jeb->dirty_size; c 137 fs/jffs2/erase.c jffs2_free_jeb_node_refs(c, jeb); c 138 fs/jffs2/erase.c list_add(&jeb->list, &c->erasing_list); c 139 fs/jffs2/erase.c spin_unlock(&c->erase_completion_lock); c 140 fs/jffs2/erase.c mutex_unlock(&c->erase_free_sem); c 142 fs/jffs2/erase.c jffs2_erase_block(c, jeb); c 150 fs/jffs2/erase.c mutex_lock(&c->erase_free_sem); c 151 fs/jffs2/erase.c spin_lock(&c->erase_completion_lock); c 154 fs/jffs2/erase.c spin_unlock(&c->erase_completion_lock); c 155 fs/jffs2/erase.c mutex_unlock(&c->erase_free_sem); c 161 fs/jffs2/erase.c static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) c 164 fs/jffs2/erase.c mutex_lock(&c->erase_free_sem); c 165 fs/jffs2/erase.c spin_lock(&c->erase_completion_lock); c 166 fs/jffs2/erase.c list_move_tail(&jeb->list, &c->erase_complete_list); c 168 fs/jffs2/erase.c jffs2_garbage_collect_trigger(c); c 169 fs/jffs2/erase.c spin_unlock(&c->erase_completion_lock); c 170 fs/jffs2/erase.c mutex_unlock(&c->erase_free_sem); c 171 fs/jffs2/erase.c wake_up(&c->erase_wait); c 174 fs/jffs2/erase.c static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset) c 178 fs/jffs2/erase.c if (jffs2_cleanmarker_oob(c) && (bad_offset != (uint32_t)MTD_FAIL_ADDR_UNKNOWN)) { c 181 fs/jffs2/erase.c if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) { c 183 fs/jffs2/erase.c mutex_lock(&c->erase_free_sem); c 184 fs/jffs2/erase.c spin_lock(&c->erase_completion_lock); c 185 fs/jffs2/erase.c list_move(&jeb->list, &c->erase_pending_list); c 186 fs/jffs2/erase.c c->erasing_size -= c->sector_size; c 187 fs/jffs2/erase.c c->dirty_size += c->sector_size; c 188 fs/jffs2/erase.c jeb->dirty_size = c->sector_size; c 189 fs/jffs2/erase.c spin_unlock(&c->erase_completion_lock); c 190 fs/jffs2/erase.c mutex_unlock(&c->erase_free_sem); c 195 fs/jffs2/erase.c mutex_lock(&c->erase_free_sem); c 196 fs/jffs2/erase.c spin_lock(&c->erase_completion_lock); c 197 fs/jffs2/erase.c c->erasing_size -= c->sector_size; c 198 fs/jffs2/erase.c c->bad_size += c->sector_size; c 199 fs/jffs2/erase.c list_move(&jeb->list, &c->bad_list); c 200 fs/jffs2/erase.c c->nr_erasing_blocks--; c 201 fs/jffs2/erase.c spin_unlock(&c->erase_completion_lock); c 202 fs/jffs2/erase.c mutex_unlock(&c->erase_free_sem); c 203 fs/jffs2/erase.c wake_up(&c->erase_wait); c 208 fs/jffs2/erase.c static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c, c 252 fs/jffs2/erase.c jeb->offset, jeb->offset + c->sector_size, ic->ino); c 277 fs/jffs2/erase.c 
jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic); c 280 fs/jffs2/erase.c jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic); c 285 fs/jffs2/erase.c jffs2_del_ino_cache(c, ic); c 289 fs/jffs2/erase.c void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) c 305 fs/jffs2/erase.c jffs2_remove_node_refs_from_ino_list(c, ref, jeb); c 313 fs/jffs2/erase.c static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t *bad_offset) c 321 fs/jffs2/erase.c ret = mtd_point(c->mtd, jeb->offset, c->sector_size, &retlen, c 328 fs/jffs2/erase.c if (retlen < c->sector_size) { c 332 fs/jffs2/erase.c mtd_unpoint(c->mtd, jeb->offset, retlen); c 341 fs/jffs2/erase.c mtd_unpoint(c->mtd, jeb->offset, c->sector_size); c 346 fs/jffs2/erase.c c->sector_size-retlen * sizeof(*wordebuf)); c 361 fs/jffs2/erase.c for (ofs = jeb->offset; ofs < jeb->offset + c->sector_size; ) { c 362 fs/jffs2/erase.c uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs); c 367 fs/jffs2/erase.c ret = mtd_read(c->mtd, ofs, readlen, &retlen, ebuf); c 400 fs/jffs2/erase.c static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) c 406 fs/jffs2/erase.c switch (jffs2_block_check_erase(c, jeb, &bad_offset)) { c 416 fs/jffs2/erase.c if (jffs2_cleanmarker_oob(c) || c->cleanmarker_size == 0) { c 418 fs/jffs2/erase.c if (jffs2_cleanmarker_oob(c)) { c 419 fs/jffs2/erase.c if (jffs2_write_nand_cleanmarker(c, jeb)) c 428 fs/jffs2/erase.c .totlen = cpu_to_je32(c->cleanmarker_size) c 431 fs/jffs2/erase.c jffs2_prealloc_raw_node_refs(c, jeb, 1); c 437 fs/jffs2/erase.c ret = jffs2_flash_direct_writev(c, vecs, 1, jeb->offset, &retlen); c 451 fs/jffs2/erase.c jeb->free_size = c->sector_size; c 453 fs/jffs2/erase.c mutex_lock(&c->erase_free_sem); c 454 fs/jffs2/erase.c spin_lock(&c->erase_completion_lock); c 456 fs/jffs2/erase.c c->erasing_size -= c->sector_size; c 457 fs/jffs2/erase.c c->free_size += c->sector_size; c 460 fs/jffs2/erase.c if (c->cleanmarker_size && !jffs2_cleanmarker_oob(c)) c 461 fs/jffs2/erase.c jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL, c->cleanmarker_size, NULL); c 463 fs/jffs2/erase.c list_move_tail(&jeb->list, &c->free_list); c 464 fs/jffs2/erase.c c->nr_erasing_blocks--; c 465 fs/jffs2/erase.c c->nr_free_blocks++; c 467 fs/jffs2/erase.c jffs2_dbg_acct_sanity_check_nolock(c, jeb); c 468 fs/jffs2/erase.c jffs2_dbg_acct_paranoia_check_nolock(c, jeb); c 470 fs/jffs2/erase.c spin_unlock(&c->erase_completion_lock); c 471 fs/jffs2/erase.c mutex_unlock(&c->erase_free_sem); c 472 fs/jffs2/erase.c wake_up(&c->erase_wait); c 476 fs/jffs2/erase.c jffs2_erase_failed(c, jeb, bad_offset); c 481 fs/jffs2/erase.c mutex_lock(&c->erase_free_sem); c 482 fs/jffs2/erase.c spin_lock(&c->erase_completion_lock); c 483 fs/jffs2/erase.c jffs2_garbage_collect_trigger(c); c 484 fs/jffs2/erase.c list_move(&jeb->list, &c->erase_complete_list); c 485 fs/jffs2/erase.c spin_unlock(&c->erase_completion_lock); c 486 fs/jffs2/erase.c mutex_unlock(&c->erase_free_sem); c 35 fs/jffs2/file.c struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); c 44 fs/jffs2/file.c jffs2_flush_wbuf_gc(c, inode->i_ino); c 82 fs/jffs2/file.c struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); c 94 fs/jffs2/file.c ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_SHIFT, c 151 fs/jffs2/file.c struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); c 159 fs/jffs2/file.c ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, c 186 
fs/jffs2/file.c fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_NORMAL); c 190 fs/jffs2/file.c jffs2_complete_reservation(c); c 194 fs/jffs2/file.c ret = jffs2_add_full_dnode_to_inode(c, f, fn); c 196 fs/jffs2/file.c jffs2_mark_node_obsolete(c, f->metadata->raw); c 203 fs/jffs2/file.c jffs2_mark_node_obsolete(c, fn->raw); c 205 fs/jffs2/file.c jffs2_complete_reservation(c); c 209 fs/jffs2/file.c jffs2_complete_reservation(c); c 244 fs/jffs2/file.c struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); c 292 fs/jffs2/file.c ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start, c 30 fs/jffs2/fs.c static int jffs2_flash_setup(struct jffs2_sb_info *c); c 36 fs/jffs2/fs.c struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); c 67 fs/jffs2/fs.c ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen); c 85 fs/jffs2/fs.c ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen, c 139 fs/jffs2/fs.c new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, alloc_type); c 144 fs/jffs2/fs.c jffs2_complete_reservation(c); c 161 fs/jffs2/fs.c jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size); c 164 fs/jffs2/fs.c jffs2_add_full_dnode_to_inode(c, f, new_metadata); c 172 fs/jffs2/fs.c jffs2_mark_node_obsolete(c, old_metadata->raw); c 178 fs/jffs2/fs.c jffs2_complete_reservation(c); c 211 fs/jffs2/fs.c struct jffs2_sb_info *c = JFFS2_SB_INFO(dentry->d_sb); c 216 fs/jffs2/fs.c buf->f_blocks = c->flash_size >> PAGE_SHIFT; c 221 fs/jffs2/fs.c buf->f_fsid.val[1] = c->mtd->index; c 223 fs/jffs2/fs.c spin_lock(&c->erase_completion_lock); c 224 fs/jffs2/fs.c avail = c->dirty_size + c->free_size; c 225 fs/jffs2/fs.c if (avail > c->sector_size * c->resv_blocks_write) c 226 fs/jffs2/fs.c avail -= c->sector_size * c->resv_blocks_write; c 229 fs/jffs2/fs.c spin_unlock(&c->erase_completion_lock); c 242 fs/jffs2/fs.c struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); c 249 fs/jffs2/fs.c jffs2_do_clear_inode(c, f); c 255 fs/jffs2/fs.c struct jffs2_sb_info *c; c 271 fs/jffs2/fs.c c = JFFS2_SB_INFO(inode->i_sb); c 276 fs/jffs2/fs.c ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node); c 333 fs/jffs2/fs.c ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size); c 397 fs/jffs2/fs.c struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); c 399 fs/jffs2/fs.c if (c->flags & JFFS2_SB_FLAG_RO && !sb_rdonly(sb)) c 407 fs/jffs2/fs.c jffs2_stop_garbage_collect_thread(c); c 408 fs/jffs2/fs.c mutex_lock(&c->alloc_sem); c 409 fs/jffs2/fs.c jffs2_flush_wbuf_pad(c); c 410 fs/jffs2/fs.c mutex_unlock(&c->alloc_sem); c 414 fs/jffs2/fs.c jffs2_start_garbage_collect_thread(c); c 426 fs/jffs2/fs.c struct jffs2_sb_info *c; c 433 fs/jffs2/fs.c c = JFFS2_SB_INFO(sb); c 465 fs/jffs2/fs.c ret = jffs2_do_new_inode (c, f, mode, ri); c 515 fs/jffs2/fs.c struct jffs2_sb_info *c; c 520 fs/jffs2/fs.c c = JFFS2_SB_INFO(sb); c 523 fs/jffs2/fs.c if (c->mtd->type == MTD_MLCNANDFLASH) c 527 fs/jffs2/fs.c if (c->mtd->type == MTD_NANDFLASH) { c 531 fs/jffs2/fs.c if (c->mtd->type == MTD_DATAFLASH) { c 537 fs/jffs2/fs.c c->flash_size = c->mtd->size; c 538 fs/jffs2/fs.c c->sector_size = c->mtd->erasesize; c 539 fs/jffs2/fs.c blocks = c->flash_size / c->sector_size; c 544 fs/jffs2/fs.c if ((c->sector_size * blocks) != c->flash_size) { c 545 fs/jffs2/fs.c c->flash_size = c->sector_size * blocks; c 547 fs/jffs2/fs.c c->flash_size / 1024); c 550 fs/jffs2/fs.c if (c->flash_size < 5*c->sector_size) { c 552 fs/jffs2/fs.c c->flash_size / c->sector_size); c 556 fs/jffs2/fs.c c->cleanmarker_size = sizeof(struct 
jffs2_unknown_node); c 559 fs/jffs2/fs.c ret = jffs2_flash_setup(c); c 563 fs/jffs2/fs.c c->inocache_hashsize = calculate_inocache_hashsize(c->flash_size); c 564 fs/jffs2/fs.c c->inocache_list = kcalloc(c->inocache_hashsize, sizeof(struct jffs2_inode_cache *), GFP_KERNEL); c 565 fs/jffs2/fs.c if (!c->inocache_list) { c 570 fs/jffs2/fs.c jffs2_init_xattr_subsystem(c); c 572 fs/jffs2/fs.c if ((ret = jffs2_do_mount_fs(c))) c 598 fs/jffs2/fs.c jffs2_start_garbage_collect_thread(c); c 602 fs/jffs2/fs.c jffs2_free_ino_caches(c); c 603 fs/jffs2/fs.c jffs2_free_raw_node_refs(c); c 604 fs/jffs2/fs.c kvfree(c->blocks); c 606 fs/jffs2/fs.c jffs2_clear_xattr_subsystem(c); c 607 fs/jffs2/fs.c kfree(c->inocache_list); c 609 fs/jffs2/fs.c jffs2_flash_cleanup(c); c 614 fs/jffs2/fs.c void jffs2_gc_release_inode(struct jffs2_sb_info *c, c 620 fs/jffs2/fs.c struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c, c 641 fs/jffs2/fs.c inode = ilookup(OFNI_BS_2SFFJ(c), inum); c 646 fs/jffs2/fs.c spin_lock(&c->inocache_lock); c 647 fs/jffs2/fs.c ic = jffs2_get_ino_cache(c, inum); c 651 fs/jffs2/fs.c spin_unlock(&c->inocache_lock); c 658 fs/jffs2/fs.c sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); c 660 fs/jffs2/fs.c spin_unlock(&c->inocache_lock); c 670 fs/jffs2/fs.c inode = jffs2_iget(OFNI_BS_2SFFJ(c), inum); c 685 fs/jffs2/fs.c static int jffs2_flash_setup(struct jffs2_sb_info *c) { c 688 fs/jffs2/fs.c if (jffs2_cleanmarker_oob(c)) { c 690 fs/jffs2/fs.c ret = jffs2_nand_flash_setup(c); c 696 fs/jffs2/fs.c if (jffs2_dataflash(c)) { c 697 fs/jffs2/fs.c ret = jffs2_dataflash_setup(c); c 703 fs/jffs2/fs.c if (jffs2_nor_wbuf_flash(c)) { c 704 fs/jffs2/fs.c ret = jffs2_nor_wbuf_flash_setup(c); c 710 fs/jffs2/fs.c if (jffs2_ubivol(c)) { c 711 fs/jffs2/fs.c ret = jffs2_ubivol_setup(c); c 719 fs/jffs2/fs.c void jffs2_flash_cleanup(struct jffs2_sb_info *c) { c 721 fs/jffs2/fs.c if (jffs2_cleanmarker_oob(c)) { c 722 fs/jffs2/fs.c jffs2_nand_flash_cleanup(c); c 726 fs/jffs2/fs.c if (jffs2_dataflash(c)) { c 727 fs/jffs2/fs.c jffs2_dataflash_cleanup(c); c 731 fs/jffs2/fs.c if (jffs2_nor_wbuf_flash(c)) { c 732 fs/jffs2/fs.c jffs2_nor_wbuf_flash_cleanup(c); c 736 fs/jffs2/fs.c if (jffs2_ubivol(c)) { c 737 fs/jffs2/fs.c jffs2_ubivol_cleanup(c); c 25 fs/jffs2/gc.c static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, c 28 fs/jffs2/gc.c static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, c 30 fs/jffs2/gc.c static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, c 32 fs/jffs2/gc.c static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, c 34 fs/jffs2/gc.c static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, c 37 fs/jffs2/gc.c static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, c 40 fs/jffs2/gc.c static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, c 44 fs/jffs2/gc.c static struct jffs2_eraseblock *jffs2_find_gc_block(struct jffs2_sb_info *c) c 55 fs/jffs2/gc.c if (!list_empty(&c->bad_used_list) && c->nr_free_blocks > c->resv_blocks_gcbad) { c 57 fs/jffs2/gc.c nextlist = &c->bad_used_list; c 58 fs/jffs2/gc.c } else if (n < 50 && !list_empty(&c->erasable_list)) { c 62 fs/jffs2/gc.c nextlist = &c->erasable_list; c 63 fs/jffs2/gc.c } else if (n < 110 && !list_empty(&c->very_dirty_list)) { c 66 fs/jffs2/gc.c nextlist = &c->very_dirty_list; c 67 
fs/jffs2/gc.c } else if (n < 126 && !list_empty(&c->dirty_list)) { c 69 fs/jffs2/gc.c nextlist = &c->dirty_list; c 70 fs/jffs2/gc.c } else if (!list_empty(&c->clean_list)) { c 72 fs/jffs2/gc.c nextlist = &c->clean_list; c 73 fs/jffs2/gc.c } else if (!list_empty(&c->dirty_list)) { c 76 fs/jffs2/gc.c nextlist = &c->dirty_list; c 77 fs/jffs2/gc.c } else if (!list_empty(&c->very_dirty_list)) { c 79 fs/jffs2/gc.c nextlist = &c->very_dirty_list; c 80 fs/jffs2/gc.c } else if (!list_empty(&c->erasable_list)) { c 83 fs/jffs2/gc.c nextlist = &c->erasable_list; c 84 fs/jffs2/gc.c } else if (!list_empty(&c->erasable_pending_wbuf_list)) { c 87 fs/jffs2/gc.c spin_unlock(&c->erase_completion_lock); c 88 fs/jffs2/gc.c jffs2_flush_wbuf_pad(c); c 89 fs/jffs2/gc.c spin_lock(&c->erase_completion_lock); c 99 fs/jffs2/gc.c c->gcblock = ret; c 112 fs/jffs2/gc.c c->wasted_size -= ret->wasted_size; c 113 fs/jffs2/gc.c c->dirty_size += ret->wasted_size; c 124 fs/jffs2/gc.c int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) c 134 fs/jffs2/gc.c if (mutex_lock_interruptible(&c->alloc_sem)) c 143 fs/jffs2/gc.c spin_lock(&c->erase_completion_lock); c 144 fs/jffs2/gc.c if (!c->unchecked_size) c 146 fs/jffs2/gc.c spin_unlock(&c->erase_completion_lock); c 149 fs/jffs2/gc.c xattr = jffs2_verify_xattr(c); c 151 fs/jffs2/gc.c spin_lock(&c->inocache_lock); c 157 fs/jffs2/gc.c want_ino = c->check_ino; c 158 fs/jffs2/gc.c for (bucket = c->check_ino % c->inocache_hashsize ; bucket < c->inocache_hashsize; bucket++) { c 159 fs/jffs2/gc.c for (ic = c->inocache_list[bucket]; ic; ic = ic->next) { c 174 fs/jffs2/gc.c c->check_ino = ((c->highest_ino + c->inocache_hashsize + 1) & c 175 fs/jffs2/gc.c ~c->inocache_hashsize) - 1; c 177 fs/jffs2/gc.c spin_unlock(&c->inocache_lock); c 180 fs/jffs2/gc.c c->unchecked_size); c 181 fs/jffs2/gc.c jffs2_dbg_dump_block_lists_nolock(c); c 182 fs/jffs2/gc.c mutex_unlock(&c->alloc_sem); c 189 fs/jffs2/gc.c c->check_ino = ic->ino + c->inocache_hashsize; c 194 fs/jffs2/gc.c spin_unlock(&c->inocache_lock); c 195 fs/jffs2/gc.c jffs2_xattr_delete_inode(c, ic); c 201 fs/jffs2/gc.c spin_unlock(&c->inocache_lock); c 208 fs/jffs2/gc.c spin_unlock(&c->inocache_lock); c 219 fs/jffs2/gc.c c->check_ino = ic->ino; c 221 fs/jffs2/gc.c mutex_unlock(&c->alloc_sem); c 222 fs/jffs2/gc.c sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); c 232 fs/jffs2/gc.c spin_unlock(&c->inocache_lock); c 237 fs/jffs2/gc.c ret = jffs2_do_crccheck_inode(c, ic); c 242 fs/jffs2/gc.c jffs2_set_inocache_state(c, ic, INO_STATE_CHECKEDABSENT); c 243 fs/jffs2/gc.c mutex_unlock(&c->alloc_sem); c 248 fs/jffs2/gc.c if (!list_empty(&c->erase_complete_list) || c 249 fs/jffs2/gc.c !list_empty(&c->erase_pending_list)) { c 250 fs/jffs2/gc.c spin_unlock(&c->erase_completion_lock); c 251 fs/jffs2/gc.c mutex_unlock(&c->alloc_sem); c 253 fs/jffs2/gc.c if (jffs2_erase_pending_blocks(c, 1)) c 257 fs/jffs2/gc.c mutex_lock(&c->alloc_sem); c 258 fs/jffs2/gc.c spin_lock(&c->erase_completion_lock); c 262 fs/jffs2/gc.c jeb = c->gcblock; c 265 fs/jffs2/gc.c jeb = jffs2_find_gc_block(c); c 269 fs/jffs2/gc.c if (c->nr_erasing_blocks) { c 270 fs/jffs2/gc.c spin_unlock(&c->erase_completion_lock); c 271 fs/jffs2/gc.c mutex_unlock(&c->alloc_sem); c 275 fs/jffs2/gc.c spin_unlock(&c->erase_completion_lock); c 276 fs/jffs2/gc.c mutex_unlock(&c->alloc_sem); c 282 fs/jffs2/gc.c D1(if (c->nextblock) c 283 fs/jffs2/gc.c printk(KERN_DEBUG "Nextblock at %08x, used_size %08x, dirty_size %08x, wasted_size %08x, free_size %08x\n", c->nextblock->offset, 
c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->free_size)); c 286 fs/jffs2/gc.c mutex_unlock(&c->alloc_sem); c 303 fs/jffs2/gc.c spin_unlock(&c->erase_completion_lock); c 304 fs/jffs2/gc.c mutex_unlock(&c->alloc_sem); c 315 fs/jffs2/gc.c spin_unlock(&c->erase_completion_lock); c 318 fs/jffs2/gc.c jffs2_garbage_collect_pristine(c, NULL, raw); c 321 fs/jffs2/gc.c jffs2_mark_node_obsolete(c, raw); c 323 fs/jffs2/gc.c mutex_unlock(&c->alloc_sem); c 334 fs/jffs2/gc.c spin_unlock(&c->erase_completion_lock); c 337 fs/jffs2/gc.c ret = jffs2_garbage_collect_xattr_datum(c, (struct jffs2_xattr_datum *)ic, raw); c 339 fs/jffs2/gc.c ret = jffs2_garbage_collect_xattr_ref(c, (struct jffs2_xattr_ref *)ic, raw); c 348 fs/jffs2/gc.c spin_lock(&c->inocache_lock); c 350 fs/jffs2/gc.c spin_unlock(&c->erase_completion_lock); c 393 fs/jffs2/gc.c mutex_unlock(&c->alloc_sem); c 394 fs/jffs2/gc.c spin_unlock(&c->inocache_lock); c 404 fs/jffs2/gc.c mutex_unlock(&c->alloc_sem); c 407 fs/jffs2/gc.c sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); c 427 fs/jffs2/gc.c spin_unlock(&c->inocache_lock); c 429 fs/jffs2/gc.c ret = jffs2_garbage_collect_pristine(c, ic, raw); c 431 fs/jffs2/gc.c spin_lock(&c->inocache_lock); c 433 fs/jffs2/gc.c wake_up(&c->inocache_wq); c 436 fs/jffs2/gc.c spin_unlock(&c->inocache_lock); c 452 fs/jffs2/gc.c spin_unlock(&c->inocache_lock); c 454 fs/jffs2/gc.c f = jffs2_gc_fetch_inode(c, inum, !nlink); c 464 fs/jffs2/gc.c ret = jffs2_garbage_collect_live(c, jeb, raw, f); c 466 fs/jffs2/gc.c jffs2_gc_release_inode(c, f); c 476 fs/jffs2/gc.c mutex_unlock(&c->alloc_sem); c 480 fs/jffs2/gc.c spin_lock(&c->erase_completion_lock); c 483 fs/jffs2/gc.c if (c->gcblock && !c->gcblock->used_size) { c 485 fs/jffs2/gc.c c->gcblock->offset); c 487 fs/jffs2/gc.c list_add_tail(&c->gcblock->list, &c->erase_pending_list); c 488 fs/jffs2/gc.c c->gcblock = NULL; c 489 fs/jffs2/gc.c c->nr_erasing_blocks++; c 490 fs/jffs2/gc.c jffs2_garbage_collect_trigger(c); c 492 fs/jffs2/gc.c spin_unlock(&c->erase_completion_lock); c 497 fs/jffs2/gc.c static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, c 511 fs/jffs2/gc.c spin_lock(&c->erase_completion_lock); c 513 fs/jffs2/gc.c if (c->gcblock != jeb) { c 514 fs/jffs2/gc.c spin_unlock(&c->erase_completion_lock); c 519 fs/jffs2/gc.c spin_unlock(&c->erase_completion_lock); c 524 fs/jffs2/gc.c spin_unlock(&c->erase_completion_lock); c 529 fs/jffs2/gc.c ret = jffs2_garbage_collect_metadata(c, jeb, f, fn); c 546 fs/jffs2/gc.c ret = jffs2_garbage_collect_pristine(c, f->inocache, raw); c 557 fs/jffs2/gc.c ret = jffs2_garbage_collect_hole(c, jeb, f, fn, start, end); c 560 fs/jffs2/gc.c ret = jffs2_garbage_collect_dnode(c, jeb, f, fn, start, end); c 572 fs/jffs2/gc.c ret = jffs2_garbage_collect_dirent(c, jeb, f, fd); c 574 fs/jffs2/gc.c ret = jffs2_garbage_collect_deletion_dirent(c, jeb, f, fd); c 581 fs/jffs2/gc.c jffs2_dbg_dump_node(c, ref_offset(raw)); c 591 fs/jffs2/gc.c static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, c 605 fs/jffs2/gc.c alloclen = rawlen = ref_totlen(c, c->gcblock, raw); c 613 fs/jffs2/gc.c ret = jffs2_reserve_space_gc(c, alloclen, &alloclen, rawlen); c 628 fs/jffs2/gc.c ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)node); c 698 fs/jffs2/gc.c phys_ofs = write_ofs(c); c 700 fs/jffs2/gc.c ret = jffs2_flash_write(c, phys_ofs, rawlen, &retlen, (char *)node); c 706 fs/jffs2/gc.c jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, 
rawlen, NULL); c 714 fs/jffs2/gc.c struct jffs2_eraseblock *jeb = &c->blocks[phys_ofs / c->sector_size]; c 720 fs/jffs2/gc.c jffs2_dbg_acct_sanity_check(c,jeb); c 721 fs/jffs2/gc.c jffs2_dbg_acct_paranoia_check(c, jeb); c 723 fs/jffs2/gc.c ret = jffs2_reserve_space_gc(c, rawlen, &dummy, rawlen); c 731 fs/jffs2/gc.c jffs2_dbg_acct_sanity_check(c,jeb); c 732 fs/jffs2/gc.c jffs2_dbg_acct_paranoia_check(c, jeb); c 744 fs/jffs2/gc.c jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, rawlen, ic); c 746 fs/jffs2/gc.c jffs2_mark_node_obsolete(c, raw); c 758 fs/jffs2/gc.c static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, c 784 fs/jffs2/gc.c ret = jffs2_read_dnode(c, f, fn, mdata, 0, mdatalen); c 796 fs/jffs2/gc.c ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &alloclen, c 834 fs/jffs2/gc.c new_fn = jffs2_write_dnode(c, f, &ri, mdata, mdatalen, ALLOC_GC); c 841 fs/jffs2/gc.c jffs2_mark_node_obsolete(c, fn->raw); c 850 fs/jffs2/gc.c static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, c 877 fs/jffs2/gc.c ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &alloclen, c 884 fs/jffs2/gc.c new_fd = jffs2_write_dirent(c, f, &rd, fd->name, rd.nsize, ALLOC_GC); c 891 fs/jffs2/gc.c jffs2_add_fd_to_list(c, new_fd, &f->dents); c 895 fs/jffs2/gc.c static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, c 906 fs/jffs2/gc.c if (!jffs2_can_mark_obsolete(c)) { c 913 fs/jffs2/gc.c uint32_t rawlen = ref_totlen(c, jeb, fd->raw); c 922 fs/jffs2/gc.c mutex_lock(&c->erase_free_sem); c 933 fs/jffs2/gc.c if (ref_totlen(c, NULL, raw) != rawlen) c 946 fs/jffs2/gc.c ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)rd); c 978 fs/jffs2/gc.c mutex_unlock(&c->erase_free_sem); c 985 fs/jffs2/gc.c return jffs2_garbage_collect_dirent(c, jeb, f, fd); c 988 fs/jffs2/gc.c mutex_unlock(&c->erase_free_sem); c 1008 fs/jffs2/gc.c jffs2_mark_node_obsolete(c, fd->raw); c 1013 fs/jffs2/gc.c static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, c 1033 fs/jffs2/gc.c ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(ri), &readlen, (char *)&ri); c 1101 fs/jffs2/gc.c ret = jffs2_reserve_space_gc(c, sizeof(ri), &alloclen, c 1108 fs/jffs2/gc.c new_fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_GC); c 1115 fs/jffs2/gc.c jffs2_add_full_dnode_to_inode(c, f, new_fn); c 1117 fs/jffs2/gc.c jffs2_mark_node_obsolete(c, f->metadata->raw); c 1158 fs/jffs2/gc.c jffs2_mark_node_obsolete(c, fn->raw); c 1164 fs/jffs2/gc.c static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *orig_jeb, c 1185 fs/jffs2/gc.c if (c->nr_free_blocks + c->nr_erasing_blocks > c->resv_blocks_gcmerge) { c 1230 fs/jffs2/gc.c jeb = &c->blocks[raw->flash_offset / c->sector_size]; c 1232 fs/jffs2/gc.c if (jeb == c->gcblock) { c 1286 fs/jffs2/gc.c jeb = &c->blocks[raw->flash_offset / c->sector_size]; c 1288 fs/jffs2/gc.c if (jeb == c->gcblock) { c 1347 fs/jffs2/gc.c ret = jffs2_reserve_space_gc(c, sizeof(ri) + JFFS2_MIN_DATA_LEN, c 1360 fs/jffs2/gc.c comprtype = jffs2_compress(c, f, writebuf, &comprbuf, &datalen, &cdatalen); c 1384 fs/jffs2/gc.c new_fn = jffs2_write_dnode(c, f, &ri, comprbuf, cdatalen, ALLOC_GC); c 1394 fs/jffs2/gc.c ret = jffs2_add_full_dnode_to_inode(c, f, new_fn); c 1397 fs/jffs2/gc.c jffs2_mark_node_obsolete(c, f->metadata->raw); c 201 fs/jffs2/malloc.c int jffs2_prealloc_raw_node_refs(struct jffs2_sb_info *c, c 23 
fs/jffs2/nodelist.c static void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, c 26 fs/jffs2/nodelist.c void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list) c 38 fs/jffs2/nodelist.c jffs2_mark_node_obsolete(c, new->raw); c 47 fs/jffs2/nodelist.c jffs2_mark_node_obsolete(c, ((*prev)->raw)); c 59 fs/jffs2/nodelist.c uint32_t jffs2_truncate_fragtree(struct jffs2_sb_info *c, struct rb_root *list, uint32_t size) c 76 fs/jffs2/nodelist.c jffs2_obsolete_node_frag(c, frag); c 101 fs/jffs2/nodelist.c static void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, c 110 fs/jffs2/nodelist.c jffs2_mark_node_obsolete(c, this->node->raw); c 169 fs/jffs2/nodelist.c static int no_overlapping_node(struct jffs2_sb_info *c, struct rb_root *root, c 215 fs/jffs2/nodelist.c static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *root, struct jffs2_node_frag *newfrag) c 246 fs/jffs2/nodelist.c return no_overlapping_node(c, root, newfrag, this, lastend); c 321 fs/jffs2/nodelist.c jffs2_obsolete_node_frag(c, this); c 339 fs/jffs2/nodelist.c jffs2_obsolete_node_frag(c, this); c 363 fs/jffs2/nodelist.c int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn) c 379 fs/jffs2/nodelist.c ret = jffs2_add_frag_to_fragtree(c, &f->fragtree, newfrag); c 408 fs/jffs2/nodelist.c void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state) c 410 fs/jffs2/nodelist.c spin_lock(&c->inocache_lock); c 412 fs/jffs2/nodelist.c wake_up(&c->inocache_wq); c 413 fs/jffs2/nodelist.c spin_unlock(&c->inocache_lock); c 421 fs/jffs2/nodelist.c struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino) c 425 fs/jffs2/nodelist.c ret = c->inocache_list[ino % c->inocache_hashsize]; c 436 fs/jffs2/nodelist.c void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new) c 440 fs/jffs2/nodelist.c spin_lock(&c->inocache_lock); c 442 fs/jffs2/nodelist.c new->ino = ++c->highest_ino; c 446 fs/jffs2/nodelist.c prev = &c->inocache_list[new->ino % c->inocache_hashsize]; c 454 fs/jffs2/nodelist.c spin_unlock(&c->inocache_lock); c 457 fs/jffs2/nodelist.c void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old) c 465 fs/jffs2/nodelist.c spin_lock(&c->inocache_lock); c 467 fs/jffs2/nodelist.c prev = &c->inocache_list[old->ino % c->inocache_hashsize]; c 484 fs/jffs2/nodelist.c spin_unlock(&c->inocache_lock); c 487 fs/jffs2/nodelist.c void jffs2_free_ino_caches(struct jffs2_sb_info *c) c 492 fs/jffs2/nodelist.c for (i=0; i < c->inocache_hashsize; i++) { c 493 fs/jffs2/nodelist.c this = c->inocache_list[i]; c 496 fs/jffs2/nodelist.c jffs2_xattr_free_inode(c, this); c 500 fs/jffs2/nodelist.c c->inocache_list[i] = NULL; c 504 fs/jffs2/nodelist.c void jffs2_free_raw_node_refs(struct jffs2_sb_info *c) c 509 fs/jffs2/nodelist.c for (i=0; i<c->nr_blocks; i++) { c 510 fs/jffs2/nodelist.c this = c->blocks[i].first_node; c 520 fs/jffs2/nodelist.c c->blocks[i].first_node = c->blocks[i].last_node = NULL; c 565 fs/jffs2/nodelist.c void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c) c 574 fs/jffs2/nodelist.c if (c) c 575 fs/jffs2/nodelist.c jffs2_mark_node_obsolete(c, frag->node->raw); c 585 fs/jffs2/nodelist.c struct jffs2_raw_node_ref *jffs2_link_node_ref(struct jffs2_sb_info *c, c 615 fs/jffs2/nodelist.c } else if (unlikely(ref_offset(ref) != jeb->offset + c->sector_size - jeb->free_size)) { c 616 
fs/jffs2/nodelist.c uint32_t last_len = ref_totlen(c, jeb, jeb->last_node); c 635 fs/jffs2/nodelist.c c->unchecked_size += len; c 641 fs/jffs2/nodelist.c c->used_size += len; c 646 fs/jffs2/nodelist.c c->dirty_size += len; c 650 fs/jffs2/nodelist.c c->free_size -= len; c 656 fs/jffs2/nodelist.c ref_totlen(c, jeb, ref); c 662 fs/jffs2/nodelist.c int jffs2_scan_dirty_space(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, c 677 fs/jffs2/nodelist.c c->dirty_size += size; c 678 fs/jffs2/nodelist.c c->free_size -= size; c 682 fs/jffs2/nodelist.c uint32_t ofs = jeb->offset + c->sector_size - jeb->free_size; c 685 fs/jffs2/nodelist.c jffs2_link_node_ref(c, jeb, ofs, size, NULL); c 692 fs/jffs2/nodelist.c static inline uint32_t __ref_totlen(struct jffs2_sb_info *c, c 703 fs/jffs2/nodelist.c jeb = &c->blocks[ref->flash_offset / c->sector_size]; c 713 fs/jffs2/nodelist.c ref_end = jeb->offset + c->sector_size - jeb->free_size; c 718 fs/jffs2/nodelist.c uint32_t __jffs2_ref_totlen(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, c 723 fs/jffs2/nodelist.c ret = __ref_totlen(c, jeb, ref); c 728 fs/jffs2/nodelist.c jeb = &c->blocks[ref->flash_offset / c->sector_size]; c 746 fs/jffs2/nodelist.c __jffs2_dbg_dump_node_refs_nolock(c, jeb); c 206 fs/jffs2/nodelist.h #define write_ofs(c) ((c)->nextblock->offset + (c)->sector_size - (c)->nextblock->free_size) c 296 fs/jffs2/nodelist.h static inline int jffs2_blocks_use_vmalloc(struct jffs2_sb_info *c) c 298 fs/jffs2/nodelist.h return ((c->flash_size / c->sector_size) * sizeof (struct jffs2_eraseblock)) > (128 * 1024); c 301 fs/jffs2/nodelist.h #define ref_totlen(a, b, c) __jffs2_ref_totlen((a), (b), (c)) c 309 fs/jffs2/nodelist.h #define VERYDIRTY(c, size) ((size) >= ((c)->sector_size / 2)) c 364 fs/jffs2/nodelist.h void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list); c 365 fs/jffs2/nodelist.h void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state); c 366 fs/jffs2/nodelist.h struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino); c 367 fs/jffs2/nodelist.h void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new); c 368 fs/jffs2/nodelist.h void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old); c 369 fs/jffs2/nodelist.h void jffs2_free_ino_caches(struct jffs2_sb_info *c); c 370 fs/jffs2/nodelist.h void jffs2_free_raw_node_refs(struct jffs2_sb_info *c); c 373 fs/jffs2/nodelist.h int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn); c 374 fs/jffs2/nodelist.h uint32_t jffs2_truncate_fragtree (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size); c 375 fs/jffs2/nodelist.h struct jffs2_raw_node_ref *jffs2_link_node_ref(struct jffs2_sb_info *c, c 379 fs/jffs2/nodelist.h extern uint32_t __jffs2_ref_totlen(struct jffs2_sb_info *c, c 384 fs/jffs2/nodelist.h int jffs2_thread_should_wake(struct jffs2_sb_info *c); c 385 fs/jffs2/nodelist.h int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, c 387 fs/jffs2/nodelist.h int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, c 389 fs/jffs2/nodelist.h struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c, c 392 fs/jffs2/nodelist.h void jffs2_complete_reservation(struct jffs2_sb_info *c); c 393 fs/jffs2/nodelist.h void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *raw); c 396 
fs/jffs2/nodelist.h int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint32_t mode, struct jffs2_raw_inode *ri); c 398 fs/jffs2/nodelist.h struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, c 401 fs/jffs2/nodelist.h struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f, c 404 fs/jffs2/nodelist.h int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, c 407 fs/jffs2/nodelist.h int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, struct jffs2_inode_info *f, c 409 fs/jffs2/nodelist.h int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, const char *name, c 411 fs/jffs2/nodelist.h int jffs2_do_link(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, c 416 fs/jffs2/nodelist.h int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, c 418 fs/jffs2/nodelist.h int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); c 419 fs/jffs2/nodelist.h void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f); c 435 fs/jffs2/nodelist.h int jffs2_prealloc_raw_node_refs(struct jffs2_sb_info *c, c 450 fs/jffs2/nodelist.h int jffs2_garbage_collect_pass(struct jffs2_sb_info *c); c 453 fs/jffs2/nodelist.h int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, c 456 fs/jffs2/nodelist.h int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, c 458 fs/jffs2/nodelist.h char *jffs2_getlink(struct jffs2_sb_info *c, struct jffs2_inode_info *f); c 461 fs/jffs2/nodelist.h int jffs2_scan_medium(struct jffs2_sb_info *c); c 462 fs/jffs2/nodelist.h void jffs2_rotate_lists(struct jffs2_sb_info *c); c 463 fs/jffs2/nodelist.h struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino); c 464 fs/jffs2/nodelist.h int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); c 465 fs/jffs2/nodelist.h int jffs2_scan_dirty_space(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t size); c 468 fs/jffs2/nodelist.h int jffs2_do_mount_fs(struct jffs2_sb_info *c); c 471 fs/jffs2/nodelist.h int jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count); c 472 fs/jffs2/nodelist.h void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); c 476 fs/jffs2/nodelist.h int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino); c 477 fs/jffs2/nodelist.h int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c); c 478 fs/jffs2/nodelist.h int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); c 479 fs/jffs2/nodelist.h int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); c 24 fs/jffs2/nodemgmt.c static int jffs2_rp_can_write(struct jffs2_sb_info *c) c 27 fs/jffs2/nodemgmt.c struct jffs2_mount_opts *opts = &c->mount_opts; c 29 fs/jffs2/nodemgmt.c avail = c->dirty_size + c->free_size + c->unchecked_size + c 30 fs/jffs2/nodemgmt.c c->erasing_size - c->resv_blocks_write * c->sector_size c 31 fs/jffs2/nodemgmt.c - c->nospc_dirty_size; c 37 fs/jffs2/nodemgmt.c opts->rp_size, c->dirty_size, c->free_size, c 38 fs/jffs2/nodemgmt.c c->erasing_size, c->unchecked_size, c 39 fs/jffs2/nodemgmt.c c->nr_erasing_blocks, avail, c->nospc_dirty_size); c 71 fs/jffs2/nodemgmt.c static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, c 74 fs/jffs2/nodemgmt.c int 
jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, c 78 fs/jffs2/nodemgmt.c int blocksneeded = c->resv_blocks_write; c 83 fs/jffs2/nodemgmt.c mutex_lock(&c->alloc_sem); c 87 fs/jffs2/nodemgmt.c spin_lock(&c->erase_completion_lock); c 93 fs/jffs2/nodemgmt.c if (prio != ALLOC_DELETION && !jffs2_rp_can_write(c)) { c 100 fs/jffs2/nodemgmt.c while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) { c 115 fs/jffs2/nodemgmt.c dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size; c 116 fs/jffs2/nodemgmt.c if (dirty < c->nospc_dirty_size) { c 117 fs/jffs2/nodemgmt.c if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) { c 123 fs/jffs2/nodemgmt.c dirty, c->unchecked_size, c 124 fs/jffs2/nodemgmt.c c->sector_size); c 126 fs/jffs2/nodemgmt.c spin_unlock(&c->erase_completion_lock); c 127 fs/jffs2/nodemgmt.c mutex_unlock(&c->alloc_sem); c 140 fs/jffs2/nodemgmt.c avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size; c 141 fs/jffs2/nodemgmt.c if ( (avail / c->sector_size) <= blocksneeded) { c 142 fs/jffs2/nodemgmt.c if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) { c 149 fs/jffs2/nodemgmt.c avail, blocksneeded * c->sector_size); c 150 fs/jffs2/nodemgmt.c spin_unlock(&c->erase_completion_lock); c 151 fs/jffs2/nodemgmt.c mutex_unlock(&c->alloc_sem); c 155 fs/jffs2/nodemgmt.c mutex_unlock(&c->alloc_sem); c 158 fs/jffs2/nodemgmt.c c->nr_free_blocks, c->nr_erasing_blocks, c 159 fs/jffs2/nodemgmt.c c->free_size, c->dirty_size, c->wasted_size, c 160 fs/jffs2/nodemgmt.c c->used_size, c->erasing_size, c->bad_size, c 161 fs/jffs2/nodemgmt.c c->free_size + c->dirty_size + c 162 fs/jffs2/nodemgmt.c c->wasted_size + c->used_size + c 163 fs/jffs2/nodemgmt.c c->erasing_size + c->bad_size, c 164 fs/jffs2/nodemgmt.c c->flash_size); c 165 fs/jffs2/nodemgmt.c spin_unlock(&c->erase_completion_lock); c 167 fs/jffs2/nodemgmt.c ret = jffs2_garbage_collect_pass(c); c 170 fs/jffs2/nodemgmt.c spin_lock(&c->erase_completion_lock); c 171 fs/jffs2/nodemgmt.c if (c->nr_erasing_blocks && c 172 fs/jffs2/nodemgmt.c list_empty(&c->erase_pending_list) && c 173 fs/jffs2/nodemgmt.c list_empty(&c->erase_complete_list)) { c 176 fs/jffs2/nodemgmt.c add_wait_queue(&c->erase_wait, &wait); c 179 fs/jffs2/nodemgmt.c spin_unlock(&c->erase_completion_lock); c 182 fs/jffs2/nodemgmt.c remove_wait_queue(&c->erase_wait, &wait); c 184 fs/jffs2/nodemgmt.c spin_unlock(&c->erase_completion_lock); c 193 fs/jffs2/nodemgmt.c mutex_lock(&c->alloc_sem); c 194 fs/jffs2/nodemgmt.c spin_lock(&c->erase_completion_lock); c 197 fs/jffs2/nodemgmt.c ret = jffs2_do_reserve_space(c, minsize, len, sumsize); c 204 fs/jffs2/nodemgmt.c spin_unlock(&c->erase_completion_lock); c 206 fs/jffs2/nodemgmt.c ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1); c 208 fs/jffs2/nodemgmt.c mutex_unlock(&c->alloc_sem); c 212 fs/jffs2/nodemgmt.c int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, c 221 fs/jffs2/nodemgmt.c spin_lock(&c->erase_completion_lock); c 222 fs/jffs2/nodemgmt.c ret = jffs2_do_reserve_space(c, minsize, len, sumsize); c 227 fs/jffs2/nodemgmt.c spin_unlock(&c->erase_completion_lock); c 235 fs/jffs2/nodemgmt.c ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1); c 243 fs/jffs2/nodemgmt.c static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) c 246 fs/jffs2/nodemgmt.c if (c->nextblock == NULL) { c 253 fs/jffs2/nodemgmt.c 
c->dirty_size += jeb->wasted_size; c 254 fs/jffs2/nodemgmt.c c->wasted_size -= jeb->wasted_size; c 257 fs/jffs2/nodemgmt.c if (VERYDIRTY(c, jeb->dirty_size)) { c 261 fs/jffs2/nodemgmt.c list_add_tail(&jeb->list, &c->very_dirty_list); c 266 fs/jffs2/nodemgmt.c list_add_tail(&jeb->list, &c->dirty_list); c 272 fs/jffs2/nodemgmt.c list_add_tail(&jeb->list, &c->clean_list); c 274 fs/jffs2/nodemgmt.c c->nextblock = NULL; c 280 fs/jffs2/nodemgmt.c static int jffs2_find_nextblock(struct jffs2_sb_info *c) c 286 fs/jffs2/nodemgmt.c if (list_empty(&c->free_list)) { c 288 fs/jffs2/nodemgmt.c if (!c->nr_erasing_blocks && c 289 fs/jffs2/nodemgmt.c !list_empty(&c->erasable_list)) { c 292 fs/jffs2/nodemgmt.c ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list); c 293 fs/jffs2/nodemgmt.c list_move_tail(&ejeb->list, &c->erase_pending_list); c 294 fs/jffs2/nodemgmt.c c->nr_erasing_blocks++; c 295 fs/jffs2/nodemgmt.c jffs2_garbage_collect_trigger(c); c 300 fs/jffs2/nodemgmt.c if (!c->nr_erasing_blocks && c 301 fs/jffs2/nodemgmt.c !list_empty(&c->erasable_pending_wbuf_list)) { c 305 fs/jffs2/nodemgmt.c spin_unlock(&c->erase_completion_lock); c 306 fs/jffs2/nodemgmt.c jffs2_flush_wbuf_pad(c); c 307 fs/jffs2/nodemgmt.c spin_lock(&c->erase_completion_lock); c 312 fs/jffs2/nodemgmt.c if (!c->nr_erasing_blocks) { c 316 fs/jffs2/nodemgmt.c c->nr_erasing_blocks, c->nr_free_blocks, c 317 fs/jffs2/nodemgmt.c list_empty(&c->erasable_list) ? "yes" : "no", c 318 fs/jffs2/nodemgmt.c list_empty(&c->erasing_list) ? "yes" : "no", c 319 fs/jffs2/nodemgmt.c list_empty(&c->erase_pending_list) ? "yes" : "no"); c 323 fs/jffs2/nodemgmt.c spin_unlock(&c->erase_completion_lock); c 325 fs/jffs2/nodemgmt.c jffs2_erase_pending_blocks(c, 1); c 326 fs/jffs2/nodemgmt.c spin_lock(&c->erase_completion_lock); c 334 fs/jffs2/nodemgmt.c next = c->free_list.next; c 336 fs/jffs2/nodemgmt.c c->nextblock = list_entry(next, struct jffs2_eraseblock, list); c 337 fs/jffs2/nodemgmt.c c->nr_free_blocks--; c 339 fs/jffs2/nodemgmt.c jffs2_sum_reset_collected(c->summary); /* reset collected summary */ c 343 fs/jffs2/nodemgmt.c if (!(c->wbuf_ofs % c->sector_size) && !c->wbuf_len) c 344 fs/jffs2/nodemgmt.c c->wbuf_ofs = 0xffffffff; c 348 fs/jffs2/nodemgmt.c __func__, c->nextblock->offset); c 354 fs/jffs2/nodemgmt.c static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, c 357 fs/jffs2/nodemgmt.c struct jffs2_eraseblock *jeb = c->nextblock; c 368 fs/jffs2/nodemgmt.c reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE); c 372 fs/jffs2/nodemgmt.c c->summary->sum_size, sumsize); c 377 fs/jffs2/nodemgmt.c if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize + c 381 fs/jffs2/nodemgmt.c if (jffs2_sum_is_disabled(c->summary)) { c 388 fs/jffs2/nodemgmt.c ret = jffs2_sum_write_sumnode(c); c 393 fs/jffs2/nodemgmt.c if (jffs2_sum_is_disabled(c->summary)) { c 401 fs/jffs2/nodemgmt.c jffs2_close_nextblock(c, jeb); c 404 fs/jffs2/nodemgmt.c reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE); c 413 fs/jffs2/nodemgmt.c if (jffs2_wbuf_dirty(c)) { c 414 fs/jffs2/nodemgmt.c spin_unlock(&c->erase_completion_lock); c 417 fs/jffs2/nodemgmt.c jffs2_flush_wbuf_pad(c); c 418 fs/jffs2/nodemgmt.c spin_lock(&c->erase_completion_lock); c 419 fs/jffs2/nodemgmt.c jeb = c->nextblock; c 423 fs/jffs2/nodemgmt.c spin_unlock(&c->erase_completion_lock); c 425 fs/jffs2/nodemgmt.c ret = jffs2_prealloc_raw_node_refs(c, jeb, 1); c 431 fs/jffs2/nodemgmt.c spin_lock(&c->erase_completion_lock); c 437 
fs/jffs2/nodemgmt.c jffs2_link_node_ref(c, jeb, c 438 fs/jffs2/nodemgmt.c (jeb->offset + c->sector_size - waste) | REF_OBSOLETE, c 442 fs/jffs2/nodemgmt.c c->dirty_size -= waste; c 444 fs/jffs2/nodemgmt.c c->wasted_size += waste; c 446 fs/jffs2/nodemgmt.c jffs2_close_nextblock(c, jeb); c 453 fs/jffs2/nodemgmt.c ret = jffs2_find_nextblock(c); c 457 fs/jffs2/nodemgmt.c jeb = c->nextblock; c 459 fs/jffs2/nodemgmt.c if (jeb->free_size != c->sector_size - c->cleanmarker_size) { c 469 fs/jffs2/nodemgmt.c if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size && c 477 fs/jffs2/nodemgmt.c spin_unlock(&c->erase_completion_lock); c 478 fs/jffs2/nodemgmt.c jffs2_mark_node_obsolete(c, jeb->first_node); c 479 fs/jffs2/nodemgmt.c spin_lock(&c->erase_completion_lock); c 484 fs/jffs2/nodemgmt.c *len, jeb->offset + (c->sector_size - jeb->free_size)); c 500 fs/jffs2/nodemgmt.c struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c, c 507 fs/jffs2/nodemgmt.c jeb = &c->blocks[ofs / c->sector_size]; c 515 fs/jffs2/nodemgmt.c if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE)) c 516 fs/jffs2/nodemgmt.c && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) { c 519 fs/jffs2/nodemgmt.c if (c->nextblock) c 520 fs/jffs2/nodemgmt.c pr_warn("nextblock 0x%08x", c->nextblock->offset); c 524 fs/jffs2/nodemgmt.c jeb->offset + (c->sector_size - jeb->free_size)); c 528 fs/jffs2/nodemgmt.c spin_lock(&c->erase_completion_lock); c 530 fs/jffs2/nodemgmt.c new = jffs2_link_node_ref(c, jeb, ofs, len, ic); c 537 fs/jffs2/nodemgmt.c if (jffs2_wbuf_dirty(c)) { c 539 fs/jffs2/nodemgmt.c spin_unlock(&c->erase_completion_lock); c 540 fs/jffs2/nodemgmt.c jffs2_flush_wbuf_pad(c); c 541 fs/jffs2/nodemgmt.c spin_lock(&c->erase_completion_lock); c 544 fs/jffs2/nodemgmt.c list_add_tail(&jeb->list, &c->clean_list); c 545 fs/jffs2/nodemgmt.c c->nextblock = NULL; c 547 fs/jffs2/nodemgmt.c jffs2_dbg_acct_sanity_check_nolock(c,jeb); c 548 fs/jffs2/nodemgmt.c jffs2_dbg_acct_paranoia_check_nolock(c, jeb); c 550 fs/jffs2/nodemgmt.c spin_unlock(&c->erase_completion_lock); c 556 fs/jffs2/nodemgmt.c void jffs2_complete_reservation(struct jffs2_sb_info *c) c 559 fs/jffs2/nodemgmt.c spin_lock(&c->erase_completion_lock); c 560 fs/jffs2/nodemgmt.c jffs2_garbage_collect_trigger(c); c 561 fs/jffs2/nodemgmt.c spin_unlock(&c->erase_completion_lock); c 562 fs/jffs2/nodemgmt.c mutex_unlock(&c->alloc_sem); c 579 fs/jffs2/nodemgmt.c void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref) c 597 fs/jffs2/nodemgmt.c blocknr = ref->flash_offset / c->sector_size; c 598 fs/jffs2/nodemgmt.c if (blocknr >= c->nr_blocks) { c 603 fs/jffs2/nodemgmt.c jeb = &c->blocks[blocknr]; c 605 fs/jffs2/nodemgmt.c if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) && c 606 fs/jffs2/nodemgmt.c !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) { c 613 fs/jffs2/nodemgmt.c mutex_lock(&c->erase_free_sem); c 616 fs/jffs2/nodemgmt.c spin_lock(&c->erase_completion_lock); c 618 fs/jffs2/nodemgmt.c freed_len = ref_totlen(c, jeb, ref); c 630 fs/jffs2/nodemgmt.c c->unchecked_size -= freed_len; c 641 fs/jffs2/nodemgmt.c c->used_size -= freed_len; c 645 fs/jffs2/nodemgmt.c if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) { c 649 fs/jffs2/nodemgmt.c c->dirty_size += freed_len; c 653 fs/jffs2/nodemgmt.c if (on_list(&jeb->list, &c->bad_used_list)) { c 662 fs/jffs2/nodemgmt.c c->dirty_size += jeb->wasted_size; c 663 fs/jffs2/nodemgmt.c 
c->wasted_size -= jeb->wasted_size; c 671 fs/jffs2/nodemgmt.c c->wasted_size += freed_len; c 675 fs/jffs2/nodemgmt.c jffs2_dbg_acct_sanity_check_nolock(c, jeb); c 676 fs/jffs2/nodemgmt.c jffs2_dbg_acct_paranoia_check_nolock(c, jeb); c 678 fs/jffs2/nodemgmt.c if (c->flags & JFFS2_SB_FLAG_SCANNING) { c 684 fs/jffs2/nodemgmt.c spin_unlock(&c->erase_completion_lock); c 689 fs/jffs2/nodemgmt.c if (jeb == c->nextblock) { c 693 fs/jffs2/nodemgmt.c if (jeb == c->gcblock) { c 696 fs/jffs2/nodemgmt.c c->gcblock = NULL; c 702 fs/jffs2/nodemgmt.c if (jffs2_wbuf_dirty(c)) { c 704 fs/jffs2/nodemgmt.c list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list); c 710 fs/jffs2/nodemgmt.c list_add_tail(&jeb->list, &c->erase_pending_list); c 711 fs/jffs2/nodemgmt.c c->nr_erasing_blocks++; c 712 fs/jffs2/nodemgmt.c jffs2_garbage_collect_trigger(c); c 717 fs/jffs2/nodemgmt.c list_add_tail(&jeb->list, &c->erasable_list); c 721 fs/jffs2/nodemgmt.c } else if (jeb == c->gcblock) { c 729 fs/jffs2/nodemgmt.c list_add_tail(&jeb->list, &c->dirty_list); c 730 fs/jffs2/nodemgmt.c } else if (VERYDIRTY(c, jeb->dirty_size) && c 731 fs/jffs2/nodemgmt.c !VERYDIRTY(c, jeb->dirty_size - addedsize)) { c 736 fs/jffs2/nodemgmt.c list_add_tail(&jeb->list, &c->very_dirty_list); c 743 fs/jffs2/nodemgmt.c spin_unlock(&c->erase_completion_lock); c 745 fs/jffs2/nodemgmt.c if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) || c 746 fs/jffs2/nodemgmt.c (c->flags & JFFS2_SB_FLAG_BUILDING)) { c 758 fs/jffs2/nodemgmt.c ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n); c 781 fs/jffs2/nodemgmt.c ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n); c 807 fs/jffs2/nodemgmt.c spin_lock(&c->erase_completion_lock); c 819 fs/jffs2/nodemgmt.c jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic); c 822 fs/jffs2/nodemgmt.c jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic); c 827 fs/jffs2/nodemgmt.c jffs2_del_ino_cache(c, ic); c 830 fs/jffs2/nodemgmt.c spin_unlock(&c->erase_completion_lock); c 834 fs/jffs2/nodemgmt.c mutex_unlock(&c->erase_free_sem); c 837 fs/jffs2/nodemgmt.c int jffs2_thread_should_wake(struct jffs2_sb_info *c) c 844 fs/jffs2/nodemgmt.c if (!list_empty(&c->erase_complete_list) || c 845 fs/jffs2/nodemgmt.c !list_empty(&c->erase_pending_list)) c 848 fs/jffs2/nodemgmt.c if (c->unchecked_size) { c 850 fs/jffs2/nodemgmt.c c->unchecked_size, c->check_ino); c 862 fs/jffs2/nodemgmt.c dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size; c 864 fs/jffs2/nodemgmt.c if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger && c 865 fs/jffs2/nodemgmt.c (dirty > c->nospc_dirty_size)) c 868 fs/jffs2/nodemgmt.c list_for_each_entry(jeb, &c->very_dirty_list, list) { c 870 fs/jffs2/nodemgmt.c if (nr_very_dirty == c->vdirty_blocks_gctrigger) { c 879 fs/jffs2/nodemgmt.c __func__, c->nr_free_blocks, c->nr_erasing_blocks, c 880 fs/jffs2/nodemgmt.c c->dirty_size, nr_very_dirty, ret ? 
"yes" : "no"); c 25 fs/jffs2/os-linux.h #define OFNI_BS_2SFFJ(c) ((struct super_block *)c->os_priv) c 63 fs/jffs2/os-linux.h #define jffs2_is_readonly(c) (OFNI_BS_2SFFJ(c)->s_flags & SB_RDONLY) c 65 fs/jffs2/os-linux.h #define SECTOR_ADDR(x) ( (((unsigned long)(x) / c->sector_size) * c->sector_size) ) c 70 fs/jffs2/os-linux.h #define jffs2_can_mark_obsolete(c) (0) c 72 fs/jffs2/os-linux.h #define jffs2_can_mark_obsolete(c) (1) c 75 fs/jffs2/os-linux.h #define jffs2_is_writebuffered(c) (0) c 76 fs/jffs2/os-linux.h #define jffs2_cleanmarker_oob(c) (0) c 77 fs/jffs2/os-linux.h #define jffs2_write_nand_cleanmarker(c,jeb) (-EIO) c 79 fs/jffs2/os-linux.h #define jffs2_flash_write(c, ofs, len, retlen, buf) jffs2_flash_direct_write(c, ofs, len, retlen, buf) c 80 fs/jffs2/os-linux.h #define jffs2_flash_read(c, ofs, len, retlen, buf) (mtd_read((c)->mtd, ofs, len, retlen, buf)) c 81 fs/jffs2/os-linux.h #define jffs2_flush_wbuf_pad(c) ({ do{} while(0); (void)(c), 0; }) c 82 fs/jffs2/os-linux.h #define jffs2_flush_wbuf_gc(c, i) ({ do{} while(0); (void)(c), (void) i, 0; }) c 83 fs/jffs2/os-linux.h #define jffs2_write_nand_badblock(c,jeb,bad_offset) (1) c 84 fs/jffs2/os-linux.h #define jffs2_nand_flash_setup(c) (0) c 85 fs/jffs2/os-linux.h #define jffs2_nand_flash_cleanup(c) do {} while(0) c 86 fs/jffs2/os-linux.h #define jffs2_wbuf_dirty(c) (0) c 87 fs/jffs2/os-linux.h #define jffs2_flash_writev(a,b,c,d,e,f) jffs2_flash_direct_writev(a,b,c,d,e) c 90 fs/jffs2/os-linux.h #define jffs2_dataflash(c) (0) c 91 fs/jffs2/os-linux.h #define jffs2_dataflash_setup(c) (0) c 92 fs/jffs2/os-linux.h #define jffs2_dataflash_cleanup(c) do {} while (0) c 93 fs/jffs2/os-linux.h #define jffs2_nor_wbuf_flash(c) (0) c 94 fs/jffs2/os-linux.h #define jffs2_nor_wbuf_flash_setup(c) (0) c 95 fs/jffs2/os-linux.h #define jffs2_nor_wbuf_flash_cleanup(c) do {} while (0) c 96 fs/jffs2/os-linux.h #define jffs2_ubivol(c) (0) c 97 fs/jffs2/os-linux.h #define jffs2_ubivol_setup(c) (0) c 98 fs/jffs2/os-linux.h #define jffs2_ubivol_cleanup(c) do {} while (0) c 99 fs/jffs2/os-linux.h #define jffs2_dirty_trigger(c) do {} while (0) c 103 fs/jffs2/os-linux.h #define jffs2_is_writebuffered(c) (c->wbuf != NULL) c 106 fs/jffs2/os-linux.h #define jffs2_can_mark_obsolete(c) (0) c 108 fs/jffs2/os-linux.h #define jffs2_can_mark_obsolete(c) (c->mtd->flags & (MTD_BIT_WRITEABLE)) c 111 fs/jffs2/os-linux.h #define jffs2_cleanmarker_oob(c) (c->mtd->type == MTD_NANDFLASH) c 113 fs/jffs2/os-linux.h #define jffs2_wbuf_dirty(c) (!!(c)->wbuf_len) c 116 fs/jffs2/os-linux.h int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen, uint32_t ino); c 117 fs/jffs2/os-linux.h int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, const u_char *buf); c 118 fs/jffs2/os-linux.h int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf); c 119 fs/jffs2/os-linux.h int jffs2_check_oob_empty(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,int mode); c 120 fs/jffs2/os-linux.h int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); c 121 fs/jffs2/os-linux.h int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); c 122 fs/jffs2/os-linux.h int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset); c 125 fs/jffs2/os-linux.h int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino); c 126 fs/jffs2/os-linux.h int 
jffs2_flush_wbuf_pad(struct jffs2_sb_info *c); c 127 fs/jffs2/os-linux.h int jffs2_nand_flash_setup(struct jffs2_sb_info *c); c 128 fs/jffs2/os-linux.h void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c); c 130 fs/jffs2/os-linux.h #define jffs2_dataflash(c) (c->mtd->type == MTD_DATAFLASH) c 131 fs/jffs2/os-linux.h int jffs2_dataflash_setup(struct jffs2_sb_info *c); c 132 fs/jffs2/os-linux.h void jffs2_dataflash_cleanup(struct jffs2_sb_info *c); c 133 fs/jffs2/os-linux.h #define jffs2_ubivol(c) (c->mtd->type == MTD_UBIVOLUME) c 134 fs/jffs2/os-linux.h int jffs2_ubivol_setup(struct jffs2_sb_info *c); c 135 fs/jffs2/os-linux.h void jffs2_ubivol_cleanup(struct jffs2_sb_info *c); c 137 fs/jffs2/os-linux.h #define jffs2_nor_wbuf_flash(c) (c->mtd->type == MTD_NORFLASH && ! (c->mtd->flags & MTD_BIT_WRITEABLE)) c 138 fs/jffs2/os-linux.h int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c); c 139 fs/jffs2/os-linux.h void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c); c 140 fs/jffs2/os-linux.h void jffs2_dirty_trigger(struct jffs2_sb_info *c); c 145 fs/jffs2/os-linux.h int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c); c 146 fs/jffs2/os-linux.h void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c); c 147 fs/jffs2/os-linux.h void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c); c 177 fs/jffs2/os-linux.h void jffs2_gc_release_inode(struct jffs2_sb_info *c, c 179 fs/jffs2/os-linux.h struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c, c 182 fs/jffs2/os-linux.h unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c, c 186 fs/jffs2/os-linux.h void jffs2_flash_cleanup(struct jffs2_sb_info *c); c 190 fs/jffs2/os-linux.h int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs, c 192 fs/jffs2/os-linux.h int jffs2_flash_direct_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, c 23 fs/jffs2/read.c int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, c 38 fs/jffs2/read.c ret = jffs2_flash_read(c, ref_offset(fd->raw), sizeof(*ri), &readlen, (char *)ri); c 115 fs/jffs2/read.c ret = jffs2_flash_read(c, (ref_offset(fd->raw)) + sizeof(*ri), c 135 fs/jffs2/read.c ret = jffs2_decompress(c, f, ri->compr | (ri->usercompr << 8), readbuf, decomprbuf, je32_to_cpu(ri->csize), je32_to_cpu(ri->dsize)); c 157 fs/jffs2/read.c int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, c 212 fs/jffs2/read.c ret = jffs2_read_dnode(c, f, frag->node, buf, fragofs + frag->ofs - frag->node->ofs, readlen); c 31 fs/jffs2/readinode.c static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn) c 46 fs/jffs2/readinode.c if (jffs2_is_writebuffered(c)) { c 47 fs/jffs2/readinode.c int adj = ofs % c->wbuf_pagesize; c 49 fs/jffs2/readinode.c adj = c->wbuf_pagesize - adj; c 67 fs/jffs2/readinode.c err = mtd_point(c->mtd, ofs, len, &retlen, (void **)&buffer, NULL); c 70 fs/jffs2/readinode.c mtd_unpoint(c->mtd, ofs, retlen); c 85 fs/jffs2/readinode.c err = jffs2_flash_read(c, ofs, len, &retlen, buffer); c 104 fs/jffs2/readinode.c mtd_unpoint(c->mtd, ofs, len); c 114 fs/jffs2/readinode.c jeb = &c->blocks[ref->flash_offset / c->sector_size]; c 115 fs/jffs2/readinode.c len = ref_totlen(c, jeb, ref); c 125 fs/jffs2/readinode.c spin_lock(&c->erase_completion_lock); c 128 fs/jffs2/readinode.c c->used_size += len; c 129 fs/jffs2/readinode.c c->unchecked_size -= len; c 130 fs/jffs2/readinode.c jffs2_dbg_acct_paranoia_check_nolock(c, jeb); c 131 fs/jffs2/readinode.c 
spin_unlock(&c->erase_completion_lock); c 140 fs/jffs2/readinode.c mtd_unpoint(c->mtd, ofs, len); c 150 fs/jffs2/readinode.c static int check_tn_node(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn) c 163 fs/jffs2/readinode.c ret = check_node_data(c, tn); c 169 fs/jffs2/readinode.c jffs2_mark_node_obsolete(c, tn->fn->raw); c 199 fs/jffs2/readinode.c static void jffs2_kill_tn(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn) c 201 fs/jffs2/readinode.c jffs2_mark_node_obsolete(c, tn->fn->raw); c 218 fs/jffs2/readinode.c static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c, c 236 fs/jffs2/readinode.c jffs2_kill_tn(c, rii->mdata_tn); c 240 fs/jffs2/readinode.c jffs2_kill_tn(c, tn); c 278 fs/jffs2/readinode.c if (!check_tn_node(c, this)) { c 281 fs/jffs2/readinode.c jffs2_kill_tn(c, tn); c 287 fs/jffs2/readinode.c jffs2_kill_tn(c, this); c 296 fs/jffs2/readinode.c if (check_tn_node(c, tn)) { c 298 fs/jffs2/readinode.c jffs2_kill_tn(c, tn); c 309 fs/jffs2/readinode.c jffs2_kill_tn(c, this); c 320 fs/jffs2/readinode.c if (!check_tn_node(c, this)) { c 322 fs/jffs2/readinode.c jffs2_kill_tn(c, tn); c 328 fs/jffs2/readinode.c jffs2_kill_tn(c, this); c 451 fs/jffs2/readinode.c static int jffs2_build_inode_fragtree(struct jffs2_sb_info *c, c 500 fs/jffs2/readinode.c if (check_tn_node(c, this)) { c 504 fs/jffs2/readinode.c jffs2_kill_tn(c, this); c 518 fs/jffs2/readinode.c ret = jffs2_add_full_dnode_to_inode(c, f, this->fn); c 525 fs/jffs2/readinode.c if (check_tn_node(c, this)) c 526 fs/jffs2/readinode.c jffs2_mark_node_obsolete(c, this->fn->raw); c 586 fs/jffs2/readinode.c static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, c 600 fs/jffs2/readinode.c jffs2_mark_node_obsolete(c, ref); c 613 fs/jffs2/readinode.c jffs2_mark_node_obsolete(c, ref); c 617 fs/jffs2/readinode.c jeb = &c->blocks[ref->flash_offset / c->sector_size]; c 618 fs/jffs2/readinode.c len = ref_totlen(c, jeb, ref); c 620 fs/jffs2/readinode.c spin_lock(&c->erase_completion_lock); c 623 fs/jffs2/readinode.c c->used_size += len; c 624 fs/jffs2/readinode.c c->unchecked_size -= len; c 626 fs/jffs2/readinode.c spin_unlock(&c->erase_completion_lock); c 661 fs/jffs2/readinode.c err = jffs2_flash_read(c, (ref_offset(ref)) + read, c 685 fs/jffs2/readinode.c jffs2_add_fd_to_list(c, fd, &rii->fds); c 697 fs/jffs2/readinode.c static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, c 713 fs/jffs2/readinode.c jffs2_mark_node_obsolete(c, ref); c 733 fs/jffs2/readinode.c jffs2_dbg_dump_node(c, ref_offset(ref)); c 734 fs/jffs2/readinode.c jffs2_mark_node_obsolete(c, ref); c 738 fs/jffs2/readinode.c if (jffs2_is_writebuffered(c) && csize != 0) { c 789 fs/jffs2/readinode.c jffs2_mark_node_obsolete(c, ref); c 803 fs/jffs2/readinode.c jeb = &c->blocks[ref->flash_offset / c->sector_size]; c 804 fs/jffs2/readinode.c len = ref_totlen(c, jeb, ref); c 806 fs/jffs2/readinode.c spin_lock(&c->erase_completion_lock); c 809 fs/jffs2/readinode.c c->used_size += len; c 810 fs/jffs2/readinode.c c->unchecked_size -= len; c 812 fs/jffs2/readinode.c spin_unlock(&c->erase_completion_lock); c 844 fs/jffs2/readinode.c ret = jffs2_add_tn_to_tree(c, rii, tn); c 872 fs/jffs2/readinode.c static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_unknown_node *un) c 881 fs/jffs2/readinode.c jffs2_mark_node_obsolete(c, ref); c 899 fs/jffs2/readinode.c BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO)); c 910 fs/jffs2/readinode.c jffs2_mark_node_obsolete(c, ref); c 
924 fs/jffs2/readinode.c static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, c 931 fs/jffs2/readinode.c if (jffs2_is_writebuffered(c)) { c 932 fs/jffs2/readinode.c int rem = to_read % c->wbuf_pagesize; c 935 fs/jffs2/readinode.c to_read += c->wbuf_pagesize - rem; c 943 fs/jffs2/readinode.c err = jffs2_flash_read(c, offs, to_read, &retlen, buf + *rdlen); c 965 fs/jffs2/readinode.c static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f, c 980 fs/jffs2/readinode.c len = sizeof(union jffs2_node_union) + c->wbuf_pagesize; c 985 fs/jffs2/readinode.c spin_lock(&c->erase_completion_lock); c 998 fs/jffs2/readinode.c spin_unlock(&c->erase_completion_lock); c 1009 fs/jffs2/readinode.c if (jffs2_is_writebuffered(c)) { c 1020 fs/jffs2/readinode.c rem = end % c->wbuf_pagesize; c 1022 fs/jffs2/readinode.c end += c->wbuf_pagesize - rem; c 1029 fs/jffs2/readinode.c err = jffs2_flash_read(c, ref_offset(ref), len, &retlen, buf); c 1050 fs/jffs2/readinode.c jffs2_dbg_dump_node(c, ref_offset(ref)); c 1051 fs/jffs2/readinode.c jffs2_mark_node_obsolete(c, ref); c 1058 fs/jffs2/readinode.c jffs2_mark_node_obsolete(c, ref); c 1068 fs/jffs2/readinode.c err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf); c 1073 fs/jffs2/readinode.c err = read_direntry(c, ref, &node->d, retlen, rii); c 1083 fs/jffs2/readinode.c err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf); c 1088 fs/jffs2/readinode.c err = read_dnode(c, ref, &node->i, len, rii); c 1097 fs/jffs2/readinode.c err = read_more(c, ref, sizeof(struct jffs2_unknown_node), &len, buf); c 1102 fs/jffs2/readinode.c err = read_unknown(c, ref, &node->u); c 1108 fs/jffs2/readinode.c spin_lock(&c->erase_completion_lock); c 1111 fs/jffs2/readinode.c spin_unlock(&c->erase_completion_lock); c 1129 fs/jffs2/readinode.c static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, c 1144 fs/jffs2/readinode.c ret = jffs2_get_inode_nodes(c, f, &rii); c 1149 fs/jffs2/readinode.c jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT); c 1153 fs/jffs2/readinode.c ret = jffs2_build_inode_fragtree(c, f, &rii); c 1158 fs/jffs2/readinode.c jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT); c 1174 fs/jffs2/readinode.c jffs2_kill_tn(c, rii.mdata_tn); c 1189 fs/jffs2/readinode.c jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT); c 1201 fs/jffs2/readinode.c jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT); c 1205 fs/jffs2/readinode.c ret = jffs2_flash_read(c, ref_offset(rii.latest_ref), sizeof(*latest_node), &retlen, (void *)latest_node); c 1232 fs/jffs2/readinode.c new_size = jffs2_truncate_fragtree(c, &f->fragtree, je32_to_cpu(latest_node->isize)); c 1261 fs/jffs2/readinode.c ret = jffs2_flash_read(c, ref_offset(rii.latest_ref) + sizeof(*latest_node), c 1306 fs/jffs2/readinode.c jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT); c 1312 fs/jffs2/readinode.c int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, c 1318 fs/jffs2/readinode.c spin_lock(&c->inocache_lock); c 1319 fs/jffs2/readinode.c f->inocache = jffs2_get_ino_cache(c, ino); c 1335 fs/jffs2/readinode.c sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); c 1352 fs/jffs2/readinode.c spin_unlock(&c->inocache_lock); c 1366 fs/jffs2/readinode.c jffs2_add_ino_cache(c, f->inocache); c 1373 fs/jffs2/readinode.c return jffs2_do_read_inode_internal(c, f, latest_node); c 1376 fs/jffs2/readinode.c int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct 
jffs2_inode_cache *ic) c 1389 fs/jffs2/readinode.c ret = jffs2_do_read_inode_internal(c, f, &n); c 1391 fs/jffs2/readinode.c jffs2_do_clear_inode(c, f); c 1392 fs/jffs2/readinode.c jffs2_xattr_do_crccheck_inode(c, ic); c 1397 fs/jffs2/readinode.c void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f) c 1402 fs/jffs2/readinode.c jffs2_xattr_delete_inode(c, f->inocache); c 1407 fs/jffs2/readinode.c jffs2_set_inocache_state(c, f->inocache, INO_STATE_CLEARING); c 1411 fs/jffs2/readinode.c jffs2_mark_node_obsolete(c, f->metadata->raw); c 1415 fs/jffs2/readinode.c jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL); c 1425 fs/jffs2/readinode.c jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT); c 1427 fs/jffs2/readinode.c jffs2_del_ino_cache(c, f->inocache); c 39 fs/jffs2/scan.c static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, c 46 fs/jffs2/scan.c static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, c 48 fs/jffs2/scan.c static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, c 51 fs/jffs2/scan.c static inline int min_free(struct jffs2_sb_info *c) c 55 fs/jffs2/scan.c if (!jffs2_can_mark_obsolete(c) && min < c->wbuf_pagesize) c 56 fs/jffs2/scan.c return c->wbuf_pagesize; c 69 fs/jffs2/scan.c static int file_dirty(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) c 73 fs/jffs2/scan.c if ((ret = jffs2_prealloc_raw_node_refs(c, jeb, 1))) c 75 fs/jffs2/scan.c if ((ret = jffs2_scan_dirty_space(c, jeb, jeb->free_size))) c 80 fs/jffs2/scan.c c->dirty_size += jeb->wasted_size; c 81 fs/jffs2/scan.c c->wasted_size -= jeb->wasted_size; c 83 fs/jffs2/scan.c if (VERYDIRTY(c, jeb->dirty_size)) { c 84 fs/jffs2/scan.c list_add(&jeb->list, &c->very_dirty_list); c 86 fs/jffs2/scan.c list_add(&jeb->list, &c->dirty_list); c 91 fs/jffs2/scan.c int jffs2_scan_medium(struct jffs2_sb_info *c) c 101 fs/jffs2/scan.c ret = mtd_point(c->mtd, 0, c->mtd->size, &pointlen, c 103 fs/jffs2/scan.c if (!ret && pointlen < c->mtd->size) { c 107 fs/jffs2/scan.c mtd_unpoint(c->mtd, 0, pointlen); c 116 fs/jffs2/scan.c if (jffs2_cleanmarker_oob(c)) c 117 fs/jffs2/scan.c try_size = c->sector_size; c 124 fs/jffs2/scan.c flashbuf = mtd_kmalloc_up_to(c->mtd, &try_size); c 143 fs/jffs2/scan.c for (i=0; i<c->nr_blocks; i++) { c 144 fs/jffs2/scan.c struct jffs2_eraseblock *jeb = &c->blocks[i]; c 151 fs/jffs2/scan.c ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset), c 157 fs/jffs2/scan.c jffs2_dbg_acct_paranoia_check_nolock(c, jeb); c 170 fs/jffs2/scan.c list_add(&jeb->list, &c->erase_pending_list); c 171 fs/jffs2/scan.c c->nr_erasing_blocks++; c 178 fs/jffs2/scan.c list_add(&jeb->list, &c->free_list); c 179 fs/jffs2/scan.c c->nr_free_blocks++; c 184 fs/jffs2/scan.c list_add(&jeb->list, &c->erase_pending_list); c 185 fs/jffs2/scan.c c->nr_erasing_blocks++; c 191 fs/jffs2/scan.c list_add(&jeb->list, &c->clean_list); c 198 fs/jffs2/scan.c if (jeb->free_size > min_free(c) && c 199 fs/jffs2/scan.c (!c->nextblock || c->nextblock->free_size < jeb->free_size)) { c 201 fs/jffs2/scan.c if (c->nextblock) { c 202 fs/jffs2/scan.c ret = file_dirty(c, c->nextblock); c 206 fs/jffs2/scan.c jffs2_sum_reset_collected(c->summary); c 209 fs/jffs2/scan.c jffs2_sum_move_collected(c, s); c 212 fs/jffs2/scan.c c->nextblock = jeb; c 214 fs/jffs2/scan.c ret = file_dirty(c, jeb); c 225 fs/jffs2/scan.c list_add(&jeb->list, &c->erase_pending_list); c 226 fs/jffs2/scan.c c->nr_erasing_blocks++; c 231 
fs/jffs2/scan.c list_add(&jeb->list, &c->bad_list); c 232 fs/jffs2/scan.c c->bad_size += c->sector_size; c 233 fs/jffs2/scan.c c->free_size -= c->sector_size; c 243 fs/jffs2/scan.c if (c->nextblock && (c->nextblock->dirty_size)) { c 244 fs/jffs2/scan.c c->nextblock->wasted_size += c->nextblock->dirty_size; c 245 fs/jffs2/scan.c c->wasted_size += c->nextblock->dirty_size; c 246 fs/jffs2/scan.c c->dirty_size -= c->nextblock->dirty_size; c 247 fs/jffs2/scan.c c->nextblock->dirty_size = 0; c 250 fs/jffs2/scan.c if (!jffs2_can_mark_obsolete(c) && c->wbuf_pagesize && c->nextblock && (c->nextblock->free_size % c->wbuf_pagesize)) { c 255 fs/jffs2/scan.c uint32_t skip = c->nextblock->free_size % c->wbuf_pagesize; c 259 fs/jffs2/scan.c jffs2_prealloc_raw_node_refs(c, c->nextblock, 1); c 260 fs/jffs2/scan.c jffs2_scan_dirty_space(c, c->nextblock, skip); c 263 fs/jffs2/scan.c if (c->nr_erasing_blocks) { c 264 fs/jffs2/scan.c if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) { c 267 fs/jffs2/scan.c empty_blocks, bad_blocks, c->nr_blocks); c 271 fs/jffs2/scan.c spin_lock(&c->erase_completion_lock); c 272 fs/jffs2/scan.c jffs2_garbage_collect_trigger(c); c 273 fs/jffs2/scan.c spin_unlock(&c->erase_completion_lock); c 281 fs/jffs2/scan.c mtd_unpoint(c->mtd, 0, c->mtd->size); c 287 fs/jffs2/scan.c static int jffs2_fill_scan_buf(struct jffs2_sb_info *c, void *buf, c 293 fs/jffs2/scan.c ret = jffs2_flash_read(c, ofs, len, &retlen, buf); c 307 fs/jffs2/scan.c int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) c 309 fs/jffs2/scan.c if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size c 314 fs/jffs2/scan.c else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) { c 315 fs/jffs2/scan.c c->dirty_size -= jeb->dirty_size; c 316 fs/jffs2/scan.c c->wasted_size += jeb->dirty_size; c 327 fs/jffs2/scan.c static int jffs2_scan_xattr_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, c 339 fs/jffs2/scan.c if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen)))) c 352 fs/jffs2/scan.c if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen)))) c 357 fs/jffs2/scan.c xd = jffs2_setup_xattr_datum(c, xid, version); c 363 fs/jffs2/scan.c = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, NULL); c 373 fs/jffs2/scan.c jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, (void *)xd); c 383 fs/jffs2/scan.c static int jffs2_scan_xref_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, c 395 fs/jffs2/scan.c if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rr->totlen))))) c 404 fs/jffs2/scan.c if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rr->totlen)))) c 425 fs/jffs2/scan.c if (ref->xseqno > c->highest_xseqno) c 426 fs/jffs2/scan.c c->highest_xseqno = (ref->xseqno & ~XREF_DELETE_MARKER); c 427 fs/jffs2/scan.c ref->next = c->xref_temp; c 428 fs/jffs2/scan.c c->xref_temp = ref; c 430 fs/jffs2/scan.c jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rr->totlen)), (void *)ref); c 442 fs/jffs2/scan.c static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, c 462 fs/jffs2/scan.c if (jffs2_cleanmarker_oob(c)) { c 465 fs/jffs2/scan.c if (mtd_block_isbad(c->mtd, jeb->offset)) c 468 fs/jffs2/scan.c ret = jffs2_check_nand_cleanmarker(c, jeb); c 489 fs/jffs2/scan.c sm = (void *)buf + c->sector_size - sizeof(*sm); c 492 fs/jffs2/scan.c sumlen = c->sector_size - je32_to_cpu(sm->offset); 
c 496 fs/jffs2/scan.c if (c->wbuf_pagesize) c 497 fs/jffs2/scan.c buf_len = c->wbuf_pagesize; c 502 fs/jffs2/scan.c err = jffs2_fill_scan_buf(c, buf + buf_size - buf_len, c 503 fs/jffs2/scan.c jeb->offset + c->sector_size - buf_len, c 510 fs/jffs2/scan.c sumlen = c->sector_size - je32_to_cpu(sm->offset); c 514 fs/jffs2/scan.c if (sumlen > c->sector_size) c 527 fs/jffs2/scan.c err = jffs2_fill_scan_buf(c, sumptr, c 528 fs/jffs2/scan.c jeb->offset + c->sector_size - sumlen, c 541 fs/jffs2/scan.c err = jffs2_sum_scan_sumnode(c, jeb, sumptr, sumlen, &pseudo_random); c 559 fs/jffs2/scan.c buf_len = c->sector_size; c 561 fs/jffs2/scan.c buf_len = EMPTY_SCAN_SIZE(c->sector_size); c 562 fs/jffs2/scan.c err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len); c 569 fs/jffs2/scan.c max_ofs = EMPTY_SCAN_SIZE(c->sector_size); c 576 fs/jffs2/scan.c if (jffs2_cleanmarker_oob(c)) { c 578 fs/jffs2/scan.c int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound); c 590 fs/jffs2/scan.c if (c->cleanmarker_size == 0) c 598 fs/jffs2/scan.c if ((err = jffs2_prealloc_raw_node_refs(c, jeb, 1))) c 600 fs/jffs2/scan.c if ((err = jffs2_scan_dirty_space(c, jeb, ofs))) c 612 fs/jffs2/scan.c while(ofs < jeb->offset + c->sector_size) { c 614 fs/jffs2/scan.c jffs2_dbg_acct_paranoia_check_nolock(c, jeb); c 617 fs/jffs2/scan.c err = jffs2_prealloc_raw_node_refs(c, jeb, 2); c 631 fs/jffs2/scan.c if ((err = jffs2_scan_dirty_space(c, jeb, 4))) c 638 fs/jffs2/scan.c if (jeb->offset + c->sector_size < ofs + sizeof(*node)) { c 641 fs/jffs2/scan.c jeb->offset, c->sector_size, ofs, c 643 fs/jffs2/scan.c if ((err = jffs2_scan_dirty_space(c, jeb, (jeb->offset + c->sector_size)-ofs))) c 649 fs/jffs2/scan.c buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); c 653 fs/jffs2/scan.c err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); c 667 fs/jffs2/scan.c scan_end = min_t(uint32_t, EMPTY_SCAN_SIZE(c->sector_size)/8, buf_len); c 676 fs/jffs2/scan.c if ((err = jffs2_scan_dirty_space(c, jeb, ofs-empty_start))) c 690 fs/jffs2/scan.c if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) && c 691 fs/jffs2/scan.c c->cleanmarker_size && !jeb->dirty_size && !ref_next(jeb->first_node)) { c 693 fs/jffs2/scan.c EMPTY_SCAN_SIZE(c->sector_size)); c 702 fs/jffs2/scan.c buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); c 714 fs/jffs2/scan.c err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); c 724 fs/jffs2/scan.c if ((err = jffs2_scan_dirty_space(c, jeb, 4))) c 731 fs/jffs2/scan.c if ((err = jffs2_scan_dirty_space(c, jeb, 4))) c 739 fs/jffs2/scan.c if ((err = jffs2_scan_dirty_space(c, jeb, 4))) c 750 fs/jffs2/scan.c if ((err = jffs2_scan_dirty_space(c, jeb, 4))) c 769 fs/jffs2/scan.c if ((err = jffs2_scan_dirty_space(c, jeb, 4))) c 775 fs/jffs2/scan.c if (ofs + je32_to_cpu(node->totlen) > jeb->offset + c->sector_size) { c 780 fs/jffs2/scan.c if ((err = jffs2_scan_dirty_space(c, jeb, 4))) c 790 fs/jffs2/scan.c if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) c 799 fs/jffs2/scan.c buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); c 803 fs/jffs2/scan.c err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); c 809 fs/jffs2/scan.c err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs, s); c 816 fs/jffs2/scan.c buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); c 820 fs/jffs2/scan.c err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); c 826 fs/jffs2/scan.c err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs, s); c 834 fs/jffs2/scan.c buf_len = 
min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); c 838 fs/jffs2/scan.c err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); c 844 fs/jffs2/scan.c err = jffs2_scan_xattr_node(c, jeb, (void *)node, ofs, s); c 851 fs/jffs2/scan.c buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); c 855 fs/jffs2/scan.c err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); c 861 fs/jffs2/scan.c err = jffs2_scan_xref_node(c, jeb, (void *)node, ofs, s); c 870 fs/jffs2/scan.c if (je32_to_cpu(node->totlen) != c->cleanmarker_size) { c 873 fs/jffs2/scan.c c->cleanmarker_size); c 874 fs/jffs2/scan.c if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node))))) c 880 fs/jffs2/scan.c if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node))))) c 884 fs/jffs2/scan.c jffs2_link_node_ref(c, jeb, ofs | REF_NORMAL, c->cleanmarker_size, NULL); c 886 fs/jffs2/scan.c ofs += PAD(c->cleanmarker_size); c 893 fs/jffs2/scan.c if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) c 903 fs/jffs2/scan.c c->flags |= JFFS2_SB_FLAG_RO; c 904 fs/jffs2/scan.c if (!(jffs2_is_readonly(c))) c 906 fs/jffs2/scan.c if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) c 919 fs/jffs2/scan.c if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) c 928 fs/jffs2/scan.c jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(node->totlen)), NULL); c 954 fs/jffs2/scan.c c->dirty_size += jeb->wasted_size; c 955 fs/jffs2/scan.c c->wasted_size -= jeb->wasted_size; c 959 fs/jffs2/scan.c return jffs2_scan_classify_jeb(c, jeb); c 962 fs/jffs2/scan.c struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino) c 966 fs/jffs2/scan.c ic = jffs2_get_ino_cache(c, ino); c 970 fs/jffs2/scan.c if (ino > c->highest_ino) c 971 fs/jffs2/scan.c c->highest_ino = ino; c 982 fs/jffs2/scan.c jffs2_add_ino_cache(c, ic); c 988 fs/jffs2/scan.c static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, c 1014 fs/jffs2/scan.c return jffs2_scan_dirty_space(c, jeb, c 1018 fs/jffs2/scan.c ic = jffs2_get_ino_cache(c, ino); c 1020 fs/jffs2/scan.c ic = jffs2_scan_make_ino_cache(c, ino); c 1026 fs/jffs2/scan.c jffs2_link_node_ref(c, jeb, ofs | REF_UNCHECKED, PAD(je32_to_cpu(ri->totlen)), ic); c 1042 fs/jffs2/scan.c static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, c 1061 fs/jffs2/scan.c if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen))))) c 1090 fs/jffs2/scan.c if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen))))) c 1094 fs/jffs2/scan.c ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(rd->pino)); c 1100 fs/jffs2/scan.c fd->raw = jffs2_link_node_ref(c, jeb, ofs | dirent_node_state(rd), c 1108 fs/jffs2/scan.c jffs2_add_fd_to_list(c, fd, &ic->scan_dents); c 1141 fs/jffs2/scan.c void jffs2_rotate_lists(struct jffs2_sb_info *c) c 1146 fs/jffs2/scan.c x = count_list(&c->clean_list); c 1149 fs/jffs2/scan.c rotate_list((&c->clean_list), rotateby); c 1152 fs/jffs2/scan.c x = count_list(&c->very_dirty_list); c 1155 fs/jffs2/scan.c rotate_list((&c->very_dirty_list), rotateby); c 1158 fs/jffs2/scan.c x = count_list(&c->dirty_list); c 1161 fs/jffs2/scan.c rotate_list((&c->dirty_list), rotateby); c 1164 fs/jffs2/scan.c x = count_list(&c->erasable_list); c 1167 fs/jffs2/scan.c rotate_list((&c->erasable_list), rotateby); c 1170 fs/jffs2/scan.c if (c->nr_erasing_blocks) { c 1171 fs/jffs2/scan.c rotateby = pseudo_random % 
c->nr_erasing_blocks; c 1172 fs/jffs2/scan.c rotate_list((&c->erase_pending_list), rotateby); c 1175 fs/jffs2/scan.c if (c->nr_free_blocks) { c 1176 fs/jffs2/scan.c rotateby = pseudo_random % c->nr_free_blocks; c 1177 fs/jffs2/scan.c rotate_list((&c->free_list), rotateby); c 26 fs/jffs2/summary.c int jffs2_sum_init(struct jffs2_sb_info *c) c 28 fs/jffs2/summary.c uint32_t sum_size = min_t(uint32_t, c->sector_size, MAX_SUMMARY_SIZE); c 30 fs/jffs2/summary.c c->summary = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL); c 32 fs/jffs2/summary.c if (!c->summary) { c 37 fs/jffs2/summary.c c->summary->sum_buf = kmalloc(sum_size, GFP_KERNEL); c 39 fs/jffs2/summary.c if (!c->summary->sum_buf) { c 41 fs/jffs2/summary.c kfree(c->summary); c 50 fs/jffs2/summary.c void jffs2_sum_exit(struct jffs2_sb_info *c) c 54 fs/jffs2/summary.c jffs2_sum_disable_collecting(c->summary); c 56 fs/jffs2/summary.c kfree(c->summary->sum_buf); c 57 fs/jffs2/summary.c c->summary->sum_buf = NULL; c 59 fs/jffs2/summary.c kfree(c->summary); c 60 fs/jffs2/summary.c c->summary = NULL; c 231 fs/jffs2/summary.c void jffs2_sum_move_collected(struct jffs2_sb_info *c, struct jffs2_summary *s) c 234 fs/jffs2/summary.c c->summary->sum_size, c->summary->sum_num, c 237 fs/jffs2/summary.c c->summary->sum_size = s->sum_size; c 238 fs/jffs2/summary.c c->summary->sum_num = s->sum_num; c 239 fs/jffs2/summary.c c->summary->sum_padded = s->sum_padded; c 240 fs/jffs2/summary.c c->summary->sum_list_head = s->sum_list_head; c 241 fs/jffs2/summary.c c->summary->sum_list_tail = s->sum_list_tail; c 248 fs/jffs2/summary.c int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs, c 254 fs/jffs2/summary.c if (c->summary->sum_size == JFFS2_SUMMARY_NOSUM_SIZE) { c 260 fs/jffs2/summary.c jeb = &c->blocks[ofs / c->sector_size]; c 278 fs/jffs2/summary.c return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp); c 312 fs/jffs2/summary.c return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp); c 328 fs/jffs2/summary.c return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp); c 339 fs/jffs2/summary.c return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp); c 344 fs/jffs2/summary.c c->summary->sum_padded += je32_to_cpu(node->u.totlen); c 370 fs/jffs2/summary.c static struct jffs2_raw_node_ref *sum_link_node_ref(struct jffs2_sb_info *c, c 376 fs/jffs2/summary.c if ((ofs & ~3) > c->sector_size - jeb->free_size) { c 378 fs/jffs2/summary.c jffs2_scan_dirty_space(c, jeb, (ofs & ~3) - (c->sector_size - jeb->free_size)); c 381 fs/jffs2/summary.c return jffs2_link_node_ref(c, jeb, jeb->offset + ofs, len, ic); c 386 fs/jffs2/summary.c static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, c 403 fs/jffs2/summary.c err = jffs2_prealloc_raw_node_refs(c, jeb, 2); c 418 fs/jffs2/summary.c ic = jffs2_scan_make_ino_cache(c, ino); c 424 fs/jffs2/summary.c sum_link_node_ref(c, jeb, je32_to_cpu(spi->offset) | REF_UNCHECKED, c 467 fs/jffs2/summary.c ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(spd->pino)); c 473 fs/jffs2/summary.c fd->raw = sum_link_node_ref(c, jeb, je32_to_cpu(spd->offset) | REF_UNCHECKED, c 482 fs/jffs2/summary.c jffs2_add_fd_to_list(c, fd, &ic->scan_dents); c 501 fs/jffs2/summary.c xd = jffs2_setup_xattr_datum(c, je32_to_cpu(spx->xid), c 508 fs/jffs2/summary.c = sum_link_node_ref(c, jeb, je32_to_cpu(spx->offset) | REF_UNCHECKED, c 514 fs/jffs2/summary.c sum_link_node_ref(c, jeb, je32_to_cpu(spx->offset) | REF_UNCHECKED, c 537 fs/jffs2/summary.c ref->next = 
c->xref_temp; c 538 fs/jffs2/summary.c c->xref_temp = ref; c 540 fs/jffs2/summary.c sum_link_node_ref(c, jeb, je32_to_cpu(spr->offset) | REF_UNCHECKED, c 556 fs/jffs2/summary.c c->wasted_size -= jeb->wasted_size; c 557 fs/jffs2/summary.c c->free_size += c->sector_size - jeb->free_size; c 558 fs/jffs2/summary.c c->used_size -= jeb->used_size; c 559 fs/jffs2/summary.c c->dirty_size -= jeb->dirty_size; c 561 fs/jffs2/summary.c jeb->free_size = c->sector_size; c 563 fs/jffs2/summary.c jffs2_free_jeb_node_refs(c, jeb); c 572 fs/jffs2/summary.c int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, c 580 fs/jffs2/summary.c ofs = c->sector_size - sumsize; c 620 fs/jffs2/summary.c ret = jffs2_prealloc_raw_node_refs(c, jeb, 1); c 624 fs/jffs2/summary.c if (je32_to_cpu(summary->cln_mkr) != c->cleanmarker_size) { c 626 fs/jffs2/summary.c je32_to_cpu(summary->cln_mkr), c->cleanmarker_size); c 627 fs/jffs2/summary.c if ((ret = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(summary->cln_mkr))))) c 632 fs/jffs2/summary.c if ((ret = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(summary->cln_mkr))))) c 635 fs/jffs2/summary.c jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL, c 640 fs/jffs2/summary.c ret = jffs2_sum_process_sum_data(c, jeb, summary, pseudo_random); c 649 fs/jffs2/summary.c ret = jffs2_prealloc_raw_node_refs(c, jeb, 2); c 653 fs/jffs2/summary.c sum_link_node_ref(c, jeb, ofs | REF_NORMAL, sumsize, NULL); c 659 fs/jffs2/summary.c c->wasted_size += jeb->free_size; c 660 fs/jffs2/summary.c c->free_size -= jeb->free_size; c 664 fs/jffs2/summary.c return jffs2_scan_classify_jeb(c, jeb); c 674 fs/jffs2/summary.c static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, c 688 fs/jffs2/summary.c jffs2_sum_disable_collecting(c->summary); c 698 fs/jffs2/summary.c jffs2_sum_disable_collecting(c->summary); c 706 fs/jffs2/summary.c memset(c->summary->sum_buf, 0xff, datasize); c 713 fs/jffs2/summary.c isum.padded = cpu_to_je32(c->summary->sum_padded); c 714 fs/jffs2/summary.c isum.cln_mkr = cpu_to_je32(c->cleanmarker_size); c 715 fs/jffs2/summary.c isum.sum_num = cpu_to_je32(c->summary->sum_num); c 716 fs/jffs2/summary.c wpage = c->summary->sum_buf; c 718 fs/jffs2/summary.c while (c->summary->sum_num) { c 719 fs/jffs2/summary.c temp = c->summary->sum_list_head; c 759 fs/jffs2/summary.c temp = c->summary->sum_list_head; c 772 fs/jffs2/summary.c temp = c->summary->sum_list_head; c 785 fs/jffs2/summary.c jffs2_sum_disable_collecting(c->summary); c 792 fs/jffs2/summary.c c->summary->sum_list_head = temp->u.next; c 795 fs/jffs2/summary.c c->summary->sum_num--; c 798 fs/jffs2/summary.c jffs2_sum_reset_collected(c->summary); c 803 fs/jffs2/summary.c sm->offset = cpu_to_je32(c->sector_size - jeb->free_size); c 806 fs/jffs2/summary.c isum.sum_crc = cpu_to_je32(crc32(0, c->summary->sum_buf, datasize)); c 811 fs/jffs2/summary.c vecs[1].iov_base = c->summary->sum_buf; c 814 fs/jffs2/summary.c sum_ofs = jeb->offset + c->sector_size - jeb->free_size; c 818 fs/jffs2/summary.c ret = jffs2_flash_writev(c, vecs, 2, sum_ofs, &retlen, 0); c 827 fs/jffs2/summary.c spin_lock(&c->erase_completion_lock); c 828 fs/jffs2/summary.c jffs2_link_node_ref(c, jeb, sum_ofs | REF_OBSOLETE, infosize, NULL); c 829 fs/jffs2/summary.c spin_unlock(&c->erase_completion_lock); c 832 fs/jffs2/summary.c c->summary->sum_size = JFFS2_SUMMARY_NOSUM_SIZE; c 837 fs/jffs2/summary.c spin_lock(&c->erase_completion_lock); c 838 fs/jffs2/summary.c jffs2_link_node_ref(c, jeb, sum_ofs | REF_NORMAL, 
infosize, NULL); c 839 fs/jffs2/summary.c spin_unlock(&c->erase_completion_lock); c 846 fs/jffs2/summary.c int jffs2_sum_write_sumnode(struct jffs2_sb_info *c) c 847 fs/jffs2/summary.c __must_hold(&c->erase_completion_block) c 855 fs/jffs2/summary.c spin_unlock(&c->erase_completion_lock); c 857 fs/jffs2/summary.c jeb = c->nextblock; c 858 fs/jffs2/summary.c jffs2_prealloc_raw_node_refs(c, jeb, 1); c 860 fs/jffs2/summary.c if (!c->summary->sum_num || !c->summary->sum_list_head) { c 865 fs/jffs2/summary.c datasize = c->summary->sum_size + sizeof(struct jffs2_sum_marker); c 871 fs/jffs2/summary.c ret = jffs2_sum_write_data(c, jeb, infosize, datasize, padsize); c 872 fs/jffs2/summary.c spin_lock(&c->erase_completion_lock); c 175 fs/jffs2/summary.h int jffs2_sum_init(struct jffs2_sb_info *c); c 176 fs/jffs2/summary.h void jffs2_sum_exit(struct jffs2_sb_info *c); c 180 fs/jffs2/summary.h void jffs2_sum_move_collected(struct jffs2_sb_info *c, struct jffs2_summary *s); c 181 fs/jffs2/summary.h int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs, c 183 fs/jffs2/summary.h int jffs2_sum_write_sumnode(struct jffs2_sb_info *c); c 189 fs/jffs2/summary.h int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, c 201 fs/jffs2/summary.h #define jffs2_sum_add_kvec(a,b,c,d) (0) c 205 fs/jffs2/summary.h #define jffs2_sum_add_inode_mem(a,b,c) c 206 fs/jffs2/summary.h #define jffs2_sum_add_dirent_mem(a,b,c) c 207 fs/jffs2/summary.h #define jffs2_sum_add_xattr_mem(a,b,c) c 208 fs/jffs2/summary.h #define jffs2_sum_add_xref_mem(a,b,c) c 209 fs/jffs2/summary.h #define jffs2_sum_scan_sumnode(a,b,c,d,e) (0) c 86 fs/jffs2/super.c struct jffs2_sb_info *c = JFFS2_SB_INFO(root->d_sb); c 87 fs/jffs2/super.c struct jffs2_mount_opts *opts = &c->mount_opts; c 99 fs/jffs2/super.c struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); c 102 fs/jffs2/super.c if (jffs2_is_writebuffered(c)) c 103 fs/jffs2/super.c cancel_delayed_work_sync(&c->wbuf_dwork); c 106 fs/jffs2/super.c mutex_lock(&c->alloc_sem); c 107 fs/jffs2/super.c jffs2_flush_wbuf_pad(c); c 108 fs/jffs2/super.c mutex_unlock(&c->alloc_sem); c 196 fs/jffs2/super.c struct jffs2_sb_info *c = fc->s_fs_info; c 205 fs/jffs2/super.c c->mount_opts.compr = result.uint_32; c 206 fs/jffs2/super.c c->mount_opts.override_compr = true; c 212 fs/jffs2/super.c if (opt > c->mtd->size) c 214 fs/jffs2/super.c c->mtd->size / 1024); c 215 fs/jffs2/super.c c->mount_opts.rp_size = opt; c 249 fs/jffs2/super.c struct jffs2_sb_info *c = sb->s_fs_info; c 255 fs/jffs2/super.c c->mtd = sb->s_mtd; c 256 fs/jffs2/super.c c->os_priv = sb; c 260 fs/jffs2/super.c mutex_init(&c->alloc_sem); c 261 fs/jffs2/super.c mutex_init(&c->erase_free_sem); c 262 fs/jffs2/super.c init_waitqueue_head(&c->erase_wait); c 263 fs/jffs2/super.c init_waitqueue_head(&c->inocache_wq); c 264 fs/jffs2/super.c spin_lock_init(&c->erase_completion_lock); c 265 fs/jffs2/super.c spin_lock_init(&c->inocache_lock); c 309 fs/jffs2/super.c struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); c 313 fs/jffs2/super.c mutex_lock(&c->alloc_sem); c 314 fs/jffs2/super.c jffs2_flush_wbuf_pad(c); c 315 fs/jffs2/super.c mutex_unlock(&c->alloc_sem); c 317 fs/jffs2/super.c jffs2_sum_exit(c); c 319 fs/jffs2/super.c jffs2_free_ino_caches(c); c 320 fs/jffs2/super.c jffs2_free_raw_node_refs(c); c 321 fs/jffs2/super.c kvfree(c->blocks); c 322 fs/jffs2/super.c jffs2_flash_cleanup(c); c 323 fs/jffs2/super.c kfree(c->inocache_list); c 324 fs/jffs2/super.c jffs2_clear_xattr_subsystem(c); c 325 fs/jffs2/super.c mtd_sync(c->mtd); 
c 331 fs/jffs2/super.c struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); c 332 fs/jffs2/super.c if (c && !sb_rdonly(sb)) c 333 fs/jffs2/super.c jffs2_stop_garbage_collect_thread(c); c 335 fs/jffs2/super.c kfree(c); c 35 fs/jffs2/wbuf.c #define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) ) c 36 fs/jffs2/wbuf.c #define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) ) c 48 fs/jffs2/wbuf.c static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino) c 50 fs/jffs2/wbuf.c struct jffs2_inodirty *this = c->wbuf_inodes; c 69 fs/jffs2/wbuf.c static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c) c 73 fs/jffs2/wbuf.c this = c->wbuf_inodes; c 82 fs/jffs2/wbuf.c c->wbuf_inodes = NULL; c 85 fs/jffs2/wbuf.c static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino) c 90 fs/jffs2/wbuf.c jffs2_dirty_trigger(c); c 92 fs/jffs2/wbuf.c if (jffs2_wbuf_pending_for_ino(c, ino)) c 98 fs/jffs2/wbuf.c jffs2_clear_wbuf_ino_list(c); c 99 fs/jffs2/wbuf.c c->wbuf_inodes = &inodirty_nomem; c 103 fs/jffs2/wbuf.c new->next = c->wbuf_inodes; c 104 fs/jffs2/wbuf.c c->wbuf_inodes = new; c 108 fs/jffs2/wbuf.c static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c) c 113 fs/jffs2/wbuf.c if (list_empty(&c->erasable_pending_wbuf_list)) c 116 fs/jffs2/wbuf.c list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) { c 126 fs/jffs2/wbuf.c list_add_tail(&jeb->list, &c->erase_pending_list); c 127 fs/jffs2/wbuf.c c->nr_erasing_blocks++; c 128 fs/jffs2/wbuf.c jffs2_garbage_collect_trigger(c); c 133 fs/jffs2/wbuf.c list_add_tail(&jeb->list, &c->erasable_list); c 141 fs/jffs2/wbuf.c static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty) c 146 fs/jffs2/wbuf.c if (c->nextblock == jeb) c 147 fs/jffs2/wbuf.c c->nextblock = NULL; c 153 fs/jffs2/wbuf.c list_add(&jeb->list, &c->bad_used_list); c 159 fs/jffs2/wbuf.c list_add(&jeb->list, &c->erase_pending_list); c 160 fs/jffs2/wbuf.c c->nr_erasing_blocks++; c 161 fs/jffs2/wbuf.c jffs2_garbage_collect_trigger(c); c 164 fs/jffs2/wbuf.c if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) { c 167 fs/jffs2/wbuf.c jffs2_link_node_ref(c, jeb, c 168 fs/jffs2/wbuf.c (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE, c 171 fs/jffs2/wbuf.c c->wasted_size += oldfree; c 173 fs/jffs2/wbuf.c c->dirty_size -= oldfree; c 177 fs/jffs2/wbuf.c jffs2_dbg_dump_block_lists_nolock(c); c 178 fs/jffs2/wbuf.c jffs2_dbg_acct_sanity_check_nolock(c,jeb); c 179 fs/jffs2/wbuf.c jffs2_dbg_acct_paranoia_check_nolock(c, jeb); c 182 fs/jffs2/wbuf.c static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c, c 230 fs/jffs2/wbuf.c static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf, c 237 fs/jffs2/wbuf.c ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify); c 240 fs/jffs2/wbuf.c __func__, c->wbuf_ofs, ret); c 242 fs/jffs2/wbuf.c } else if (retlen != c->wbuf_pagesize) { c 244 fs/jffs2/wbuf.c __func__, ofs, retlen, c->wbuf_pagesize); c 247 fs/jffs2/wbuf.c if (!memcmp(buf, c->wbuf_verify, c->wbuf_pagesize)) c 258 fs/jffs2/wbuf.c eccstr, c->wbuf_ofs); c 260 fs/jffs2/wbuf.c c->wbuf, c->wbuf_pagesize, 0); c 264 fs/jffs2/wbuf.c c->wbuf_verify, c->wbuf_pagesize, 0); c 269 fs/jffs2/wbuf.c #define jffs2_verify_write(c,b,o) (0) c 275 fs/jffs2/wbuf.c static void jffs2_wbuf_recover(struct jffs2_sb_info *c) c 285 fs/jffs2/wbuf.c jeb = &c->blocks[c->wbuf_ofs / c->sector_size]; c 287 fs/jffs2/wbuf.c 
spin_lock(&c->erase_completion_lock); c 288 fs/jffs2/wbuf.c if (c->wbuf_ofs % c->mtd->erasesize) c 289 fs/jffs2/wbuf.c jffs2_block_refile(c, jeb, REFILE_NOTEMPTY); c 291 fs/jffs2/wbuf.c jffs2_block_refile(c, jeb, REFILE_ANYWAY); c 292 fs/jffs2/wbuf.c spin_unlock(&c->erase_completion_lock); c 302 fs/jffs2/wbuf.c (next && ref_offset(next) <= c->wbuf_ofs)) { c 305 fs/jffs2/wbuf.c (ref_offset(raw) + ref_totlen(c, jeb, raw)), c 306 fs/jffs2/wbuf.c c->wbuf_ofs); c 311 fs/jffs2/wbuf.c (ref_offset(raw) + ref_totlen(c, jeb, raw))); c 320 fs/jffs2/wbuf.c c->wbuf_len = 0; c 336 fs/jffs2/wbuf.c if (start < c->wbuf_ofs) { c 348 fs/jffs2/wbuf.c ret = mtd_read(c->mtd, start, c->wbuf_ofs - start, &retlen, c 353 fs/jffs2/wbuf.c (retlen == c->wbuf_ofs - start)) c 356 fs/jffs2/wbuf.c if (ret || retlen != c->wbuf_ofs - start) { c 371 fs/jffs2/wbuf.c c->wbuf_len = 0; c 382 fs/jffs2/wbuf.c memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs); c 389 fs/jffs2/wbuf.c ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE); c 397 fs/jffs2/wbuf.c jffs2_sum_disable_collecting(c->summary); c 399 fs/jffs2/wbuf.c ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile); c 406 fs/jffs2/wbuf.c ofs = write_ofs(c); c 408 fs/jffs2/wbuf.c if (end-start >= c->wbuf_pagesize) { c 414 fs/jffs2/wbuf.c unsigned char *rewrite_buf = buf?:c->wbuf; c 415 fs/jffs2/wbuf.c uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize); c 425 fs/jffs2/wbuf.c mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf); c 429 fs/jffs2/wbuf.c ret = mtd_write(c->mtd, ofs, towrite, &retlen, c 432 fs/jffs2/wbuf.c if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) { c 438 fs/jffs2/wbuf.c jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL); c 444 fs/jffs2/wbuf.c c->wbuf_len = (end - start) - towrite; c 445 fs/jffs2/wbuf.c c->wbuf_ofs = ofs + towrite; c 446 fs/jffs2/wbuf.c memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len); c 451 fs/jffs2/wbuf.c memcpy(c->wbuf, buf, end-start); c 453 fs/jffs2/wbuf.c memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start); c 455 fs/jffs2/wbuf.c c->wbuf_ofs = ofs; c 456 fs/jffs2/wbuf.c c->wbuf_len = end - start; c 460 fs/jffs2/wbuf.c new_jeb = &c->blocks[ofs / c->sector_size]; c 462 fs/jffs2/wbuf.c spin_lock(&c->erase_completion_lock); c 464 fs/jffs2/wbuf.c uint32_t rawlen = ref_totlen(c, jeb, raw); c 505 fs/jffs2/wbuf.c f = jffs2_gc_fetch_inode(c, ic->ino, !ic->pino_nlink); c 517 fs/jffs2/wbuf.c adjust_ref = jffs2_incore_replace_raw(c, f, raw, c 518 fs/jffs2/wbuf.c (void *)(buf?:c->wbuf) + (ref_offset(raw) - start)); c 527 fs/jffs2/wbuf.c new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic); c 534 fs/jffs2/wbuf.c jffs2_gc_release_inode(c, f); c 539 fs/jffs2/wbuf.c c->dirty_size += rawlen; c 540 fs/jffs2/wbuf.c c->used_size -= rawlen; c 553 fs/jffs2/wbuf.c list_move(&jeb->list, &c->erase_pending_list); c 554 fs/jffs2/wbuf.c c->nr_erasing_blocks++; c 555 fs/jffs2/wbuf.c jffs2_garbage_collect_trigger(c); c 558 fs/jffs2/wbuf.c jffs2_dbg_acct_sanity_check_nolock(c, jeb); c 559 fs/jffs2/wbuf.c jffs2_dbg_acct_paranoia_check_nolock(c, jeb); c 561 fs/jffs2/wbuf.c jffs2_dbg_acct_sanity_check_nolock(c, new_jeb); c 562 fs/jffs2/wbuf.c jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb); c 564 fs/jffs2/wbuf.c spin_unlock(&c->erase_completion_lock); c 567 fs/jffs2/wbuf.c c->wbuf_ofs, c->wbuf_len); c 580 fs/jffs2/wbuf.c static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) c 588 fs/jffs2/wbuf.c if 
(!jffs2_is_writebuffered(c)) c 591 fs/jffs2/wbuf.c if (!mutex_is_locked(&c->alloc_sem)) { c 596 fs/jffs2/wbuf.c if (!c->wbuf_len) /* already checked c->wbuf above */ c 599 fs/jffs2/wbuf.c wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size]; c 600 fs/jffs2/wbuf.c if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1)) c 610 fs/jffs2/wbuf.c c->wbuf_len = PAD(c->wbuf_len); c 614 fs/jffs2/wbuf.c memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len); c 616 fs/jffs2/wbuf.c if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) { c 617 fs/jffs2/wbuf.c struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len); c 620 fs/jffs2/wbuf.c padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len); c 630 fs/jffs2/wbuf.c pr_notice("Faking write error at 0x%08x\n", c->wbuf_ofs); c 632 fs/jffs2/wbuf.c mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c 638 fs/jffs2/wbuf.c ret = mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, c 639 fs/jffs2/wbuf.c &retlen, c->wbuf); c 644 fs/jffs2/wbuf.c } else if (retlen != c->wbuf_pagesize) { c 646 fs/jffs2/wbuf.c retlen, c->wbuf_pagesize); c 649 fs/jffs2/wbuf.c } else if ((ret = jffs2_verify_write(c, c->wbuf, c->wbuf_ofs))) { c 651 fs/jffs2/wbuf.c jffs2_wbuf_recover(c); c 658 fs/jffs2/wbuf.c uint32_t waste = c->wbuf_pagesize - c->wbuf_len; c 661 fs/jffs2/wbuf.c (wbuf_jeb == c->nextblock) ? "next" : "", c 669 fs/jffs2/wbuf.c c->wbuf_ofs, c->wbuf_len, waste); c 675 fs/jffs2/wbuf.c spin_lock(&c->erase_completion_lock); c 677 fs/jffs2/wbuf.c jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL); c 680 fs/jffs2/wbuf.c c->dirty_size -= waste; c 682 fs/jffs2/wbuf.c c->wasted_size += waste; c 684 fs/jffs2/wbuf.c spin_lock(&c->erase_completion_lock); c 687 fs/jffs2/wbuf.c jffs2_refile_wbuf_blocks(c); c 688 fs/jffs2/wbuf.c jffs2_clear_wbuf_ino_list(c); c 689 fs/jffs2/wbuf.c spin_unlock(&c->erase_completion_lock); c 691 fs/jffs2/wbuf.c memset(c->wbuf,0xff,c->wbuf_pagesize); c 693 fs/jffs2/wbuf.c c->wbuf_ofs += c->wbuf_pagesize; c 694 fs/jffs2/wbuf.c c->wbuf_len = 0; c 702 fs/jffs2/wbuf.c int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino) c 710 fs/jffs2/wbuf.c if (!c->wbuf) c 713 fs/jffs2/wbuf.c mutex_lock(&c->alloc_sem); c 714 fs/jffs2/wbuf.c if (!jffs2_wbuf_pending_for_ino(c, ino)) { c 716 fs/jffs2/wbuf.c mutex_unlock(&c->alloc_sem); c 720 fs/jffs2/wbuf.c old_wbuf_ofs = c->wbuf_ofs; c 721 fs/jffs2/wbuf.c old_wbuf_len = c->wbuf_len; c 723 fs/jffs2/wbuf.c if (c->unchecked_size) { c 727 fs/jffs2/wbuf.c down_write(&c->wbuf_sem); c 728 fs/jffs2/wbuf.c ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING); c 732 fs/jffs2/wbuf.c ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING); c 733 fs/jffs2/wbuf.c up_write(&c->wbuf_sem); c 735 fs/jffs2/wbuf.c old_wbuf_ofs == c->wbuf_ofs) { c 737 fs/jffs2/wbuf.c mutex_unlock(&c->alloc_sem); c 741 fs/jffs2/wbuf.c ret = jffs2_garbage_collect_pass(c); c 744 fs/jffs2/wbuf.c mutex_lock(&c->alloc_sem); c 745 fs/jffs2/wbuf.c down_write(&c->wbuf_sem); c 746 fs/jffs2/wbuf.c ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING); c 750 fs/jffs2/wbuf.c ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING); c 751 fs/jffs2/wbuf.c up_write(&c->wbuf_sem); c 754 fs/jffs2/wbuf.c mutex_lock(&c->alloc_sem); c 759 fs/jffs2/wbuf.c mutex_unlock(&c->alloc_sem); c 764 fs/jffs2/wbuf.c int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c) c 768 fs/jffs2/wbuf.c if (!c->wbuf) c 771 fs/jffs2/wbuf.c down_write(&c->wbuf_sem); c 772 fs/jffs2/wbuf.c ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT); c 775 
fs/jffs2/wbuf.c ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT); c 776 fs/jffs2/wbuf.c up_write(&c->wbuf_sem); c 781 fs/jffs2/wbuf.c static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf, c 784 fs/jffs2/wbuf.c if (len && !c->wbuf_len && (len >= c->wbuf_pagesize)) c 787 fs/jffs2/wbuf.c if (len > (c->wbuf_pagesize - c->wbuf_len)) c 788 fs/jffs2/wbuf.c len = c->wbuf_pagesize - c->wbuf_len; c 789 fs/jffs2/wbuf.c memcpy(c->wbuf + c->wbuf_len, buf, len); c 790 fs/jffs2/wbuf.c c->wbuf_len += (uint32_t) len; c 794 fs/jffs2/wbuf.c int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, c 804 fs/jffs2/wbuf.c if (!jffs2_is_writebuffered(c)) c 805 fs/jffs2/wbuf.c return jffs2_flash_direct_writev(c, invecs, count, to, retlen); c 807 fs/jffs2/wbuf.c down_write(&c->wbuf_sem); c 810 fs/jffs2/wbuf.c if (c->wbuf_ofs == 0xFFFFFFFF) { c 811 fs/jffs2/wbuf.c c->wbuf_ofs = PAGE_DIV(to); c 812 fs/jffs2/wbuf.c c->wbuf_len = PAGE_MOD(to); c 813 fs/jffs2/wbuf.c memset(c->wbuf,0xff,c->wbuf_pagesize); c 823 fs/jffs2/wbuf.c if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) { c 825 fs/jffs2/wbuf.c if (c->wbuf_len) { c 827 fs/jffs2/wbuf.c __func__, (unsigned long)to, c->wbuf_ofs); c 828 fs/jffs2/wbuf.c ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT); c 833 fs/jffs2/wbuf.c c->wbuf_ofs = PAGE_DIV(to); c 834 fs/jffs2/wbuf.c c->wbuf_len = PAGE_MOD(to); c 837 fs/jffs2/wbuf.c if (to != PAD(c->wbuf_ofs + c->wbuf_len)) { c 841 fs/jffs2/wbuf.c if (c->wbuf_len) c 843 fs/jffs2/wbuf.c c->wbuf_ofs, c->wbuf_ofs + c->wbuf_len); c 848 fs/jffs2/wbuf.c if (c->wbuf_len != PAGE_MOD(to)) { c 849 fs/jffs2/wbuf.c c->wbuf_len = PAGE_MOD(to); c 851 fs/jffs2/wbuf.c if (!c->wbuf_len) { c 852 fs/jffs2/wbuf.c c->wbuf_len = c->wbuf_pagesize; c 853 fs/jffs2/wbuf.c ret = __jffs2_flush_wbuf(c, NOPAD); c 863 fs/jffs2/wbuf.c wbuf_retlen = jffs2_fill_wbuf(c, v, vlen); c 865 fs/jffs2/wbuf.c if (c->wbuf_len == c->wbuf_pagesize) { c 866 fs/jffs2/wbuf.c ret = __jffs2_flush_wbuf(c, NOPAD); c 875 fs/jffs2/wbuf.c if (vlen >= c->wbuf_pagesize) { c 876 fs/jffs2/wbuf.c ret = mtd_write(c->mtd, outvec_to, PAGE_DIV(vlen), c 883 fs/jffs2/wbuf.c c->wbuf_ofs = outvec_to; c 888 fs/jffs2/wbuf.c wbuf_retlen = jffs2_fill_wbuf(c, v, vlen); c 889 fs/jffs2/wbuf.c if (c->wbuf_len == c->wbuf_pagesize) { c 890 fs/jffs2/wbuf.c ret = __jffs2_flush_wbuf(c, NOPAD); c 906 fs/jffs2/wbuf.c int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to); c 911 fs/jffs2/wbuf.c if (c->wbuf_len && ino) c 912 fs/jffs2/wbuf.c jffs2_wbuf_dirties_inode(c, ino); c 915 fs/jffs2/wbuf.c up_write(&c->wbuf_sem); c 924 fs/jffs2/wbuf.c spin_lock(&c->erase_completion_lock); c 926 fs/jffs2/wbuf.c jeb = &c->blocks[outvec_to / c->sector_size]; c 927 fs/jffs2/wbuf.c jffs2_block_refile(c, jeb, REFILE_ANYWAY); c 929 fs/jffs2/wbuf.c spin_unlock(&c->erase_completion_lock); c 933 fs/jffs2/wbuf.c up_write(&c->wbuf_sem); c 941 fs/jffs2/wbuf.c int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, c 946 fs/jffs2/wbuf.c if (!jffs2_is_writebuffered(c)) c 947 fs/jffs2/wbuf.c return jffs2_flash_direct_write(c, ofs, len, retlen, buf); c 951 fs/jffs2/wbuf.c return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0); c 957 fs/jffs2/wbuf.c int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf) c 962 fs/jffs2/wbuf.c if (!jffs2_is_writebuffered(c)) c 963 fs/jffs2/wbuf.c return mtd_read(c->mtd, ofs, len, retlen, buf); c 966 fs/jffs2/wbuf.c down_read(&c->wbuf_sem); c 967 fs/jffs2/wbuf.c ret = mtd_read(c->mtd, ofs, len, retlen, buf); c 987 
fs/jffs2/wbuf.c if (!c->wbuf_pagesize || !c->wbuf_len) c 991 fs/jffs2/wbuf.c if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs)) c 994 fs/jffs2/wbuf.c if (ofs >= c->wbuf_ofs) { c 995 fs/jffs2/wbuf.c owbf = (ofs - c->wbuf_ofs); /* offset in write buffer */ c 996 fs/jffs2/wbuf.c if (owbf > c->wbuf_len) /* is read beyond write buffer ? */ c 998 fs/jffs2/wbuf.c lwbf = c->wbuf_len - owbf; /* number of bytes to copy */ c 1002 fs/jffs2/wbuf.c orbf = (c->wbuf_ofs - ofs); /* offset in read buffer */ c 1006 fs/jffs2/wbuf.c if (lwbf > c->wbuf_len) c 1007 fs/jffs2/wbuf.c lwbf = c->wbuf_len; c 1010 fs/jffs2/wbuf.c memcpy(buf+orbf,c->wbuf+owbf,lwbf); c 1013 fs/jffs2/wbuf.c up_read(&c->wbuf_sem); c 1033 fs/jffs2/wbuf.c int jffs2_check_oob_empty(struct jffs2_sb_info *c, c 1037 fs/jffs2/wbuf.c int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE); c 1041 fs/jffs2/wbuf.c ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail; c 1042 fs/jffs2/wbuf.c ops.oobbuf = c->oobbuf; c 1046 fs/jffs2/wbuf.c ret = mtd_read_oob(c->mtd, jeb->offset, &ops); c 1076 fs/jffs2/wbuf.c int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c, c 1080 fs/jffs2/wbuf.c int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE); c 1084 fs/jffs2/wbuf.c ops.oobbuf = c->oobbuf; c 1088 fs/jffs2/wbuf.c ret = mtd_read_oob(c->mtd, jeb->offset, &ops); c 1097 fs/jffs2/wbuf.c return !!memcmp(&oob_cleanmarker, c->oobbuf, cmlen); c 1100 fs/jffs2/wbuf.c int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, c 1105 fs/jffs2/wbuf.c int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE); c 1113 fs/jffs2/wbuf.c ret = mtd_write_oob(c->mtd, jeb->offset, &ops); c 1133 fs/jffs2/wbuf.c int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset) c 1142 fs/jffs2/wbuf.c ret = mtd_block_markbad(c->mtd, bad_offset); c 1162 fs/jffs2/wbuf.c struct jffs2_sb_info *c = work_to_sb(work); c 1163 fs/jffs2/wbuf.c struct super_block *sb = OFNI_BS_2SFFJ(c); c 1167 fs/jffs2/wbuf.c jffs2_flush_wbuf_gc(c, 0); c 1171 fs/jffs2/wbuf.c void jffs2_dirty_trigger(struct jffs2_sb_info *c) c 1173 fs/jffs2/wbuf.c struct super_block *sb = OFNI_BS_2SFFJ(c); c 1180 fs/jffs2/wbuf.c if (queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay)) c 1184 fs/jffs2/wbuf.c int jffs2_nand_flash_setup(struct jffs2_sb_info *c) c 1186 fs/jffs2/wbuf.c if (!c->mtd->oobsize) c 1190 fs/jffs2/wbuf.c c->cleanmarker_size = 0; c 1192 fs/jffs2/wbuf.c if (c->mtd->oobavail == 0) { c 1199 fs/jffs2/wbuf.c c->oobavail = c->mtd->oobavail; c 1202 fs/jffs2/wbuf.c init_rwsem(&c->wbuf_sem); c 1203 fs/jffs2/wbuf.c INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync); c 1204 fs/jffs2/wbuf.c c->wbuf_pagesize = c->mtd->writesize; c 1205 fs/jffs2/wbuf.c c->wbuf_ofs = 0xFFFFFFFF; c 1207 fs/jffs2/wbuf.c c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); c 1208 fs/jffs2/wbuf.c if (!c->wbuf) c 1211 fs/jffs2/wbuf.c c->oobbuf = kmalloc_array(NR_OOB_SCAN_PAGES, c->oobavail, GFP_KERNEL); c 1212 fs/jffs2/wbuf.c if (!c->oobbuf) { c 1213 fs/jffs2/wbuf.c kfree(c->wbuf); c 1218 fs/jffs2/wbuf.c c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL); c 1219 fs/jffs2/wbuf.c if (!c->wbuf_verify) { c 1220 fs/jffs2/wbuf.c kfree(c->oobbuf); c 1221 fs/jffs2/wbuf.c kfree(c->wbuf); c 1228 fs/jffs2/wbuf.c void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c) c 1231 fs/jffs2/wbuf.c kfree(c->wbuf_verify); c 1233 fs/jffs2/wbuf.c kfree(c->wbuf); c 1234 fs/jffs2/wbuf.c kfree(c->oobbuf); c 1237 fs/jffs2/wbuf.c int jffs2_dataflash_setup(struct jffs2_sb_info *c) { c 1238 fs/jffs2/wbuf.c c->cleanmarker_size = 0; /* No cleanmarkers 
needed */ c 1241 fs/jffs2/wbuf.c init_rwsem(&c->wbuf_sem); c 1242 fs/jffs2/wbuf.c INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync); c 1243 fs/jffs2/wbuf.c c->wbuf_pagesize = c->mtd->erasesize; c 1253 fs/jffs2/wbuf.c c->sector_size = 8 * c->mtd->erasesize; c 1255 fs/jffs2/wbuf.c while (c->sector_size < 8192) { c 1256 fs/jffs2/wbuf.c c->sector_size *= 2; c 1260 fs/jffs2/wbuf.c c->flash_size = c->mtd->size; c 1262 fs/jffs2/wbuf.c if ((c->flash_size % c->sector_size) != 0) { c 1263 fs/jffs2/wbuf.c c->flash_size = (c->flash_size / c->sector_size) * c->sector_size; c 1264 fs/jffs2/wbuf.c pr_warn("flash size adjusted to %dKiB\n", c->flash_size); c 1267 fs/jffs2/wbuf.c c->wbuf_ofs = 0xFFFFFFFF; c 1268 fs/jffs2/wbuf.c c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); c 1269 fs/jffs2/wbuf.c if (!c->wbuf) c 1273 fs/jffs2/wbuf.c c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL); c 1274 fs/jffs2/wbuf.c if (!c->wbuf_verify) { c 1275 fs/jffs2/wbuf.c kfree(c->wbuf); c 1281 fs/jffs2/wbuf.c c->wbuf_pagesize, c->sector_size); c 1286 fs/jffs2/wbuf.c void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) { c 1288 fs/jffs2/wbuf.c kfree(c->wbuf_verify); c 1290 fs/jffs2/wbuf.c kfree(c->wbuf); c 1293 fs/jffs2/wbuf.c int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) { c 1296 fs/jffs2/wbuf.c c->cleanmarker_size = max(16u, c->mtd->writesize); c 1299 fs/jffs2/wbuf.c init_rwsem(&c->wbuf_sem); c 1300 fs/jffs2/wbuf.c INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync); c 1302 fs/jffs2/wbuf.c c->wbuf_pagesize = c->mtd->writesize; c 1303 fs/jffs2/wbuf.c c->wbuf_ofs = 0xFFFFFFFF; c 1305 fs/jffs2/wbuf.c c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); c 1306 fs/jffs2/wbuf.c if (!c->wbuf) c 1310 fs/jffs2/wbuf.c c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL); c 1311 fs/jffs2/wbuf.c if (!c->wbuf_verify) { c 1312 fs/jffs2/wbuf.c kfree(c->wbuf); c 1319 fs/jffs2/wbuf.c void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) { c 1321 fs/jffs2/wbuf.c kfree(c->wbuf_verify); c 1323 fs/jffs2/wbuf.c kfree(c->wbuf); c 1326 fs/jffs2/wbuf.c int jffs2_ubivol_setup(struct jffs2_sb_info *c) { c 1327 fs/jffs2/wbuf.c c->cleanmarker_size = 0; c 1329 fs/jffs2/wbuf.c if (c->mtd->writesize == 1) c 1333 fs/jffs2/wbuf.c init_rwsem(&c->wbuf_sem); c 1334 fs/jffs2/wbuf.c INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync); c 1336 fs/jffs2/wbuf.c c->wbuf_pagesize = c->mtd->writesize; c 1337 fs/jffs2/wbuf.c c->wbuf_ofs = 0xFFFFFFFF; c 1338 fs/jffs2/wbuf.c c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); c 1339 fs/jffs2/wbuf.c if (!c->wbuf) c 1343 fs/jffs2/wbuf.c c->wbuf_pagesize, c->sector_size); c 1348 fs/jffs2/wbuf.c void jffs2_ubivol_cleanup(struct jffs2_sb_info *c) { c 1349 fs/jffs2/wbuf.c kfree(c->wbuf); c 23 fs/jffs2/write.c int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, c 40 fs/jffs2/write.c jffs2_add_ino_cache(c, f->inocache); c 59 fs/jffs2/write.c struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, c 96 fs/jffs2/write.c flash_ofs = write_ofs(c); c 98 fs/jffs2/write.c jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len); c 109 fs/jffs2/write.c ret = jffs2_flash_writev(c, vecs, cnt, flash_ofs, &retlen, c 123 fs/jffs2/write.c jffs2_add_physical_node_ref(c, flash_ofs | REF_OBSOLETE, PAD(sizeof(*ri)+datalen), NULL); c 131 fs/jffs2/write.c struct jffs2_eraseblock *jeb = &c->blocks[flash_ofs / c->sector_size]; c 137 fs/jffs2/write.c jffs2_dbg_acct_sanity_check(c,jeb); c 138 fs/jffs2/write.c jffs2_dbg_acct_paranoia_check(c, 
jeb); c 141 fs/jffs2/write.c ret = jffs2_reserve_space_gc(c, sizeof(*ri) + datalen, &dummy, c 146 fs/jffs2/write.c jffs2_complete_reservation(c); c 148 fs/jffs2/write.c ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &dummy, c 154 fs/jffs2/write.c flash_ofs = write_ofs(c); c 158 fs/jffs2/write.c jffs2_dbg_acct_sanity_check(c,jeb); c 159 fs/jffs2/write.c jffs2_dbg_acct_paranoia_check(c, jeb); c 182 fs/jffs2/write.c fn->raw = jffs2_add_physical_node_ref(c, flash_ofs, PAD(sizeof(*ri)+datalen), f->inocache); c 199 fs/jffs2/write.c jffs2_dbg_acct_sanity_check(c,NULL); c 205 fs/jffs2/write.c struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f, c 254 fs/jffs2/write.c flash_ofs = write_ofs(c); c 256 fs/jffs2/write.c jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len); c 268 fs/jffs2/write.c ret = jffs2_flash_writev(c, vecs, 2, flash_ofs, &retlen, c 275 fs/jffs2/write.c jffs2_add_physical_node_ref(c, flash_ofs | REF_OBSOLETE, PAD(sizeof(*rd)+namelen), NULL); c 283 fs/jffs2/write.c struct jffs2_eraseblock *jeb = &c->blocks[flash_ofs / c->sector_size]; c 289 fs/jffs2/write.c jffs2_dbg_acct_sanity_check(c,jeb); c 290 fs/jffs2/write.c jffs2_dbg_acct_paranoia_check(c, jeb); c 293 fs/jffs2/write.c ret = jffs2_reserve_space_gc(c, sizeof(*rd) + namelen, &dummy, c 298 fs/jffs2/write.c jffs2_complete_reservation(c); c 300 fs/jffs2/write.c ret = jffs2_reserve_space(c, sizeof(*rd) + namelen, &dummy, c 306 fs/jffs2/write.c flash_ofs = write_ofs(c); c 309 fs/jffs2/write.c jffs2_dbg_acct_sanity_check(c,jeb); c 310 fs/jffs2/write.c jffs2_dbg_acct_paranoia_check(c, jeb); c 321 fs/jffs2/write.c fd->raw = jffs2_add_physical_node_ref(c, flash_ofs | dirent_node_state(rd), c 331 fs/jffs2/write.c jffs2_dbg_acct_sanity_check(c,NULL); c 340 fs/jffs2/write.c int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, c 362 fs/jffs2/write.c ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN, c 373 fs/jffs2/write.c comprtype = jffs2_compress(c, f, buf, &comprbuf, &datalen, &cdatalen); c 391 fs/jffs2/write.c fn = jffs2_write_dnode(c, f, ri, comprbuf, cdatalen, ALLOC_NORETRY); c 398 fs/jffs2/write.c jffs2_complete_reservation(c); c 407 fs/jffs2/write.c ret = jffs2_add_full_dnode_to_inode(c, f, fn); c 409 fs/jffs2/write.c jffs2_mark_node_obsolete(c, f->metadata->raw); c 417 fs/jffs2/write.c jffs2_mark_node_obsolete(c, fn->raw); c 421 fs/jffs2/write.c jffs2_complete_reservation(c); c 425 fs/jffs2/write.c jffs2_complete_reservation(c); c 441 fs/jffs2/write.c int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, c 454 fs/jffs2/write.c ret = jffs2_reserve_space(c, sizeof(*ri), &alloclen, ALLOC_NORMAL, c 465 fs/jffs2/write.c fn = jffs2_write_dnode(c, f, ri, NULL, 0, ALLOC_NORMAL); c 474 fs/jffs2/write.c jffs2_complete_reservation(c); c 483 fs/jffs2/write.c jffs2_complete_reservation(c); c 492 fs/jffs2/write.c ret = jffs2_reserve_space(c, sizeof(*rd)+qstr->len, &alloclen, c 504 fs/jffs2/write.c jffs2_complete_reservation(c); c 524 fs/jffs2/write.c fd = jffs2_write_dirent(c, dir_f, rd, qstr->name, qstr->len, ALLOC_NORMAL); c 531 fs/jffs2/write.c jffs2_complete_reservation(c); c 538 fs/jffs2/write.c jffs2_add_fd_to_list(c, fd, &dir_f->dents); c 540 fs/jffs2/write.c jffs2_complete_reservation(c); c 547 fs/jffs2/write.c int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, c 556 fs/jffs2/write.c if (!jffs2_can_mark_obsolete(c)) { c 563 fs/jffs2/write.c ret = jffs2_reserve_space(c, 
sizeof(*rd)+namelen, &alloclen, c 587 fs/jffs2/write.c fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, ALLOC_DELETION); c 592 fs/jffs2/write.c jffs2_complete_reservation(c); c 598 fs/jffs2/write.c jffs2_add_fd_to_list(c, fd, &dir_f->dents); c 606 fs/jffs2/write.c mutex_lock(&c->alloc_sem); c 616 fs/jffs2/write.c jffs2_mark_node_obsolete(c, fd->raw); c 653 fs/jffs2/write.c jffs2_mark_node_obsolete(c, fd->raw); c 663 fs/jffs2/write.c jffs2_complete_reservation(c); c 669 fs/jffs2/write.c int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen, uint32_t time) c 680 fs/jffs2/write.c ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen, c 706 fs/jffs2/write.c fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, ALLOC_NORMAL); c 711 fs/jffs2/write.c jffs2_complete_reservation(c); c 717 fs/jffs2/write.c jffs2_add_fd_to_list(c, fd, &dir_f->dents); c 719 fs/jffs2/write.c jffs2_complete_reservation(c); c 16 fs/jffs2/writev.c int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs, c 19 fs/jffs2/writev.c if (!jffs2_is_writebuffered(c)) { c 22 fs/jffs2/writev.c res = jffs2_sum_add_kvec(c, vecs, count, (uint32_t) to); c 29 fs/jffs2/writev.c return mtd_writev(c->mtd, vecs, count, to, retlen); c 32 fs/jffs2/writev.c int jffs2_flash_direct_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, c 36 fs/jffs2/writev.c ret = mtd_write(c->mtd, ofs, len, retlen, buf); c 45 fs/jffs2/writev.c res = jffs2_sum_add_kvec(c, vecs, 1, (uint32_t) ofs); c 71 fs/jffs2/xattr.c static int is_xattr_datum_unchecked(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) c 76 fs/jffs2/xattr.c spin_lock(&c->erase_completion_lock); c 83 fs/jffs2/xattr.c spin_unlock(&c->erase_completion_lock); c 87 fs/jffs2/xattr.c static void unload_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) c 92 fs/jffs2/xattr.c c->xdatum_mem_usage -= (xd->name_len + 1 + xd->value_len); c 102 fs/jffs2/xattr.c static void reclaim_xattr_datum(struct jffs2_sb_info *c) c 110 fs/jffs2/xattr.c if (c->xdatum_mem_threshold > c->xdatum_mem_usage) c 113 fs/jffs2/xattr.c before = c->xdatum_mem_usage; c 114 fs/jffs2/xattr.c target = c->xdatum_mem_usage * 4 / 5; /* 20% reduction */ c 116 fs/jffs2/xattr.c list_for_each_entry_safe(xd, _xd, &c->xattrindex[index], xindex) { c 120 fs/jffs2/xattr.c unload_xattr_datum(c, xd); c 122 fs/jffs2/xattr.c if (c->xdatum_mem_usage <= target) c 129 fs/jffs2/xattr.c before, c->xdatum_mem_usage, before - c->xdatum_mem_usage); c 132 fs/jffs2/xattr.c static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) c 142 fs/jffs2/xattr.c spin_lock(&c->erase_completion_lock); c 146 fs/jffs2/xattr.c spin_unlock(&c->erase_completion_lock); c 148 fs/jffs2/xattr.c rc = jffs2_flash_read(c, offset, sizeof(rx), &readlen, (char *)&rx); c 182 fs/jffs2/xattr.c spin_lock(&c->erase_completion_lock); c 185 fs/jffs2/xattr.c jeb = &c->blocks[ref_offset(raw) / c->sector_size]; c 186 fs/jffs2/xattr.c totlen = PAD(ref_totlen(c, jeb, raw)); c 188 fs/jffs2/xattr.c c->unchecked_size -= totlen; c->used_size += totlen; c 193 fs/jffs2/xattr.c spin_unlock(&c->erase_completion_lock); c 204 fs/jffs2/xattr.c static int do_load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) c 220 fs/jffs2/xattr.c ret = jffs2_flash_read(c, ref_offset(xd->node)+sizeof(struct jffs2_raw_xattr), c 245 fs/jffs2/xattr.c c->xdatum_mem_usage += length; c 249 fs/jffs2/xattr.c list_add(&xd->xindex, &c->xattrindex[i]); c 252 
fs/jffs2/xattr.c reclaim_xattr_datum(c); c 263 fs/jffs2/xattr.c static int load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) c 277 fs/jffs2/xattr.c if (unlikely(is_xattr_datum_unchecked(c, xd))) c 278 fs/jffs2/xattr.c rc = do_verify_xattr_datum(c, xd); c 280 fs/jffs2/xattr.c rc = do_load_xattr_datum(c, xd); c 284 fs/jffs2/xattr.c static int save_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) c 291 fs/jffs2/xattr.c uint32_t phys_ofs = write_ofs(c); c 317 fs/jffs2/xattr.c rc = jffs2_flash_writev(c, vecs, 2, phys_ofs, &length, 0); c 323 fs/jffs2/xattr.c jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, PAD(totlen), NULL); c 328 fs/jffs2/xattr.c jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(totlen), (void *)xd); c 336 fs/jffs2/xattr.c static struct jffs2_xattr_datum *create_xattr_datum(struct jffs2_sb_info *c, c 349 fs/jffs2/xattr.c list_for_each_entry(xd, &c->xattrindex[i], xindex) { c 376 fs/jffs2/xattr.c xd->xid = ++c->highest_xid; c 387 fs/jffs2/xattr.c rc = save_xattr_datum(c, xd); c 396 fs/jffs2/xattr.c list_add(&xd->xindex, &c->xattrindex[i]); c 398 fs/jffs2/xattr.c c->xdatum_mem_usage += (xd->name_len + 1 + xd->value_len); c 399 fs/jffs2/xattr.c reclaim_xattr_datum(c); c 404 fs/jffs2/xattr.c static void unrefer_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) c 407 fs/jffs2/xattr.c if (atomic_dec_and_lock(&xd->refcnt, &c->erase_completion_lock)) { c 408 fs/jffs2/xattr.c unload_xattr_datum(c, xd); c 414 fs/jffs2/xattr.c list_add(&xd->xindex, &c->xattr_dead_list); c 416 fs/jffs2/xattr.c spin_unlock(&c->erase_completion_lock); c 444 fs/jffs2/xattr.c static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) c 453 fs/jffs2/xattr.c spin_lock(&c->erase_completion_lock); c 457 fs/jffs2/xattr.c spin_unlock(&c->erase_completion_lock); c 459 fs/jffs2/xattr.c rc = jffs2_flash_read(c, offset, sizeof(rr), &readlen, (char *)&rr); c 485 fs/jffs2/xattr.c if (ref->xseqno > c->highest_xseqno) c 486 fs/jffs2/xattr.c c->highest_xseqno = (ref->xseqno & ~XREF_DELETE_MARKER); c 488 fs/jffs2/xattr.c spin_lock(&c->erase_completion_lock); c 491 fs/jffs2/xattr.c jeb = &c->blocks[ref_offset(raw) / c->sector_size]; c 492 fs/jffs2/xattr.c totlen = PAD(ref_totlen(c, jeb, raw)); c 494 fs/jffs2/xattr.c c->unchecked_size -= totlen; c->used_size += totlen; c 499 fs/jffs2/xattr.c spin_unlock(&c->erase_completion_lock); c 506 fs/jffs2/xattr.c static int save_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) c 511 fs/jffs2/xattr.c uint32_t xseqno, phys_ofs = write_ofs(c); c 519 fs/jffs2/xattr.c xseqno = (c->highest_xseqno += 2); c 531 fs/jffs2/xattr.c ret = jffs2_flash_write(c, phys_ofs, sizeof(rr), &length, (char *)&rr); c 537 fs/jffs2/xattr.c jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, PAD(sizeof(rr)), NULL); c 543 fs/jffs2/xattr.c jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(sizeof(rr)), (void *)ref); c 550 fs/jffs2/xattr.c static struct jffs2_xattr_ref *create_xattr_ref(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, c 563 fs/jffs2/xattr.c ret = save_xattr_ref(c, ref); c 576 fs/jffs2/xattr.c static void delete_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) c 585 fs/jffs2/xattr.c spin_lock(&c->erase_completion_lock); c 586 fs/jffs2/xattr.c ref->next = c->xref_dead_list; c 587 fs/jffs2/xattr.c c->xref_dead_list = ref; c 588 fs/jffs2/xattr.c spin_unlock(&c->erase_completion_lock); c 593 fs/jffs2/xattr.c unrefer_xattr_datum(c, xd); c 596 fs/jffs2/xattr.c 
void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) c 605 fs/jffs2/xattr.c down_write(&c->xattr_sem); c 608 fs/jffs2/xattr.c delete_xattr_ref(c, ref); c 611 fs/jffs2/xattr.c up_write(&c->xattr_sem); c 614 fs/jffs2/xattr.c void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) c 620 fs/jffs2/xattr.c down_write(&c->xattr_sem); c 625 fs/jffs2/xattr.c unload_xattr_datum(c, xd); c 631 fs/jffs2/xattr.c up_write(&c->xattr_sem); c 634 fs/jffs2/xattr.c static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) c 645 fs/jffs2/xattr.c down_write(&c->xattr_sem); c 650 fs/jffs2/xattr.c rc = load_xattr_datum(c, ref->xd); c 653 fs/jffs2/xattr.c delete_xattr_ref(c, ref); c 661 fs/jffs2/xattr.c rc = load_xattr_datum(c, cmp->xd); c 665 fs/jffs2/xattr.c delete_xattr_ref(c, cmp); c 674 fs/jffs2/xattr.c delete_xattr_ref(c, cmp); c 677 fs/jffs2/xattr.c delete_xattr_ref(c, ref); c 685 fs/jffs2/xattr.c up_write(&c->xattr_sem); c 690 fs/jffs2/xattr.c void jffs2_xattr_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) c 692 fs/jffs2/xattr.c check_xattr_ref_inode(c, ic); c 707 fs/jffs2/xattr.c void jffs2_init_xattr_subsystem(struct jffs2_sb_info *c) c 712 fs/jffs2/xattr.c INIT_LIST_HEAD(&c->xattrindex[i]); c 713 fs/jffs2/xattr.c INIT_LIST_HEAD(&c->xattr_unchecked); c 714 fs/jffs2/xattr.c INIT_LIST_HEAD(&c->xattr_dead_list); c 715 fs/jffs2/xattr.c c->xref_dead_list = NULL; c 716 fs/jffs2/xattr.c c->xref_temp = NULL; c 718 fs/jffs2/xattr.c init_rwsem(&c->xattr_sem); c 719 fs/jffs2/xattr.c c->highest_xid = 0; c 720 fs/jffs2/xattr.c c->highest_xseqno = 0; c 721 fs/jffs2/xattr.c c->xdatum_mem_usage = 0; c 722 fs/jffs2/xattr.c c->xdatum_mem_threshold = 32 * 1024; /* Default 32KB */ c 725 fs/jffs2/xattr.c static struct jffs2_xattr_datum *jffs2_find_xattr_datum(struct jffs2_sb_info *c, uint32_t xid) c 731 fs/jffs2/xattr.c BUG_ON(!(c->flags & (JFFS2_SB_FLAG_SCANNING|JFFS2_SB_FLAG_BUILDING))); c 733 fs/jffs2/xattr.c list_for_each_entry(xd, &c->xattrindex[i], xindex) { c 740 fs/jffs2/xattr.c void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c) c 746 fs/jffs2/xattr.c for (ref=c->xref_temp; ref; ref = _ref) { c 751 fs/jffs2/xattr.c for (ref=c->xref_dead_list; ref; ref = _ref) { c 757 fs/jffs2/xattr.c list_for_each_entry_safe(xd, _xd, &c->xattrindex[i], xindex) { c 764 fs/jffs2/xattr.c list_for_each_entry_safe(xd, _xd, &c->xattr_dead_list, xindex) { c 768 fs/jffs2/xattr.c list_for_each_entry_safe(xd, _xd, &c->xattr_unchecked, xindex) { c 775 fs/jffs2/xattr.c void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c) c 785 fs/jffs2/xattr.c BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING)); c 790 fs/jffs2/xattr.c for (ref=c->xref_temp; ref; ref=_ref) { c 795 fs/jffs2/xattr.c if (verify_xattr_ref(c, ref)) { c 798 fs/jffs2/xattr.c jffs2_mark_node_obsolete(c, ref->node); c 826 fs/jffs2/xattr.c c->xref_temp = NULL; c 834 fs/jffs2/xattr.c ref->next = c->xref_dead_list; c 835 fs/jffs2/xattr.c c->xref_dead_list = ref; c 841 fs/jffs2/xattr.c xd = jffs2_find_xattr_datum(c, ref->xid); c 842 fs/jffs2/xattr.c ic = jffs2_get_ino_cache(c, ref->ino); c 847 fs/jffs2/xattr.c ref->next = c->xref_dead_list; c 848 fs/jffs2/xattr.c c->xref_dead_list = ref; c 862 fs/jffs2/xattr.c list_for_each_entry_safe(xd, _xd, &c->xattrindex[i], xindex) { c 869 fs/jffs2/xattr.c list_add(&xd->xindex, &c->xattr_unchecked); c 873 fs/jffs2/xattr.c if (is_xattr_datum_unchecked(c, xd)) { c 876 fs/jffs2/xattr.c list_add(&xd->xindex, &c->xattr_unchecked); c 889 
fs/jffs2/xattr.c struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c, c 894 fs/jffs2/xattr.c xd = jffs2_find_xattr_datum(c, xid); c 901 fs/jffs2/xattr.c if (xd->xid > c->highest_xid) c 902 fs/jffs2/xattr.c c->highest_xid = xd->xid; c 903 fs/jffs2/xattr.c list_add_tail(&xd->xindex, &c->xattrindex[xid % XATTRINDEX_HASHSIZE]); c 965 fs/jffs2/xattr.c struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); c 974 fs/jffs2/xattr.c rc = check_xattr_ref_inode(c, ic); c 978 fs/jffs2/xattr.c down_read(&c->xattr_sem); c 988 fs/jffs2/xattr.c up_read(&c->xattr_sem); c 989 fs/jffs2/xattr.c down_write(&c->xattr_sem); c 992 fs/jffs2/xattr.c rc = load_xattr_datum(c, xd); c 995 fs/jffs2/xattr.c delete_xattr_ref(c, ref); c 1024 fs/jffs2/xattr.c up_read(&c->xattr_sem); c 1026 fs/jffs2/xattr.c up_write(&c->xattr_sem); c 1035 fs/jffs2/xattr.c struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); c 1041 fs/jffs2/xattr.c rc = check_xattr_ref_inode(c, ic); c 1045 fs/jffs2/xattr.c down_read(&c->xattr_sem); c 1057 fs/jffs2/xattr.c up_read(&c->xattr_sem); c 1058 fs/jffs2/xattr.c down_write(&c->xattr_sem); c 1061 fs/jffs2/xattr.c rc = load_xattr_datum(c, xd); c 1064 fs/jffs2/xattr.c delete_xattr_ref(c, ref); c 1086 fs/jffs2/xattr.c up_read(&c->xattr_sem); c 1088 fs/jffs2/xattr.c up_write(&c->xattr_sem); c 1097 fs/jffs2/xattr.c struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); c 1104 fs/jffs2/xattr.c rc = check_xattr_ref_inode(c, ic); c 1109 fs/jffs2/xattr.c rc = jffs2_reserve_space(c, request, &length, c 1117 fs/jffs2/xattr.c down_write(&c->xattr_sem); c 1124 fs/jffs2/xattr.c rc = load_xattr_datum(c, xd); c 1127 fs/jffs2/xattr.c delete_xattr_ref(c, ref); c 1141 fs/jffs2/xattr.c rc = save_xattr_ref(c, ref); c 1144 fs/jffs2/xattr.c spin_lock(&c->erase_completion_lock); c 1145 fs/jffs2/xattr.c ref->next = c->xref_dead_list; c 1146 fs/jffs2/xattr.c c->xref_dead_list = ref; c 1147 fs/jffs2/xattr.c spin_unlock(&c->erase_completion_lock); c 1148 fs/jffs2/xattr.c unrefer_xattr_datum(c, xd); c 1169 fs/jffs2/xattr.c xd = create_xattr_datum(c, xprefix, xname, buffer, size); c 1174 fs/jffs2/xattr.c up_write(&c->xattr_sem); c 1175 fs/jffs2/xattr.c jffs2_complete_reservation(c); c 1179 fs/jffs2/xattr.c rc = jffs2_reserve_space(c, request, &length, c 1181 fs/jffs2/xattr.c down_write(&c->xattr_sem); c 1184 fs/jffs2/xattr.c unrefer_xattr_datum(c, xd); c 1185 fs/jffs2/xattr.c up_write(&c->xattr_sem); c 1190 fs/jffs2/xattr.c newref = create_xattr_ref(c, ic, xd); c 1197 fs/jffs2/xattr.c unrefer_xattr_datum(c, xd); c 1199 fs/jffs2/xattr.c delete_xattr_ref(c, ref); c 1202 fs/jffs2/xattr.c up_write(&c->xattr_sem); c 1203 fs/jffs2/xattr.c jffs2_complete_reservation(c); c 1219 fs/jffs2/xattr.c int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd, c 1225 fs/jffs2/xattr.c down_write(&c->xattr_sem); c 1231 fs/jffs2/xattr.c rc = load_xattr_datum(c, xd); c 1239 fs/jffs2/xattr.c rc = jffs2_reserve_space_gc(c, totlen, &length, JFFS2_SUMMARY_XATTR_SIZE); c 1244 fs/jffs2/xattr.c rc = save_xattr_datum(c, xd); c 1250 fs/jffs2/xattr.c jffs2_mark_node_obsolete(c, raw); c 1251 fs/jffs2/xattr.c up_write(&c->xattr_sem); c 1255 fs/jffs2/xattr.c int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref, c 1261 fs/jffs2/xattr.c down_write(&c->xattr_sem); c 1270 fs/jffs2/xattr.c totlen = ref_totlen(c, c->gcblock, ref->node); c 1272 fs/jffs2/xattr.c rc = jffs2_reserve_space_gc(c, totlen, &length, JFFS2_SUMMARY_XREF_SIZE); c 1278 fs/jffs2/xattr.c rc = save_xattr_ref(c, 
ref); c 1284 fs/jffs2/xattr.c jffs2_mark_node_obsolete(c, raw); c 1285 fs/jffs2/xattr.c up_write(&c->xattr_sem); c 1289 fs/jffs2/xattr.c int jffs2_verify_xattr(struct jffs2_sb_info *c) c 1297 fs/jffs2/xattr.c down_write(&c->xattr_sem); c 1298 fs/jffs2/xattr.c list_for_each_entry_safe(xd, _xd, &c->xattr_unchecked, xindex) { c 1299 fs/jffs2/xattr.c rc = do_verify_xattr_datum(c, xd); c 1303 fs/jffs2/xattr.c spin_lock(&c->erase_completion_lock); c 1307 fs/jffs2/xattr.c jeb = &c->blocks[ref_offset(raw) / c->sector_size]; c 1308 fs/jffs2/xattr.c totlen = PAD(ref_totlen(c, jeb, raw)); c 1309 fs/jffs2/xattr.c c->unchecked_size -= totlen; c->used_size += totlen; c 1315 fs/jffs2/xattr.c list_add(&xd->xindex, &c->xattr_dead_list); c 1316 fs/jffs2/xattr.c spin_unlock(&c->erase_completion_lock); c 1318 fs/jffs2/xattr.c up_write(&c->xattr_sem); c 1319 fs/jffs2/xattr.c return list_empty(&c->xattr_unchecked) ? 1 : 0; c 1322 fs/jffs2/xattr.c void jffs2_release_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) c 1332 fs/jffs2/xattr.c void jffs2_release_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) c 1340 fs/jffs2/xattr.c for (tmp=c->xref_dead_list, ptmp=&c->xref_dead_list; tmp; ptmp=&tmp->next, tmp=tmp->next) { c 73 fs/jffs2/xattr.h extern void jffs2_init_xattr_subsystem(struct jffs2_sb_info *c); c 74 fs/jffs2/xattr.h extern void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c); c 75 fs/jffs2/xattr.h extern void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c); c 77 fs/jffs2/xattr.h extern struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c, c 80 fs/jffs2/xattr.h extern void jffs2_xattr_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); c 81 fs/jffs2/xattr.h extern void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); c 82 fs/jffs2/xattr.h extern void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); c 84 fs/jffs2/xattr.h extern int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd, c 86 fs/jffs2/xattr.h extern int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref, c 88 fs/jffs2/xattr.h extern int jffs2_verify_xattr(struct jffs2_sb_info *c); c 89 fs/jffs2/xattr.h extern void jffs2_release_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd); c 90 fs/jffs2/xattr.h extern void jffs2_release_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref); c 105 fs/jffs2/xattr.h #define jffs2_init_xattr_subsystem(c) c 106 fs/jffs2/xattr.h #define jffs2_build_xattr_subsystem(c) c 107 fs/jffs2/xattr.h #define jffs2_clear_xattr_subsystem(c) c 109 fs/jffs2/xattr.h #define jffs2_xattr_do_crccheck_inode(c, ic) c 110 fs/jffs2/xattr.h #define jffs2_xattr_delete_inode(c, ic) c 111 fs/jffs2/xattr.h #define jffs2_xattr_free_inode(c, ic) c 112 fs/jffs2/xattr.h #define jffs2_verify_xattr(c) (1) c 34 fs/jfs/jfs_debug.c char c; c 36 fs/jfs/jfs_debug.c if (get_user(c, buffer)) c 40 fs/jfs/jfs_debug.c if (c < '0' || c > '9') c 42 fs/jfs/jfs_debug.c jfsloglevel = c - '0'; c 185 fs/jfs/jfs_dtree.c #define ciToUpper(c) UniStrupr((c)->name) c 3473 fs/jfs/jfs_dtree.c goto c; c 3526 fs/jfs/jfs_dtree.c c: c 42 fs/lockd/clntproc.c void nlmclnt_next_cookie(struct nlm_cookie *c) c 46 fs/lockd/clntproc.c memcpy(c->data, &cookie, 4); c 47 fs/lockd/clntproc.c c->len=4; c 47 fs/lockd/xdr.c static __be32 *nlm_decode_cookie(__be32 *p, struct nlm_cookie *c) c 55 fs/lockd/xdr.c c->len=4; c 56 fs/lockd/xdr.c 
memset(c->data, 0, 4); /* hockeypux brain damage */ c 60 fs/lockd/xdr.c c->len=len; c 61 fs/lockd/xdr.c memcpy(c->data, p, len); c 75 fs/lockd/xdr.c nlm_encode_cookie(__be32 *p, struct nlm_cookie *c) c 77 fs/lockd/xdr.c *p++ = htonl(c->len); c 78 fs/lockd/xdr.c memcpy(p, c->data, c->len); c 79 fs/lockd/xdr.c p+=XDR_QUADLEN(c->len); c 47 fs/lockd/xdr4.c nlm4_decode_cookie(__be32 *p, struct nlm_cookie *c) c 55 fs/lockd/xdr4.c c->len=4; c 56 fs/lockd/xdr4.c memset(c->data, 0, 4); /* hockeypux brain damage */ c 60 fs/lockd/xdr4.c c->len=len; c 61 fs/lockd/xdr4.c memcpy(c->data, p, len); c 75 fs/lockd/xdr4.c nlm4_encode_cookie(__be32 *p, struct nlm_cookie *c) c 77 fs/lockd/xdr4.c *p++ = htonl(c->len); c 78 fs/lockd/xdr4.c memcpy(p, c->data, c->len); c 79 fs/lockd/xdr4.c p+=XDR_QUADLEN(c->len); c 2019 fs/namei.c unsigned long len = 0, c; c 2021 fs/namei.c c = (unsigned char)*name; c 2022 fs/namei.c while (c) { c 2024 fs/namei.c hash = partial_name_hash(c, hash); c 2025 fs/namei.c c = (unsigned char)name[len]; c 2038 fs/namei.c unsigned long len = 0, c; c 2040 fs/namei.c c = (unsigned char)*name; c 2043 fs/namei.c hash = partial_name_hash(c, hash); c 2044 fs/namei.c c = (unsigned char)name[len]; c 2045 fs/namei.c } while (c && c != '/'); c 2464 fs/namei.c unsigned int c = *(const unsigned char *)name++; c 2465 fs/namei.c if (c == '/' || c == '\0') c 2993 fs/namespace.c char c; c 2999 fs/namespace.c if (__get_user(c, f)) { c 3003 fs/namespace.c *t++ = c; c 598 fs/nfs/nfs4_fs.h #define nfs4_state_protect(a, b, c, d) do { } while (0) c 599 fs/nfs/nfs4_fs.h #define nfs4_state_protect_write(a, b, c, d) do { } while (0) c 1269 fs/nfs/nfs4proc.c const struct nfs4_open_createattrs *c, c 1277 fs/nfs/nfs4proc.c struct nfs4_label *label = (c != NULL) ? c->label : NULL; c 1309 fs/nfs/nfs4proc.c if (c->sattr != NULL && c->sattr->ia_valid != 0) { c 1311 fs/nfs/nfs4proc.c memcpy(&p->attrs, c->sattr, sizeof(p->attrs)); c 1313 fs/nfs/nfs4proc.c memcpy(p->o_arg.u.verifier.data, c->verf, c 3040 fs/nfs/nfs4proc.c const struct nfs4_open_createattrs *c, c 3052 fs/nfs/nfs4proc.c struct iattr *sattr = c->sattr; c 3053 fs/nfs/nfs4proc.c struct nfs4_label *label = c->label; c 3073 fs/nfs/nfs4proc.c c, claim, GFP_KERNEL); c 3160 fs/nfs/nfs4proc.c struct nfs4_open_createattrs c = { c 3171 fs/nfs/nfs4proc.c status = _nfs4_do_open(dir, ctx, flags, &c, opened); c 2845 fs/nfs/super.c char *c; c 2877 fs/nfs/super.c c = strndup_user(data->hostname.data, NFS4_MAXNAMLEN); c 2878 fs/nfs/super.c if (IS_ERR(c)) c 2879 fs/nfs/super.c return PTR_ERR(c); c 2880 fs/nfs/super.c args->nfs_server.hostname = c; c 2882 fs/nfs/super.c c = strndup_user(data->mnt_path.data, NFS4_MAXPATHLEN); c 2883 fs/nfs/super.c if (IS_ERR(c)) c 2884 fs/nfs/super.c return PTR_ERR(c); c 2885 fs/nfs/super.c args->nfs_server.export_path = c; c 2886 fs/nfs/super.c dfprintk(MOUNT, "NFS: MNTPATH: '%s'\n", c); c 2888 fs/nfs/super.c c = strndup_user(data->client_addr.data, 16); c 2889 fs/nfs/super.c if (IS_ERR(c)) c 2890 fs/nfs/super.c return PTR_ERR(c); c 2891 fs/nfs/super.c args->client_address = c; c 79 fs/nfs/sysfs.c struct nfs_netns_client *c = container_of(kobj, c 82 fs/nfs/sysfs.c return scnprintf(buf, PAGE_SIZE, "%s\n", c->identifier); c 86 fs/nfs/sysfs.c static size_t nfs_string_strip(const char *c, size_t len) c 88 fs/nfs/sysfs.c while (len > 0 && c[len-1] == '\n') c 97 fs/nfs/sysfs.c struct nfs_netns_client *c = container_of(kobj, c 110 fs/nfs/sysfs.c old = xchg(&c->identifier, p); c 120 fs/nfs/sysfs.c struct nfs_netns_client *c = container_of(kobj, c 124 
fs/nfs/sysfs.c if (c->identifier) c 125 fs/nfs/sysfs.c kfree(c->identifier); c 126 fs/nfs/sysfs.c kfree(c); c 1179 fs/nfsd/nfs4callback.c struct nfsd4_conn *c; c 1182 fs/nfsd/nfs4callback.c list_for_each_entry(c, &s->se_conns, cn_persession) { c 1183 fs/nfsd/nfs4callback.c if (c->cn_flags & NFS4_CDFC4_BACK) c 1184 fs/nfsd/nfs4callback.c return c; c 1195 fs/nfsd/nfs4callback.c struct nfsd4_conn *c; c 1222 fs/nfsd/nfs4callback.c c = __nfsd4_find_backchannel(clp); c 1223 fs/nfsd/nfs4callback.c if (c) { c 1224 fs/nfsd/nfs4callback.c svc_xprt_get(c->cn_xprt); c 1225 fs/nfsd/nfs4callback.c conn.cb_xprt = c->cn_xprt; c 1226 fs/nfsd/nfs4callback.c ses = c->cn_session; c 100 fs/nfsd/nfs4recover.c unsigned char c = md5[i]; c 102 fs/nfsd/nfs4recover.c *out++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1); c 103 fs/nfsd/nfs4recover.c *out++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1); c 1654 fs/nfsd/nfs4state.c static void free_conn(struct nfsd4_conn *c) c 1656 fs/nfsd/nfs4state.c svc_xprt_put(c->cn_xprt); c 1657 fs/nfsd/nfs4state.c kfree(c); c 1662 fs/nfsd/nfs4state.c struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user); c 1663 fs/nfsd/nfs4state.c struct nfs4_client *clp = c->cn_session->se_client; c 1666 fs/nfsd/nfs4state.c if (!list_empty(&c->cn_persession)) { c 1667 fs/nfsd/nfs4state.c list_del(&c->cn_persession); c 1668 fs/nfsd/nfs4state.c free_conn(c); c 1735 fs/nfsd/nfs4state.c struct nfsd4_conn *c; c 1739 fs/nfsd/nfs4state.c c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession); c 1740 fs/nfsd/nfs4state.c list_del_init(&c->cn_persession); c 1743 fs/nfsd/nfs4state.c unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user); c 1744 fs/nfsd/nfs4state.c free_conn(c); c 1921 fs/nfsd/nfs4state.c struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref); c 1922 fs/nfsd/nfs4state.c struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs); c 3492 fs/nfsd/nfs4state.c struct nfsd4_conn *c; c 3494 fs/nfsd/nfs4state.c list_for_each_entry(c, &s->se_conns, cn_persession) { c 3495 fs/nfsd/nfs4state.c if (c->cn_xprt == xpt) { c 3496 fs/nfsd/nfs4state.c return c; c 3505 fs/nfsd/nfs4state.c struct nfsd4_conn *c; c 3510 fs/nfsd/nfs4state.c c = __nfsd4_find_conn(new->cn_xprt, ses); c 3511 fs/nfsd/nfs4state.c if (c) c 2039 fs/nfsd/nfs4xdr.c static __be32 *encode_cinfo(__be32 *p, struct nfsd4_change_info *c) c 2041 fs/nfsd/nfs4xdr.c *p++ = cpu_to_be32(c->atomic); c 2042 fs/nfsd/nfs4xdr.c if (c->change_supported) { c 2043 fs/nfsd/nfs4xdr.c p = xdr_encode_hyper(p, c->before_change); c 2044 fs/nfsd/nfs4xdr.c p = xdr_encode_hyper(p, c->after_change); c 2046 fs/nfsd/nfs4xdr.c *p++ = cpu_to_be32(c->before_ctime_sec); c 2047 fs/nfsd/nfs4xdr.c *p++ = cpu_to_be32(c->before_ctime_nsec); c 2048 fs/nfsd/nfs4xdr.c *p++ = cpu_to_be32(c->after_ctime_sec); c 2049 fs/nfsd/nfs4xdr.c *p++ = cpu_to_be32(c->after_ctime_nsec); c 49 fs/nfsd/xdr4.h #define SET_STATE_ID(c, f) ((c)->sid_flags |= (f)) c 50 fs/nfsd/xdr4.h #define HAS_STATE_ID(c, f) ((c)->sid_flags & (f)) c 51 fs/nfsd/xdr4.h #define CLEAR_STATE_ID(c, f) ((c)->sid_flags &= ~(f)) c 58 fs/nls/nls_base.c int c0, c, nc; c 77 fs/nls/nls_base.c c = (*s ^ 0x80) & 0xFF; c 78 fs/nls/nls_base.c if (c & 0xC0) c 80 fs/nls/nls_base.c l = (l << 6) | c; c 89 fs/nls/nls_base.c int c, nc; c 103 fs/nls/nls_base.c c = t->shift; c 104 fs/nls/nls_base.c *s = (u8) (t->cval | (l >> c)); c 105 fs/nls/nls_base.c while (c > 0) { c 106 fs/nls/nls_base.c c -= 6; c 108 fs/nls/nls_base.c *s = (u8) (0x80 | ((l >> c) & 0x3F)); c 117 fs/nls/nls_base.c 
static inline void put_utf16(wchar_t *s, unsigned c, enum utf16_endian endian) c 121 fs/nls/nls_base.c *s = (wchar_t) c; c 124 fs/nls/nls_base.c *s = __cpu_to_le16(c); c 127 fs/nls/nls_base.c *s = __cpu_to_be16(c); c 174 fs/nls/nls_base.c static inline unsigned long get_utf16(unsigned c, enum utf16_endian endian) c 178 fs/nls/nls_base.c return c; c 180 fs/nls/nls_base.c return __le16_to_cpu(c); c 182 fs/nls/nls_base.c return __be16_to_cpu(c); c 22 fs/nls/nls_euc-jp.c #define IS_SJIS_JISX0201KANA(c) ((0xA1 <= (c)) && ((c) <= 0xDF)) c 43 fs/nls/nls_euc-jp.c #define IS_EUC_BYTE(c) ((0xA1 <= (c)) && ((c) <= 0xFE)) c 193 fs/ocfs2/alloc.h static inline void ocfs2_init_dealloc_ctxt(struct ocfs2_cached_dealloc_ctxt *c) c 195 fs/ocfs2/alloc.h c->c_first_suballocator = NULL; c 196 fs/ocfs2/alloc.h c->c_global_allocator = NULL; c 203 fs/ocfs2/alloc.h static inline int ocfs2_dealloc_has_cluster(struct ocfs2_cached_dealloc_ctxt *c) c 205 fs/ocfs2/alloc.h return c->c_global_allocator != NULL; c 202 fs/ocfs2/cluster/tcp.c # define o2net_init_nst(a, b, c, d, e) c 213 fs/ocfs2/dir.c __u32 a = in[0], b = in[1], c = in[2], d = in[3]; c 219 fs/ocfs2/dir.c b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d); c 91 fs/ocfs2/dlm/dlmrecovery.c u64 c; c 93 fs/ocfs2/dlm/dlmrecovery.c c = dlm_mig_cookie; c 99 fs/ocfs2/dlm/dlmrecovery.c return c; c 1809 fs/ocfs2/dlm/dlmrecovery.c __be64 c; c 1858 fs/ocfs2/dlm/dlmrecovery.c c = ml->cookie; c 1863 fs/ocfs2/dlm/dlmrecovery.c dlm_get_lock_cookie_node(be64_to_cpu(c)), c 1864 fs/ocfs2/dlm/dlmrecovery.c dlm_get_lock_cookie_seq(be64_to_cpu(c)), c 1872 fs/ocfs2/dlm/dlmrecovery.c c = lock->ml.cookie; c 1875 fs/ocfs2/dlm/dlmrecovery.c dlm_get_lock_cookie_node(be64_to_cpu(c)), c 1876 fs/ocfs2/dlm/dlmrecovery.c dlm_get_lock_cookie_seq(be64_to_cpu(c)), c 1879 fs/ocfs2/dlm/dlmrecovery.c c = ml->cookie; c 1883 fs/ocfs2/dlm/dlmrecovery.c dlm_get_lock_cookie_node(be64_to_cpu(c)), c 1884 fs/ocfs2/dlm/dlmrecovery.c dlm_get_lock_cookie_seq(be64_to_cpu(c)), c 1892 fs/ocfs2/dlm/dlmrecovery.c c = ml->cookie; c 1895 fs/ocfs2/dlm/dlmrecovery.c dlm_get_lock_cookie_node(be64_to_cpu(c)), c 1896 fs/ocfs2/dlm/dlmrecovery.c dlm_get_lock_cookie_seq(be64_to_cpu(c)), c 2002 fs/ocfs2/dlm/dlmrecovery.c c = lock->ml.cookie; c 2006 fs/ocfs2/dlm/dlmrecovery.c dlm_get_lock_cookie_node(be64_to_cpu(c)), c 2007 fs/ocfs2/dlm/dlmrecovery.c dlm_get_lock_cookie_seq(be64_to_cpu(c))); c 1689 fs/ocfs2/namei.c const char *c; c 1735 fs/ocfs2/namei.c c = &symname[virtual * sb->s_blocksize]; c 1756 fs/ocfs2/namei.c memcpy(bhs[virtual]->b_data, c, c 45 fs/ocfs2/ocfs2_lockid.h char c; c 48 fs/ocfs2/ocfs2_lockid.h c = 'M'; c 51 fs/ocfs2/ocfs2_lockid.h c = 'D'; c 54 fs/ocfs2/ocfs2_lockid.h c = 'S'; c 57 fs/ocfs2/ocfs2_lockid.h c = 'R'; c 60 fs/ocfs2/ocfs2_lockid.h c = 'W'; c 63 fs/ocfs2/ocfs2_lockid.h c = 'N'; c 66 fs/ocfs2/ocfs2_lockid.h c = 'O'; c 69 fs/ocfs2/ocfs2_lockid.h c = 'F'; c 72 fs/ocfs2/ocfs2_lockid.h c = 'Q'; c 75 fs/ocfs2/ocfs2_lockid.h c = 'Y'; c 78 fs/ocfs2/ocfs2_lockid.h c = 'P'; c 81 fs/ocfs2/ocfs2_lockid.h c = 'T'; c 84 fs/ocfs2/ocfs2_lockid.h c = 'I'; c 87 fs/ocfs2/ocfs2_lockid.h c = '\0'; c 90 fs/ocfs2/ocfs2_lockid.h return c; c 50 fs/ocfs2/quota_local.c static unsigned int ol_quota_chunk_block(struct super_block *sb, int c) c 53 fs/ocfs2/quota_local.c return 1 + (ol_chunk_blocks(sb) + 1) * c; c 56 fs/ocfs2/quota_local.c static unsigned int ol_dqblk_block(struct super_block *sb, int c, int off) c 60 fs/ocfs2/quota_local.c return ol_quota_chunk_block(sb, c) + 1 + off / epb; c 63 fs/ocfs2/quota_local.c static 
unsigned int ol_dqblk_block_off(struct super_block *sb, int c, int off) c 71 fs/ocfs2/quota_local.c static loff_t ol_dqblk_off(struct super_block *sb, int c, int off) c 73 fs/ocfs2/quota_local.c return (ol_dqblk_block(sb, c, off) << sb->s_blocksize_bits) + c 74 fs/ocfs2/quota_local.c ol_dqblk_block_off(sb, c, off); c 83 fs/ocfs2/quota_local.c static int ol_dqblk_chunk_off(struct super_block *sb, int c, loff_t off) c 88 fs/ocfs2/quota_local.c ol_quota_chunk_block(sb, c) - 1) * epb c 189 fs/ocfs2/stack_user.c struct ocfs2_live_connection *c; c 193 fs/ocfs2/stack_user.c list_for_each_entry(c, &ocfs2_live_connection_list, oc_list) { c 194 fs/ocfs2/stack_user.c if ((c->oc_conn->cc_namelen == len) && c 195 fs/ocfs2/stack_user.c !strncmp(c->oc_conn->cc_name, name, len)) c 196 fs/ocfs2/stack_user.c return c; c 208 fs/ocfs2/stack_user.c struct ocfs2_live_connection *c) c 213 fs/ocfs2/stack_user.c c->oc_conn = conn; c 215 fs/ocfs2/stack_user.c if ((c->oc_type == NO_CONTROLD) || atomic_read(&ocfs2_control_opened)) c 216 fs/ocfs2/stack_user.c list_add(&c->oc_list, &ocfs2_live_connection_list); c 231 fs/ocfs2/stack_user.c static void ocfs2_live_connection_drop(struct ocfs2_live_connection *c) c 234 fs/ocfs2/stack_user.c list_del_init(&c->oc_list); c 235 fs/ocfs2/stack_user.c c->oc_conn = NULL; c 238 fs/ocfs2/stack_user.c kfree(c); c 279 fs/ocfs2/stack_user.c struct ocfs2_live_connection *c; c 283 fs/ocfs2/stack_user.c c = ocfs2_connection_find(uuid); c 284 fs/ocfs2/stack_user.c if (c) { c 285 fs/ocfs2/stack_user.c BUG_ON(c->oc_conn == NULL); c 286 fs/ocfs2/stack_user.c c->oc_conn->cc_recovery_handler(nodenum, c 287 fs/ocfs2/stack_user.c c->oc_conn->cc_recovery_data); c 12 fs/orangefs/orangefs-bufmap.c int c; c 19 fs/orangefs/orangefs-bufmap.c .c = -1, c 23 fs/orangefs/orangefs-bufmap.c .c = -1, c 31 fs/orangefs/orangefs-bufmap.c m->c = m->count = count; c 40 fs/orangefs/orangefs-bufmap.c m->c -= m->count + 1; c 48 fs/orangefs/orangefs-bufmap.c if (m->c != -1) { c 54 fs/orangefs/orangefs-bufmap.c if (m->c == -1) c 73 fs/orangefs/orangefs-bufmap.c v = ++m->c; c 92 fs/orangefs/orangefs-bufmap.c if (m->c > 0) c 95 fs/orangefs/orangefs-bufmap.c if (m->c < 0) { c 104 fs/orangefs/orangefs-bufmap.c if (unlikely(!t) && n != left && m->c < 0) c 128 fs/orangefs/orangefs-bufmap.c if (unlikely(m->c <= 0)) c 131 fs/orangefs/orangefs-bufmap.c m->c--; c 400 fs/overlayfs/copy_up.c static int ovl_link_up(struct ovl_copy_up_ctx *c) c 404 fs/overlayfs/copy_up.c struct dentry *upperdir = ovl_dentry_upper(c->parent); c 408 fs/overlayfs/copy_up.c err = ovl_set_impure(c->parent, upperdir); c 412 fs/overlayfs/copy_up.c err = ovl_set_nlink_lower(c->dentry); c 417 fs/overlayfs/copy_up.c upper = lookup_one_len(c->dentry->d_name.name, upperdir, c 418 fs/overlayfs/copy_up.c c->dentry->d_name.len); c 421 fs/overlayfs/copy_up.c err = ovl_do_link(ovl_dentry_upper(c->dentry), udir, upper); c 426 fs/overlayfs/copy_up.c ovl_set_timestamps(upperdir, &c->pstat); c 427 fs/overlayfs/copy_up.c ovl_dentry_set_upper_alias(c->dentry); c 434 fs/overlayfs/copy_up.c err = ovl_set_nlink_upper(c->dentry); c 439 fs/overlayfs/copy_up.c static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp) c 447 fs/overlayfs/copy_up.c if (S_ISREG(c->stat.mode) && !c->metacopy) { c 450 fs/overlayfs/copy_up.c ovl_path_upper(c->dentry, &upperpath); c 455 fs/overlayfs/copy_up.c ovl_path_lowerdata(c->dentry, &datapath); c 456 fs/overlayfs/copy_up.c err = ovl_copy_up_data(&datapath, &upperpath, c->stat.size); c 461 fs/overlayfs/copy_up.c err = 
ovl_copy_xattr(c->lowerpath.dentry, temp); c 472 fs/overlayfs/copy_up.c if (c->origin) { c 473 fs/overlayfs/copy_up.c err = ovl_set_origin(c->dentry, c->lowerpath.dentry, temp); c 478 fs/overlayfs/copy_up.c if (c->metacopy) { c 479 fs/overlayfs/copy_up.c err = ovl_check_setxattr(c->dentry, temp, OVL_XATTR_METACOPY, c 486 fs/overlayfs/copy_up.c if (c->metacopy) c 487 fs/overlayfs/copy_up.c err = ovl_set_size(temp, &c->stat); c 489 fs/overlayfs/copy_up.c err = ovl_set_attr(temp, &c->stat); c 527 fs/overlayfs/copy_up.c static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c) c 530 fs/overlayfs/copy_up.c struct inode *udir = d_inode(c->destdir), *wdir = d_inode(c->workdir); c 536 fs/overlayfs/copy_up.c .mode = c->stat.mode & S_IFMT, c 537 fs/overlayfs/copy_up.c .rdev = c->stat.rdev, c 538 fs/overlayfs/copy_up.c .link = c->link c 541 fs/overlayfs/copy_up.c err = ovl_lock_rename_workdir(c->workdir, c->destdir); c 545 fs/overlayfs/copy_up.c err = ovl_prep_cu_creds(c->dentry, &cc); c 549 fs/overlayfs/copy_up.c temp = ovl_create_temp(c->workdir, &cattr); c 556 fs/overlayfs/copy_up.c err = ovl_copy_up_inode(c, temp); c 560 fs/overlayfs/copy_up.c if (S_ISDIR(c->stat.mode) && c->indexed) { c 561 fs/overlayfs/copy_up.c err = ovl_create_index(c->dentry, c->lowerpath.dentry, temp); c 566 fs/overlayfs/copy_up.c upper = lookup_one_len(c->destname.name, c->destdir, c->destname.len); c 576 fs/overlayfs/copy_up.c if (!c->metacopy) c 577 fs/overlayfs/copy_up.c ovl_set_upperdata(d_inode(c->dentry)); c 578 fs/overlayfs/copy_up.c inode = d_inode(c->dentry); c 583 fs/overlayfs/copy_up.c unlock_rename(c->workdir, c->destdir); c 594 fs/overlayfs/copy_up.c static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c) c 596 fs/overlayfs/copy_up.c struct inode *udir = d_inode(c->destdir); c 601 fs/overlayfs/copy_up.c err = ovl_prep_cu_creds(c->dentry, &cc); c 605 fs/overlayfs/copy_up.c temp = ovl_do_tmpfile(c->workdir, c->stat.mode); c 611 fs/overlayfs/copy_up.c err = ovl_copy_up_inode(c, temp); c 617 fs/overlayfs/copy_up.c upper = lookup_one_len(c->destname.name, c->destdir, c->destname.len); c 628 fs/overlayfs/copy_up.c if (!c->metacopy) c 629 fs/overlayfs/copy_up.c ovl_set_upperdata(d_inode(c->dentry)); c 630 fs/overlayfs/copy_up.c ovl_inode_update(d_inode(c->dentry), temp); c 648 fs/overlayfs/copy_up.c static int ovl_do_copy_up(struct ovl_copy_up_ctx *c) c 651 fs/overlayfs/copy_up.c struct ovl_fs *ofs = c->dentry->d_sb->s_fs_info; c 660 fs/overlayfs/copy_up.c if (ovl_need_index(c->dentry)) { c 661 fs/overlayfs/copy_up.c c->indexed = true; c 662 fs/overlayfs/copy_up.c if (S_ISDIR(c->stat.mode)) c 663 fs/overlayfs/copy_up.c c->workdir = ovl_indexdir(c->dentry->d_sb); c 668 fs/overlayfs/copy_up.c if (S_ISDIR(c->stat.mode) || c->stat.nlink == 1 || to_index) c 669 fs/overlayfs/copy_up.c c->origin = true; c 672 fs/overlayfs/copy_up.c c->destdir = ovl_indexdir(c->dentry->d_sb); c 673 fs/overlayfs/copy_up.c err = ovl_get_index_name(c->lowerpath.dentry, &c->destname); c 676 fs/overlayfs/copy_up.c } else if (WARN_ON(!c->parent)) { c 684 fs/overlayfs/copy_up.c err = ovl_set_impure(c->parent, c->destdir); c 690 fs/overlayfs/copy_up.c if (S_ISREG(c->stat.mode) && ofs->tmpfile) c 691 fs/overlayfs/copy_up.c err = ovl_copy_up_tmpfile(c); c 693 fs/overlayfs/copy_up.c err = ovl_copy_up_workdir(c); c 697 fs/overlayfs/copy_up.c if (c->indexed) c 698 fs/overlayfs/copy_up.c ovl_set_flag(OVL_INDEX, d_inode(c->dentry)); c 702 fs/overlayfs/copy_up.c err = ovl_set_nlink_upper(c->dentry); c 704 fs/overlayfs/copy_up.c struct inode *udir = 
d_inode(c->destdir); c 708 fs/overlayfs/copy_up.c ovl_set_timestamps(c->destdir, &c->pstat); c 711 fs/overlayfs/copy_up.c ovl_dentry_set_upper_alias(c->dentry); c 716 fs/overlayfs/copy_up.c kfree(c->destname.name); c 738 fs/overlayfs/copy_up.c static int ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c) c 745 fs/overlayfs/copy_up.c ovl_path_upper(c->dentry, &upperpath); c 749 fs/overlayfs/copy_up.c ovl_path_lowerdata(c->dentry, &datapath); c 753 fs/overlayfs/copy_up.c if (c->stat.size) { c 760 fs/overlayfs/copy_up.c err = ovl_copy_up_data(&datapath, &upperpath, c->stat.size); c 780 fs/overlayfs/copy_up.c ovl_set_upperdata(d_inode(c->dentry)); c 258 fs/proc/base.c char *page, c; c 299 fs/proc/base.c if (access_remote_vm(mm, arg_end-1, &c, 1, FOLL_ANON) == 1 && c) c 155 fs/proc/inode.c DECLARE_COMPLETION_ONSTACK(c); c 156 fs/proc/inode.c pdeo->c = &c; c 158 fs/proc/inode.c wait_for_completion(&c); c 161 fs/proc/inode.c struct completion *c; c 170 fs/proc/inode.c c = pdeo->c; c 172 fs/proc/inode.c if (unlikely(c)) c 173 fs/proc/inode.c complete(c); c 180 fs/proc/inode.c DECLARE_COMPLETION_ONSTACK(c); c 182 fs/proc/inode.c de->pde_unload_completion = &c; c 184 fs/proc/inode.c wait_for_completion(&c); c 372 fs/proc/inode.c pdeo->c = NULL; c 203 fs/proc/internal.h struct completion *c; c 13 fs/proc/util.c unsigned c = *name++ - '0'; c 14 fs/proc/util.c if (c > 9) c 19 fs/proc/util.c n += c; c 490 fs/pstore/platform.c static void pstore_console_write(struct console *con, const char *s, unsigned c) c 494 fs/pstore/platform.c if (!c) c 501 fs/pstore/platform.c record.size = c; c 319 fs/pstore/ram_core.c int c = count; c 322 fs/pstore/ram_core.c if (unlikely(c > prz->buffer_size)) { c 323 fs/pstore/ram_core.c s += c - prz->buffer_size; c 324 fs/pstore/ram_core.c c = prz->buffer_size; c 327 fs/pstore/ram_core.c buffer_size_add(prz, c); c 329 fs/pstore/ram_core.c start = buffer_start_add(prz, c); c 332 fs/pstore/ram_core.c if (unlikely(rem < c)) { c 335 fs/pstore/ram_core.c c -= rem; c 338 fs/pstore/ram_core.c persistent_ram_update(prz, s, start, c); c 348 fs/pstore/ram_core.c int rem, ret = 0, c = count; c 353 fs/pstore/ram_core.c if (unlikely(c > prz->buffer_size)) { c 354 fs/pstore/ram_core.c s += c - prz->buffer_size; c 355 fs/pstore/ram_core.c c = prz->buffer_size; c 358 fs/pstore/ram_core.c buffer_size_add(prz, c); c 360 fs/pstore/ram_core.c start = buffer_start_add(prz, c); c 363 fs/pstore/ram_core.c if (unlikely(rem < c)) { c 366 fs/pstore/ram_core.c c -= rem; c 370 fs/pstore/ram_core.c ret = persistent_ram_update_user(prz, s, start, c); c 37 fs/reiserfs/hashes.c b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d); \ c 49 fs/reiserfs/hashes.c u32 a, b, c, d; c 63 fs/reiserfs/hashes.c c = (u32) msg[8] | c 81 fs/reiserfs/hashes.c c = (u32) msg[8] | c 96 fs/reiserfs/hashes.c c = d = pad; c 98 fs/reiserfs/hashes.c c <<= 8; c 99 fs/reiserfs/hashes.c c |= msg[i]; c 105 fs/reiserfs/hashes.c b = c = d = pad; c 111 fs/reiserfs/hashes.c a = b = c = d = pad; c 131 fs/reiserfs/hashes.c u32 a, c; c 143 fs/reiserfs/hashes.c c = msg[i] - 48; c 146 fs/reiserfs/hashes.c a = a + c * pow; c 150 fs/reiserfs/hashes.c c = '0' - 48; c 153 fs/reiserfs/hashes.c a = a + c * pow; c 157 fs/reiserfs/hashes.c c = i; c 160 fs/reiserfs/hashes.c a = a + c * pow; c 2735 fs/reiserfs/reiserfs.h #define get_commit_trans_id(c) le32_to_cpu((c)->j_trans_id) c 2736 fs/reiserfs/reiserfs.h #define get_commit_trans_len(c) le32_to_cpu((c)->j_len) c 2737 fs/reiserfs/reiserfs.h #define get_commit_mount_id(c) le32_to_cpu((c)->j_mount_id) c 
2739 fs/reiserfs/reiserfs.h #define set_commit_trans_id(c,val) do { (c)->j_trans_id = cpu_to_le32 (val); } while (0) c 2740 fs/reiserfs/reiserfs.h #define set_commit_trans_len(c,val) do { (c)->j_len = cpu_to_le32 (val); } while (0) c 1130 fs/reiserfs/super.c int c; c 1189 fs/reiserfs/super.c c = reiserfs_getopt(s, &pos, opts, &arg, mount_options); c 1190 fs/reiserfs/super.c if (c == -1) c 1194 fs/reiserfs/super.c if (c == 'r') { c 1217 fs/reiserfs/super.c if (c == 'c') { c 1230 fs/reiserfs/super.c if (c == 'w') { c 1236 fs/reiserfs/super.c if (c == 'j') { c 1250 fs/reiserfs/super.c if (c == 'u' || c == 'g') { c 1251 fs/reiserfs/super.c int qtype = c == 'u' ? USRQUOTA : GRPQUOTA; c 1300 fs/reiserfs/super.c if (c == 'f') { c 1321 fs/reiserfs/super.c if (c == 'u' || c == 'g' || c == 'f') { c 437 fs/seq_file.c char c = *p++; c 438 fs/seq_file.c if (!c) { c 440 fs/seq_file.c } else if (!strchr(esc, c)) { c 441 fs/seq_file.c *s++ = c; c 446 fs/seq_file.c *s++ = '0' + ((c & 0300) >> 6); c 447 fs/seq_file.c *s++ = '0' + ((c & 070) >> 3); c 448 fs/seq_file.c *s++ = '0' + (c & 07); c 657 fs/seq_file.c void seq_putc(struct seq_file *m, char c) c 662 fs/seq_file.c m->buf[m->count++] = c; c 840 fs/seq_file.c void seq_pad(struct seq_file *m, char c) c 851 fs/seq_file.c if (c) c 852 fs/seq_file.c seq_putc(m, c); c 30 fs/ubifs/auth.c int __ubifs_node_calc_hash(const struct ubifs_info *c, const void *node, c 34 fs/ubifs/auth.c SHASH_DESC_ON_STACK(shash, c->hash_tfm); c 37 fs/ubifs/auth.c shash->tfm = c->hash_tfm; c 53 fs/ubifs/auth.c static int ubifs_hash_calc_hmac(const struct ubifs_info *c, const u8 *hash, c 56 fs/ubifs/auth.c SHASH_DESC_ON_STACK(shash, c->hmac_tfm); c 59 fs/ubifs/auth.c shash->tfm = c->hmac_tfm; c 61 fs/ubifs/auth.c err = crypto_shash_digest(shash, hash, c->hash_len, hmac); c 78 fs/ubifs/auth.c int ubifs_prepare_auth_node(struct ubifs_info *c, void *node, c 86 fs/ubifs/auth.c SHASH_DESC_ON_STACK(hash_desc, c->hash_tfm); c 88 fs/ubifs/auth.c hash_desc->tfm = c->hash_tfm; c 89 fs/ubifs/auth.c ubifs_shash_copy_state(c, inhash, hash_desc); c 96 fs/ubifs/auth.c err = ubifs_hash_calc_hmac(c, hash, auth->hmac); c 101 fs/ubifs/auth.c ubifs_prepare_node(c, auth, ubifs_auth_node_sz(c), 0); c 105 fs/ubifs/auth.c static struct shash_desc *ubifs_get_desc(const struct ubifs_info *c, c 111 fs/ubifs/auth.c if (!ubifs_authenticated(c)) c 136 fs/ubifs/auth.c struct shash_desc *__ubifs_hash_get_desc(const struct ubifs_info *c) c 138 fs/ubifs/auth.c return ubifs_get_desc(c, c->hash_tfm); c 152 fs/ubifs/auth.c void ubifs_bad_hash(const struct ubifs_info *c, const void *node, const u8 *hash, c 155 fs/ubifs/auth.c int len = min(c->hash_len, 20); c 156 fs/ubifs/auth.c int cropped = len != c->hash_len; c 161 fs/ubifs/auth.c __ubifs_node_calc_hash(c, node, calc); c 163 fs/ubifs/auth.c ubifs_err(c, "hash mismatch on node at LEB %d:%d", lnum, offs); c 164 fs/ubifs/auth.c ubifs_err(c, "hash expected: %*ph%s", len, hash, cont); c 165 fs/ubifs/auth.c ubifs_err(c, "hash calculated: %*ph%s", len, calc, cont); c 178 fs/ubifs/auth.c int __ubifs_node_check_hash(const struct ubifs_info *c, const void *node, c 184 fs/ubifs/auth.c err = __ubifs_node_calc_hash(c, node, calc); c 188 fs/ubifs/auth.c if (ubifs_check_hash(c, expected, calc)) c 206 fs/ubifs/auth.c int ubifs_sb_verify_signature(struct ubifs_info *c, c 214 fs/ubifs/auth.c sleb = ubifs_scan(c, UBIFS_SB_LNUM, UBIFS_SB_NODE_SZ, c->sbuf, 0); c 221 fs/ubifs/auth.c ubifs_err(c, "Unable to find signature node"); c 229 fs/ubifs/auth.c ubifs_err(c, "Signature node is of 
wrong type"); c 237 fs/ubifs/auth.c ubifs_err(c, "invalid signature len %d", le32_to_cpu(signode->len)); c 243 fs/ubifs/auth.c ubifs_err(c, "Signature type %d is not supported\n", c 255 fs/ubifs/auth.c ubifs_err(c, "Failed to verify signature"); c 257 fs/ubifs/auth.c ubifs_msg(c, "Successfully verified super block signature"); c 271 fs/ubifs/auth.c int ubifs_init_authentication(struct ubifs_info *c) c 278 fs/ubifs/auth.c if (!c->auth_hash_name) { c 279 fs/ubifs/auth.c ubifs_err(c, "authentication hash name needed with authentication"); c 283 fs/ubifs/auth.c c->auth_hash_algo = match_string(hash_algo_name, HASH_ALGO__LAST, c 284 fs/ubifs/auth.c c->auth_hash_name); c 285 fs/ubifs/auth.c if ((int)c->auth_hash_algo < 0) { c 286 fs/ubifs/auth.c ubifs_err(c, "Unknown hash algo %s specified", c 287 fs/ubifs/auth.c c->auth_hash_name); c 292 fs/ubifs/auth.c c->auth_hash_name); c 294 fs/ubifs/auth.c keyring_key = request_key(&key_type_logon, c->auth_key_name, NULL); c 297 fs/ubifs/auth.c ubifs_err(c, "Failed to request key: %ld", c 305 fs/ubifs/auth.c ubifs_err(c, "key type must be logon"); c 317 fs/ubifs/auth.c c->hash_tfm = crypto_alloc_shash(c->auth_hash_name, 0, 0); c 318 fs/ubifs/auth.c if (IS_ERR(c->hash_tfm)) { c 319 fs/ubifs/auth.c err = PTR_ERR(c->hash_tfm); c 320 fs/ubifs/auth.c ubifs_err(c, "Can not allocate %s: %d", c 321 fs/ubifs/auth.c c->auth_hash_name, err); c 325 fs/ubifs/auth.c c->hash_len = crypto_shash_digestsize(c->hash_tfm); c 326 fs/ubifs/auth.c if (c->hash_len > UBIFS_HASH_ARR_SZ) { c 327 fs/ubifs/auth.c ubifs_err(c, "hash %s is bigger than maximum allowed hash size (%d > %d)", c 328 fs/ubifs/auth.c c->auth_hash_name, c->hash_len, UBIFS_HASH_ARR_SZ); c 333 fs/ubifs/auth.c c->hmac_tfm = crypto_alloc_shash(hmac_name, 0, 0); c 334 fs/ubifs/auth.c if (IS_ERR(c->hmac_tfm)) { c 335 fs/ubifs/auth.c err = PTR_ERR(c->hmac_tfm); c 336 fs/ubifs/auth.c ubifs_err(c, "Can not allocate %s: %d", hmac_name, err); c 340 fs/ubifs/auth.c c->hmac_desc_len = crypto_shash_digestsize(c->hmac_tfm); c 341 fs/ubifs/auth.c if (c->hmac_desc_len > UBIFS_HMAC_ARR_SZ) { c 342 fs/ubifs/auth.c ubifs_err(c, "hmac %s is bigger than maximum allowed hmac size (%d > %d)", c 343 fs/ubifs/auth.c hmac_name, c->hmac_desc_len, UBIFS_HMAC_ARR_SZ); c 348 fs/ubifs/auth.c err = crypto_shash_setkey(c->hmac_tfm, ukp->data, ukp->datalen); c 352 fs/ubifs/auth.c c->authenticated = true; c 354 fs/ubifs/auth.c c->log_hash = ubifs_hash_get_desc(c); c 355 fs/ubifs/auth.c if (IS_ERR(c->log_hash)) c 362 fs/ubifs/auth.c crypto_free_shash(c->hmac_tfm); c 365 fs/ubifs/auth.c crypto_free_shash(c->hash_tfm); c 379 fs/ubifs/auth.c void __ubifs_exit_authentication(struct ubifs_info *c) c 381 fs/ubifs/auth.c if (!ubifs_authenticated(c)) c 384 fs/ubifs/auth.c crypto_free_shash(c->hmac_tfm); c 385 fs/ubifs/auth.c crypto_free_shash(c->hash_tfm); c 386 fs/ubifs/auth.c kfree(c->log_hash); c 401 fs/ubifs/auth.c static int ubifs_node_calc_hmac(const struct ubifs_info *c, const void *node, c 404 fs/ubifs/auth.c SHASH_DESC_ON_STACK(shash, c->hmac_tfm); c 405 fs/ubifs/auth.c int hmac_len = c->hmac_desc_len; c 408 fs/ubifs/auth.c ubifs_assert(c, ofs_hmac > 8); c 409 fs/ubifs/auth.c ubifs_assert(c, ofs_hmac + hmac_len < len); c 411 fs/ubifs/auth.c shash->tfm = c->hmac_tfm; c 445 fs/ubifs/auth.c int __ubifs_node_insert_hmac(const struct ubifs_info *c, void *node, int len, c 448 fs/ubifs/auth.c return ubifs_node_calc_hmac(c, node, len, ofs_hmac, node + ofs_hmac); c 461 fs/ubifs/auth.c int __ubifs_node_verify_hmac(const struct ubifs_info *c, const void 
*node, c 464 fs/ubifs/auth.c int hmac_len = c->hmac_desc_len; c 472 fs/ubifs/auth.c err = ubifs_node_calc_hmac(c, node, len, ofs_hmac, hmac); c 488 fs/ubifs/auth.c int __ubifs_shash_copy_state(const struct ubifs_info *c, struct shash_desc *src, c 522 fs/ubifs/auth.c int ubifs_hmac_wkm(struct ubifs_info *c, u8 *hmac) c 524 fs/ubifs/auth.c SHASH_DESC_ON_STACK(shash, c->hmac_tfm); c 528 fs/ubifs/auth.c if (!ubifs_authenticated(c)) c 531 fs/ubifs/auth.c shash->tfm = c->hmac_tfm; c 556 fs/ubifs/auth.c bool ubifs_hmac_zero(struct ubifs_info *c, const u8 *hmac) c 558 fs/ubifs/auth.c return !memchr_inv(hmac, 0, c->hmac_desc_len); c 51 fs/ubifs/budget.c static void shrink_liability(struct ubifs_info *c, int nr_to_write) c 53 fs/ubifs/budget.c down_read(&c->vfs_sb->s_umount); c 54 fs/ubifs/budget.c writeback_inodes_sb_nr(c->vfs_sb, nr_to_write, WB_REASON_FS_FREE_SPACE); c 55 fs/ubifs/budget.c up_read(&c->vfs_sb->s_umount); c 66 fs/ubifs/budget.c static int run_gc(struct ubifs_info *c) c 71 fs/ubifs/budget.c down_read(&c->commit_sem); c 72 fs/ubifs/budget.c lnum = ubifs_garbage_collect(c, 1); c 73 fs/ubifs/budget.c up_read(&c->commit_sem); c 79 fs/ubifs/budget.c err = ubifs_return_leb(c, lnum); c 92 fs/ubifs/budget.c static long long get_liability(struct ubifs_info *c) c 96 fs/ubifs/budget.c spin_lock(&c->space_lock); c 97 fs/ubifs/budget.c liab = c->bi.idx_growth + c->bi.data_growth + c->bi.dd_growth; c 98 fs/ubifs/budget.c spin_unlock(&c->space_lock); c 120 fs/ubifs/budget.c static int make_free_space(struct ubifs_info *c) c 126 fs/ubifs/budget.c liab1 = get_liability(c); c 132 fs/ubifs/budget.c shrink_liability(c, NR_TO_WRITE); c 134 fs/ubifs/budget.c liab2 = get_liability(c); c 142 fs/ubifs/budget.c err = run_gc(c); c 151 fs/ubifs/budget.c err = ubifs_run_commit(c); c 166 fs/ubifs/budget.c int ubifs_calc_min_idx_lebs(struct ubifs_info *c) c 171 fs/ubifs/budget.c idx_size = c->bi.old_idx_sz + c->bi.idx_growth + c->bi.uncommitted_idx; c 179 fs/ubifs/budget.c idx_lebs = div_u64(idx_size + c->idx_leb_size - 1, c->idx_leb_size); c 197 fs/ubifs/budget.c long long ubifs_calc_available(const struct ubifs_info *c, int min_idx_lebs) c 202 fs/ubifs/budget.c available = c->main_bytes - c->lst.total_used; c 219 fs/ubifs/budget.c subtract_lebs += c->jhead_cnt - 1; c 224 fs/ubifs/budget.c available -= (long long)subtract_lebs * c->leb_size; c 227 fs/ubifs/budget.c available -= c->lst.total_dead; c 235 fs/ubifs/budget.c available -= c->lst.total_dark; c 243 fs/ubifs/budget.c if (c->lst.idx_lebs > min_idx_lebs) { c 244 fs/ubifs/budget.c subtract_lebs = c->lst.idx_lebs - min_idx_lebs; c 245 fs/ubifs/budget.c available -= subtract_lebs * c->dark_wm; c 261 fs/ubifs/budget.c static int can_use_rp(struct ubifs_info *c) c 263 fs/ubifs/budget.c if (uid_eq(current_fsuid(), c->rp_uid) || capable(CAP_SYS_RESOURCE) || c 264 fs/ubifs/budget.c (!gid_eq(c->rp_gid, GLOBAL_ROOT_GID) && in_group_p(c->rp_gid))) c 294 fs/ubifs/budget.c static int do_budget_space(struct ubifs_info *c) c 300 fs/ubifs/budget.c min_idx_lebs = ubifs_calc_min_idx_lebs(c); c 303 fs/ubifs/budget.c if (min_idx_lebs > c->lst.idx_lebs) c 304 fs/ubifs/budget.c rsvd_idx_lebs = min_idx_lebs - c->lst.idx_lebs; c 330 fs/ubifs/budget.c lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt - c 331 fs/ubifs/budget.c c->lst.taken_empty_lebs; c 334 fs/ubifs/budget.c min_idx_lebs, c->bi.min_idx_lebs, rsvd_idx_lebs); c 338 fs/ubifs/budget.c available = ubifs_calc_available(c, min_idx_lebs); c 339 fs/ubifs/budget.c outstanding = c->bi.data_growth + c->bi.dd_growth; c 
347 fs/ubifs/budget.c if (available - outstanding <= c->rp_size && !can_use_rp(c)) c 350 fs/ubifs/budget.c c->bi.min_idx_lebs = min_idx_lebs; c 362 fs/ubifs/budget.c static int calc_idx_growth(const struct ubifs_info *c, c 369 fs/ubifs/budget.c return znodes * c->max_idx_node_sz; c 378 fs/ubifs/budget.c static int calc_data_growth(const struct ubifs_info *c, c 383 fs/ubifs/budget.c data_growth = req->new_ino ? c->bi.inode_budget : 0; c 385 fs/ubifs/budget.c data_growth += c->bi.page_budget; c 387 fs/ubifs/budget.c data_growth += c->bi.dent_budget; c 398 fs/ubifs/budget.c static int calc_dd_growth(const struct ubifs_info *c, c 403 fs/ubifs/budget.c dd_growth = req->dirtied_page ? c->bi.page_budget : 0; c 406 fs/ubifs/budget.c dd_growth += c->bi.inode_budget << (req->dirtied_ino - 1); c 408 fs/ubifs/budget.c dd_growth += c->bi.dent_budget; c 426 fs/ubifs/budget.c int ubifs_budget_space(struct ubifs_info *c, struct ubifs_budget_req *req) c 430 fs/ubifs/budget.c ubifs_assert(c, req->new_page <= 1); c 431 fs/ubifs/budget.c ubifs_assert(c, req->dirtied_page <= 1); c 432 fs/ubifs/budget.c ubifs_assert(c, req->new_dent <= 1); c 433 fs/ubifs/budget.c ubifs_assert(c, req->mod_dent <= 1); c 434 fs/ubifs/budget.c ubifs_assert(c, req->new_ino <= 1); c 435 fs/ubifs/budget.c ubifs_assert(c, req->new_ino_d <= UBIFS_MAX_INO_DATA); c 436 fs/ubifs/budget.c ubifs_assert(c, req->dirtied_ino <= 4); c 437 fs/ubifs/budget.c ubifs_assert(c, req->dirtied_ino_d <= UBIFS_MAX_INO_DATA * 4); c 438 fs/ubifs/budget.c ubifs_assert(c, !(req->new_ino_d & 7)); c 439 fs/ubifs/budget.c ubifs_assert(c, !(req->dirtied_ino_d & 7)); c 441 fs/ubifs/budget.c data_growth = calc_data_growth(c, req); c 442 fs/ubifs/budget.c dd_growth = calc_dd_growth(c, req); c 445 fs/ubifs/budget.c idx_growth = calc_idx_growth(c, req); c 448 fs/ubifs/budget.c spin_lock(&c->space_lock); c 449 fs/ubifs/budget.c ubifs_assert(c, c->bi.idx_growth >= 0); c 450 fs/ubifs/budget.c ubifs_assert(c, c->bi.data_growth >= 0); c 451 fs/ubifs/budget.c ubifs_assert(c, c->bi.dd_growth >= 0); c 453 fs/ubifs/budget.c if (unlikely(c->bi.nospace) && (c->bi.nospace_rp || !can_use_rp(c))) { c 455 fs/ubifs/budget.c spin_unlock(&c->space_lock); c 459 fs/ubifs/budget.c c->bi.idx_growth += idx_growth; c 460 fs/ubifs/budget.c c->bi.data_growth += data_growth; c 461 fs/ubifs/budget.c c->bi.dd_growth += dd_growth; c 463 fs/ubifs/budget.c err = do_budget_space(c); c 468 fs/ubifs/budget.c spin_unlock(&c->space_lock); c 473 fs/ubifs/budget.c c->bi.idx_growth -= idx_growth; c 474 fs/ubifs/budget.c c->bi.data_growth -= data_growth; c 475 fs/ubifs/budget.c c->bi.dd_growth -= dd_growth; c 476 fs/ubifs/budget.c spin_unlock(&c->space_lock); c 483 fs/ubifs/budget.c err = make_free_space(c); c 495 fs/ubifs/budget.c c->bi.nospace = 1; c 496 fs/ubifs/budget.c if (can_use_rp(c) || c->rp_size == 0) c 497 fs/ubifs/budget.c c->bi.nospace_rp = 1; c 500 fs/ubifs/budget.c ubifs_err(c, "cannot budget space, error %d", err); c 515 fs/ubifs/budget.c void ubifs_release_budget(struct ubifs_info *c, struct ubifs_budget_req *req) c 517 fs/ubifs/budget.c ubifs_assert(c, req->new_page <= 1); c 518 fs/ubifs/budget.c ubifs_assert(c, req->dirtied_page <= 1); c 519 fs/ubifs/budget.c ubifs_assert(c, req->new_dent <= 1); c 520 fs/ubifs/budget.c ubifs_assert(c, req->mod_dent <= 1); c 521 fs/ubifs/budget.c ubifs_assert(c, req->new_ino <= 1); c 522 fs/ubifs/budget.c ubifs_assert(c, req->new_ino_d <= UBIFS_MAX_INO_DATA); c 523 fs/ubifs/budget.c ubifs_assert(c, req->dirtied_ino <= 4); c 524 fs/ubifs/budget.c 
ubifs_assert(c, req->dirtied_ino_d <= UBIFS_MAX_INO_DATA * 4); c 525 fs/ubifs/budget.c ubifs_assert(c, !(req->new_ino_d & 7)); c 526 fs/ubifs/budget.c ubifs_assert(c, !(req->dirtied_ino_d & 7)); c 528 fs/ubifs/budget.c ubifs_assert(c, req->idx_growth >= 0); c 529 fs/ubifs/budget.c ubifs_assert(c, req->data_growth >= 0); c 530 fs/ubifs/budget.c ubifs_assert(c, req->dd_growth >= 0); c 534 fs/ubifs/budget.c req->data_growth = calc_data_growth(c, req); c 535 fs/ubifs/budget.c req->dd_growth = calc_dd_growth(c, req); c 536 fs/ubifs/budget.c req->idx_growth = calc_idx_growth(c, req); c 542 fs/ubifs/budget.c c->bi.nospace = c->bi.nospace_rp = 0; c 545 fs/ubifs/budget.c spin_lock(&c->space_lock); c 546 fs/ubifs/budget.c c->bi.idx_growth -= req->idx_growth; c 547 fs/ubifs/budget.c c->bi.uncommitted_idx += req->idx_growth; c 548 fs/ubifs/budget.c c->bi.data_growth -= req->data_growth; c 549 fs/ubifs/budget.c c->bi.dd_growth -= req->dd_growth; c 550 fs/ubifs/budget.c c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c); c 552 fs/ubifs/budget.c ubifs_assert(c, c->bi.idx_growth >= 0); c 553 fs/ubifs/budget.c ubifs_assert(c, c->bi.data_growth >= 0); c 554 fs/ubifs/budget.c ubifs_assert(c, c->bi.dd_growth >= 0); c 555 fs/ubifs/budget.c ubifs_assert(c, c->bi.min_idx_lebs < c->main_lebs); c 556 fs/ubifs/budget.c ubifs_assert(c, !(c->bi.idx_growth & 7)); c 557 fs/ubifs/budget.c ubifs_assert(c, !(c->bi.data_growth & 7)); c 558 fs/ubifs/budget.c ubifs_assert(c, !(c->bi.dd_growth & 7)); c 559 fs/ubifs/budget.c spin_unlock(&c->space_lock); c 571 fs/ubifs/budget.c void ubifs_convert_page_budget(struct ubifs_info *c) c 573 fs/ubifs/budget.c spin_lock(&c->space_lock); c 575 fs/ubifs/budget.c c->bi.idx_growth -= c->max_idx_node_sz << UBIFS_BLOCKS_PER_PAGE_SHIFT; c 577 fs/ubifs/budget.c c->bi.data_growth -= c->bi.page_budget; c 579 fs/ubifs/budget.c c->bi.dd_growth += c->bi.page_budget; c 581 fs/ubifs/budget.c c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c); c 582 fs/ubifs/budget.c spin_unlock(&c->space_lock); c 594 fs/ubifs/budget.c void ubifs_release_dirty_inode_budget(struct ubifs_info *c, c 601 fs/ubifs/budget.c req.dd_growth = c->bi.inode_budget + ALIGN(ui->data_len, 8); c 602 fs/ubifs/budget.c ubifs_release_budget(c, &req); c 625 fs/ubifs/budget.c long long ubifs_reported_space(const struct ubifs_info *c, long long free) c 642 fs/ubifs/budget.c f = c->fanout > 3 ? 
c->fanout >> 1 : 2; c 645 fs/ubifs/budget.c divisor += (c->max_idx_node_sz * 3) / (f - 1); c 666 fs/ubifs/budget.c long long ubifs_get_free_space_nolock(struct ubifs_info *c) c 671 fs/ubifs/budget.c ubifs_assert(c, c->bi.min_idx_lebs == ubifs_calc_min_idx_lebs(c)); c 672 fs/ubifs/budget.c outstanding = c->bi.data_growth + c->bi.dd_growth; c 673 fs/ubifs/budget.c available = ubifs_calc_available(c, c->bi.min_idx_lebs); c 686 fs/ubifs/budget.c if (c->bi.min_idx_lebs > c->lst.idx_lebs) c 687 fs/ubifs/budget.c rsvd_idx_lebs = c->bi.min_idx_lebs - c->lst.idx_lebs; c 690 fs/ubifs/budget.c lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt - c 691 fs/ubifs/budget.c c->lst.taken_empty_lebs; c 693 fs/ubifs/budget.c available += lebs * (c->dark_wm - c->leb_overhead); c 696 fs/ubifs/budget.c free = ubifs_reported_space(c, available - outstanding); c 709 fs/ubifs/budget.c long long ubifs_get_free_space(struct ubifs_info *c) c 713 fs/ubifs/budget.c spin_lock(&c->space_lock); c 714 fs/ubifs/budget.c free = ubifs_get_free_space_nolock(c); c 715 fs/ubifs/budget.c spin_unlock(&c->space_lock); c 56 fs/ubifs/commit.c static int nothing_to_commit(struct ubifs_info *c) c 62 fs/ubifs/commit.c if (c->mounting || c->remounting_rw) c 69 fs/ubifs/commit.c if (c->zroot.znode && ubifs_zn_dirty(c->zroot.znode)) c 79 fs/ubifs/commit.c if (c->nroot && test_bit(DIRTY_CNODE, &c->nroot->flags)) c 82 fs/ubifs/commit.c ubifs_assert(c, atomic_long_read(&c->dirty_zn_cnt) == 0); c 83 fs/ubifs/commit.c ubifs_assert(c, c->dirty_pn_cnt == 0); c 84 fs/ubifs/commit.c ubifs_assert(c, c->dirty_nn_cnt == 0); c 97 fs/ubifs/commit.c static int do_commit(struct ubifs_info *c) c 104 fs/ubifs/commit.c ubifs_assert(c, !c->ro_media && !c->ro_mount); c 106 fs/ubifs/commit.c if (c->ro_error) { c 111 fs/ubifs/commit.c if (nothing_to_commit(c)) { c 112 fs/ubifs/commit.c up_write(&c->commit_sem); c 118 fs/ubifs/commit.c for (i = 0; i < c->jhead_cnt; i++) { c 119 fs/ubifs/commit.c err = ubifs_wbuf_sync(&c->jheads[i].wbuf); c 124 fs/ubifs/commit.c c->cmt_no += 1; c 125 fs/ubifs/commit.c err = ubifs_gc_start_commit(c); c 128 fs/ubifs/commit.c err = dbg_check_lprops(c); c 131 fs/ubifs/commit.c err = ubifs_log_start_commit(c, &new_ltail_lnum); c 134 fs/ubifs/commit.c err = ubifs_tnc_start_commit(c, &zroot); c 137 fs/ubifs/commit.c err = ubifs_lpt_start_commit(c); c 140 fs/ubifs/commit.c err = ubifs_orphan_start_commit(c); c 144 fs/ubifs/commit.c ubifs_get_lp_stats(c, &lst); c 146 fs/ubifs/commit.c up_write(&c->commit_sem); c 148 fs/ubifs/commit.c err = ubifs_tnc_end_commit(c); c 151 fs/ubifs/commit.c err = ubifs_lpt_end_commit(c); c 154 fs/ubifs/commit.c err = ubifs_orphan_end_commit(c); c 157 fs/ubifs/commit.c err = dbg_check_old_index(c, &zroot); c 161 fs/ubifs/commit.c c->mst_node->cmt_no = cpu_to_le64(c->cmt_no); c 162 fs/ubifs/commit.c c->mst_node->log_lnum = cpu_to_le32(new_ltail_lnum); c 163 fs/ubifs/commit.c c->mst_node->root_lnum = cpu_to_le32(zroot.lnum); c 164 fs/ubifs/commit.c c->mst_node->root_offs = cpu_to_le32(zroot.offs); c 165 fs/ubifs/commit.c c->mst_node->root_len = cpu_to_le32(zroot.len); c 166 fs/ubifs/commit.c c->mst_node->ihead_lnum = cpu_to_le32(c->ihead_lnum); c 167 fs/ubifs/commit.c c->mst_node->ihead_offs = cpu_to_le32(c->ihead_offs); c 168 fs/ubifs/commit.c c->mst_node->index_size = cpu_to_le64(c->bi.old_idx_sz); c 169 fs/ubifs/commit.c c->mst_node->lpt_lnum = cpu_to_le32(c->lpt_lnum); c 170 fs/ubifs/commit.c c->mst_node->lpt_offs = cpu_to_le32(c->lpt_offs); c 171 fs/ubifs/commit.c c->mst_node->nhead_lnum = 
cpu_to_le32(c->nhead_lnum); c 172 fs/ubifs/commit.c c->mst_node->nhead_offs = cpu_to_le32(c->nhead_offs); c 173 fs/ubifs/commit.c c->mst_node->ltab_lnum = cpu_to_le32(c->ltab_lnum); c 174 fs/ubifs/commit.c c->mst_node->ltab_offs = cpu_to_le32(c->ltab_offs); c 175 fs/ubifs/commit.c c->mst_node->lsave_lnum = cpu_to_le32(c->lsave_lnum); c 176 fs/ubifs/commit.c c->mst_node->lsave_offs = cpu_to_le32(c->lsave_offs); c 177 fs/ubifs/commit.c c->mst_node->lscan_lnum = cpu_to_le32(c->lscan_lnum); c 178 fs/ubifs/commit.c c->mst_node->empty_lebs = cpu_to_le32(lst.empty_lebs); c 179 fs/ubifs/commit.c c->mst_node->idx_lebs = cpu_to_le32(lst.idx_lebs); c 180 fs/ubifs/commit.c c->mst_node->total_free = cpu_to_le64(lst.total_free); c 181 fs/ubifs/commit.c c->mst_node->total_dirty = cpu_to_le64(lst.total_dirty); c 182 fs/ubifs/commit.c c->mst_node->total_used = cpu_to_le64(lst.total_used); c 183 fs/ubifs/commit.c c->mst_node->total_dead = cpu_to_le64(lst.total_dead); c 184 fs/ubifs/commit.c c->mst_node->total_dark = cpu_to_le64(lst.total_dark); c 185 fs/ubifs/commit.c if (c->no_orphs) c 186 fs/ubifs/commit.c c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS); c 188 fs/ubifs/commit.c c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_NO_ORPHS); c 190 fs/ubifs/commit.c old_ltail_lnum = c->ltail_lnum; c 191 fs/ubifs/commit.c err = ubifs_log_end_commit(c, new_ltail_lnum); c 195 fs/ubifs/commit.c err = ubifs_log_post_commit(c, old_ltail_lnum); c 198 fs/ubifs/commit.c err = ubifs_gc_end_commit(c); c 201 fs/ubifs/commit.c err = ubifs_lpt_post_commit(c); c 206 fs/ubifs/commit.c spin_lock(&c->cs_lock); c 207 fs/ubifs/commit.c c->cmt_state = COMMIT_RESTING; c 208 fs/ubifs/commit.c wake_up(&c->cmt_wq); c 210 fs/ubifs/commit.c spin_unlock(&c->cs_lock); c 214 fs/ubifs/commit.c up_write(&c->commit_sem); c 216 fs/ubifs/commit.c ubifs_err(c, "commit failed, error %d", err); c 217 fs/ubifs/commit.c spin_lock(&c->cs_lock); c 218 fs/ubifs/commit.c c->cmt_state = COMMIT_BROKEN; c 219 fs/ubifs/commit.c wake_up(&c->cmt_wq); c 220 fs/ubifs/commit.c spin_unlock(&c->cs_lock); c 221 fs/ubifs/commit.c ubifs_ro_mode(c, err); c 232 fs/ubifs/commit.c static int run_bg_commit(struct ubifs_info *c) c 234 fs/ubifs/commit.c spin_lock(&c->cs_lock); c 239 fs/ubifs/commit.c if (c->cmt_state != COMMIT_BACKGROUND && c 240 fs/ubifs/commit.c c->cmt_state != COMMIT_REQUIRED) c 242 fs/ubifs/commit.c spin_unlock(&c->cs_lock); c 244 fs/ubifs/commit.c down_write(&c->commit_sem); c 245 fs/ubifs/commit.c spin_lock(&c->cs_lock); c 246 fs/ubifs/commit.c if (c->cmt_state == COMMIT_REQUIRED) c 247 fs/ubifs/commit.c c->cmt_state = COMMIT_RUNNING_REQUIRED; c 248 fs/ubifs/commit.c else if (c->cmt_state == COMMIT_BACKGROUND) c 249 fs/ubifs/commit.c c->cmt_state = COMMIT_RUNNING_BACKGROUND; c 252 fs/ubifs/commit.c spin_unlock(&c->cs_lock); c 254 fs/ubifs/commit.c return do_commit(c); c 257 fs/ubifs/commit.c up_write(&c->commit_sem); c 259 fs/ubifs/commit.c spin_unlock(&c->cs_lock); c 278 fs/ubifs/commit.c struct ubifs_info *c = info; c 280 fs/ubifs/commit.c ubifs_msg(c, "background thread \"%s\" started, PID %d", c 281 fs/ubifs/commit.c c->bgt_name, current->pid); c 293 fs/ubifs/commit.c if (!c->need_bgt) { c 306 fs/ubifs/commit.c c->need_bgt = 0; c 307 fs/ubifs/commit.c err = ubifs_bg_wbufs_sync(c); c 309 fs/ubifs/commit.c ubifs_ro_mode(c, err); c 311 fs/ubifs/commit.c run_bg_commit(c); c 315 fs/ubifs/commit.c ubifs_msg(c, "background thread \"%s\" stops", c->bgt_name); c 326 fs/ubifs/commit.c void ubifs_commit_required(struct ubifs_info *c) c 328 fs/ubifs/commit.c 
spin_lock(&c->cs_lock); c 329 fs/ubifs/commit.c switch (c->cmt_state) { c 332 fs/ubifs/commit.c dbg_cmt("old: %s, new: %s", dbg_cstate(c->cmt_state), c 334 fs/ubifs/commit.c c->cmt_state = COMMIT_REQUIRED; c 337 fs/ubifs/commit.c dbg_cmt("old: %s, new: %s", dbg_cstate(c->cmt_state), c 339 fs/ubifs/commit.c c->cmt_state = COMMIT_RUNNING_REQUIRED; c 346 fs/ubifs/commit.c spin_unlock(&c->cs_lock); c 356 fs/ubifs/commit.c void ubifs_request_bg_commit(struct ubifs_info *c) c 358 fs/ubifs/commit.c spin_lock(&c->cs_lock); c 359 fs/ubifs/commit.c if (c->cmt_state == COMMIT_RESTING) { c 360 fs/ubifs/commit.c dbg_cmt("old: %s, new: %s", dbg_cstate(c->cmt_state), c 362 fs/ubifs/commit.c c->cmt_state = COMMIT_BACKGROUND; c 363 fs/ubifs/commit.c spin_unlock(&c->cs_lock); c 364 fs/ubifs/commit.c ubifs_wake_up_bgt(c); c 366 fs/ubifs/commit.c spin_unlock(&c->cs_lock); c 375 fs/ubifs/commit.c static int wait_for_commit(struct ubifs_info *c) c 386 fs/ubifs/commit.c wait_event(c->cmt_wq, c->cmt_state != COMMIT_RUNNING_BACKGROUND && c 387 fs/ubifs/commit.c c->cmt_state != COMMIT_RUNNING_REQUIRED); c 399 fs/ubifs/commit.c int ubifs_run_commit(struct ubifs_info *c) c 403 fs/ubifs/commit.c spin_lock(&c->cs_lock); c 404 fs/ubifs/commit.c if (c->cmt_state == COMMIT_BROKEN) { c 409 fs/ubifs/commit.c if (c->cmt_state == COMMIT_RUNNING_BACKGROUND) c 414 fs/ubifs/commit.c c->cmt_state = COMMIT_RUNNING_REQUIRED; c 416 fs/ubifs/commit.c if (c->cmt_state == COMMIT_RUNNING_REQUIRED) { c 417 fs/ubifs/commit.c spin_unlock(&c->cs_lock); c 418 fs/ubifs/commit.c return wait_for_commit(c); c 420 fs/ubifs/commit.c spin_unlock(&c->cs_lock); c 424 fs/ubifs/commit.c down_write(&c->commit_sem); c 425 fs/ubifs/commit.c spin_lock(&c->cs_lock); c 430 fs/ubifs/commit.c if (c->cmt_state == COMMIT_BROKEN) { c 435 fs/ubifs/commit.c if (c->cmt_state == COMMIT_RUNNING_BACKGROUND) c 436 fs/ubifs/commit.c c->cmt_state = COMMIT_RUNNING_REQUIRED; c 438 fs/ubifs/commit.c if (c->cmt_state == COMMIT_RUNNING_REQUIRED) { c 439 fs/ubifs/commit.c up_write(&c->commit_sem); c 440 fs/ubifs/commit.c spin_unlock(&c->cs_lock); c 441 fs/ubifs/commit.c return wait_for_commit(c); c 443 fs/ubifs/commit.c c->cmt_state = COMMIT_RUNNING_REQUIRED; c 444 fs/ubifs/commit.c spin_unlock(&c->cs_lock); c 446 fs/ubifs/commit.c err = do_commit(c); c 450 fs/ubifs/commit.c up_write(&c->commit_sem); c 452 fs/ubifs/commit.c spin_unlock(&c->cs_lock); c 467 fs/ubifs/commit.c int ubifs_gc_should_commit(struct ubifs_info *c) c 471 fs/ubifs/commit.c spin_lock(&c->cs_lock); c 472 fs/ubifs/commit.c if (c->cmt_state == COMMIT_BACKGROUND) { c 474 fs/ubifs/commit.c c->cmt_state = COMMIT_REQUIRED; c 477 fs/ubifs/commit.c if (c->cmt_state == COMMIT_REQUIRED) c 479 fs/ubifs/commit.c spin_unlock(&c->cs_lock); c 514 fs/ubifs/commit.c int dbg_old_index_check_init(struct ubifs_info *c, struct ubifs_zbranch *zroot) c 518 fs/ubifs/commit.c struct ubifs_debug_info *d = c->dbg; c 525 fs/ubifs/commit.c idx = kmalloc(c->max_idx_node_sz, GFP_NOFS); c 529 fs/ubifs/commit.c err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs); c 553 fs/ubifs/commit.c int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot) c 557 fs/ubifs/commit.c struct ubifs_debug_info *d = c->dbg; c 565 fs/ubifs/commit.c if (!dbg_is_chk_index(c)) c 570 fs/ubifs/commit.c sz = sizeof(struct idx_node) + ubifs_idx_node_sz(c, c->fanout) - c 597 fs/ubifs/commit.c err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs); c 602 fs/ubifs/commit.c if (child_cnt < 1 || child_cnt > c->fanout) { c 620 
fs/ubifs/commit.c key_read(c, ubifs_idx_key(c, idx), &lower_key); c 621 fs/ubifs/commit.c highest_ino_key(c, &upper_key, INUM_WATERMARK); c 623 fs/ubifs/commit.c key_copy(c, &upper_key, &i->upper_key); c 637 fs/ubifs/commit.c key_read(c, ubifs_idx_key(c, idx), &l_key); c 638 fs/ubifs/commit.c br = ubifs_idx_branch(c, idx, child_cnt - 1); c 639 fs/ubifs/commit.c key_read(c, &br->key, &u_key); c 640 fs/ubifs/commit.c if (keys_cmp(c, &lower_key, &l_key) > 0) { c 644 fs/ubifs/commit.c if (keys_cmp(c, &upper_key, &u_key) < 0) { c 648 fs/ubifs/commit.c if (keys_cmp(c, &upper_key, &u_key) == 0) c 649 fs/ubifs/commit.c if (!is_hash_key(c, &u_key)) { c 684 fs/ubifs/commit.c br = ubifs_idx_branch(c, idx, iip); c 688 fs/ubifs/commit.c key_read(c, &br->key, &lower_key); c 690 fs/ubifs/commit.c br = ubifs_idx_branch(c, idx, iip + 1); c 691 fs/ubifs/commit.c key_read(c, &br->key, &upper_key); c 693 fs/ubifs/commit.c key_copy(c, &i->upper_key, &upper_key); c 696 fs/ubifs/commit.c err = dbg_old_index_check_init(c, zroot); c 703 fs/ubifs/commit.c ubifs_err(c, "dumping index node (iip=%d)", i->iip); c 704 fs/ubifs/commit.c ubifs_dump_node(c, idx); c 709 fs/ubifs/commit.c ubifs_err(c, "dumping parent index node"); c 710 fs/ubifs/commit.c ubifs_dump_node(c, &i->idx); c 718 fs/ubifs/commit.c ubifs_err(c, "failed, error %d", err); c 101 fs/ubifs/compress.c void ubifs_compress(const struct ubifs_info *c, const void *in_buf, c 121 fs/ubifs/compress.c ubifs_warn(c, "cannot compress %d bytes, compressor %s, error %d, leave data uncompressed", c 153 fs/ubifs/compress.c int ubifs_decompress(const struct ubifs_info *c, const void *in_buf, c 160 fs/ubifs/compress.c ubifs_err(c, "invalid compression type %d", compr_type); c 167 fs/ubifs/compress.c ubifs_err(c, "%s compression is not compiled in", compr->name); c 184 fs/ubifs/compress.c ubifs_err(c, "cannot decompress %d bytes, compressor %s, error %d", c 30 fs/ubifs/crypto.c struct ubifs_info *c = inode->i_sb->s_fs_info; c 35 fs/ubifs/crypto.c ubifs_assert(c, pad_len <= *out_len); c 45 fs/ubifs/crypto.c ubifs_err(c, "fscrypt_encrypt_block_inplace() failed: %d", err); c 56 fs/ubifs/crypto.c struct ubifs_info *c = inode->i_sb->s_fs_info; c 62 fs/ubifs/crypto.c ubifs_err(c, "bad compr_size: %i", clen); c 66 fs/ubifs/crypto.c ubifs_assert(c, dlen <= UBIFS_BLOCK_SIZE); c 71 fs/ubifs/crypto.c ubifs_err(c, "fscrypt_decrypt_block_inplace() failed: %d", err); c 90 fs/ubifs/debug.c const char *dbg_snprintf_key(const struct ubifs_info *c, c 94 fs/ubifs/debug.c int type = key_type(c, key); c 96 fs/ubifs/debug.c if (c->key_fmt == UBIFS_SIMPLE_KEY_FMT) { c 100 fs/ubifs/debug.c (unsigned long)key_inum(c, key), c 106 fs/ubifs/debug.c (unsigned long)key_inum(c, key), c 107 fs/ubifs/debug.c get_key_type(type), key_hash(c, key)); c 111 fs/ubifs/debug.c (unsigned long)key_inum(c, key), c 112 fs/ubifs/debug.c get_key_type(type), key_block(c, key)); c 116 fs/ubifs/debug.c (unsigned long)key_inum(c, key), c 124 fs/ubifs/debug.c len -= snprintf(p, len, "bad key format %d", c->key_fmt); c 125 fs/ubifs/debug.c ubifs_assert(c, len > 0); c 224 fs/ubifs/debug.c void ubifs_dump_inode(struct ubifs_info *c, const struct inode *inode) c 269 fs/ubifs/debug.c ubifs_assert(c, !mutex_is_locked(&c->tnc_mutex)); c 271 fs/ubifs/debug.c lowest_dent_key(c, &key, inode->i_ino); c 273 fs/ubifs/debug.c dent = ubifs_tnc_next_ent(c, &key, &nm); c 289 fs/ubifs/debug.c key_read(c, &dent->key, &key); c 294 fs/ubifs/debug.c void ubifs_dump_node(const struct ubifs_info *c, const void *node) c 412 fs/ubifs/debug.c 
key_read(c, &ino->key, &key); c 414 fs/ubifs/debug.c dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN)); c 447 fs/ubifs/debug.c key_read(c, &dent->key, &key); c 449 fs/ubifs/debug.c dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN)); c 472 fs/ubifs/debug.c key_read(c, &dn->key, &key); c 474 fs/ubifs/debug.c dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN)); c 504 fs/ubifs/debug.c for (i = 0; i < n && i < c->fanout - 1; i++) { c 507 fs/ubifs/debug.c br = ubifs_idx_branch(c, idx, i); c 508 fs/ubifs/debug.c key_read(c, &br->key, &key); c 512 fs/ubifs/debug.c dbg_snprintf_key(c, &key, key_buf, c 575 fs/ubifs/debug.c void ubifs_dump_budg(struct ubifs_info *c, const struct ubifs_budg_info *bi) c 583 fs/ubifs/debug.c spin_lock(&c->space_lock); c 596 fs/ubifs/debug.c c->dark_wm, c->dead_wm, c->max_idx_node_sz); c 598 fs/ubifs/debug.c if (bi != &c->bi) c 607 fs/ubifs/debug.c c->freeable_cnt, c->calc_idx_sz, c->idx_gc_cnt); c 609 fs/ubifs/debug.c atomic_long_read(&c->dirty_pg_cnt), c 610 fs/ubifs/debug.c atomic_long_read(&c->dirty_zn_cnt), c 611 fs/ubifs/debug.c atomic_long_read(&c->clean_zn_cnt)); c 612 fs/ubifs/debug.c pr_err("\tgc_lnum %d, ihead_lnum %d\n", c->gc_lnum, c->ihead_lnum); c 615 fs/ubifs/debug.c if (c->jheads) c 616 fs/ubifs/debug.c for (i = 0; i < c->jhead_cnt; i++) c 618 fs/ubifs/debug.c dbg_jhead(c->jheads[i].wbuf.jhead), c 619 fs/ubifs/debug.c c->jheads[i].wbuf.lnum); c 620 fs/ubifs/debug.c for (rb = rb_first(&c->buds); rb; rb = rb_next(rb)) { c 624 fs/ubifs/debug.c list_for_each_entry(bud, &c->old_buds, list) c 626 fs/ubifs/debug.c list_for_each_entry(idx_gc, &c->idx_gc, list) c 629 fs/ubifs/debug.c pr_err("\tcommit state %d\n", c->cmt_state); c 632 fs/ubifs/debug.c available = ubifs_calc_available(c, c->bi.min_idx_lebs); c 633 fs/ubifs/debug.c outstanding = c->bi.data_growth + c->bi.dd_growth; c 634 fs/ubifs/debug.c free = ubifs_get_free_space_nolock(c); c 640 fs/ubifs/debug.c spin_unlock(&c->space_lock); c 643 fs/ubifs/debug.c void ubifs_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp) c 650 fs/ubifs/debug.c if (spc < c->dead_wm) c 653 fs/ubifs/debug.c dark = ubifs_calc_dark(c, spc); c 657 fs/ubifs/debug.c lp->lnum, lp->free, lp->dirty, c->leb_size - spc, spc, c 661 fs/ubifs/debug.c lp->lnum, lp->free, lp->dirty, c->leb_size - spc, spc, c 708 fs/ubifs/debug.c for (rb = rb_first((struct rb_root *)&c->buds); rb; rb = rb_next(rb)) { c 712 fs/ubifs/debug.c for (i = 0; i < c->jhead_cnt; i++) { c 718 fs/ubifs/debug.c if (c->jheads && c 719 fs/ubifs/debug.c lp->lnum == c->jheads[i].wbuf.lnum) { c 729 fs/ubifs/debug.c if (lp->lnum == c->gc_lnum) c 734 fs/ubifs/debug.c void ubifs_dump_lprops(struct ubifs_info *c) c 741 fs/ubifs/debug.c ubifs_get_lp_stats(c, &lst); c 744 fs/ubifs/debug.c for (lnum = c->main_first; lnum < c->leb_cnt; lnum++) { c 745 fs/ubifs/debug.c err = ubifs_read_one_lp(c, lnum, &lp); c 747 fs/ubifs/debug.c ubifs_err(c, "cannot read lprops for LEB %d", lnum); c 751 fs/ubifs/debug.c ubifs_dump_lprop(c, &lp); c 756 fs/ubifs/debug.c void ubifs_dump_lpt_info(struct ubifs_info *c) c 762 fs/ubifs/debug.c pr_err("\tlpt_sz: %lld\n", c->lpt_sz); c 763 fs/ubifs/debug.c pr_err("\tpnode_sz: %d\n", c->pnode_sz); c 764 fs/ubifs/debug.c pr_err("\tnnode_sz: %d\n", c->nnode_sz); c 765 fs/ubifs/debug.c pr_err("\tltab_sz: %d\n", c->ltab_sz); c 766 fs/ubifs/debug.c pr_err("\tlsave_sz: %d\n", c->lsave_sz); c 767 fs/ubifs/debug.c pr_err("\tbig_lpt: %d\n", c->big_lpt); c 768 fs/ubifs/debug.c pr_err("\tlpt_hght: %d\n", c->lpt_hght); c 769 fs/ubifs/debug.c 
pr_err("\tpnode_cnt: %d\n", c->pnode_cnt); c 770 fs/ubifs/debug.c pr_err("\tnnode_cnt: %d\n", c->nnode_cnt); c 771 fs/ubifs/debug.c pr_err("\tdirty_pn_cnt: %d\n", c->dirty_pn_cnt); c 772 fs/ubifs/debug.c pr_err("\tdirty_nn_cnt: %d\n", c->dirty_nn_cnt); c 773 fs/ubifs/debug.c pr_err("\tlsave_cnt: %d\n", c->lsave_cnt); c 774 fs/ubifs/debug.c pr_err("\tspace_bits: %d\n", c->space_bits); c 775 fs/ubifs/debug.c pr_err("\tlpt_lnum_bits: %d\n", c->lpt_lnum_bits); c 776 fs/ubifs/debug.c pr_err("\tlpt_offs_bits: %d\n", c->lpt_offs_bits); c 777 fs/ubifs/debug.c pr_err("\tlpt_spc_bits: %d\n", c->lpt_spc_bits); c 778 fs/ubifs/debug.c pr_err("\tpcnt_bits: %d\n", c->pcnt_bits); c 779 fs/ubifs/debug.c pr_err("\tlnum_bits: %d\n", c->lnum_bits); c 780 fs/ubifs/debug.c pr_err("\tLPT root is at %d:%d\n", c->lpt_lnum, c->lpt_offs); c 782 fs/ubifs/debug.c c->nhead_lnum, c->nhead_offs); c 783 fs/ubifs/debug.c pr_err("\tLPT ltab is at %d:%d\n", c->ltab_lnum, c->ltab_offs); c 784 fs/ubifs/debug.c if (c->big_lpt) c 786 fs/ubifs/debug.c c->lsave_lnum, c->lsave_offs); c 787 fs/ubifs/debug.c for (i = 0; i < c->lpt_lebs; i++) c 789 fs/ubifs/debug.c i + c->lpt_first, c->ltab[i].free, c->ltab[i].dirty, c 790 fs/ubifs/debug.c c->ltab[i].tgc, c->ltab[i].cmt); c 794 fs/ubifs/debug.c void ubifs_dump_sleb(const struct ubifs_info *c, c 806 fs/ubifs/debug.c ubifs_dump_node(c, snod->node); c 810 fs/ubifs/debug.c void ubifs_dump_leb(const struct ubifs_info *c, int lnum) c 818 fs/ubifs/debug.c buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); c 820 fs/ubifs/debug.c ubifs_err(c, "cannot allocate memory for dumping LEB %d", lnum); c 824 fs/ubifs/debug.c sleb = ubifs_scan(c, lnum, 0, buf, 0); c 826 fs/ubifs/debug.c ubifs_err(c, "scan error %d", (int)PTR_ERR(sleb)); c 837 fs/ubifs/debug.c ubifs_dump_node(c, snod->node); c 848 fs/ubifs/debug.c void ubifs_dump_znode(const struct ubifs_info *c, c 859 fs/ubifs/debug.c zbr = &c->zroot; c 865 fs/ubifs/debug.c if (znode->child_cnt <= 0 || znode->child_cnt > c->fanout) { c 876 fs/ubifs/debug.c dbg_snprintf_key(c, &zbr->key, key_buf, c 881 fs/ubifs/debug.c dbg_snprintf_key(c, &zbr->key, key_buf, c 887 fs/ubifs/debug.c void ubifs_dump_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat) c 903 fs/ubifs/debug.c void ubifs_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, c 921 fs/ubifs/debug.c void ubifs_dump_tnc(struct ubifs_info *c) c 928 fs/ubifs/debug.c znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, NULL); c 936 fs/ubifs/debug.c ubifs_dump_znode(c, znode); c 937 fs/ubifs/debug.c znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, znode); c 942 fs/ubifs/debug.c static int dump_znode(struct ubifs_info *c, struct ubifs_znode *znode, c 945 fs/ubifs/debug.c ubifs_dump_znode(c, znode); c 956 fs/ubifs/debug.c void ubifs_dump_index(struct ubifs_info *c) c 958 fs/ubifs/debug.c dbg_walk_index(c, NULL, dump_znode, NULL); c 968 fs/ubifs/debug.c void dbg_save_space_info(struct ubifs_info *c) c 970 fs/ubifs/debug.c struct ubifs_debug_info *d = c->dbg; c 973 fs/ubifs/debug.c spin_lock(&c->space_lock); c 974 fs/ubifs/debug.c memcpy(&d->saved_lst, &c->lst, sizeof(struct ubifs_lp_stats)); c 975 fs/ubifs/debug.c memcpy(&d->saved_bi, &c->bi, sizeof(struct ubifs_budg_info)); c 976 fs/ubifs/debug.c d->saved_idx_gc_cnt = c->idx_gc_cnt; c 1002 fs/ubifs/debug.c freeable_cnt = c->freeable_cnt; c 1003 fs/ubifs/debug.c c->freeable_cnt = 0; c 1004 fs/ubifs/debug.c d->saved_free = ubifs_get_free_space_nolock(c); c 1005 fs/ubifs/debug.c c->freeable_cnt = freeable_cnt; c 1006 
fs/ubifs/debug.c spin_unlock(&c->space_lock); c 1018 fs/ubifs/debug.c int dbg_check_space_info(struct ubifs_info *c) c 1020 fs/ubifs/debug.c struct ubifs_debug_info *d = c->dbg; c 1025 fs/ubifs/debug.c spin_lock(&c->space_lock); c 1026 fs/ubifs/debug.c freeable_cnt = c->freeable_cnt; c 1027 fs/ubifs/debug.c c->freeable_cnt = 0; c 1028 fs/ubifs/debug.c free = ubifs_get_free_space_nolock(c); c 1029 fs/ubifs/debug.c c->freeable_cnt = freeable_cnt; c 1030 fs/ubifs/debug.c spin_unlock(&c->space_lock); c 1033 fs/ubifs/debug.c ubifs_err(c, "free space changed from %lld to %lld", c 1041 fs/ubifs/debug.c ubifs_msg(c, "saved lprops statistics dump"); c 1043 fs/ubifs/debug.c ubifs_msg(c, "saved budgeting info dump"); c 1044 fs/ubifs/debug.c ubifs_dump_budg(c, &d->saved_bi); c 1045 fs/ubifs/debug.c ubifs_msg(c, "saved idx_gc_cnt %d", d->saved_idx_gc_cnt); c 1046 fs/ubifs/debug.c ubifs_msg(c, "current lprops statistics dump"); c 1047 fs/ubifs/debug.c ubifs_get_lp_stats(c, &lst); c 1049 fs/ubifs/debug.c ubifs_msg(c, "current budgeting info dump"); c 1050 fs/ubifs/debug.c ubifs_dump_budg(c, &c->bi); c 1065 fs/ubifs/debug.c int dbg_check_synced_i_size(const struct ubifs_info *c, struct inode *inode) c 1070 fs/ubifs/debug.c if (!dbg_is_chk_gen(c)) c 1078 fs/ubifs/debug.c ubifs_err(c, "ui_size is %lld, synced_i_size is %lld, but inode is clean", c 1080 fs/ubifs/debug.c ubifs_err(c, "i_ino %lu, i_mode %#x, i_size %lld", inode->i_ino, c 1103 fs/ubifs/debug.c int dbg_check_dir(struct ubifs_info *c, const struct inode *dir) c 1111 fs/ubifs/debug.c if (!dbg_is_chk_gen(c)) c 1117 fs/ubifs/debug.c lowest_dent_key(c, &key, dir->i_ino); c 1121 fs/ubifs/debug.c dent = ubifs_tnc_next_ent(c, &key, &nm); c 1136 fs/ubifs/debug.c key_read(c, &dent->key, &key); c 1141 fs/ubifs/debug.c ubifs_err(c, "directory inode %lu has size %llu, but calculated size is %llu", c 1144 fs/ubifs/debug.c ubifs_dump_inode(c, dir); c 1149 fs/ubifs/debug.c ubifs_err(c, "directory inode %lu has nlink %u, but calculated nlink is %u", c 1151 fs/ubifs/debug.c ubifs_dump_inode(c, dir); c 1172 fs/ubifs/debug.c static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1, c 1180 fs/ubifs/debug.c ubifs_assert(c, !keys_cmp(c, &zbr1->key, &zbr2->key)); c 1190 fs/ubifs/debug.c err = ubifs_tnc_read_node(c, zbr1, dent1); c 1193 fs/ubifs/debug.c err = ubifs_validate_entry(c, dent1); c 1197 fs/ubifs/debug.c err = ubifs_tnc_read_node(c, zbr2, dent2); c 1200 fs/ubifs/debug.c err = ubifs_validate_entry(c, dent2); c 1206 fs/ubifs/debug.c key_read(c, &dent1->key, &key); c 1207 fs/ubifs/debug.c if (keys_cmp(c, &zbr1->key, &key)) { c 1208 fs/ubifs/debug.c ubifs_err(c, "1st entry at %d:%d has key %s", zbr1->lnum, c 1209 fs/ubifs/debug.c zbr1->offs, dbg_snprintf_key(c, &key, key_buf, c 1211 fs/ubifs/debug.c ubifs_err(c, "but it should have key %s according to tnc", c 1212 fs/ubifs/debug.c dbg_snprintf_key(c, &zbr1->key, key_buf, c 1214 fs/ubifs/debug.c ubifs_dump_node(c, dent1); c 1218 fs/ubifs/debug.c key_read(c, &dent2->key, &key); c 1219 fs/ubifs/debug.c if (keys_cmp(c, &zbr2->key, &key)) { c 1220 fs/ubifs/debug.c ubifs_err(c, "2nd entry at %d:%d has key %s", zbr1->lnum, c 1221 fs/ubifs/debug.c zbr1->offs, dbg_snprintf_key(c, &key, key_buf, c 1223 fs/ubifs/debug.c ubifs_err(c, "but it should have key %s according to tnc", c 1224 fs/ubifs/debug.c dbg_snprintf_key(c, &zbr2->key, key_buf, c 1226 fs/ubifs/debug.c ubifs_dump_node(c, dent2); c 1239 fs/ubifs/debug.c ubifs_err(c, "2 xent/dent nodes with the same name"); c 1241 fs/ubifs/debug.c 
ubifs_err(c, "bad order of colliding key %s", c 1242 fs/ubifs/debug.c dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN)); c 1244 fs/ubifs/debug.c ubifs_msg(c, "first node at %d:%d\n", zbr1->lnum, zbr1->offs); c 1245 fs/ubifs/debug.c ubifs_dump_node(c, dent1); c 1246 fs/ubifs/debug.c ubifs_msg(c, "second node at %d:%d\n", zbr2->lnum, zbr2->offs); c 1247 fs/ubifs/debug.c ubifs_dump_node(c, dent2); c 1263 fs/ubifs/debug.c static int dbg_check_znode(struct ubifs_info *c, struct ubifs_zbranch *zbr) c 1269 fs/ubifs/debug.c if (znode->child_cnt <= 0 || znode->child_cnt > c->fanout) { c 1277 fs/ubifs/debug.c if (znode->iip < 0 || znode->iip >= c->fanout) { c 1320 fs/ubifs/debug.c err = ubifs_search_zbranch(c, zp, &zbr->key, &n); c 1334 fs/ubifs/debug.c if (keys_cmp(c, &zp->zbranch[n].key, c 1347 fs/ubifs/debug.c cmp = keys_cmp(c, min, &znode->zbranch[0].key); c 1361 fs/ubifs/debug.c cmp = keys_cmp(c, max, c 1370 fs/ubifs/debug.c if (zbr != &c->zroot) { c 1381 fs/ubifs/debug.c cmp = keys_cmp(c, &znode->zbranch[n - 1].key, c 1389 fs/ubifs/debug.c if (!is_hash_key(c, &znode->zbranch[n].key)) { c 1394 fs/ubifs/debug.c if (znode->level != 0 || c->replaying) c 1401 fs/ubifs/debug.c err = dbg_check_key_order(c, &znode->zbranch[n - 1], c 1448 fs/ubifs/debug.c ubifs_err(c, "failed, error %d", err); c 1449 fs/ubifs/debug.c ubifs_msg(c, "dump of the znode"); c 1450 fs/ubifs/debug.c ubifs_dump_znode(c, znode); c 1452 fs/ubifs/debug.c ubifs_msg(c, "dump of the parent znode"); c 1453 fs/ubifs/debug.c ubifs_dump_znode(c, zp); c 1467 fs/ubifs/debug.c int dbg_check_tnc(struct ubifs_info *c, int extra) c 1473 fs/ubifs/debug.c if (!dbg_is_chk_index(c)) c 1476 fs/ubifs/debug.c ubifs_assert(c, mutex_is_locked(&c->tnc_mutex)); c 1477 fs/ubifs/debug.c if (!c->zroot.znode) c 1480 fs/ubifs/debug.c znode = ubifs_tnc_postorder_first(c->zroot.znode); c 1486 fs/ubifs/debug.c zbr = &c->zroot; c 1490 fs/ubifs/debug.c err = dbg_check_znode(c, zbr); c 1502 fs/ubifs/debug.c znode = ubifs_tnc_postorder_next(c, znode); c 1511 fs/ubifs/debug.c if (prev->level == 0 && znode->level == 0 && !c->replaying && c 1512 fs/ubifs/debug.c !keys_cmp(c, &prev->zbranch[last].key, c 1514 fs/ubifs/debug.c err = dbg_check_key_order(c, &prev->zbranch[last], c 1519 fs/ubifs/debug.c ubifs_msg(c, "first znode"); c 1520 fs/ubifs/debug.c ubifs_dump_znode(c, prev); c 1521 fs/ubifs/debug.c ubifs_msg(c, "second znode"); c 1522 fs/ubifs/debug.c ubifs_dump_znode(c, znode); c 1529 fs/ubifs/debug.c if (clean_cnt != atomic_long_read(&c->clean_zn_cnt)) { c 1530 fs/ubifs/debug.c ubifs_err(c, "incorrect clean_zn_cnt %ld, calculated %ld", c 1531 fs/ubifs/debug.c atomic_long_read(&c->clean_zn_cnt), c 1535 fs/ubifs/debug.c if (dirty_cnt != atomic_long_read(&c->dirty_zn_cnt)) { c 1536 fs/ubifs/debug.c ubifs_err(c, "incorrect dirty_zn_cnt %ld, calculated %ld", c 1537 fs/ubifs/debug.c atomic_long_read(&c->dirty_zn_cnt), c 1561 fs/ubifs/debug.c int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb, c 1568 fs/ubifs/debug.c mutex_lock(&c->tnc_mutex); c 1570 fs/ubifs/debug.c if (!c->zroot.znode) { c 1571 fs/ubifs/debug.c c->zroot.znode = ubifs_load_znode(c, &c->zroot, NULL, 0); c 1572 fs/ubifs/debug.c if (IS_ERR(c->zroot.znode)) { c 1573 fs/ubifs/debug.c err = PTR_ERR(c->zroot.znode); c 1574 fs/ubifs/debug.c c->zroot.znode = NULL; c 1584 fs/ubifs/debug.c znode = c->zroot.znode; c 1589 fs/ubifs/debug.c child = ubifs_load_znode(c, zbr, znode, 0); c 1606 fs/ubifs/debug.c err = znode_cb(c, znode, priv); c 1608 fs/ubifs/debug.c ubifs_err(c, "znode checking function 
returned error %d", c 1610 fs/ubifs/debug.c ubifs_dump_znode(c, znode); c 1617 fs/ubifs/debug.c err = leaf_cb(c, zbr, priv); c 1619 fs/ubifs/debug.c ubifs_err(c, "leaf checking function returned error %d, for leaf at LEB %d:%d", c 1636 fs/ubifs/debug.c child = ubifs_load_znode(c, zbr, znode, idx); c 1656 fs/ubifs/debug.c child = ubifs_load_znode(c, zbr, znode, 0); c 1667 fs/ubifs/debug.c mutex_unlock(&c->tnc_mutex); c 1674 fs/ubifs/debug.c zbr = &c->zroot; c 1675 fs/ubifs/debug.c ubifs_msg(c, "dump of znode at LEB %d:%d", zbr->lnum, zbr->offs); c 1676 fs/ubifs/debug.c ubifs_dump_znode(c, znode); c 1678 fs/ubifs/debug.c mutex_unlock(&c->tnc_mutex); c 1692 fs/ubifs/debug.c static int add_size(struct ubifs_info *c, struct ubifs_znode *znode, void *priv) c 1697 fs/ubifs/debug.c add = ubifs_idx_node_sz(c, znode->child_cnt); c 1712 fs/ubifs/debug.c int dbg_check_idx_size(struct ubifs_info *c, long long idx_size) c 1717 fs/ubifs/debug.c if (!dbg_is_chk_index(c)) c 1720 fs/ubifs/debug.c err = dbg_walk_index(c, NULL, add_size, &calc); c 1722 fs/ubifs/debug.c ubifs_err(c, "error %d while walking the index", err); c 1727 fs/ubifs/debug.c ubifs_err(c, "index size check failed: calculated size is %lld, should be %lld", c 1791 fs/ubifs/debug.c static struct fsck_inode *add_inode(struct ubifs_info *c, c 1797 fs/ubifs/debug.c ino_t inum = key_inum_flash(c, &ino->key); c 1813 fs/ubifs/debug.c if (inum > c->highest_inum) { c 1814 fs/ubifs/debug.c ubifs_err(c, "too high inode number, max. is %lu", c 1815 fs/ubifs/debug.c (unsigned long)c->highest_inum); c 1823 fs/ubifs/debug.c inode = ilookup(c->vfs_sb, inum); c 1904 fs/ubifs/debug.c static struct fsck_inode *read_add_inode(struct ubifs_info *c, c 1918 fs/ubifs/debug.c ino_key_init(c, &key, inum); c 1919 fs/ubifs/debug.c err = ubifs_lookup_level0(c, &key, &znode, &n); c 1921 fs/ubifs/debug.c ubifs_err(c, "inode %lu not found in index", (unsigned long)inum); c 1924 fs/ubifs/debug.c ubifs_err(c, "error %d while looking up inode %lu", c 1931 fs/ubifs/debug.c ubifs_err(c, "bad node %lu node length %d", c 1940 fs/ubifs/debug.c err = ubifs_tnc_read_node(c, zbr, ino); c 1942 fs/ubifs/debug.c ubifs_err(c, "cannot read inode node at LEB %d:%d, error %d", c 1948 fs/ubifs/debug.c fscki = add_inode(c, fsckd, ino); c 1951 fs/ubifs/debug.c ubifs_err(c, "error %ld while adding inode %lu node", c 1975 fs/ubifs/debug.c static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr, c 1981 fs/ubifs/debug.c int err, type = key_type(c, &zbr->key); c 1985 fs/ubifs/debug.c ubifs_err(c, "bad leaf length %d (LEB %d:%d)", c 1994 fs/ubifs/debug.c err = ubifs_tnc_read_node(c, zbr, node); c 1996 fs/ubifs/debug.c ubifs_err(c, "cannot read leaf node at LEB %d:%d, error %d", c 2003 fs/ubifs/debug.c fscki = add_inode(c, priv, node); c 2006 fs/ubifs/debug.c ubifs_err(c, "error %d while adding inode node", err); c 2014 fs/ubifs/debug.c ubifs_err(c, "unexpected node type %d at LEB %d:%d", c 2021 fs/ubifs/debug.c if (le64_to_cpu(ch->sqnum) > c->max_sqnum) { c 2022 fs/ubifs/debug.c ubifs_err(c, "too high sequence number, max. 
is %llu", c 2023 fs/ubifs/debug.c c->max_sqnum); c 2032 fs/ubifs/debug.c ubifs_assert(c, zbr->len >= UBIFS_DATA_NODE_SZ); c 2038 fs/ubifs/debug.c inum = key_inum_flash(c, &dn->key); c 2039 fs/ubifs/debug.c fscki = read_add_inode(c, priv, inum); c 2042 fs/ubifs/debug.c ubifs_err(c, "error %d while processing data node and trying to find inode node %lu", c 2048 fs/ubifs/debug.c blk_offs = key_block_flash(c, &dn->key); c 2052 fs/ubifs/debug.c ubifs_err(c, "data node at LEB %d:%d is not within inode size %lld", c 2062 fs/ubifs/debug.c ubifs_assert(c, zbr->len >= UBIFS_DENT_NODE_SZ); c 2064 fs/ubifs/debug.c err = ubifs_validate_entry(c, dent); c 2073 fs/ubifs/debug.c fscki = read_add_inode(c, priv, inum); c 2076 fs/ubifs/debug.c ubifs_err(c, "error %d while processing entry node and trying to find inode node %lu", c 2084 fs/ubifs/debug.c inum = key_inum_flash(c, &dent->key); c 2085 fs/ubifs/debug.c fscki1 = read_add_inode(c, priv, inum); c 2088 fs/ubifs/debug.c ubifs_err(c, "error %d while processing entry node and trying to find parent inode node %lu", c 2111 fs/ubifs/debug.c ubifs_msg(c, "dump of node at LEB %d:%d", zbr->lnum, zbr->offs); c 2112 fs/ubifs/debug.c ubifs_dump_node(c, node); c 2140 fs/ubifs/debug.c static int check_inodes(struct ubifs_info *c, struct fsck_data *fsckd) c 2162 fs/ubifs/debug.c ubifs_err(c, "directory inode %lu has %d direntries which refer it, but should be 1", c 2169 fs/ubifs/debug.c ubifs_err(c, "root inode %lu has non-zero (%d) direntries which refer it", c 2175 fs/ubifs/debug.c ubifs_err(c, "directory inode %lu size is %lld, but calculated size is %lld", c 2181 fs/ubifs/debug.c ubifs_err(c, "directory inode %lu nlink is %d, but calculated nlink is %d", c 2188 fs/ubifs/debug.c ubifs_err(c, "inode %lu nlink is %d, but calculated nlink is %d", c 2195 fs/ubifs/debug.c ubifs_err(c, "inode %lu has xattr size %u, but calculated size is %lld", c 2201 fs/ubifs/debug.c ubifs_err(c, "inode %lu has %u xattrs, but calculated count is %lld", c 2207 fs/ubifs/debug.c ubifs_err(c, "inode %lu has xattr names' size %u, but calculated names' size is %lld", c 2218 fs/ubifs/debug.c ino_key_init(c, &key, fscki->inum); c 2219 fs/ubifs/debug.c err = ubifs_lookup_level0(c, &key, &znode, &n); c 2221 fs/ubifs/debug.c ubifs_err(c, "inode %lu not found in index", c 2225 fs/ubifs/debug.c ubifs_err(c, "error %d while looking up inode %lu", c 2235 fs/ubifs/debug.c err = ubifs_tnc_read_node(c, zbr, ino); c 2237 fs/ubifs/debug.c ubifs_err(c, "cannot read inode node at LEB %d:%d, error %d", c 2243 fs/ubifs/debug.c ubifs_msg(c, "dump of the inode %lu sitting in LEB %d:%d", c 2245 fs/ubifs/debug.c ubifs_dump_node(c, ino); c 2263 fs/ubifs/debug.c int dbg_check_filesystem(struct ubifs_info *c) c 2268 fs/ubifs/debug.c if (!dbg_is_chk_fs(c)) c 2272 fs/ubifs/debug.c err = dbg_walk_index(c, check_leaf, NULL, &fsckd); c 2276 fs/ubifs/debug.c err = check_inodes(c, &fsckd); c 2284 fs/ubifs/debug.c ubifs_err(c, "file-system check failed with error %d", err); c 2298 fs/ubifs/debug.c int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head) c 2303 fs/ubifs/debug.c if (!dbg_is_chk_gen(c)) c 2315 fs/ubifs/debug.c ubifs_err(c, "bad node type %d", sa->type); c 2316 fs/ubifs/debug.c ubifs_dump_node(c, sa->node); c 2320 fs/ubifs/debug.c ubifs_err(c, "bad node type %d", sb->type); c 2321 fs/ubifs/debug.c ubifs_dump_node(c, sb->node); c 2325 fs/ubifs/debug.c inuma = key_inum(c, &sa->key); c 2326 fs/ubifs/debug.c inumb = key_inum(c, &sb->key); c 2331 fs/ubifs/debug.c ubifs_err(c, "larger inum %lu 
goes before inum %lu", c 2336 fs/ubifs/debug.c blka = key_block(c, &sa->key); c 2337 fs/ubifs/debug.c blkb = key_block(c, &sb->key); c 2340 fs/ubifs/debug.c ubifs_err(c, "larger block %u goes before %u", blka, blkb); c 2344 fs/ubifs/debug.c ubifs_err(c, "two data nodes for the same block"); c 2352 fs/ubifs/debug.c ubifs_dump_node(c, sa->node); c 2353 fs/ubifs/debug.c ubifs_dump_node(c, sb->node); c 2365 fs/ubifs/debug.c int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head) c 2370 fs/ubifs/debug.c if (!dbg_is_chk_gen(c)) c 2383 fs/ubifs/debug.c ubifs_err(c, "bad node type %d", sa->type); c 2384 fs/ubifs/debug.c ubifs_dump_node(c, sa->node); c 2389 fs/ubifs/debug.c ubifs_err(c, "bad node type %d", sb->type); c 2390 fs/ubifs/debug.c ubifs_dump_node(c, sb->node); c 2395 fs/ubifs/debug.c ubifs_err(c, "non-inode node goes before inode node"); c 2405 fs/ubifs/debug.c ubifs_err(c, "smaller inode node goes first"); c 2415 fs/ubifs/debug.c inuma = key_inum(c, &sa->key); c 2416 fs/ubifs/debug.c inumb = key_inum(c, &sb->key); c 2421 fs/ubifs/debug.c ubifs_err(c, "larger inum %lu goes before inum %lu", c 2426 fs/ubifs/debug.c hasha = key_block(c, &sa->key); c 2427 fs/ubifs/debug.c hashb = key_block(c, &sb->key); c 2430 fs/ubifs/debug.c ubifs_err(c, "larger hash %u goes before %u", c 2439 fs/ubifs/debug.c ubifs_msg(c, "dumping first node"); c 2440 fs/ubifs/debug.c ubifs_dump_node(c, sa->node); c 2441 fs/ubifs/debug.c ubifs_msg(c, "dumping second node"); c 2442 fs/ubifs/debug.c ubifs_dump_node(c, sb->node); c 2453 fs/ubifs/debug.c static int power_cut_emulated(struct ubifs_info *c, int lnum, int write) c 2455 fs/ubifs/debug.c struct ubifs_debug_info *d = c->dbg; c 2457 fs/ubifs/debug.c ubifs_assert(c, dbg_is_tst_rcvry(c)); c 2470 fs/ubifs/debug.c ubifs_warn(c, "failing after %lums", delay); c 2476 fs/ubifs/debug.c ubifs_warn(c, "failing after %lu calls", delay); c 2494 fs/ubifs/debug.c ubifs_warn(c, "failing in super block LEB %d", lnum); c 2498 fs/ubifs/debug.c ubifs_warn(c, "failing in master LEB %d", lnum); c 2499 fs/ubifs/debug.c } else if (lnum >= UBIFS_LOG_LNUM && lnum <= c->log_last) { c 2504 fs/ubifs/debug.c ubifs_warn(c, "failing in log LEB %d", lnum); c 2505 fs/ubifs/debug.c } else if (lnum >= c->lpt_first && lnum <= c->lpt_last) { c 2510 fs/ubifs/debug.c ubifs_warn(c, "failing in LPT LEB %d", lnum); c 2511 fs/ubifs/debug.c } else if (lnum >= c->orph_first && lnum <= c->orph_last) { c 2516 fs/ubifs/debug.c ubifs_warn(c, "failing in orphan LEB %d", lnum); c 2517 fs/ubifs/debug.c } else if (lnum == c->ihead_lnum) { c 2520 fs/ubifs/debug.c ubifs_warn(c, "failing in index head LEB %d", lnum); c 2521 fs/ubifs/debug.c } else if (c->jheads && lnum == c->jheads[GCHD].wbuf.lnum) { c 2524 fs/ubifs/debug.c ubifs_warn(c, "failing in GC head LEB %d", lnum); c 2525 fs/ubifs/debug.c } else if (write && !RB_EMPTY_ROOT(&c->buds) && c 2526 fs/ubifs/debug.c !ubifs_search_bud(c, lnum)) { c 2529 fs/ubifs/debug.c ubifs_warn(c, "failing in non-bud LEB %d", lnum); c 2530 fs/ubifs/debug.c } else if (c->cmt_state == COMMIT_RUNNING_BACKGROUND || c 2531 fs/ubifs/debug.c c->cmt_state == COMMIT_RUNNING_REQUIRED) { c 2534 fs/ubifs/debug.c ubifs_warn(c, "failing in bud LEB %d commit running", lnum); c 2538 fs/ubifs/debug.c ubifs_warn(c, "failing in bud LEB %d commit not running", lnum); c 2542 fs/ubifs/debug.c ubifs_warn(c, "========== Power cut emulated =========="); c 2547 fs/ubifs/debug.c static int corrupt_data(const struct ubifs_info *c, const void *buf, c 2555 fs/ubifs/debug.c to = min(len, 
ALIGN(from + 1, c->max_write_size)); c 2557 fs/ubifs/debug.c ubifs_warn(c, "filled bytes %u-%u with %s", from, to - 1, c 2568 fs/ubifs/debug.c int dbg_leb_write(struct ubifs_info *c, int lnum, const void *buf, c 2573 fs/ubifs/debug.c if (dbg_is_power_cut(c)) c 2576 fs/ubifs/debug.c failing = power_cut_emulated(c, lnum, 1); c 2578 fs/ubifs/debug.c len = corrupt_data(c, buf, len); c 2579 fs/ubifs/debug.c ubifs_warn(c, "actually write %d bytes to LEB %d:%d (the buffer was corrupted)", c 2582 fs/ubifs/debug.c err = ubi_leb_write(c->ubi, lnum, buf, offs, len); c 2590 fs/ubifs/debug.c int dbg_leb_change(struct ubifs_info *c, int lnum, const void *buf, c 2595 fs/ubifs/debug.c if (dbg_is_power_cut(c)) c 2597 fs/ubifs/debug.c if (power_cut_emulated(c, lnum, 1)) c 2599 fs/ubifs/debug.c err = ubi_leb_change(c->ubi, lnum, buf, len); c 2602 fs/ubifs/debug.c if (power_cut_emulated(c, lnum, 1)) c 2607 fs/ubifs/debug.c int dbg_leb_unmap(struct ubifs_info *c, int lnum) c 2611 fs/ubifs/debug.c if (dbg_is_power_cut(c)) c 2613 fs/ubifs/debug.c if (power_cut_emulated(c, lnum, 0)) c 2615 fs/ubifs/debug.c err = ubi_leb_unmap(c->ubi, lnum); c 2618 fs/ubifs/debug.c if (power_cut_emulated(c, lnum, 0)) c 2623 fs/ubifs/debug.c int dbg_leb_map(struct ubifs_info *c, int lnum) c 2627 fs/ubifs/debug.c if (dbg_is_power_cut(c)) c 2629 fs/ubifs/debug.c if (power_cut_emulated(c, lnum, 0)) c 2631 fs/ubifs/debug.c err = ubi_leb_map(c->ubi, lnum); c 2634 fs/ubifs/debug.c if (power_cut_emulated(c, lnum, 0)) c 2682 fs/ubifs/debug.c struct ubifs_info *c = file->private_data; c 2683 fs/ubifs/debug.c struct ubifs_debug_info *d = c->dbg; c 2699 fs/ubifs/debug.c val = c->ro_error; c 2735 fs/ubifs/debug.c struct ubifs_info *c = file->private_data; c 2736 fs/ubifs/debug.c struct ubifs_debug_info *d = c->dbg; c 2753 fs/ubifs/debug.c ubifs_dump_lprops(c); c 2757 fs/ubifs/debug.c ubifs_dump_budg(c, &c->bi); c 2761 fs/ubifs/debug.c mutex_lock(&c->tnc_mutex); c 2762 fs/ubifs/debug.c ubifs_dump_tnc(c); c 2763 fs/ubifs/debug.c mutex_unlock(&c->tnc_mutex); c 2784 fs/ubifs/debug.c c->ro_error = !!val; c 2810 fs/ubifs/debug.c void dbg_debugfs_init_fs(struct ubifs_info *c) c 2814 fs/ubifs/debug.c struct ubifs_debug_info *d = c->dbg; c 2817 fs/ubifs/debug.c c->vi.ubi_num, c->vi.vol_id); c 2827 fs/ubifs/debug.c d->dfs_dump_lprops = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, c 2831 fs/ubifs/debug.c d->dfs_dump_budg = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, c 2835 fs/ubifs/debug.c d->dfs_dump_tnc = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, c 2840 fs/ubifs/debug.c d->dfs_dir, c, &dfs_fops); c 2844 fs/ubifs/debug.c d->dfs_dir, c, &dfs_fops); c 2848 fs/ubifs/debug.c d->dfs_dir, c, &dfs_fops); c 2852 fs/ubifs/debug.c d->dfs_dir, c, &dfs_fops); c 2856 fs/ubifs/debug.c d->dfs_dir, c, &dfs_fops); c 2860 fs/ubifs/debug.c d->dfs_dir, c, &dfs_fops); c 2864 fs/ubifs/debug.c d->dfs_dir, c, &dfs_fops); c 2871 fs/ubifs/debug.c void dbg_debugfs_exit_fs(struct ubifs_info *c) c 2873 fs/ubifs/debug.c debugfs_remove_recursive(c->dbg->dfs_dir); c 2991 fs/ubifs/debug.c void ubifs_assert_failed(struct ubifs_info *c, const char *expr, c 2994 fs/ubifs/debug.c ubifs_err(c, "UBIFS assert failed: %s, in %s:%u", expr, file, line); c 2996 fs/ubifs/debug.c switch (c->assert_action) { c 3002 fs/ubifs/debug.c ubifs_ro_mode(c, -EINVAL); c 3021 fs/ubifs/debug.c int ubifs_debugging_init(struct ubifs_info *c) c 3023 fs/ubifs/debug.c c->dbg = kzalloc(sizeof(struct ubifs_debug_info), GFP_KERNEL); c 3024 fs/ubifs/debug.c if (!c->dbg) c 3034 fs/ubifs/debug.c void 
ubifs_debugging_exit(struct ubifs_info *c) c 3036 fs/ubifs/debug.c kfree(c->dbg); c 15 fs/ubifs/debug.h typedef int (*dbg_leaf_callback)(struct ubifs_info *c, c 17 fs/ubifs/debug.h typedef int (*dbg_znode_callback)(struct ubifs_info *c, c 139 fs/ubifs/debug.h void ubifs_assert_failed(struct ubifs_info *c, const char *expr, c 142 fs/ubifs/debug.h #define ubifs_assert(c, expr) do { \ c 144 fs/ubifs/debug.h ubifs_assert_failed((struct ubifs_info *)c, #expr, __FILE__, \ c 149 fs/ubifs/debug.h #define ubifs_assert_cmt_locked(c) do { \ c 150 fs/ubifs/debug.h if (unlikely(down_write_trylock(&(c)->commit_sem))) { \ c 151 fs/ubifs/debug.h up_write(&(c)->commit_sem); \ c 152 fs/ubifs/debug.h ubifs_err(c, "commit lock is not locked!\n"); \ c 153 fs/ubifs/debug.h ubifs_assert(c, 0); \ c 166 fs/ubifs/debug.h dbg_snprintf_key(c, key, __tmp_key_buf, DBG_KEY_BUF_LEN)); \ c 204 fs/ubifs/debug.h static inline int dbg_is_chk_gen(const struct ubifs_info *c) c 206 fs/ubifs/debug.h return !!(ubifs_dbg.chk_gen || c->dbg->chk_gen); c 208 fs/ubifs/debug.h static inline int dbg_is_chk_index(const struct ubifs_info *c) c 210 fs/ubifs/debug.h return !!(ubifs_dbg.chk_index || c->dbg->chk_index); c 212 fs/ubifs/debug.h static inline int dbg_is_chk_orph(const struct ubifs_info *c) c 214 fs/ubifs/debug.h return !!(ubifs_dbg.chk_orph || c->dbg->chk_orph); c 216 fs/ubifs/debug.h static inline int dbg_is_chk_lprops(const struct ubifs_info *c) c 218 fs/ubifs/debug.h return !!(ubifs_dbg.chk_lprops || c->dbg->chk_lprops); c 220 fs/ubifs/debug.h static inline int dbg_is_chk_fs(const struct ubifs_info *c) c 222 fs/ubifs/debug.h return !!(ubifs_dbg.chk_fs || c->dbg->chk_fs); c 224 fs/ubifs/debug.h static inline int dbg_is_tst_rcvry(const struct ubifs_info *c) c 226 fs/ubifs/debug.h return !!(ubifs_dbg.tst_rcvry || c->dbg->tst_rcvry); c 228 fs/ubifs/debug.h static inline int dbg_is_power_cut(const struct ubifs_info *c) c 230 fs/ubifs/debug.h return !!c->dbg->pc_happened; c 233 fs/ubifs/debug.h int ubifs_debugging_init(struct ubifs_info *c); c 234 fs/ubifs/debug.h void ubifs_debugging_exit(struct ubifs_info *c); c 240 fs/ubifs/debug.h const char *dbg_get_key_dump(const struct ubifs_info *c, c 242 fs/ubifs/debug.h const char *dbg_snprintf_key(const struct ubifs_info *c, c 244 fs/ubifs/debug.h void ubifs_dump_inode(struct ubifs_info *c, const struct inode *inode); c 245 fs/ubifs/debug.h void ubifs_dump_node(const struct ubifs_info *c, const void *node); c 248 fs/ubifs/debug.h void ubifs_dump_budg(struct ubifs_info *c, const struct ubifs_budg_info *bi); c 249 fs/ubifs/debug.h void ubifs_dump_lprop(const struct ubifs_info *c, c 251 fs/ubifs/debug.h void ubifs_dump_lprops(struct ubifs_info *c); c 252 fs/ubifs/debug.h void ubifs_dump_lpt_info(struct ubifs_info *c); c 253 fs/ubifs/debug.h void ubifs_dump_leb(const struct ubifs_info *c, int lnum); c 254 fs/ubifs/debug.h void ubifs_dump_sleb(const struct ubifs_info *c, c 256 fs/ubifs/debug.h void ubifs_dump_znode(const struct ubifs_info *c, c 258 fs/ubifs/debug.h void ubifs_dump_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, c 260 fs/ubifs/debug.h void ubifs_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, c 262 fs/ubifs/debug.h void ubifs_dump_tnc(struct ubifs_info *c); c 263 fs/ubifs/debug.h void ubifs_dump_index(struct ubifs_info *c); c 264 fs/ubifs/debug.h void ubifs_dump_lpt_lebs(const struct ubifs_info *c); c 266 fs/ubifs/debug.h int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb, c 270 fs/ubifs/debug.h void dbg_save_space_info(struct ubifs_info 
*c); c 271 fs/ubifs/debug.h int dbg_check_space_info(struct ubifs_info *c); c 272 fs/ubifs/debug.h int dbg_check_lprops(struct ubifs_info *c); c 273 fs/ubifs/debug.h int dbg_old_index_check_init(struct ubifs_info *c, struct ubifs_zbranch *zroot); c 274 fs/ubifs/debug.h int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot); c 275 fs/ubifs/debug.h int dbg_check_cats(struct ubifs_info *c); c 276 fs/ubifs/debug.h int dbg_check_ltab(struct ubifs_info *c); c 277 fs/ubifs/debug.h int dbg_chk_lpt_free_spc(struct ubifs_info *c); c 278 fs/ubifs/debug.h int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len); c 279 fs/ubifs/debug.h int dbg_check_synced_i_size(const struct ubifs_info *c, struct inode *inode); c 280 fs/ubifs/debug.h int dbg_check_dir(struct ubifs_info *c, const struct inode *dir); c 281 fs/ubifs/debug.h int dbg_check_tnc(struct ubifs_info *c, int extra); c 282 fs/ubifs/debug.h int dbg_check_idx_size(struct ubifs_info *c, long long idx_size); c 283 fs/ubifs/debug.h int dbg_check_filesystem(struct ubifs_info *c); c 284 fs/ubifs/debug.h void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat, c 286 fs/ubifs/debug.h int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode, c 288 fs/ubifs/debug.h int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode, c 290 fs/ubifs/debug.h int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head); c 291 fs/ubifs/debug.h int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head); c 293 fs/ubifs/debug.h int dbg_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs, c 295 fs/ubifs/debug.h int dbg_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len); c 296 fs/ubifs/debug.h int dbg_leb_unmap(struct ubifs_info *c, int lnum); c 297 fs/ubifs/debug.h int dbg_leb_map(struct ubifs_info *c, int lnum); c 302 fs/ubifs/debug.h void dbg_debugfs_init_fs(struct ubifs_info *c); c 303 fs/ubifs/debug.h void dbg_debugfs_exit_fs(struct ubifs_info *c); c 76 fs/ubifs/dir.c struct inode *ubifs_new_inode(struct ubifs_info *c, struct inode *dir, c 87 fs/ubifs/dir.c ubifs_err(c, "fscrypt_get_encryption_info failed: %i", err); c 97 fs/ubifs/dir.c inode = new_inode(c->vfs_sb); c 143 fs/ubifs/dir.c ui->compr_type = c->default_compr; c 148 fs/ubifs/dir.c spin_lock(&c->cnt_lock); c 150 fs/ubifs/dir.c if (c->highest_inum >= INUM_WARN_WATERMARK) { c 151 fs/ubifs/dir.c if (c->highest_inum >= INUM_WATERMARK) { c 152 fs/ubifs/dir.c spin_unlock(&c->cnt_lock); c 153 fs/ubifs/dir.c ubifs_err(c, "out of inode numbers"); c 158 fs/ubifs/dir.c ubifs_warn(c, "running out of inode numbers (current %lu, max %u)", c 159 fs/ubifs/dir.c (unsigned long)c->highest_inum, INUM_WATERMARK); c 162 fs/ubifs/dir.c inode->i_ino = ++c->highest_inum; c 170 fs/ubifs/dir.c ui->creat_sqnum = ++c->max_sqnum; c 171 fs/ubifs/dir.c spin_unlock(&c->cnt_lock); c 176 fs/ubifs/dir.c ubifs_err(c, "fscrypt_inherit_context failed: %i", err); c 186 fs/ubifs/dir.c static int dbg_check_name(const struct ubifs_info *c, c 190 fs/ubifs/dir.c if (!dbg_is_chk_gen(c)) c 206 fs/ubifs/dir.c struct ubifs_info *c = dir->i_sb->s_fs_info; c 229 fs/ubifs/dir.c ubifs_assert(c, fname_len(&nm) == 0); c 230 fs/ubifs/dir.c ubifs_assert(c, fname_name(&nm) == NULL); c 233 fs/ubifs/dir.c dent_key_init_hash(c, &key, dir->i_ino, nm.hash); c 234 fs/ubifs/dir.c err = ubifs_tnc_lookup_dh(c, &key, dent, nm.minor_hash); c 236 fs/ubifs/dir.c dent_key_init(c, &key, dir->i_ino, &nm); c 237 fs/ubifs/dir.c err = 
ubifs_tnc_lookup_nm(c, &key, dent, &nm); c 248 fs/ubifs/dir.c if (dbg_check_name(c, dent, &nm)) { c 260 fs/ubifs/dir.c ubifs_err(c, "dead directory entry '%pd', error %d", c 262 fs/ubifs/dir.c ubifs_ro_mode(c, err); c 269 fs/ubifs/dir.c ubifs_warn(c, "Inconsistent encryption contexts: %lu/%lu", c 285 fs/ubifs/dir.c struct ubifs_info *c = dir->i_sb->s_fs_info; c 300 fs/ubifs/dir.c err = ubifs_budget_space(c, &req); c 310 fs/ubifs/dir.c inode = ubifs_new_inode(c, dir, mode); c 324 fs/ubifs/dir.c err = ubifs_jnl_update(c, dir, &nm, inode, 0, 0); c 329 fs/ubifs/dir.c ubifs_release_budget(c, &req); c 345 fs/ubifs/dir.c ubifs_release_budget(c, &req); c 346 fs/ubifs/dir.c ubifs_err(c, "cannot create regular file, error %d", err); c 354 fs/ubifs/dir.c struct ubifs_info *c = dir->i_sb->s_fs_info; c 373 fs/ubifs/dir.c err = ubifs_budget_space(c, &req); c 379 fs/ubifs/dir.c err = ubifs_budget_space(c, &ino_req); c 381 fs/ubifs/dir.c ubifs_release_budget(c, &req); c 386 fs/ubifs/dir.c inode = ubifs_new_inode(c, dir, mode); c 395 fs/ubifs/dir.c ubifs_assert(c, inode->i_op == &ubifs_file_inode_operations); c 412 fs/ubifs/dir.c ubifs_assert(c, ui->dirty); c 418 fs/ubifs/dir.c err = ubifs_jnl_update(c, dir, &nm, inode, 1, 0); c 423 fs/ubifs/dir.c ubifs_release_budget(c, &req); c 434 fs/ubifs/dir.c ubifs_release_budget(c, &req); c 436 fs/ubifs/dir.c ubifs_release_budget(c, &ino_req); c 438 fs/ubifs/dir.c ubifs_err(c, "cannot create temporary file, error %d", err); c 503 fs/ubifs/dir.c struct ubifs_info *c = dir->i_sb->s_fs_info; c 547 fs/ubifs/dir.c ubifs_assert(c, !file->private_data); c 555 fs/ubifs/dir.c lowest_dent_key(c, &key, dir->i_ino); c 557 fs/ubifs/dir.c dent = ubifs_tnc_next_ent(c, &key, &nm); c 563 fs/ubifs/dir.c ctx->pos = key_hash_flash(c, &dent->key); c 573 fs/ubifs/dir.c dent_key_init_hash(c, &key, dir->i_ino, ctx->pos); c 575 fs/ubifs/dir.c dent = ubifs_tnc_next_ent(c, &key, &nm); c 580 fs/ubifs/dir.c ctx->pos = key_hash_flash(c, &dent->key); c 587 fs/ubifs/dir.c key_hash_flash(c, &dent->key)); c 588 fs/ubifs/dir.c ubifs_assert(c, le64_to_cpu(dent->ch.sqnum) > c 597 fs/ubifs/dir.c err = fscrypt_fname_disk_to_usr(dir, key_hash_flash(c, c 617 fs/ubifs/dir.c key_read(c, &dent->key, &key); c 618 fs/ubifs/dir.c dent = ubifs_tnc_next_ent(c, &key, &nm); c 625 fs/ubifs/dir.c ctx->pos = key_hash_flash(c, &dent->key); c 638 fs/ubifs/dir.c ubifs_err(c, "cannot find next direntry, error %d", err); c 690 fs/ubifs/dir.c struct ubifs_info *c = dir->i_sb->s_fs_info; c 707 fs/ubifs/dir.c ubifs_assert(c, inode_is_locked(dir)); c 708 fs/ubifs/dir.c ubifs_assert(c, inode_is_locked(inode)); c 718 fs/ubifs/dir.c err = dbg_check_synced_i_size(c, inode); c 722 fs/ubifs/dir.c err = ubifs_budget_space(c, &req); c 730 fs/ubifs/dir.c ubifs_delete_orphan(c, inode->i_ino); c 738 fs/ubifs/dir.c err = ubifs_jnl_update(c, dir, &nm, inode, 0, 0); c 743 fs/ubifs/dir.c ubifs_release_budget(c, &req); c 753 fs/ubifs/dir.c ubifs_add_orphan(c, inode->i_ino); c 755 fs/ubifs/dir.c ubifs_release_budget(c, &req); c 764 fs/ubifs/dir.c struct ubifs_info *c = dir->i_sb->s_fs_info; c 793 fs/ubifs/dir.c ubifs_assert(c, inode_is_locked(dir)); c 794 fs/ubifs/dir.c ubifs_assert(c, inode_is_locked(inode)); c 795 fs/ubifs/dir.c err = dbg_check_synced_i_size(c, inode); c 799 fs/ubifs/dir.c err = ubifs_budget_space(c, &req); c 812 fs/ubifs/dir.c err = ubifs_jnl_update(c, dir, &nm, inode, 1, 0); c 818 fs/ubifs/dir.c ubifs_release_budget(c, &req); c 821 fs/ubifs/dir.c c->bi.nospace = c->bi.nospace_rp = 0; c 833 fs/ubifs/dir.c 
ubifs_release_budget(c, &req); c 849 fs/ubifs/dir.c struct ubifs_info *c = dir->i_sb->s_fs_info; c 855 fs/ubifs/dir.c lowest_dent_key(c, &key, dir->i_ino); c 856 fs/ubifs/dir.c dent = ubifs_tnc_next_ent(c, &key, &nm); c 870 fs/ubifs/dir.c struct ubifs_info *c = dir->i_sb->s_fs_info; c 885 fs/ubifs/dir.c ubifs_assert(c, inode_is_locked(dir)); c 886 fs/ubifs/dir.c ubifs_assert(c, inode_is_locked(inode)); c 901 fs/ubifs/dir.c err = ubifs_budget_space(c, &req); c 915 fs/ubifs/dir.c err = ubifs_jnl_update(c, dir, &nm, inode, 1, 0); c 921 fs/ubifs/dir.c ubifs_release_budget(c, &req); c 924 fs/ubifs/dir.c c->bi.nospace = c->bi.nospace_rp = 0; c 937 fs/ubifs/dir.c ubifs_release_budget(c, &req); c 947 fs/ubifs/dir.c struct ubifs_info *c = dir->i_sb->s_fs_info; c 960 fs/ubifs/dir.c err = ubifs_budget_space(c, &req); c 970 fs/ubifs/dir.c inode = ubifs_new_inode(c, dir, S_IFDIR | mode); c 987 fs/ubifs/dir.c err = ubifs_jnl_update(c, dir, &nm, inode, 0, 0); c 989 fs/ubifs/dir.c ubifs_err(c, "cannot create directory, error %d", err); c 994 fs/ubifs/dir.c ubifs_release_budget(c, &req); c 1010 fs/ubifs/dir.c ubifs_release_budget(c, &req); c 1020 fs/ubifs/dir.c struct ubifs_info *c = dir->i_sb->s_fs_info; c 1043 fs/ubifs/dir.c err = ubifs_budget_space(c, &req); c 1057 fs/ubifs/dir.c inode = ubifs_new_inode(c, dir, mode); c 1078 fs/ubifs/dir.c err = ubifs_jnl_update(c, dir, &nm, inode, 0, 0); c 1083 fs/ubifs/dir.c ubifs_release_budget(c, &req); c 1099 fs/ubifs/dir.c ubifs_release_budget(c, &req); c 1109 fs/ubifs/dir.c struct ubifs_info *c = dir->i_sb->s_fs_info; c 1129 fs/ubifs/dir.c err = ubifs_budget_space(c, &req); c 1139 fs/ubifs/dir.c inode = ubifs_new_inode(c, dir, S_IFLNK | S_IRWXUGO); c 1178 fs/ubifs/dir.c err = ubifs_jnl_update(c, dir, &nm, inode, 0, 0); c 1198 fs/ubifs/dir.c ubifs_release_budget(c, &req); c 1251 fs/ubifs/dir.c struct ubifs_info *c = old_dir->i_sb->s_fs_info; c 1282 fs/ubifs/dir.c ubifs_assert(c, inode_is_locked(new_inode)); c 1308 fs/ubifs/dir.c err = ubifs_budget_space(c, &req); c 1314 fs/ubifs/dir.c err = ubifs_budget_space(c, &ino_req); c 1318 fs/ubifs/dir.c ubifs_release_budget(c, &req); c 1341 fs/ubifs/dir.c ubifs_assert(c, !whiteout_ui->dirty); c 1420 fs/ubifs/dir.c err = ubifs_budget_space(c, &wht_req); c 1434 fs/ubifs/dir.c err = ubifs_jnl_rename(c, old_dir, old_inode, &old_nm, new_dir, c 1440 fs/ubifs/dir.c ubifs_release_budget(c, &req); c 1448 fs/ubifs/dir.c ubifs_release_budget(c, &ino_req); c 1481 fs/ubifs/dir.c ubifs_release_budget(c, &ino_req); c 1482 fs/ubifs/dir.c ubifs_release_budget(c, &req); c 1491 fs/ubifs/dir.c struct ubifs_info *c = old_dir->i_sb->s_fs_info; c 1501 fs/ubifs/dir.c ubifs_assert(c, fst_inode && snd_inode); c 1532 fs/ubifs/dir.c err = ubifs_jnl_xrename(c, old_dir, fst_inode, &fst_nm, new_dir, c 1536 fs/ubifs/dir.c ubifs_release_budget(c, &req); c 1548 fs/ubifs/dir.c struct ubifs_info *c = old_dir->i_sb->s_fs_info; c 1553 fs/ubifs/dir.c ubifs_assert(c, inode_is_locked(old_dir)); c 1554 fs/ubifs/dir.c ubifs_assert(c, inode_is_locked(new_dir)); c 48 fs/ubifs/file.c struct ubifs_info *c = inode->i_sb->s_fs_info; c 53 fs/ubifs/file.c data_key_init(c, &key, inode->i_ino, block); c 54 fs/ubifs/file.c err = ubifs_tnc_lookup(c, &key, dn); c 62 fs/ubifs/file.c ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) > c 77 fs/ubifs/file.c err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len, c 93 fs/ubifs/file.c ubifs_err(c, "bad data node (block %u, inode %lu)", c 95 fs/ubifs/file.c ubifs_dump_node(c, dn); c 106 fs/ubifs/file.c struct ubifs_info *c = 
inode->i_sb->s_fs_info; c 111 fs/ubifs/file.c ubifs_assert(c, !PageChecked(page)); c 112 fs/ubifs/file.c ubifs_assert(c, !PagePrivate(page)); c 159 fs/ubifs/file.c struct ubifs_info *c = inode->i_sb->s_fs_info; c 166 fs/ubifs/file.c ubifs_err(c, "cannot read page %lu of inode %lu, error %d", c 196 fs/ubifs/file.c static void release_new_page_budget(struct ubifs_info *c) c 200 fs/ubifs/file.c ubifs_release_budget(c, &req); c 210 fs/ubifs/file.c static void release_existing_page_budget(struct ubifs_info *c) c 212 fs/ubifs/file.c struct ubifs_budget_req req = { .dd_growth = c->bi.page_budget}; c 214 fs/ubifs/file.c ubifs_release_budget(c, &req); c 222 fs/ubifs/file.c struct ubifs_info *c = inode->i_sb->s_fs_info; c 243 fs/ubifs/file.c err = ubifs_budget_space(c, &req); c 249 fs/ubifs/file.c ubifs_release_budget(c, &req); c 261 fs/ubifs/file.c ubifs_release_budget(c, &req); c 281 fs/ubifs/file.c release_new_page_budget(c); c 289 fs/ubifs/file.c ubifs_convert_page_budget(c); c 305 fs/ubifs/file.c ubifs_release_dirty_inode_budget(c, ui); c 325 fs/ubifs/file.c static int allocate_budget(struct ubifs_info *c, struct page *page, c 386 fs/ubifs/file.c return ubifs_budget_space(c, &req); c 426 fs/ubifs/file.c struct ubifs_info *c = inode->i_sb->s_fs_info; c 433 fs/ubifs/file.c ubifs_assert(c, ubifs_inode(inode)->ui_size == inode->i_size); c 434 fs/ubifs/file.c ubifs_assert(c, !c->ro_media && !c->ro_mount); c 436 fs/ubifs/file.c if (unlikely(c->ro_error)) c 471 fs/ubifs/file.c err = allocate_budget(c, page, ui, appending); c 473 fs/ubifs/file.c ubifs_assert(c, err == -ENOSPC); c 490 fs/ubifs/file.c ubifs_assert(c, mutex_is_locked(&ui->ui_mutex)); c 520 fs/ubifs/file.c static void cancel_budget(struct ubifs_info *c, struct page *page, c 525 fs/ubifs/file.c ubifs_release_dirty_inode_budget(c, ui); c 530 fs/ubifs/file.c release_new_page_budget(c); c 532 fs/ubifs/file.c release_existing_page_budget(c); c 542 fs/ubifs/file.c struct ubifs_info *c = inode->i_sb->s_fs_info; c 561 fs/ubifs/file.c cancel_budget(c, page, ui, appending); c 574 fs/ubifs/file.c atomic_long_inc(&c->dirty_pg_cnt); c 587 fs/ubifs/file.c ubifs_assert(c, mutex_is_locked(&ui->ui_mutex)); c 606 fs/ubifs/file.c static int populate_page(struct ubifs_info *c, struct page *page, c 635 fs/ubifs/file.c } else if (key_block(c, &bu->zbranch[nn].key) == page_block) { c 640 fs/ubifs/file.c ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) > c 656 fs/ubifs/file.c err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len, c 666 fs/ubifs/file.c } else if (key_block(c, &bu->zbranch[nn].key) < page_block) { c 704 fs/ubifs/file.c ubifs_err(c, "bad data node (block %u, inode %lu)", c 717 fs/ubifs/file.c static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu, c 729 fs/ubifs/file.c err = ubifs_tnc_get_bu_keys(c, bu); c 759 fs/ubifs/file.c ubifs_assert(c, bu->buf_len > 0); c 760 fs/ubifs/file.c ubifs_assert(c, bu->buf_len <= c->leb_size); c 766 fs/ubifs/file.c err = ubifs_tnc_bulk_read(c, bu); c 771 fs/ubifs/file.c err = populate_page(c, page1, bu, &n); c 795 fs/ubifs/file.c err = populate_page(c, page, bu, &n); c 810 fs/ubifs/file.c ubifs_warn(c, "ignoring error %d and skipping bulk-read", err); c 830 fs/ubifs/file.c struct ubifs_info *c = inode->i_sb->s_fs_info; c 837 fs/ubifs/file.c if (!c->bulk_read) c 867 fs/ubifs/file.c if (mutex_trylock(&c->bu_mutex)) c 868 fs/ubifs/file.c bu = &c->bu; c 878 fs/ubifs/file.c bu->buf_len = c->max_bu_buf_len; c 879 fs/ubifs/file.c data_key_init(c, &bu->key, inode->i_ino, c 881 fs/ubifs/file.c err = 
ubifs_do_bulk_read(c, bu, page); c 884 fs/ubifs/file.c mutex_unlock(&c->bu_mutex); c 909 fs/ubifs/file.c struct ubifs_info *c = inode->i_sb->s_fs_info; c 914 fs/ubifs/file.c ubifs_assert(c, page->index <= ui->synced_i_size >> PAGE_SHIFT); c 926 fs/ubifs/file.c data_key_init(c, &key, inode->i_ino, block); c 927 fs/ubifs/file.c err = ubifs_jnl_write_data(c, inode, &key, addr, blen); c 938 fs/ubifs/file.c ubifs_err(c, "cannot write page %lu of inode %lu, error %d", c 940 fs/ubifs/file.c ubifs_ro_mode(c, err); c 943 fs/ubifs/file.c ubifs_assert(c, PagePrivate(page)); c 945 fs/ubifs/file.c release_new_page_budget(c); c 947 fs/ubifs/file.c release_existing_page_budget(c); c 949 fs/ubifs/file.c atomic_long_dec(&c->dirty_pg_cnt); c 1008 fs/ubifs/file.c struct ubifs_info *c = inode->i_sb->s_fs_info; c 1017 fs/ubifs/file.c ubifs_assert(c, PagePrivate(page)); c 1108 fs/ubifs/file.c static int do_truncation(struct ubifs_info *c, struct inode *inode, c 1131 fs/ubifs/file.c err = ubifs_budget_space(c, &req); c 1159 fs/ubifs/file.c ubifs_assert(c, PagePrivate(page)); c 1191 fs/ubifs/file.c err = ubifs_jnl_truncate(c, inode, old_size, new_size); c 1196 fs/ubifs/file.c ubifs_release_budget(c, &req); c 1198 fs/ubifs/file.c c->bi.nospace = c->bi.nospace_rp = 0; c 1214 fs/ubifs/file.c static int do_setattr(struct ubifs_info *c, struct inode *inode, c 1223 fs/ubifs/file.c err = ubifs_budget_space(c, &req); c 1254 fs/ubifs/file.c ubifs_release_budget(c, &req); c 1264 fs/ubifs/file.c struct ubifs_info *c = inode->i_sb->s_fs_info; c 1272 fs/ubifs/file.c err = dbg_check_synced_i_size(c, inode); c 1282 fs/ubifs/file.c err = do_truncation(c, inode, attr); c 1284 fs/ubifs/file.c err = do_setattr(c, inode, attr); c 1293 fs/ubifs/file.c struct ubifs_info *c = inode->i_sb->s_fs_info; c 1295 fs/ubifs/file.c ubifs_assert(c, PagePrivate(page)); c 1301 fs/ubifs/file.c release_new_page_budget(c); c 1303 fs/ubifs/file.c release_existing_page_budget(c); c 1305 fs/ubifs/file.c atomic_long_dec(&c->dirty_pg_cnt); c 1313 fs/ubifs/file.c struct ubifs_info *c = inode->i_sb->s_fs_info; c 1318 fs/ubifs/file.c if (c->ro_mount) c 1341 fs/ubifs/file.c err = ubifs_sync_wbufs_by_inode(c, inode); c 1375 fs/ubifs/file.c struct ubifs_info *c = inode->i_sb->s_fs_info; c 1383 fs/ubifs/file.c err = ubifs_budget_space(c, &req); c 1399 fs/ubifs/file.c ubifs_release_budget(c, &req); c 1415 fs/ubifs/file.c struct ubifs_info *c = inode->i_sb->s_fs_info; c 1422 fs/ubifs/file.c err = ubifs_budget_space(c, &req); c 1432 fs/ubifs/file.c ubifs_release_budget(c, &req); c 1451 fs/ubifs/file.c struct ubifs_info *c = inode->i_sb->s_fs_info; c 1458 fs/ubifs/file.c ubifs_assert(c, ret == 0); c 1488 fs/ubifs/file.c struct ubifs_info *c = inode->i_sb->s_fs_info; c 1496 fs/ubifs/file.c ubifs_assert(c, PagePrivate(page)); c 1497 fs/ubifs/file.c ubifs_assert(c, 0); c 1511 fs/ubifs/file.c struct ubifs_info *c = inode->i_sb->s_fs_info; c 1518 fs/ubifs/file.c ubifs_assert(c, !c->ro_media && !c->ro_mount); c 1520 fs/ubifs/file.c if (unlikely(c->ro_error)) c 1549 fs/ubifs/file.c err = ubifs_budget_space(c, &req); c 1552 fs/ubifs/file.c ubifs_warn(c, "out of space for mmapped file (inode number %lu)", c 1565 fs/ubifs/file.c release_new_page_budget(c); c 1568 fs/ubifs/file.c ubifs_convert_page_budget(c); c 1570 fs/ubifs/file.c atomic_long_inc(&c->dirty_pg_cnt); c 1584 fs/ubifs/file.c ubifs_release_dirty_inode_budget(c, ui); c 1592 fs/ubifs/file.c ubifs_release_budget(c, &req); c 42 fs/ubifs/find.c static int valuable(struct ubifs_info *c, const struct ubifs_lprops *lprops) 
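
The fs/ubifs/dir.c and fs/ubifs/file.c entries above all follow the same budgeting discipline: reserve space with ubifs_budget_space() before dirtying anything, then hand the reservation back with ubifs_release_budget() on both the success and the failure path (only a failed reservation returns without releasing). A minimal user-space model of that shape, with hypothetical stand-ins (budget_space, release_budget, do_update and create_entry are not UBIFS APIs):

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for ubifs_budget_space()/ubifs_release_budget(). */
struct budget_req { int new_dent; int dirtied_ino; };

static long budgeted;                      /* models the c->bi accounting      */

static int budget_space(struct budget_req *req)
{
	budgeted += req->new_dent + req->dirtied_ino;
	return 0;                          /* would return -ENOSPC when full   */
}

static void release_budget(struct budget_req *req)
{
	budgeted -= req->new_dent + req->dirtied_ino;
}

static int do_update(void)                 /* models ubifs_jnl_update()        */
{
	return 0;                          /* or a negative errno on failure   */
}

/* The shape used by the link/mkdir/mknod entries above: budget first,
 * journal the change, release the budget on every exit path after that. */
static int create_entry(void)
{
	struct budget_req req = { .new_dent = 1, .dirtied_ino = 1 };
	int err = budget_space(&req);

	if (err)
		return err;                /* nothing reserved, nothing to undo */

	err = do_update();
	release_budget(&req);              /* released on success and on error  */
	return err;
}

int main(void)
{
	int err = create_entry();

	printf("create_entry() -> %d, outstanding budget: %ld\n", err, budgeted);
	return 0;
}
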
c 51 fs/ubifs/find.c heap = &c->lpt_heap[cat - 1]; c 54 fs/ubifs/find.c if (lprops->free + lprops->dirty >= c->dark_wm) c 58 fs/ubifs/find.c n = c->lst.empty_lebs + c->freeable_cnt - c 59 fs/ubifs/find.c c->lst.taken_empty_lebs; c 60 fs/ubifs/find.c if (n < c->lsave_cnt) c 83 fs/ubifs/find.c static int scan_for_dirty_cb(struct ubifs_info *c, c 93 fs/ubifs/find.c if (!in_tree && valuable(c, lprops)) c 102 fs/ubifs/find.c if (lprops->free + lprops->dirty == c->leb_size) { c 106 fs/ubifs/find.c } else if (lprops->dirty < c->dead_wm) c 124 fs/ubifs/find.c static const struct ubifs_lprops *scan_for_dirty(struct ubifs_info *c, c 134 fs/ubifs/find.c heap = &c->lpt_heap[LPROPS_FREE - 1]; c 139 fs/ubifs/find.c if (lprops->dirty < c->dead_wm) c 150 fs/ubifs/find.c list_for_each_entry(lprops, &c->uncat_list, list) { c 157 fs/ubifs/find.c if (lprops->dirty < c->dead_wm) c 162 fs/ubifs/find.c if (c->pnodes_have >= c->pnode_cnt) c 169 fs/ubifs/find.c err = ubifs_lpt_scan_nolock(c, -1, c->lscan_lnum, c 174 fs/ubifs/find.c ubifs_assert(c, data.lnum >= c->main_first && data.lnum < c->leb_cnt); c 175 fs/ubifs/find.c c->lscan_lnum = data.lnum; c 176 fs/ubifs/find.c lprops = ubifs_lpt_lookup_dirty(c, data.lnum); c 179 fs/ubifs/find.c ubifs_assert(c, lprops->lnum == data.lnum); c 180 fs/ubifs/find.c ubifs_assert(c, lprops->free + lprops->dirty >= min_space); c 181 fs/ubifs/find.c ubifs_assert(c, lprops->dirty >= c->dead_wm || c 183 fs/ubifs/find.c lprops->free + lprops->dirty == c->leb_size)); c 184 fs/ubifs/find.c ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN)); c 185 fs/ubifs/find.c ubifs_assert(c, !exclude_index || !(lprops->flags & LPROPS_INDEX)); c 221 fs/ubifs/find.c int ubifs_find_dirty_leb(struct ubifs_info *c, struct ubifs_lprops *ret_lp, c 228 fs/ubifs/find.c ubifs_get_lprops(c); c 233 fs/ubifs/find.c spin_lock(&c->space_lock); c 234 fs/ubifs/find.c lebs = c->lst.empty_lebs + c->idx_gc_cnt; c 235 fs/ubifs/find.c lebs += c->freeable_cnt - c->lst.taken_empty_lebs; c 243 fs/ubifs/find.c if (c->bi.min_idx_lebs >= c->lst.idx_lebs) { c 244 fs/ubifs/find.c rsvd_idx_lebs = c->bi.min_idx_lebs - c->lst.idx_lebs; c 247 fs/ubifs/find.c spin_unlock(&c->space_lock); c 252 fs/ubifs/find.c lp = ubifs_fast_find_empty(c); c 257 fs/ubifs/find.c lp = ubifs_fast_find_freeable(c); c 266 fs/ubifs/find.c spin_lock(&c->space_lock); c 267 fs/ubifs/find.c exclude_index = (c->bi.min_idx_lebs >= c->lst.idx_lebs); c 268 fs/ubifs/find.c spin_unlock(&c->space_lock); c 272 fs/ubifs/find.c heap = &c->lpt_heap[LPROPS_DIRTY - 1]; c 273 fs/ubifs/find.c idx_heap = &c->lpt_heap[LPROPS_DIRTY_IDX - 1]; c 288 fs/ubifs/find.c if (sum < min_space || sum < c->half_leb_size) c 306 fs/ubifs/find.c ubifs_assert(c, lp->free + lp->dirty >= c->dead_wm); c 312 fs/ubifs/find.c lp = scan_for_dirty(c, min_space, pick_free, exclude_index); c 317 fs/ubifs/find.c ubifs_assert(c, lp->dirty >= c->dead_wm || c 318 fs/ubifs/find.c (pick_free && lp->free + lp->dirty == c->leb_size)); c 324 fs/ubifs/find.c lp = ubifs_change_lp(c, lp, LPROPS_NC, LPROPS_NC, c 334 fs/ubifs/find.c ubifs_release_lprops(c); c 350 fs/ubifs/find.c static int scan_for_free_cb(struct ubifs_info *c, c 360 fs/ubifs/find.c if (!in_tree && valuable(c, lprops)) c 369 fs/ubifs/find.c if (!data->pick_free && lprops->free == c->leb_size) c 377 fs/ubifs/find.c if (lprops->free + lprops->dirty == c->leb_size && lprops->dirty > 0) c 395 fs/ubifs/find.c const struct ubifs_lprops *do_find_free_space(struct ubifs_info *c, c 405 fs/ubifs/find.c lprops = ubifs_fast_find_free(c); c 410 fs/ubifs/find.c 
lprops = ubifs_fast_find_empty(c); c 415 fs/ubifs/find.c lprops = ubifs_fast_find_free(c); c 420 fs/ubifs/find.c heap = &c->lpt_heap[LPROPS_DIRTY - 1]; c 433 fs/ubifs/find.c list_for_each_entry(lprops, &c->uncat_list, list) { c 442 fs/ubifs/find.c if (c->pnodes_have >= c->pnode_cnt) c 448 fs/ubifs/find.c err = ubifs_lpt_scan_nolock(c, -1, c->lscan_lnum, c 453 fs/ubifs/find.c ubifs_assert(c, data.lnum >= c->main_first && data.lnum < c->leb_cnt); c 454 fs/ubifs/find.c c->lscan_lnum = data.lnum; c 455 fs/ubifs/find.c lprops = ubifs_lpt_lookup_dirty(c, data.lnum); c 458 fs/ubifs/find.c ubifs_assert(c, lprops->lnum == data.lnum); c 459 fs/ubifs/find.c ubifs_assert(c, lprops->free >= min_space); c 460 fs/ubifs/find.c ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN)); c 461 fs/ubifs/find.c ubifs_assert(c, !(lprops->flags & LPROPS_INDEX)); c 481 fs/ubifs/find.c int ubifs_find_free_space(struct ubifs_info *c, int min_space, int *offs, c 488 fs/ubifs/find.c ubifs_get_lprops(c); c 491 fs/ubifs/find.c spin_lock(&c->space_lock); c 492 fs/ubifs/find.c if (c->bi.min_idx_lebs > c->lst.idx_lebs) c 493 fs/ubifs/find.c rsvd_idx_lebs = c->bi.min_idx_lebs - c->lst.idx_lebs; c 496 fs/ubifs/find.c lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt - c 497 fs/ubifs/find.c c->lst.taken_empty_lebs; c 503 fs/ubifs/find.c if (c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) { c 524 fs/ubifs/find.c c->lst.taken_empty_lebs += 1; c 526 fs/ubifs/find.c spin_unlock(&c->space_lock); c 528 fs/ubifs/find.c lprops = do_find_free_space(c, min_space, pick_free, squeeze); c 537 fs/ubifs/find.c lprops = ubifs_change_lp(c, lprops, LPROPS_NC, LPROPS_NC, flags, 0); c 544 fs/ubifs/find.c spin_lock(&c->space_lock); c 545 fs/ubifs/find.c c->lst.taken_empty_lebs -= 1; c 546 fs/ubifs/find.c spin_unlock(&c->space_lock); c 549 fs/ubifs/find.c *offs = c->leb_size - lprops->free; c 550 fs/ubifs/find.c ubifs_release_lprops(c); c 559 fs/ubifs/find.c err = ubifs_leb_unmap(c, lnum); c 564 fs/ubifs/find.c dbg_find("found LEB %d, free %d", lnum, c->leb_size - *offs); c 565 fs/ubifs/find.c ubifs_assert(c, *offs <= c->leb_size - min_space); c 570 fs/ubifs/find.c spin_lock(&c->space_lock); c 571 fs/ubifs/find.c c->lst.taken_empty_lebs -= 1; c 572 fs/ubifs/find.c spin_unlock(&c->space_lock); c 574 fs/ubifs/find.c ubifs_release_lprops(c); c 590 fs/ubifs/find.c static int scan_for_idx_cb(struct ubifs_info *c, c 600 fs/ubifs/find.c if (!in_tree && valuable(c, lprops)) c 606 fs/ubifs/find.c if (lprops->free + lprops->dirty != c->leb_size) c 621 fs/ubifs/find.c static const struct ubifs_lprops *scan_for_leb_for_idx(struct ubifs_info *c) c 628 fs/ubifs/find.c err = ubifs_lpt_scan_nolock(c, -1, c->lscan_lnum, c 633 fs/ubifs/find.c ubifs_assert(c, data.lnum >= c->main_first && data.lnum < c->leb_cnt); c 634 fs/ubifs/find.c c->lscan_lnum = data.lnum; c 635 fs/ubifs/find.c lprops = ubifs_lpt_lookup_dirty(c, data.lnum); c 638 fs/ubifs/find.c ubifs_assert(c, lprops->lnum == data.lnum); c 639 fs/ubifs/find.c ubifs_assert(c, lprops->free + lprops->dirty == c->leb_size); c 640 fs/ubifs/find.c ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN)); c 641 fs/ubifs/find.c ubifs_assert(c, !(lprops->flags & LPROPS_INDEX)); c 661 fs/ubifs/find.c int ubifs_find_free_leb_for_idx(struct ubifs_info *c) c 666 fs/ubifs/find.c ubifs_get_lprops(c); c 668 fs/ubifs/find.c lprops = ubifs_fast_find_empty(c); c 670 fs/ubifs/find.c lprops = ubifs_fast_find_freeable(c); c 679 fs/ubifs/find.c if (c->in_a_category_cnt != c->main_lebs || c 680 fs/ubifs/find.c c->lst.empty_lebs - 
c->lst.taken_empty_lebs > 0) { c 681 fs/ubifs/find.c ubifs_assert(c, c->freeable_cnt == 0); c 682 fs/ubifs/find.c lprops = scan_for_leb_for_idx(c); c 702 fs/ubifs/find.c lprops = ubifs_change_lp(c, lprops, c->leb_size, 0, flags, 0); c 708 fs/ubifs/find.c ubifs_release_lprops(c); c 715 fs/ubifs/find.c err = ubifs_leb_unmap(c, lnum); c 717 fs/ubifs/find.c ubifs_change_one_lp(c, lnum, LPROPS_NC, LPROPS_NC, 0, c 725 fs/ubifs/find.c ubifs_release_lprops(c); c 746 fs/ubifs/find.c int ubifs_save_dirty_idx_lnums(struct ubifs_info *c) c 750 fs/ubifs/find.c ubifs_get_lprops(c); c 752 fs/ubifs/find.c c->dirty_idx.cnt = c->lpt_heap[LPROPS_DIRTY_IDX - 1].cnt; c 753 fs/ubifs/find.c memcpy(c->dirty_idx.arr, c->lpt_heap[LPROPS_DIRTY_IDX - 1].arr, c 754 fs/ubifs/find.c sizeof(void *) * c->dirty_idx.cnt); c 756 fs/ubifs/find.c sort(c->dirty_idx.arr, c->dirty_idx.cnt, sizeof(void *), c 758 fs/ubifs/find.c dbg_find("found %d dirty index LEBs", c->dirty_idx.cnt); c 759 fs/ubifs/find.c if (c->dirty_idx.cnt) c 761 fs/ubifs/find.c c->dirty_idx.arr[c->dirty_idx.cnt - 1]->lnum, c 762 fs/ubifs/find.c c->dirty_idx.arr[c->dirty_idx.cnt - 1]->dirty, c 763 fs/ubifs/find.c c->dirty_idx.arr[c->dirty_idx.cnt - 1]->free); c 765 fs/ubifs/find.c for (i = 0; i < c->dirty_idx.cnt; i++) c 766 fs/ubifs/find.c c->dirty_idx.arr[i] = (void *)(size_t)c->dirty_idx.arr[i]->lnum; c 767 fs/ubifs/find.c ubifs_release_lprops(c); c 783 fs/ubifs/find.c static int scan_dirty_idx_cb(struct ubifs_info *c, c 793 fs/ubifs/find.c if (!in_tree && valuable(c, lprops)) c 799 fs/ubifs/find.c if (lprops->free + lprops->dirty < c->min_idx_node_sz) c 816 fs/ubifs/find.c static int find_dirty_idx_leb(struct ubifs_info *c) c 825 fs/ubifs/find.c heap = &c->lpt_heap[LPROPS_DIRTY_IDX - 1]; c 828 fs/ubifs/find.c ret = scan_dirty_idx_cb(c, lprops, 1, &data); c 832 fs/ubifs/find.c list_for_each_entry(lprops, &c->frdi_idx_list, list) { c 833 fs/ubifs/find.c ret = scan_dirty_idx_cb(c, lprops, 1, &data); c 837 fs/ubifs/find.c list_for_each_entry(lprops, &c->uncat_list, list) { c 838 fs/ubifs/find.c ret = scan_dirty_idx_cb(c, lprops, 1, &data); c 842 fs/ubifs/find.c if (c->pnodes_have >= c->pnode_cnt) c 845 fs/ubifs/find.c err = ubifs_lpt_scan_nolock(c, -1, c->lscan_lnum, c 851 fs/ubifs/find.c ubifs_assert(c, data.lnum >= c->main_first && data.lnum < c->leb_cnt); c 852 fs/ubifs/find.c c->lscan_lnum = data.lnum; c 853 fs/ubifs/find.c lprops = ubifs_lpt_lookup_dirty(c, data.lnum); c 856 fs/ubifs/find.c ubifs_assert(c, lprops->lnum == data.lnum); c 857 fs/ubifs/find.c ubifs_assert(c, lprops->free + lprops->dirty >= c->min_idx_node_sz); c 858 fs/ubifs/find.c ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN)); c 859 fs/ubifs/find.c ubifs_assert(c, (lprops->flags & LPROPS_INDEX)); c 864 fs/ubifs/find.c lprops = ubifs_change_lp(c, lprops, LPROPS_NC, LPROPS_NC, c 876 fs/ubifs/find.c static int get_idx_gc_leb(struct ubifs_info *c) c 881 fs/ubifs/find.c err = ubifs_get_idx_gc_leb(c); c 889 fs/ubifs/find.c lp = ubifs_lpt_lookup_dirty(c, lnum); c 892 fs/ubifs/find.c lp = ubifs_change_lp(c, lp, LPROPS_NC, LPROPS_NC, c 905 fs/ubifs/find.c static int find_dirtiest_idx_leb(struct ubifs_info *c) c 911 fs/ubifs/find.c if (!c->dirty_idx.cnt) c 914 fs/ubifs/find.c lnum = (size_t)c->dirty_idx.arr[--c->dirty_idx.cnt]; c 915 fs/ubifs/find.c lp = ubifs_lpt_lookup(c, lnum); c 920 fs/ubifs/find.c lp = ubifs_change_lp(c, lp, LPROPS_NC, LPROPS_NC, c 928 fs/ubifs/find.c ubifs_assert(c, lp->flags & LPROPS_TAKEN); c 929 fs/ubifs/find.c ubifs_assert(c, lp->flags & LPROPS_INDEX); c 941 
fs/ubifs/find.c int ubifs_find_dirty_idx_leb(struct ubifs_info *c) c 945 fs/ubifs/find.c ubifs_get_lprops(c); c 951 fs/ubifs/find.c err = find_dirtiest_idx_leb(c); c 955 fs/ubifs/find.c err = find_dirty_idx_leb(c); c 959 fs/ubifs/find.c err = get_idx_gc_leb(c); c 961 fs/ubifs/find.c ubifs_release_lprops(c); c 69 fs/ubifs/gc.c static int switch_gc_head(struct ubifs_info *c) c 71 fs/ubifs/gc.c int err, gc_lnum = c->gc_lnum; c 72 fs/ubifs/gc.c struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf; c 74 fs/ubifs/gc.c ubifs_assert(c, gc_lnum != -1); c 77 fs/ubifs/gc.c c->leb_size - wbuf->offs - wbuf->used); c 87 fs/ubifs/gc.c err = ubifs_leb_unmap(c, gc_lnum); c 91 fs/ubifs/gc.c err = ubifs_add_bud_to_log(c, GCHD, gc_lnum, 0); c 95 fs/ubifs/gc.c c->gc_lnum = -1; c 112 fs/ubifs/gc.c struct ubifs_info *c = priv; c 122 fs/ubifs/gc.c ubifs_assert(c, key_type(c, &sa->key) == UBIFS_DATA_KEY); c 123 fs/ubifs/gc.c ubifs_assert(c, key_type(c, &sb->key) == UBIFS_DATA_KEY); c 124 fs/ubifs/gc.c ubifs_assert(c, sa->type == UBIFS_DATA_NODE); c 125 fs/ubifs/gc.c ubifs_assert(c, sb->type == UBIFS_DATA_NODE); c 127 fs/ubifs/gc.c inuma = key_inum(c, &sa->key); c 128 fs/ubifs/gc.c inumb = key_inum(c, &sb->key); c 131 fs/ubifs/gc.c unsigned int blka = key_block(c, &sa->key); c 132 fs/ubifs/gc.c unsigned int blkb = key_block(c, &sb->key); c 156 fs/ubifs/gc.c struct ubifs_info *c = priv; c 166 fs/ubifs/gc.c ubifs_assert(c, key_type(c, &sa->key) != UBIFS_DATA_KEY && c 167 fs/ubifs/gc.c key_type(c, &sb->key) != UBIFS_DATA_KEY); c 168 fs/ubifs/gc.c ubifs_assert(c, sa->type != UBIFS_DATA_NODE && c 180 fs/ubifs/gc.c ubifs_assert(c, key_type(c, &sa->key) == UBIFS_DENT_KEY || c 181 fs/ubifs/gc.c key_type(c, &sa->key) == UBIFS_XENT_KEY); c 182 fs/ubifs/gc.c ubifs_assert(c, key_type(c, &sb->key) == UBIFS_DENT_KEY || c 183 fs/ubifs/gc.c key_type(c, &sb->key) == UBIFS_XENT_KEY); c 184 fs/ubifs/gc.c ubifs_assert(c, sa->type == UBIFS_DENT_NODE || c 186 fs/ubifs/gc.c ubifs_assert(c, sb->type == UBIFS_DENT_NODE || c 189 fs/ubifs/gc.c inuma = key_inum(c, &sa->key); c 190 fs/ubifs/gc.c inumb = key_inum(c, &sb->key); c 193 fs/ubifs/gc.c uint32_t hasha = key_hash(c, &sa->key); c 194 fs/ubifs/gc.c uint32_t hashb = key_hash(c, &sb->key); c 231 fs/ubifs/gc.c static int sort_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb, c 241 fs/ubifs/gc.c ubifs_assert(c, snod->type == UBIFS_INO_NODE || c 258 fs/ubifs/gc.c ubifs_assert(c, key_type(c, &snod->key) == UBIFS_DATA_KEY || c 259 fs/ubifs/gc.c key_type(c, &snod->key) == UBIFS_INO_KEY || c 260 fs/ubifs/gc.c key_type(c, &snod->key) == UBIFS_DENT_KEY || c 261 fs/ubifs/gc.c key_type(c, &snod->key) == UBIFS_XENT_KEY); c 263 fs/ubifs/gc.c err = ubifs_tnc_has_node(c, &snod->key, 0, sleb->lnum, c 278 fs/ubifs/gc.c if (key_type(c, &snod->key) != UBIFS_DATA_KEY) c 283 fs/ubifs/gc.c list_sort(c, &sleb->nodes, &data_nodes_cmp); c 284 fs/ubifs/gc.c list_sort(c, nondata, &nondata_nodes_cmp); c 286 fs/ubifs/gc.c err = dbg_check_data_nodes_order(c, &sleb->nodes); c 289 fs/ubifs/gc.c err = dbg_check_nondata_nodes_order(c, nondata); c 306 fs/ubifs/gc.c static int move_node(struct ubifs_info *c, struct ubifs_scan_leb *sleb, c 316 fs/ubifs/gc.c err = ubifs_tnc_replace(c, &snod->key, sleb->lnum, c 334 fs/ubifs/gc.c static int move_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb) c 338 fs/ubifs/gc.c struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf; c 345 fs/ubifs/gc.c err = switch_gc_head(c); c 350 fs/ubifs/gc.c err = sort_nodes(c, sleb, &nondata, &min); c 361 fs/ubifs/gc.c avail = c->leb_size - wbuf->offs 
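
The comparison callbacks listed above from fs/ubifs/gc.c (data_nodes_cmp, nondata_nodes_cmp) order nodes by owning inode first and by block number or name hash second, so garbage collection writes related nodes out next to each other. A stand-alone sketch of the same ordering using qsort(); the flattened gc_node struct is hypothetical, whereas the real callbacks are list_sort() callbacks that pull these values out of the node key with key_inum()/key_block():

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical flattened node; the real code extracts these fields from
 * snod->key with key_inum() and key_block(). */
struct gc_node { unsigned long inum; unsigned int block; };

static int data_nodes_cmp(const void *a, const void *b)
{
	const struct gc_node *sa = a, *sb = b;

	/* Primary key: owning inode; secondary key: block number. */
	if (sa->inum != sb->inum)
		return sa->inum < sb->inum ? -1 : 1;
	if (sa->block != sb->block)
		return sa->block < sb->block ? -1 : 1;
	return 0;
}

int main(void)
{
	struct gc_node nodes[] = {
		{ 12, 7 }, { 3, 1 }, { 12, 0 }, { 3, 9 },
	};
	size_t i, n = sizeof(nodes) / sizeof(nodes[0]);

	qsort(nodes, n, sizeof(nodes[0]), data_nodes_cmp);
	for (i = 0; i < n; i++)
		printf("inum %lu block %u\n", nodes[i].inum, nodes[i].block);
	return 0;
}
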
- wbuf->used - c 362 fs/ubifs/gc.c ubifs_auth_node_sz(c); c 370 fs/ubifs/gc.c err = ubifs_shash_update(c, c->jheads[GCHD].log_hash, c 375 fs/ubifs/gc.c err = move_node(c, sleb, snod, wbuf); c 383 fs/ubifs/gc.c avail = c->leb_size - wbuf->offs - wbuf->used - c 384 fs/ubifs/gc.c ubifs_auth_node_sz(c); c 396 fs/ubifs/gc.c if (key_type(c, &snod->key) == UBIFS_DENT_KEY || c 402 fs/ubifs/gc.c err = ubifs_shash_update(c, c->jheads[GCHD].log_hash, c 407 fs/ubifs/gc.c err = move_node(c, sleb, snod, wbuf); c 413 fs/ubifs/gc.c if (ubifs_authenticated(c) && moved) { c 416 fs/ubifs/gc.c auth = kmalloc(ubifs_auth_node_sz(c), GFP_NOFS); c 422 fs/ubifs/gc.c err = ubifs_prepare_auth_node(c, auth, c 423 fs/ubifs/gc.c c->jheads[GCHD].log_hash); c 430 fs/ubifs/gc.c ubifs_auth_node_sz(c)); c 436 fs/ubifs/gc.c ubifs_add_dirt(c, wbuf->lnum, ubifs_auth_node_sz(c)); c 446 fs/ubifs/gc.c err = switch_gc_head(c); c 471 fs/ubifs/gc.c static int gc_sync_wbufs(struct ubifs_info *c) c 475 fs/ubifs/gc.c for (i = 0; i < c->jhead_cnt; i++) { c 478 fs/ubifs/gc.c err = ubifs_wbuf_sync(&c->jheads[i].wbuf); c 494 fs/ubifs/gc.c int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp) c 498 fs/ubifs/gc.c struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf; c 501 fs/ubifs/gc.c ubifs_assert(c, c->gc_lnum != -1 || wbuf->offs + wbuf->used == 0 || c 502 fs/ubifs/gc.c c->need_recovery); c 503 fs/ubifs/gc.c ubifs_assert(c, c->gc_lnum != lnum); c 504 fs/ubifs/gc.c ubifs_assert(c, wbuf->lnum != lnum); c 506 fs/ubifs/gc.c if (lp->free + lp->dirty == c->leb_size) { c 509 fs/ubifs/gc.c ubifs_assert(c, !(lp->flags & LPROPS_INDEX)); c 511 fs/ubifs/gc.c if (lp->free != c->leb_size) { c 517 fs/ubifs/gc.c err = gc_sync_wbufs(c); c 520 fs/ubifs/gc.c err = ubifs_change_one_lp(c, lp->lnum, c->leb_size, c 525 fs/ubifs/gc.c err = ubifs_leb_unmap(c, lp->lnum); c 529 fs/ubifs/gc.c if (c->gc_lnum == -1) { c 530 fs/ubifs/gc.c c->gc_lnum = lnum; c 541 fs/ubifs/gc.c sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0); c 545 fs/ubifs/gc.c ubifs_assert(c, !list_empty(&sleb->nodes)); c 557 fs/ubifs/gc.c ubifs_assert(c, snod->type == UBIFS_IDX_NODE); c 558 fs/ubifs/gc.c key_read(c, ubifs_idx_key(c, idx), &snod->key); c 559 fs/ubifs/gc.c err = ubifs_dirty_idx_node(c, &snod->key, level, lnum, c 573 fs/ubifs/gc.c list_add(&idx_gc->list, &c->idx_gc); c 581 fs/ubifs/gc.c err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0, c 590 fs/ubifs/gc.c err = move_nodes(c, sleb); c 594 fs/ubifs/gc.c err = gc_sync_wbufs(c); c 598 fs/ubifs/gc.c err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0, 0, 0); c 603 fs/ubifs/gc.c c->gced_lnum = lnum; c 605 fs/ubifs/gc.c c->gc_seq += 1; c 608 fs/ubifs/gc.c if (c->gc_lnum == -1) { c 609 fs/ubifs/gc.c c->gc_lnum = lnum; c 616 fs/ubifs/gc.c err = ubifs_leb_unmap(c, lnum); c 630 fs/ubifs/gc.c c->gced_lnum = lnum; c 632 fs/ubifs/gc.c c->gc_seq += 1; c 673 fs/ubifs/gc.c int ubifs_garbage_collect(struct ubifs_info *c, int anyway) c 675 fs/ubifs/gc.c int i, err, ret, min_space = c->dead_wm; c 677 fs/ubifs/gc.c struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf; c 679 fs/ubifs/gc.c ubifs_assert_cmt_locked(c); c 680 fs/ubifs/gc.c ubifs_assert(c, !c->ro_media && !c->ro_mount); c 682 fs/ubifs/gc.c if (ubifs_gc_should_commit(c)) c 687 fs/ubifs/gc.c if (c->ro_error) { c 693 fs/ubifs/gc.c ubifs_assert(c, !wbuf->used); c 701 fs/ubifs/gc.c if (ubifs_gc_should_commit(c)) { c 706 fs/ubifs/gc.c if (i > SOFT_LEBS_LIMIT && !list_empty(&c->idx_gc)) { c 712 fs/ubifs/gc.c ubifs_commit_required(c); c 734 fs/ubifs/gc.c ret = ubifs_find_dirty_leb(c, &lp, 
min_space, anyway ? 0 : 1); c 745 fs/ubifs/gc.c space_before = c->leb_size - wbuf->offs - wbuf->used; c 749 fs/ubifs/gc.c ret = ubifs_garbage_collect_leb(c, &lp); c 758 fs/ubifs/gc.c err = ubifs_return_leb(c, lp.lnum); c 784 fs/ubifs/gc.c ubifs_assert(c, ret == LEB_RETAINED); c 785 fs/ubifs/gc.c space_after = c->leb_size - wbuf->offs - wbuf->used; c 792 fs/ubifs/gc.c if (min_space < c->dead_wm) c 793 fs/ubifs/gc.c min_space = c->dead_wm; c 821 fs/ubifs/gc.c if (min_space > c->dark_wm) c 822 fs/ubifs/gc.c min_space = c->dark_wm; c 826 fs/ubifs/gc.c if (ret == -ENOSPC && !list_empty(&c->idx_gc)) { c 828 fs/ubifs/gc.c ubifs_commit_required(c); c 834 fs/ubifs/gc.c err = ubifs_leb_unmap(c, c->gc_lnum); c 844 fs/ubifs/gc.c ubifs_assert(c, ret < 0); c 845 fs/ubifs/gc.c ubifs_assert(c, ret != -ENOSPC && ret != -EAGAIN); c 847 fs/ubifs/gc.c ubifs_ro_mode(c, ret); c 849 fs/ubifs/gc.c ubifs_return_leb(c, lp.lnum); c 864 fs/ubifs/gc.c int ubifs_gc_start_commit(struct ubifs_info *c) c 870 fs/ubifs/gc.c ubifs_get_lprops(c); c 877 fs/ubifs/gc.c lp = ubifs_fast_find_freeable(c); c 880 fs/ubifs/gc.c ubifs_assert(c, !(lp->flags & LPROPS_TAKEN)); c 881 fs/ubifs/gc.c ubifs_assert(c, !(lp->flags & LPROPS_INDEX)); c 882 fs/ubifs/gc.c err = ubifs_leb_unmap(c, lp->lnum); c 885 fs/ubifs/gc.c lp = ubifs_change_lp(c, lp, c->leb_size, 0, lp->flags, 0); c 890 fs/ubifs/gc.c ubifs_assert(c, !(lp->flags & LPROPS_TAKEN)); c 891 fs/ubifs/gc.c ubifs_assert(c, !(lp->flags & LPROPS_INDEX)); c 895 fs/ubifs/gc.c list_for_each_entry(idx_gc, &c->idx_gc, list) c 900 fs/ubifs/gc.c lp = ubifs_fast_find_frdi_idx(c); c 912 fs/ubifs/gc.c ubifs_assert(c, !(lp->flags & LPROPS_TAKEN)); c 913 fs/ubifs/gc.c ubifs_assert(c, lp->flags & LPROPS_INDEX); c 916 fs/ubifs/gc.c lp = ubifs_change_lp(c, lp, c->leb_size, 0, flags, 1); c 922 fs/ubifs/gc.c ubifs_assert(c, lp->flags & LPROPS_TAKEN); c 923 fs/ubifs/gc.c ubifs_assert(c, !(lp->flags & LPROPS_INDEX)); c 926 fs/ubifs/gc.c list_add(&idx_gc->list, &c->idx_gc); c 929 fs/ubifs/gc.c ubifs_release_lprops(c); c 939 fs/ubifs/gc.c int ubifs_gc_end_commit(struct ubifs_info *c) c 945 fs/ubifs/gc.c wbuf = &c->jheads[GCHD].wbuf; c 947 fs/ubifs/gc.c list_for_each_entry_safe(idx_gc, tmp, &c->idx_gc, list) c 950 fs/ubifs/gc.c err = ubifs_leb_unmap(c, idx_gc->lnum); c 953 fs/ubifs/gc.c err = ubifs_change_one_lp(c, idx_gc->lnum, LPROPS_NC, c 973 fs/ubifs/gc.c void ubifs_destroy_idx_gc(struct ubifs_info *c) c 975 fs/ubifs/gc.c while (!list_empty(&c->idx_gc)) { c 978 fs/ubifs/gc.c idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb, c 980 fs/ubifs/gc.c c->idx_gc_cnt -= 1; c 992 fs/ubifs/gc.c int ubifs_get_idx_gc_leb(struct ubifs_info *c) c 997 fs/ubifs/gc.c if (list_empty(&c->idx_gc)) c 999 fs/ubifs/gc.c idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb, list); c 70 fs/ubifs/io.c void ubifs_ro_mode(struct ubifs_info *c, int err) c 72 fs/ubifs/io.c if (!c->ro_error) { c 73 fs/ubifs/io.c c->ro_error = 1; c 74 fs/ubifs/io.c c->no_chk_data_crc = 0; c 75 fs/ubifs/io.c c->vfs_sb->s_flags |= SB_RDONLY; c 76 fs/ubifs/io.c ubifs_warn(c, "switched to read-only mode, error %d", err); c 87 fs/ubifs/io.c int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs, c 92 fs/ubifs/io.c err = ubi_read(c->ubi, lnum, buf, offs, len); c 98 fs/ubifs/io.c ubifs_err(c, "reading %d bytes from LEB %d:%d failed, error %d", c 105 fs/ubifs/io.c int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs, c 110 fs/ubifs/io.c ubifs_assert(c, !c->ro_media && !c->ro_mount); c 111 
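
For readability, the ubifs_ro_mode() entries from fs/ubifs/io.c just above reassemble into the helper below; only the lines present in the listing are shown, braces are added, and everything else is elided. The LEB I/O wrappers that follow (ubifs_leb_write(), ubifs_leb_change(), ubifs_leb_unmap(), ubifs_leb_map()) call it on failure, so the first error flips the filesystem read-only exactly once:

void ubifs_ro_mode(struct ubifs_info *c, int err)
{
	if (!c->ro_error) {
		c->ro_error = 1;                  /* remember that we failed     */
		c->no_chk_data_crc = 0;           /* re-enable data CRC checking */
		c->vfs_sb->s_flags |= SB_RDONLY;  /* force the VFS read-only     */
		ubifs_warn(c, "switched to read-only mode, error %d", err);
		/* ... */
	}
}
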
fs/ubifs/io.c if (c->ro_error) c 113 fs/ubifs/io.c if (!dbg_is_tst_rcvry(c)) c 114 fs/ubifs/io.c err = ubi_leb_write(c->ubi, lnum, buf, offs, len); c 116 fs/ubifs/io.c err = dbg_leb_write(c, lnum, buf, offs, len); c 118 fs/ubifs/io.c ubifs_err(c, "writing %d bytes to LEB %d:%d failed, error %d", c 120 fs/ubifs/io.c ubifs_ro_mode(c, err); c 126 fs/ubifs/io.c int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len) c 130 fs/ubifs/io.c ubifs_assert(c, !c->ro_media && !c->ro_mount); c 131 fs/ubifs/io.c if (c->ro_error) c 133 fs/ubifs/io.c if (!dbg_is_tst_rcvry(c)) c 134 fs/ubifs/io.c err = ubi_leb_change(c->ubi, lnum, buf, len); c 136 fs/ubifs/io.c err = dbg_leb_change(c, lnum, buf, len); c 138 fs/ubifs/io.c ubifs_err(c, "changing %d bytes in LEB %d failed, error %d", c 140 fs/ubifs/io.c ubifs_ro_mode(c, err); c 146 fs/ubifs/io.c int ubifs_leb_unmap(struct ubifs_info *c, int lnum) c 150 fs/ubifs/io.c ubifs_assert(c, !c->ro_media && !c->ro_mount); c 151 fs/ubifs/io.c if (c->ro_error) c 153 fs/ubifs/io.c if (!dbg_is_tst_rcvry(c)) c 154 fs/ubifs/io.c err = ubi_leb_unmap(c->ubi, lnum); c 156 fs/ubifs/io.c err = dbg_leb_unmap(c, lnum); c 158 fs/ubifs/io.c ubifs_err(c, "unmap LEB %d failed, error %d", lnum, err); c 159 fs/ubifs/io.c ubifs_ro_mode(c, err); c 165 fs/ubifs/io.c int ubifs_leb_map(struct ubifs_info *c, int lnum) c 169 fs/ubifs/io.c ubifs_assert(c, !c->ro_media && !c->ro_mount); c 170 fs/ubifs/io.c if (c->ro_error) c 172 fs/ubifs/io.c if (!dbg_is_tst_rcvry(c)) c 173 fs/ubifs/io.c err = ubi_leb_map(c->ubi, lnum); c 175 fs/ubifs/io.c err = dbg_leb_map(c, lnum); c 177 fs/ubifs/io.c ubifs_err(c, "mapping LEB %d failed, error %d", lnum, err); c 178 fs/ubifs/io.c ubifs_ro_mode(c, err); c 184 fs/ubifs/io.c int ubifs_is_mapped(const struct ubifs_info *c, int lnum) c 188 fs/ubifs/io.c err = ubi_is_mapped(c->ubi, lnum); c 190 fs/ubifs/io.c ubifs_err(c, "ubi_is_mapped failed for LEB %d, error %d", c 225 fs/ubifs/io.c int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, c 232 fs/ubifs/io.c ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0); c 233 fs/ubifs/io.c ubifs_assert(c, !(offs & 7) && offs < c->leb_size); c 238 fs/ubifs/io.c ubifs_err(c, "bad magic %#08x, expected %#08x", c 247 fs/ubifs/io.c ubifs_err(c, "bad node type %d", type); c 252 fs/ubifs/io.c if (node_len + offs > c->leb_size) c 255 fs/ubifs/io.c if (c->ranges[type].max_len == 0) { c 256 fs/ubifs/io.c if (node_len != c->ranges[type].len) c 258 fs/ubifs/io.c } else if (node_len < c->ranges[type].min_len || c 259 fs/ubifs/io.c node_len > c->ranges[type].max_len) c 262 fs/ubifs/io.c if (!must_chk_crc && type == UBIFS_DATA_NODE && !c->mounting && c 263 fs/ubifs/io.c !c->remounting_rw && c->no_chk_data_crc) c 270 fs/ubifs/io.c ubifs_err(c, "bad CRC: calculated %#08x, read %#08x", c 280 fs/ubifs/io.c ubifs_err(c, "bad node length %d", node_len); c 283 fs/ubifs/io.c ubifs_err(c, "bad node at LEB %d:%d", lnum, offs); c 284 fs/ubifs/io.c ubifs_dump_node(c, buf); c 306 fs/ubifs/io.c void ubifs_pad(const struct ubifs_info *c, void *buf, int pad) c 310 fs/ubifs/io.c ubifs_assert(c, pad >= 0 && !(pad & 7)); c 336 fs/ubifs/io.c static unsigned long long next_sqnum(struct ubifs_info *c) c 340 fs/ubifs/io.c spin_lock(&c->cnt_lock); c 341 fs/ubifs/io.c sqnum = ++c->max_sqnum; c 342 fs/ubifs/io.c spin_unlock(&c->cnt_lock); c 346 fs/ubifs/io.c ubifs_err(c, "sequence number overflow %llu, end of life", c 348 fs/ubifs/io.c ubifs_ro_mode(c, -EINVAL); c 350 fs/ubifs/io.c ubifs_warn(c, "running out of 
sequence numbers, end of life soon"); c 356 fs/ubifs/io.c void ubifs_init_node(struct ubifs_info *c, void *node, int len, int pad) c 359 fs/ubifs/io.c unsigned long long sqnum = next_sqnum(c); c 361 fs/ubifs/io.c ubifs_assert(c, len >= UBIFS_CH_SZ); c 371 fs/ubifs/io.c pad = ALIGN(len, c->min_io_size) - len; c 372 fs/ubifs/io.c ubifs_pad(c, node + len, pad); c 376 fs/ubifs/io.c void ubifs_crc_node(struct ubifs_info *c, void *node, int len) c 400 fs/ubifs/io.c int ubifs_prepare_node_hmac(struct ubifs_info *c, void *node, int len, c 405 fs/ubifs/io.c ubifs_init_node(c, node, len, pad); c 408 fs/ubifs/io.c err = ubifs_node_insert_hmac(c, node, len, hmac_offs); c 413 fs/ubifs/io.c ubifs_crc_node(c, node, len); c 429 fs/ubifs/io.c void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad) c 435 fs/ubifs/io.c ubifs_prepare_node_hmac(c, node, len, 0, pad); c 448 fs/ubifs/io.c void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last) c 452 fs/ubifs/io.c unsigned long long sqnum = next_sqnum(c); c 454 fs/ubifs/io.c ubifs_assert(c, len >= UBIFS_CH_SZ); c 480 fs/ubifs/io.c wbuf->c->need_wbuf_sync = 1; c 481 fs/ubifs/io.c ubifs_wake_up_bgt(wbuf->c); c 490 fs/ubifs/io.c static void new_wbuf_timer_nolock(struct ubifs_info *c, struct ubifs_wbuf *wbuf) c 498 fs/ubifs/io.c ubifs_assert(c, !hrtimer_active(&wbuf->timer)); c 499 fs/ubifs/io.c ubifs_assert(c, delta <= ULONG_MAX); c 538 fs/ubifs/io.c struct ubifs_info *c = wbuf->c; c 548 fs/ubifs/io.c ubifs_assert(c, !(wbuf->avail & 7)); c 549 fs/ubifs/io.c ubifs_assert(c, wbuf->offs + wbuf->size <= c->leb_size); c 550 fs/ubifs/io.c ubifs_assert(c, wbuf->size >= c->min_io_size); c 551 fs/ubifs/io.c ubifs_assert(c, wbuf->size <= c->max_write_size); c 552 fs/ubifs/io.c ubifs_assert(c, wbuf->size % c->min_io_size == 0); c 553 fs/ubifs/io.c ubifs_assert(c, !c->ro_media && !c->ro_mount); c 554 fs/ubifs/io.c if (c->leb_size - wbuf->offs >= c->max_write_size) c 555 fs/ubifs/io.c ubifs_assert(c, !((wbuf->offs + wbuf->size) % c->max_write_size)); c 557 fs/ubifs/io.c if (c->ro_error) c 564 fs/ubifs/io.c sync_len = ALIGN(wbuf->used, c->min_io_size); c 567 fs/ubifs/io.c ubifs_pad(c, wbuf->buf + wbuf->used, dirt); c 568 fs/ubifs/io.c err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, sync_len); c 584 fs/ubifs/io.c if (c->leb_size - wbuf->offs < c->max_write_size) c 585 fs/ubifs/io.c wbuf->size = c->leb_size - wbuf->offs; c 586 fs/ubifs/io.c else if (wbuf->offs & (c->max_write_size - 1)) c 587 fs/ubifs/io.c wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs; c 589 fs/ubifs/io.c wbuf->size = c->max_write_size; c 596 fs/ubifs/io.c err = wbuf->sync_callback(c, wbuf->lnum, c 597 fs/ubifs/io.c c->leb_size - wbuf->offs, dirt); c 613 fs/ubifs/io.c const struct ubifs_info *c = wbuf->c; c 616 fs/ubifs/io.c ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt); c 617 fs/ubifs/io.c ubifs_assert(c, offs >= 0 && offs <= c->leb_size); c 618 fs/ubifs/io.c ubifs_assert(c, offs % c->min_io_size == 0 && !(offs & 7)); c 619 fs/ubifs/io.c ubifs_assert(c, lnum != wbuf->lnum); c 620 fs/ubifs/io.c ubifs_assert(c, wbuf->used == 0); c 625 fs/ubifs/io.c if (c->leb_size - wbuf->offs < c->max_write_size) c 626 fs/ubifs/io.c wbuf->size = c->leb_size - wbuf->offs; c 627 fs/ubifs/io.c else if (wbuf->offs & (c->max_write_size - 1)) c 628 fs/ubifs/io.c wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs; c 630 fs/ubifs/io.c wbuf->size = c->max_write_size; c 646 fs/ubifs/io.c int ubifs_bg_wbufs_sync(struct ubifs_info *c) c 650 fs/ubifs/io.c 
ubifs_assert(c, !c->ro_media && !c->ro_mount); c 651 fs/ubifs/io.c if (!c->need_wbuf_sync) c 653 fs/ubifs/io.c c->need_wbuf_sync = 0; c 655 fs/ubifs/io.c if (c->ro_error) { c 661 fs/ubifs/io.c for (i = 0; i < c->jhead_cnt; i++) { c 662 fs/ubifs/io.c struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf; c 682 fs/ubifs/io.c ubifs_err(c, "cannot sync write-buffer, error %d", err); c 683 fs/ubifs/io.c ubifs_ro_mode(c, err); c 692 fs/ubifs/io.c for (i = 0; i < c->jhead_cnt; i++) { c 693 fs/ubifs/io.c struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf; c 720 fs/ubifs/io.c struct ubifs_info *c = wbuf->c; c 726 fs/ubifs/io.c ubifs_assert(c, len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt); c 727 fs/ubifs/io.c ubifs_assert(c, wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0); c 728 fs/ubifs/io.c ubifs_assert(c, !(wbuf->offs & 7) && wbuf->offs <= c->leb_size); c 729 fs/ubifs/io.c ubifs_assert(c, wbuf->avail > 0 && wbuf->avail <= wbuf->size); c 730 fs/ubifs/io.c ubifs_assert(c, wbuf->size >= c->min_io_size); c 731 fs/ubifs/io.c ubifs_assert(c, wbuf->size <= c->max_write_size); c 732 fs/ubifs/io.c ubifs_assert(c, wbuf->size % c->min_io_size == 0); c 733 fs/ubifs/io.c ubifs_assert(c, mutex_is_locked(&wbuf->io_mutex)); c 734 fs/ubifs/io.c ubifs_assert(c, !c->ro_media && !c->ro_mount); c 735 fs/ubifs/io.c ubifs_assert(c, !c->space_fixup); c 736 fs/ubifs/io.c if (c->leb_size - wbuf->offs >= c->max_write_size) c 737 fs/ubifs/io.c ubifs_assert(c, !((wbuf->offs + wbuf->size) % c->max_write_size)); c 739 fs/ubifs/io.c if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) { c 746 fs/ubifs/io.c if (c->ro_error) c 759 fs/ubifs/io.c err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, c 766 fs/ubifs/io.c if (c->leb_size - wbuf->offs >= c->max_write_size) c 767 fs/ubifs/io.c wbuf->size = c->max_write_size; c 769 fs/ubifs/io.c wbuf->size = c->leb_size - wbuf->offs; c 795 fs/ubifs/io.c err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, c 804 fs/ubifs/io.c } else if (wbuf->offs & (c->max_write_size - 1)) { c 814 fs/ubifs/io.c err = ubifs_leb_write(c, wbuf->lnum, buf, wbuf->offs, c 831 fs/ubifs/io.c n = aligned_len >> c->max_write_shift; c 833 fs/ubifs/io.c n <<= c->max_write_shift; c 836 fs/ubifs/io.c err = ubifs_leb_write(c, wbuf->lnum, buf + written, c 855 fs/ubifs/io.c if (c->leb_size - wbuf->offs >= c->max_write_size) c 856 fs/ubifs/io.c wbuf->size = c->max_write_size; c 858 fs/ubifs/io.c wbuf->size = c->leb_size - wbuf->offs; c 866 fs/ubifs/io.c int free = c->leb_size - wbuf->offs - wbuf->used; c 868 fs/ubifs/io.c err = wbuf->sync_callback(c, wbuf->lnum, free, 0); c 874 fs/ubifs/io.c new_wbuf_timer_nolock(c, wbuf); c 879 fs/ubifs/io.c ubifs_err(c, "cannot write %d bytes to LEB %d:%d, error %d", c 881 fs/ubifs/io.c ubifs_dump_node(c, buf); c 883 fs/ubifs/io.c ubifs_dump_leb(c, wbuf->lnum); c 902 fs/ubifs/io.c int ubifs_write_node_hmac(struct ubifs_info *c, void *buf, int len, int lnum, c 905 fs/ubifs/io.c int err, buf_len = ALIGN(len, c->min_io_size); c 910 fs/ubifs/io.c ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0); c 911 fs/ubifs/io.c ubifs_assert(c, offs % c->min_io_size == 0 && offs < c->leb_size); c 912 fs/ubifs/io.c ubifs_assert(c, !c->ro_media && !c->ro_mount); c 913 fs/ubifs/io.c ubifs_assert(c, !c->space_fixup); c 915 fs/ubifs/io.c if (c->ro_error) c 918 fs/ubifs/io.c err = ubifs_prepare_node_hmac(c, buf, len, hmac_offs, 1); c 922 fs/ubifs/io.c err = ubifs_leb_write(c, lnum, buf, offs, buf_len); c 924 fs/ubifs/io.c ubifs_dump_node(c, buf); c 943 fs/ubifs/io.c int ubifs_write_node(struct 
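
The write-buffer entries above (ubifs_wbuf_sync_nolock(), ubifs_wbuf_write_nolock()) show the core rule: data accumulates in wbuf->buf and only reaches flash in chunks aligned to c->min_io_size, with the tail padded. A small user-space model of that accumulate-pad-flush behaviour; MIN_IO_SIZE, leb_write() and the 0xce filler are hypothetical stand-ins for c->min_io_size, ubifs_leb_write() and ubifs_pad():

#include <stdio.h>
#include <string.h>

#define MIN_IO_SIZE 8                      /* models c->min_io_size  */
#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

static unsigned char wbuf[64];
static int used;                           /* models wbuf->used      */

static void leb_write(const void *buf, int len)   /* hypothetical */
{
	printf("flash write: %d bytes (%d of them padding)\n", len, len - used);
	(void)buf;
}

static void wbuf_sync(void)
{
	int sync_len = ALIGN_UP(used, MIN_IO_SIZE);
	int dirt = sync_len - used;

	if (dirt)
		memset(wbuf + used, 0xce, dirt);  /* models ubifs_pad()   */
	leb_write(wbuf, sync_len);
	used = 0;
}

static void wbuf_add(const void *data, int len)
{
	/* demo only: assumes len <= sizeof(wbuf) */
	if (used + len > (int)sizeof(wbuf))
		wbuf_sync();                      /* no room: flush first */
	memcpy(wbuf + used, data, len);
	used += len;
}

int main(void)
{
	wbuf_add("hello", 5);
	wbuf_add("ubifs-wbuf-model", 16);
	wbuf_sync();
	return 0;
}
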
ubifs_info *c, void *buf, int len, int lnum, c 946 fs/ubifs/io.c return ubifs_write_node_hmac(c, buf, len, lnum, offs, -1); c 967 fs/ubifs/io.c const struct ubifs_info *c = wbuf->c; c 973 fs/ubifs/io.c ubifs_assert(c, wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0); c 974 fs/ubifs/io.c ubifs_assert(c, !(offs & 7) && offs < c->leb_size); c 975 fs/ubifs/io.c ubifs_assert(c, type >= 0 && type < UBIFS_NODE_TYPES_CNT); c 982 fs/ubifs/io.c return ubifs_read_node(c, buf, type, len, lnum, offs); c 996 fs/ubifs/io.c err = ubifs_leb_read(c, lnum, buf, offs, rlen, 0); c 1002 fs/ubifs/io.c ubifs_err(c, "bad node type (%d but expected %d)", c 1007 fs/ubifs/io.c err = ubifs_check_node(c, buf, lnum, offs, 0, 0); c 1009 fs/ubifs/io.c ubifs_err(c, "expected node type %d", type); c 1015 fs/ubifs/io.c ubifs_err(c, "bad node length %d, expected %d", rlen, len); c 1022 fs/ubifs/io.c ubifs_err(c, "bad node at LEB %d:%d", lnum, offs); c 1023 fs/ubifs/io.c ubifs_dump_node(c, buf); c 1041 fs/ubifs/io.c int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len, c 1048 fs/ubifs/io.c ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0); c 1049 fs/ubifs/io.c ubifs_assert(c, len >= UBIFS_CH_SZ && offs + len <= c->leb_size); c 1050 fs/ubifs/io.c ubifs_assert(c, !(offs & 7) && offs < c->leb_size); c 1051 fs/ubifs/io.c ubifs_assert(c, type >= 0 && type < UBIFS_NODE_TYPES_CNT); c 1053 fs/ubifs/io.c err = ubifs_leb_read(c, lnum, buf, offs, len, 0); c 1058 fs/ubifs/io.c ubifs_errc(c, "bad node type (%d but expected %d)", c 1063 fs/ubifs/io.c err = ubifs_check_node(c, buf, lnum, offs, 0, 0); c 1065 fs/ubifs/io.c ubifs_errc(c, "expected node type %d", type); c 1071 fs/ubifs/io.c ubifs_errc(c, "bad node length %d, expected %d", l, len); c 1078 fs/ubifs/io.c ubifs_errc(c, "bad node at LEB %d:%d, LEB mapping status %d", lnum, c 1079 fs/ubifs/io.c offs, ubi_is_mapped(c->ubi, lnum)); c 1080 fs/ubifs/io.c if (!c->probing) { c 1081 fs/ubifs/io.c ubifs_dump_node(c, buf); c 1095 fs/ubifs/io.c int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf) c 1099 fs/ubifs/io.c wbuf->buf = kmalloc(c->max_write_size, GFP_KERNEL); c 1103 fs/ubifs/io.c size = (c->max_write_size / UBIFS_CH_SZ + 1) * sizeof(ino_t); c 1119 fs/ubifs/io.c size = c->max_write_size - (c->leb_start % c->max_write_size); c 1124 fs/ubifs/io.c wbuf->c = c; c 1183 fs/ubifs/io.c int ubifs_sync_wbufs_by_inode(struct ubifs_info *c, struct inode *inode) c 1187 fs/ubifs/io.c for (i = 0; i < c->jhead_cnt; i++) { c 1188 fs/ubifs/io.c struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf; c 1208 fs/ubifs/io.c ubifs_ro_mode(c, err); c 102 fs/ubifs/ioctl.c struct ubifs_info *c = inode->i_sb->s_fs_info; c 106 fs/ubifs/ioctl.c err = ubifs_budget_space(c, &req); c 125 fs/ubifs/ioctl.c ubifs_release_budget(c, &req); c 131 fs/ubifs/ioctl.c ubifs_err(c, "can't modify inode %lu attributes", inode->i_ino); c 133 fs/ubifs/ioctl.c ubifs_release_budget(c, &req); c 178 fs/ubifs/ioctl.c struct ubifs_info *c = inode->i_sb->s_fs_info; c 180 fs/ubifs/ioctl.c err = ubifs_enable_encryption(c); c 81 fs/ubifs/journal.c static void ubifs_add_auth_dirt(struct ubifs_info *c, int lnum) c 83 fs/ubifs/journal.c if (ubifs_authenticated(c)) c 84 fs/ubifs/journal.c ubifs_add_dirt(c, lnum, ubifs_auth_node_sz(c)); c 98 fs/ubifs/journal.c static int reserve_space(struct ubifs_info *c, int jhead, int len) c 101 fs/ubifs/journal.c struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf; c 108 fs/ubifs/journal.c ubifs_assert(c, !c->ro_media && !c->ro_mount); c 113 fs/ubifs/journal.c if 
(c->ro_error) { c 118 fs/ubifs/journal.c avail = c->leb_size - wbuf->offs - wbuf->used; c 126 fs/ubifs/journal.c lnum = ubifs_find_free_space(c, len, &offs, squeeze); c 142 fs/ubifs/journal.c lnum = ubifs_garbage_collect(c, 0); c 167 fs/ubifs/journal.c avail = c->leb_size - wbuf->offs - wbuf->used; c 177 fs/ubifs/journal.c err = ubifs_return_leb(c, lnum); c 197 fs/ubifs/journal.c err = ubifs_add_bud_to_log(c, jhead, lnum, offs); c 212 fs/ubifs/journal.c ubifs_assert(c, err < 0); c 213 fs/ubifs/journal.c err1 = ubifs_return_leb(c, lnum); c 225 fs/ubifs/journal.c static int ubifs_hash_nodes(struct ubifs_info *c, void *node, c 228 fs/ubifs/journal.c int auth_node_size = ubifs_auth_node_sz(c); c 235 fs/ubifs/journal.c ubifs_assert(c, len >= auth_node_size); c 240 fs/ubifs/journal.c ubifs_assert(c, len > nodelen); c 241 fs/ubifs/journal.c ubifs_assert(c, ch->magic == cpu_to_le32(UBIFS_NODE_MAGIC)); c 243 fs/ubifs/journal.c err = ubifs_shash_update(c, hash, (void *)node, nodelen); c 251 fs/ubifs/journal.c return ubifs_prepare_auth_node(c, node, hash); c 268 fs/ubifs/journal.c static int write_head(struct ubifs_info *c, int jhead, void *buf, int len, c 272 fs/ubifs/journal.c struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf; c 274 fs/ubifs/journal.c ubifs_assert(c, jhead != GCHD); c 276 fs/ubifs/journal.c *lnum = c->jheads[jhead].wbuf.lnum; c 277 fs/ubifs/journal.c *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used; c 281 fs/ubifs/journal.c if (ubifs_authenticated(c)) { c 282 fs/ubifs/journal.c err = ubifs_hash_nodes(c, buf, len, c->jheads[jhead].log_hash); c 311 fs/ubifs/journal.c static int make_reservation(struct ubifs_info *c, int jhead, int len) c 316 fs/ubifs/journal.c down_read(&c->commit_sem); c 317 fs/ubifs/journal.c err = reserve_space(c, jhead, len); c 321 fs/ubifs/journal.c up_read(&c->commit_sem); c 355 fs/ubifs/journal.c ubifs_err(c, "stuck in space allocation"); c 359 fs/ubifs/journal.c ubifs_warn(c, "too many space allocation re-tries (%d)", c 366 fs/ubifs/journal.c err = ubifs_run_commit(c); c 372 fs/ubifs/journal.c ubifs_err(c, "cannot reserve %d bytes in jhead %d, error %d", c 376 fs/ubifs/journal.c down_write(&c->commit_sem); c 378 fs/ubifs/journal.c ubifs_dump_budg(c, &c->bi); c 379 fs/ubifs/journal.c ubifs_dump_lprops(c); c 380 fs/ubifs/journal.c cmt_retries = dbg_check_lprops(c); c 381 fs/ubifs/journal.c up_write(&c->commit_sem); c 395 fs/ubifs/journal.c static inline void release_head(struct ubifs_info *c, int jhead) c 397 fs/ubifs/journal.c mutex_unlock(&c->jheads[jhead].wbuf.io_mutex); c 407 fs/ubifs/journal.c static void finish_reservation(struct ubifs_info *c) c 409 fs/ubifs/journal.c up_read(&c->commit_sem); c 446 fs/ubifs/journal.c static void pack_inode(struct ubifs_info *c, struct ubifs_ino_node *ino, c 453 fs/ubifs/journal.c ino_key_init_flash(c, &ino->key, inode->i_ino); c 483 fs/ubifs/journal.c ubifs_prep_grp_node(c, ino, UBIFS_INO_NODE_SZ + data_len, last); c 496 fs/ubifs/journal.c static void mark_inode_clean(struct ubifs_info *c, struct ubifs_inode *ui) c 499 fs/ubifs/journal.c ubifs_release_dirty_inode_budget(c, ui); c 503 fs/ubifs/journal.c static void set_dent_cookie(struct ubifs_info *c, struct ubifs_dent_node *dent) c 505 fs/ubifs/journal.c if (c->double_hash) c 538 fs/ubifs/journal.c int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir, c 554 fs/ubifs/journal.c ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex)); c 575 fs/ubifs/journal.c if (ubifs_authenticated(c)) c 576 fs/ubifs/journal.c len += ALIGN(host_ui->data_len, 8) + 
ubifs_auth_node_sz(c); c 585 fs/ubifs/journal.c err = make_reservation(c, BASEHD, len); c 592 fs/ubifs/journal.c dent_key_init_hash(c, &dent_key, dir->i_ino, nm->hash); c 594 fs/ubifs/journal.c dent_key_init(c, &dent_key, dir->i_ino, nm); c 597 fs/ubifs/journal.c xent_key_init(c, &dent_key, dir->i_ino, nm); c 600 fs/ubifs/journal.c key_write(c, &dent_key, dent->key); c 606 fs/ubifs/journal.c set_dent_cookie(c, dent); c 609 fs/ubifs/journal.c ubifs_prep_grp_node(c, dent, dlen, 0); c 610 fs/ubifs/journal.c err = ubifs_node_calc_hash(c, dent, hash_dent); c 615 fs/ubifs/journal.c pack_inode(c, ino, inode, 0); c 616 fs/ubifs/journal.c err = ubifs_node_calc_hash(c, ino, hash_ino); c 621 fs/ubifs/journal.c pack_inode(c, ino, dir, 1); c 622 fs/ubifs/journal.c err = ubifs_node_calc_hash(c, ino, hash_ino_host); c 627 fs/ubifs/journal.c err = ubifs_add_orphan(c, inode->i_ino); c 629 fs/ubifs/journal.c release_head(c, BASEHD); c 632 fs/ubifs/journal.c ui->del_cmtno = c->cmt_no; c 635 fs/ubifs/journal.c err = write_head(c, BASEHD, dent, len, &lnum, &dent_offs, sync); c 639 fs/ubifs/journal.c struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf; c 644 fs/ubifs/journal.c release_head(c, BASEHD); c 646 fs/ubifs/journal.c ubifs_add_auth_dirt(c, lnum); c 650 fs/ubifs/journal.c err = ubifs_tnc_remove_dh(c, &dent_key, nm->minor_hash); c 652 fs/ubifs/journal.c err = ubifs_tnc_remove_nm(c, &dent_key, nm); c 655 fs/ubifs/journal.c err = ubifs_add_dirt(c, lnum, dlen); c 657 fs/ubifs/journal.c err = ubifs_tnc_add_nm(c, &dent_key, lnum, dent_offs, dlen, c 668 fs/ubifs/journal.c ino_key_init(c, &ino_key, inode->i_ino); c 670 fs/ubifs/journal.c err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, ilen, hash_ino); c 674 fs/ubifs/journal.c ino_key_init(c, &ino_key, dir->i_ino); c 676 fs/ubifs/journal.c err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, c 681 fs/ubifs/journal.c finish_reservation(c); c 690 fs/ubifs/journal.c mark_inode_clean(c, ui); c 691 fs/ubifs/journal.c mark_inode_clean(c, host_ui); c 695 fs/ubifs/journal.c finish_reservation(c); c 701 fs/ubifs/journal.c release_head(c, BASEHD); c 704 fs/ubifs/journal.c ubifs_ro_mode(c, err); c 706 fs/ubifs/journal.c ubifs_delete_orphan(c, inode->i_ino); c 707 fs/ubifs/journal.c finish_reservation(c); c 722 fs/ubifs/journal.c int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode, c 734 fs/ubifs/journal.c (unsigned long)key_inum(c, key), key_block(c, key), len); c 735 fs/ubifs/journal.c ubifs_assert(c, len <= UBIFS_BLOCK_SIZE); c 740 fs/ubifs/journal.c auth_len = ubifs_auth_node_sz(c); c 752 fs/ubifs/journal.c mutex_lock(&c->write_reserve_mutex); c 753 fs/ubifs/journal.c data = c->write_reserve_buf; c 757 fs/ubifs/journal.c key_write(c, key, &data->key); c 767 fs/ubifs/journal.c ubifs_compress(c, buf, len, &data->data, &compr_len, &compr_type); c 768 fs/ubifs/journal.c ubifs_assert(c, compr_len <= UBIFS_BLOCK_SIZE); c 771 fs/ubifs/journal.c err = ubifs_encrypt(inode, data, compr_len, &out_len, key_block(c, key)); c 781 fs/ubifs/journal.c if (ubifs_authenticated(c)) c 789 fs/ubifs/journal.c err = make_reservation(c, DATAHD, write_len); c 793 fs/ubifs/journal.c ubifs_prepare_node(c, data, dlen, 0); c 794 fs/ubifs/journal.c err = write_head(c, DATAHD, data, write_len, &lnum, &offs, 0); c 798 fs/ubifs/journal.c err = ubifs_node_calc_hash(c, data, hash); c 802 fs/ubifs/journal.c ubifs_wbuf_add_ino_nolock(&c->jheads[DATAHD].wbuf, key_inum(c, key)); c 803 fs/ubifs/journal.c release_head(c, DATAHD); c 805 fs/ubifs/journal.c ubifs_add_auth_dirt(c, lnum); c 807 
fs/ubifs/journal.c err = ubifs_tnc_add(c, key, lnum, offs, dlen, hash); c 811 fs/ubifs/journal.c finish_reservation(c); c 813 fs/ubifs/journal.c mutex_unlock(&c->write_reserve_mutex); c 819 fs/ubifs/journal.c release_head(c, DATAHD); c 821 fs/ubifs/journal.c ubifs_ro_mode(c, err); c 822 fs/ubifs/journal.c finish_reservation(c); c 825 fs/ubifs/journal.c mutex_unlock(&c->write_reserve_mutex); c 840 fs/ubifs/journal.c int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode) c 863 fs/ubifs/journal.c if (ubifs_authenticated(c)) c 864 fs/ubifs/journal.c write_len += ALIGN(ilen, 8) + ubifs_auth_node_sz(c); c 873 fs/ubifs/journal.c err = make_reservation(c, BASEHD, write_len); c 883 fs/ubifs/journal.c if (ui->xattr_cnt >= ubifs_xattr_max_cnt(c)) { c 884 fs/ubifs/journal.c ubifs_err(c, "Cannot delete inode, it has too much xattrs!"); c 888 fs/ubifs/journal.c lowest_xent_key(c, &key, inode->i_ino); c 890 fs/ubifs/journal.c xent = ubifs_tnc_next_ent(c, &key, &nm); c 902 fs/ubifs/journal.c xino = ubifs_iget(c->vfs_sb, le64_to_cpu(xent->inum)); c 905 fs/ubifs/journal.c ubifs_err(c, "dead directory entry '%s', error %d", c 907 fs/ubifs/journal.c ubifs_ro_mode(c, err); c 910 fs/ubifs/journal.c ubifs_assert(c, ubifs_inode(xino)->xattr); c 913 fs/ubifs/journal.c pack_inode(c, ino, xino, 0); c 919 fs/ubifs/journal.c key_read(c, &xent->key, &key); c 924 fs/ubifs/journal.c pack_inode(c, ino, inode, 1); c 925 fs/ubifs/journal.c err = ubifs_node_calc_hash(c, ino, hash); c 929 fs/ubifs/journal.c err = write_head(c, BASEHD, ino_start, write_len, &lnum, &offs, sync); c 933 fs/ubifs/journal.c ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, c 935 fs/ubifs/journal.c release_head(c, BASEHD); c 937 fs/ubifs/journal.c ubifs_add_auth_dirt(c, lnum); c 940 fs/ubifs/journal.c err = ubifs_tnc_remove_ino(c, inode->i_ino); c 943 fs/ubifs/journal.c ubifs_delete_orphan(c, inode->i_ino); c 944 fs/ubifs/journal.c err = ubifs_add_dirt(c, lnum, write_len); c 948 fs/ubifs/journal.c ino_key_init(c, &key, inode->i_ino); c 949 fs/ubifs/journal.c err = ubifs_tnc_add(c, &key, lnum, offs, ilen, hash); c 954 fs/ubifs/journal.c finish_reservation(c); c 962 fs/ubifs/journal.c release_head(c, BASEHD); c 964 fs/ubifs/journal.c ubifs_ro_mode(c, err); c 965 fs/ubifs/journal.c finish_reservation(c); c 1000 fs/ubifs/journal.c int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode) c 1005 fs/ubifs/journal.c ubifs_assert(c, inode->i_nlink == 0); c 1007 fs/ubifs/journal.c if (ui->xattr_cnt || ui->del_cmtno != c->cmt_no) c 1009 fs/ubifs/journal.c return ubifs_jnl_write_inode(c, inode); c 1011 fs/ubifs/journal.c down_read(&c->commit_sem); c 1016 fs/ubifs/journal.c if (ui->del_cmtno != c->cmt_no) { c 1017 fs/ubifs/journal.c up_read(&c->commit_sem); c 1018 fs/ubifs/journal.c return ubifs_jnl_write_inode(c, inode); c 1021 fs/ubifs/journal.c err = ubifs_tnc_remove_ino(c, inode->i_ino); c 1023 fs/ubifs/journal.c ubifs_ro_mode(c, err); c 1025 fs/ubifs/journal.c ubifs_delete_orphan(c, inode->i_ino); c 1026 fs/ubifs/journal.c up_read(&c->commit_sem); c 1046 fs/ubifs/journal.c int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir, c 1064 fs/ubifs/journal.c ubifs_assert(c, ubifs_inode(fst_dir)->data_len == 0); c 1065 fs/ubifs/journal.c ubifs_assert(c, ubifs_inode(snd_dir)->data_len == 0); c 1066 fs/ubifs/journal.c ubifs_assert(c, mutex_is_locked(&ubifs_inode(fst_dir)->ui_mutex)); c 1067 fs/ubifs/journal.c ubifs_assert(c, mutex_is_locked(&ubifs_inode(snd_dir)->ui_mutex)); c 1078 fs/ubifs/journal.c len += 
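
Every ubifs_jnl_*() function in the fs/ubifs/journal.c entries follows one protocol: make_reservation() pins space on a journal head, the nodes are packed and written with write_head(), the head is released, the new on-flash locations are published through the TNC (ubifs_tnc_add() and friends), and finish_reservation() always runs last; the error path additionally calls ubifs_ro_mode(). A compact model of that protocol with hypothetical stand-ins (reserve, write_out, publish and unreserve are not UBIFS functions):

#include <stdio.h>

/* Hypothetical stand-ins for make_reservation()/write_head()/
 * ubifs_tnc_add()/finish_reservation(). */
static int reserve(int len)   { printf("reserve %d bytes\n", len); return 0; }
static int write_out(int len) { printf("write %d bytes to the journal head\n", len); return 0; }
static int publish(void)      { printf("update the index (TNC)\n"); return 0; }
static void unreserve(void)   { printf("finish reservation\n"); }

/* The shape shared by ubifs_jnl_update(), ubifs_jnl_write_data(),
 * ubifs_jnl_truncate(), ...: reserve, write, publish, always unreserve. */
static int journal_op(int len)
{
	int err = reserve(len);

	if (err)
		return err;

	err = write_out(len);
	if (err)
		goto out_unreserve;   /* real code also calls ubifs_ro_mode() here */

	err = publish();

out_unreserve:
	unreserve();
	return err;
}

int main(void)
{
	return journal_op(128);
}
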
ubifs_auth_node_sz(c); c 1085 fs/ubifs/journal.c err = make_reservation(c, BASEHD, len); c 1091 fs/ubifs/journal.c dent_key_init_flash(c, &dent1->key, snd_dir->i_ino, snd_nm); c 1097 fs/ubifs/journal.c set_dent_cookie(c, dent1); c 1099 fs/ubifs/journal.c ubifs_prep_grp_node(c, dent1, dlen1, 0); c 1100 fs/ubifs/journal.c err = ubifs_node_calc_hash(c, dent1, hash_dent1); c 1107 fs/ubifs/journal.c dent_key_init_flash(c, &dent2->key, fst_dir->i_ino, fst_nm); c 1113 fs/ubifs/journal.c set_dent_cookie(c, dent2); c 1115 fs/ubifs/journal.c ubifs_prep_grp_node(c, dent2, dlen2, 0); c 1116 fs/ubifs/journal.c err = ubifs_node_calc_hash(c, dent2, hash_dent2); c 1122 fs/ubifs/journal.c pack_inode(c, p, fst_dir, 1); c 1123 fs/ubifs/journal.c err = ubifs_node_calc_hash(c, p, hash_p1); c 1127 fs/ubifs/journal.c pack_inode(c, p, fst_dir, 0); c 1128 fs/ubifs/journal.c err = ubifs_node_calc_hash(c, p, hash_p1); c 1132 fs/ubifs/journal.c pack_inode(c, p, snd_dir, 1); c 1133 fs/ubifs/journal.c err = ubifs_node_calc_hash(c, p, hash_p2); c 1138 fs/ubifs/journal.c err = write_head(c, BASEHD, dent1, len, &lnum, &offs, sync); c 1142 fs/ubifs/journal.c struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf; c 1147 fs/ubifs/journal.c release_head(c, BASEHD); c 1149 fs/ubifs/journal.c ubifs_add_auth_dirt(c, lnum); c 1151 fs/ubifs/journal.c dent_key_init(c, &key, snd_dir->i_ino, snd_nm); c 1152 fs/ubifs/journal.c err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, hash_dent1, snd_nm); c 1157 fs/ubifs/journal.c dent_key_init(c, &key, fst_dir->i_ino, fst_nm); c 1158 fs/ubifs/journal.c err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, hash_dent2, fst_nm); c 1164 fs/ubifs/journal.c ino_key_init(c, &key, fst_dir->i_ino); c 1165 fs/ubifs/journal.c err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_p1); c 1171 fs/ubifs/journal.c ino_key_init(c, &key, snd_dir->i_ino); c 1172 fs/ubifs/journal.c err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_p2); c 1177 fs/ubifs/journal.c finish_reservation(c); c 1179 fs/ubifs/journal.c mark_inode_clean(c, ubifs_inode(fst_dir)); c 1181 fs/ubifs/journal.c mark_inode_clean(c, ubifs_inode(snd_dir)); c 1186 fs/ubifs/journal.c release_head(c, BASEHD); c 1188 fs/ubifs/journal.c ubifs_ro_mode(c, err); c 1189 fs/ubifs/journal.c finish_reservation(c); c 1209 fs/ubifs/journal.c int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir, c 1231 fs/ubifs/journal.c ubifs_assert(c, ubifs_inode(old_dir)->data_len == 0); c 1232 fs/ubifs/journal.c ubifs_assert(c, ubifs_inode(new_dir)->data_len == 0); c 1233 fs/ubifs/journal.c ubifs_assert(c, mutex_is_locked(&ubifs_inode(old_dir)->ui_mutex)); c 1234 fs/ubifs/journal.c ubifs_assert(c, mutex_is_locked(&ubifs_inode(new_dir)->ui_mutex)); c 1240 fs/ubifs/journal.c ubifs_assert(c, mutex_is_locked(&new_ui->ui_mutex)); c 1253 fs/ubifs/journal.c len += ubifs_auth_node_sz(c); c 1260 fs/ubifs/journal.c err = make_reservation(c, BASEHD, len); c 1266 fs/ubifs/journal.c dent_key_init_flash(c, &dent->key, new_dir->i_ino, new_nm); c 1272 fs/ubifs/journal.c set_dent_cookie(c, dent); c 1274 fs/ubifs/journal.c ubifs_prep_grp_node(c, dent, dlen1, 0); c 1275 fs/ubifs/journal.c err = ubifs_node_calc_hash(c, dent, hash_dent1); c 1281 fs/ubifs/journal.c dent_key_init_flash(c, &dent2->key, old_dir->i_ino, old_nm); c 1294 fs/ubifs/journal.c set_dent_cookie(c, dent2); c 1296 fs/ubifs/journal.c ubifs_prep_grp_node(c, dent2, dlen2, 0); c 1297 fs/ubifs/journal.c err = ubifs_node_calc_hash(c, dent2, hash_dent2); c 1303 fs/ubifs/journal.c pack_inode(c, p, new_inode, 0); c 1304 
fs/ubifs/journal.c err = ubifs_node_calc_hash(c, p, hash_new_inode); c 1312 fs/ubifs/journal.c pack_inode(c, p, old_dir, 1); c 1313 fs/ubifs/journal.c err = ubifs_node_calc_hash(c, p, hash_old_dir); c 1317 fs/ubifs/journal.c pack_inode(c, p, old_dir, 0); c 1318 fs/ubifs/journal.c err = ubifs_node_calc_hash(c, p, hash_old_dir); c 1323 fs/ubifs/journal.c pack_inode(c, p, new_dir, 1); c 1324 fs/ubifs/journal.c err = ubifs_node_calc_hash(c, p, hash_new_dir); c 1330 fs/ubifs/journal.c err = ubifs_add_orphan(c, new_inode->i_ino); c 1332 fs/ubifs/journal.c release_head(c, BASEHD); c 1335 fs/ubifs/journal.c new_ui->del_cmtno = c->cmt_no; c 1338 fs/ubifs/journal.c err = write_head(c, BASEHD, dent, len, &lnum, &offs, sync); c 1342 fs/ubifs/journal.c struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf; c 1347 fs/ubifs/journal.c ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, c 1350 fs/ubifs/journal.c release_head(c, BASEHD); c 1352 fs/ubifs/journal.c ubifs_add_auth_dirt(c, lnum); c 1354 fs/ubifs/journal.c dent_key_init(c, &key, new_dir->i_ino, new_nm); c 1355 fs/ubifs/journal.c err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, hash_dent1, new_nm); c 1361 fs/ubifs/journal.c dent_key_init(c, &key, old_dir->i_ino, old_nm); c 1362 fs/ubifs/journal.c err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, hash_dent2, old_nm); c 1366 fs/ubifs/journal.c ubifs_delete_orphan(c, whiteout->i_ino); c 1368 fs/ubifs/journal.c err = ubifs_add_dirt(c, lnum, dlen2); c 1372 fs/ubifs/journal.c dent_key_init(c, &key, old_dir->i_ino, old_nm); c 1373 fs/ubifs/journal.c err = ubifs_tnc_remove_nm(c, &key, old_nm); c 1380 fs/ubifs/journal.c ino_key_init(c, &key, new_inode->i_ino); c 1381 fs/ubifs/journal.c err = ubifs_tnc_add(c, &key, lnum, offs, ilen, hash_new_inode); c 1387 fs/ubifs/journal.c ino_key_init(c, &key, old_dir->i_ino); c 1388 fs/ubifs/journal.c err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_old_dir); c 1394 fs/ubifs/journal.c ino_key_init(c, &key, new_dir->i_ino); c 1395 fs/ubifs/journal.c err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_new_dir); c 1400 fs/ubifs/journal.c finish_reservation(c); c 1402 fs/ubifs/journal.c mark_inode_clean(c, new_ui); c 1407 fs/ubifs/journal.c mark_inode_clean(c, ubifs_inode(old_dir)); c 1409 fs/ubifs/journal.c mark_inode_clean(c, ubifs_inode(new_dir)); c 1414 fs/ubifs/journal.c release_head(c, BASEHD); c 1416 fs/ubifs/journal.c ubifs_ro_mode(c, err); c 1418 fs/ubifs/journal.c ubifs_delete_orphan(c, new_inode->i_ino); c 1420 fs/ubifs/journal.c finish_reservation(c); c 1437 fs/ubifs/journal.c static int truncate_data_node(const struct ubifs_info *c, const struct inode *inode, c 1461 fs/ubifs/journal.c err = ubifs_decompress(c, &dn->data, dlen, buf, &out_len, compr_type); c 1465 fs/ubifs/journal.c ubifs_compress(c, buf, *new_len, &dn->data, &out_len, &compr_type); c 1478 fs/ubifs/journal.c ubifs_assert(c, out_len <= UBIFS_BLOCK_SIZE); c 1503 fs/ubifs/journal.c int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode, c 1519 fs/ubifs/journal.c ubifs_assert(c, !ui->data_len); c 1520 fs/ubifs/journal.c ubifs_assert(c, S_ISREG(inode->i_mode)); c 1521 fs/ubifs/journal.c ubifs_assert(c, mutex_is_locked(&ui->ui_mutex)); c 1526 fs/ubifs/journal.c sz += ubifs_auth_node_sz(c); c 1544 fs/ubifs/journal.c data_key_init(c, &key, inum, blk); c 1546 fs/ubifs/journal.c err = ubifs_tnc_lookup(c, &key, dn); c 1555 fs/ubifs/journal.c ubifs_err(c, "bad data node (block %u, inode %lu)", c 1557 fs/ubifs/journal.c ubifs_dump_node(c, dn); c 1564 fs/ubifs/journal.c err = 
truncate_data_node(c, inode, blk, dn, &dlen); c 1574 fs/ubifs/journal.c if (ubifs_authenticated(c)) c 1575 fs/ubifs/journal.c len += ALIGN(dlen, 8) + ubifs_auth_node_sz(c); c 1579 fs/ubifs/journal.c err = make_reservation(c, BASEHD, len); c 1583 fs/ubifs/journal.c pack_inode(c, ino, inode, 0); c 1584 fs/ubifs/journal.c err = ubifs_node_calc_hash(c, ino, hash_ino); c 1588 fs/ubifs/journal.c ubifs_prep_grp_node(c, trun, UBIFS_TRUN_NODE_SZ, dlen ? 0 : 1); c 1590 fs/ubifs/journal.c ubifs_prep_grp_node(c, dn, dlen, 1); c 1591 fs/ubifs/journal.c err = ubifs_node_calc_hash(c, dn, hash_dn); c 1596 fs/ubifs/journal.c err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync); c 1600 fs/ubifs/journal.c ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, inum); c 1601 fs/ubifs/journal.c release_head(c, BASEHD); c 1603 fs/ubifs/journal.c ubifs_add_auth_dirt(c, lnum); c 1607 fs/ubifs/journal.c err = ubifs_tnc_add(c, &key, lnum, sz, dlen, hash_dn); c 1612 fs/ubifs/journal.c ino_key_init(c, &key, inum); c 1613 fs/ubifs/journal.c err = ubifs_tnc_add(c, &key, lnum, offs, UBIFS_INO_NODE_SZ, hash_ino); c 1617 fs/ubifs/journal.c err = ubifs_add_dirt(c, lnum, UBIFS_TRUN_NODE_SZ); c 1623 fs/ubifs/journal.c data_key_init(c, &key, inum, blk); c 1627 fs/ubifs/journal.c data_key_init(c, &to_key, inum, blk); c 1629 fs/ubifs/journal.c err = ubifs_tnc_remove_range(c, &key, &to_key); c 1633 fs/ubifs/journal.c finish_reservation(c); c 1637 fs/ubifs/journal.c mark_inode_clean(c, ui); c 1642 fs/ubifs/journal.c release_head(c, BASEHD); c 1644 fs/ubifs/journal.c ubifs_ro_mode(c, err); c 1645 fs/ubifs/journal.c finish_reservation(c); c 1664 fs/ubifs/journal.c int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host, c 1676 fs/ubifs/journal.c ubifs_assert(c, inode->i_nlink == 0); c 1677 fs/ubifs/journal.c ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex)); c 1688 fs/ubifs/journal.c write_len = len + ubifs_auth_node_sz(c); c 1695 fs/ubifs/journal.c err = make_reservation(c, BASEHD, write_len); c 1702 fs/ubifs/journal.c xent_key_init(c, &xent_key, host->i_ino, nm); c 1703 fs/ubifs/journal.c key_write(c, &xent_key, xent->key); c 1710 fs/ubifs/journal.c ubifs_prep_grp_node(c, xent, xlen, 0); c 1713 fs/ubifs/journal.c pack_inode(c, ino, inode, 0); c 1715 fs/ubifs/journal.c pack_inode(c, ino, host, 1); c 1716 fs/ubifs/journal.c err = ubifs_node_calc_hash(c, ino, hash); c 1720 fs/ubifs/journal.c err = write_head(c, BASEHD, xent, write_len, &lnum, &xent_offs, sync); c 1722 fs/ubifs/journal.c ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, host->i_ino); c 1723 fs/ubifs/journal.c release_head(c, BASEHD); c 1725 fs/ubifs/journal.c ubifs_add_auth_dirt(c, lnum); c 1731 fs/ubifs/journal.c err = ubifs_tnc_remove_nm(c, &xent_key, nm); c 1734 fs/ubifs/journal.c err = ubifs_add_dirt(c, lnum, xlen); c 1742 fs/ubifs/journal.c lowest_ino_key(c, &key1, inode->i_ino); c 1743 fs/ubifs/journal.c highest_ino_key(c, &key2, inode->i_ino); c 1744 fs/ubifs/journal.c err = ubifs_tnc_remove_range(c, &key1, &key2); c 1747 fs/ubifs/journal.c err = ubifs_add_dirt(c, lnum, UBIFS_INO_NODE_SZ); c 1752 fs/ubifs/journal.c ino_key_init(c, &key1, host->i_ino); c 1753 fs/ubifs/journal.c err = ubifs_tnc_add(c, &key1, lnum, xent_offs + len - hlen, hlen, hash); c 1757 fs/ubifs/journal.c finish_reservation(c); c 1761 fs/ubifs/journal.c mark_inode_clean(c, host_ui); c 1766 fs/ubifs/journal.c release_head(c, BASEHD); c 1768 fs/ubifs/journal.c ubifs_ro_mode(c, err); c 1769 fs/ubifs/journal.c finish_reservation(c); c 1786 fs/ubifs/journal.c int 
ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode, c 1798 fs/ubifs/journal.c ubifs_assert(c, host->i_nlink > 0); c 1799 fs/ubifs/journal.c ubifs_assert(c, inode->i_nlink > 0); c 1800 fs/ubifs/journal.c ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex)); c 1807 fs/ubifs/journal.c aligned_len += ubifs_auth_node_sz(c); c 1814 fs/ubifs/journal.c err = make_reservation(c, BASEHD, aligned_len); c 1818 fs/ubifs/journal.c pack_inode(c, ino, host, 0); c 1819 fs/ubifs/journal.c err = ubifs_node_calc_hash(c, ino, hash_host); c 1822 fs/ubifs/journal.c pack_inode(c, (void *)ino + aligned_len1, inode, 1); c 1823 fs/ubifs/journal.c err = ubifs_node_calc_hash(c, (void *)ino + aligned_len1, hash); c 1827 fs/ubifs/journal.c err = write_head(c, BASEHD, ino, aligned_len, &lnum, &offs, 0); c 1829 fs/ubifs/journal.c struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf; c 1834 fs/ubifs/journal.c release_head(c, BASEHD); c 1838 fs/ubifs/journal.c ubifs_add_auth_dirt(c, lnum); c 1840 fs/ubifs/journal.c ino_key_init(c, &key, host->i_ino); c 1841 fs/ubifs/journal.c err = ubifs_tnc_add(c, &key, lnum, offs, len1, hash_host); c 1845 fs/ubifs/journal.c ino_key_init(c, &key, inode->i_ino); c 1846 fs/ubifs/journal.c err = ubifs_tnc_add(c, &key, lnum, offs + aligned_len1, len2, hash); c 1850 fs/ubifs/journal.c finish_reservation(c); c 1854 fs/ubifs/journal.c mark_inode_clean(c, host_ui); c 1859 fs/ubifs/journal.c release_head(c, BASEHD); c 1861 fs/ubifs/journal.c ubifs_ro_mode(c, err); c 1862 fs/ubifs/journal.c finish_reservation(c); c 90 fs/ubifs/key.h static inline void ino_key_init(const struct ubifs_info *c, c 103 fs/ubifs/key.h static inline void ino_key_init_flash(const struct ubifs_info *c, void *k, c 119 fs/ubifs/key.h static inline void lowest_ino_key(const struct ubifs_info *c, c 132 fs/ubifs/key.h static inline void highest_ino_key(const struct ubifs_info *c, c 146 fs/ubifs/key.h static inline void dent_key_init(const struct ubifs_info *c, c 150 fs/ubifs/key.h uint32_t hash = c->key_hash(fname_name(nm), fname_len(nm)); c 152 fs/ubifs/key.h ubifs_assert(c, !(hash & ~UBIFS_S_KEY_HASH_MASK)); c 153 fs/ubifs/key.h ubifs_assert(c, !nm->hash && !nm->minor_hash); c 166 fs/ubifs/key.h static inline void dent_key_init_hash(const struct ubifs_info *c, c 170 fs/ubifs/key.h ubifs_assert(c, !(hash & ~UBIFS_S_KEY_HASH_MASK)); c 182 fs/ubifs/key.h static inline void dent_key_init_flash(const struct ubifs_info *c, void *k, c 187 fs/ubifs/key.h uint32_t hash = c->key_hash(fname_name(nm), fname_len(nm)); c 189 fs/ubifs/key.h ubifs_assert(c, !(hash & ~UBIFS_S_KEY_HASH_MASK)); c 202 fs/ubifs/key.h static inline void lowest_dent_key(const struct ubifs_info *c, c 216 fs/ubifs/key.h static inline void xent_key_init(const struct ubifs_info *c, c 220 fs/ubifs/key.h uint32_t hash = c->key_hash(fname_name(nm), fname_len(nm)); c 222 fs/ubifs/key.h ubifs_assert(c, !(hash & ~UBIFS_S_KEY_HASH_MASK)); c 234 fs/ubifs/key.h static inline void xent_key_init_flash(const struct ubifs_info *c, void *k, c 238 fs/ubifs/key.h uint32_t hash = c->key_hash(fname_name(nm), fname_len(nm)); c 240 fs/ubifs/key.h ubifs_assert(c, !(hash & ~UBIFS_S_KEY_HASH_MASK)); c 253 fs/ubifs/key.h static inline void lowest_xent_key(const struct ubifs_info *c, c 267 fs/ubifs/key.h static inline void data_key_init(const struct ubifs_info *c, c 271 fs/ubifs/key.h ubifs_assert(c, !(block & ~UBIFS_S_KEY_BLOCK_MASK)); c 282 fs/ubifs/key.h static inline void highest_data_key(const struct ubifs_info *c, c 285 fs/ubifs/key.h data_key_init(c, key, inum, 
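
The ino/dent/xent/data key initializers from fs/ubifs/key.h listed here all build the same 64-bit "simple" key: one 32-bit word for the inode number and a second word carrying the key type in the top bits with a name hash or block number below it. The sketch packs and unpacks such a key; the 29-bit field width is inferred from the UBIFS_S_KEY_HASH_MASK / UBIFS_S_KEY_BLOCK_MASK assertions above, and the type codes are purely illustrative, not taken from ubifs-media.h.

#include <stdio.h>
#include <stdint.h>

#define S_KEY_BITS  29
#define S_KEY_MASK  ((1u << S_KEY_BITS) - 1)    /* 0x1fffffff, assumed width */

enum key_type { KEY_INO, KEY_DATA, KEY_DENT, KEY_XENT };  /* illustrative codes */

struct simple_key { uint32_t u32[2]; };

static struct simple_key ino_key(uint32_t inum)
{
    struct simple_key k = { { inum, (uint32_t)KEY_INO << S_KEY_BITS } };
    return k;
}

static struct simple_key data_key(uint32_t inum, uint32_t block)
{
    struct simple_key k = { { inum,
            ((uint32_t)KEY_DATA << S_KEY_BITS) | (block & S_KEY_MASK) } };
    return k;
}

int main(void)
{
    struct simple_key k = data_key(42, 7);

    printf("inum %u type %u block %u\n",
           k.u32[0], k.u32[1] >> S_KEY_BITS, k.u32[1] & S_KEY_MASK);
    (void)ino_key;
    return 0;
}

Setting the hash/block field to its all-ones mask, as highest_data_key() and highest_ino_key() do above, gives the upper bound of a per-inode key range for ubifs_tnc_remove_range().
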
UBIFS_S_KEY_BLOCK_MASK); c 297 fs/ubifs/key.h static inline void trun_key_init(const struct ubifs_info *c, c 311 fs/ubifs/key.h static inline void invalid_key_init(const struct ubifs_info *c, c 323 fs/ubifs/key.h static inline int key_type(const struct ubifs_info *c, c 334 fs/ubifs/key.h static inline int key_type_flash(const struct ubifs_info *c, const void *k) c 346 fs/ubifs/key.h static inline ino_t key_inum(const struct ubifs_info *c, const void *k) c 358 fs/ubifs/key.h static inline ino_t key_inum_flash(const struct ubifs_info *c, const void *k) c 370 fs/ubifs/key.h static inline uint32_t key_hash(const struct ubifs_info *c, c 381 fs/ubifs/key.h static inline uint32_t key_hash_flash(const struct ubifs_info *c, const void *k) c 393 fs/ubifs/key.h static inline unsigned int key_block(const struct ubifs_info *c, c 404 fs/ubifs/key.h static inline unsigned int key_block_flash(const struct ubifs_info *c, c 418 fs/ubifs/key.h static inline void key_read(const struct ubifs_info *c, const void *from, c 433 fs/ubifs/key.h static inline void key_write(const struct ubifs_info *c, c 449 fs/ubifs/key.h static inline void key_write_idx(const struct ubifs_info *c, c 464 fs/ubifs/key.h static inline void key_copy(const struct ubifs_info *c, c 479 fs/ubifs/key.h static inline int keys_cmp(const struct ubifs_info *c, c 504 fs/ubifs/key.h static inline int keys_eq(const struct ubifs_info *c, c 522 fs/ubifs/key.h static inline int is_hash_key(const struct ubifs_info *c, c 525 fs/ubifs/key.h int type = key_type(c, key); c 534 fs/ubifs/key.h static inline unsigned long long key_max_inode_size(const struct ubifs_info *c) c 536 fs/ubifs/key.h switch (c->key_fmt) { c 20 fs/ubifs/log.c static int dbg_check_bud_bytes(struct ubifs_info *c); c 30 fs/ubifs/log.c struct ubifs_bud *ubifs_search_bud(struct ubifs_info *c, int lnum) c 35 fs/ubifs/log.c spin_lock(&c->buds_lock); c 36 fs/ubifs/log.c p = c->buds.rb_node; c 44 fs/ubifs/log.c spin_unlock(&c->buds_lock); c 48 fs/ubifs/log.c spin_unlock(&c->buds_lock); c 59 fs/ubifs/log.c struct ubifs_wbuf *ubifs_get_wbuf(struct ubifs_info *c, int lnum) c 65 fs/ubifs/log.c if (!c->jheads) c 68 fs/ubifs/log.c spin_lock(&c->buds_lock); c 69 fs/ubifs/log.c p = c->buds.rb_node; c 78 fs/ubifs/log.c spin_unlock(&c->buds_lock); c 79 fs/ubifs/log.c return &c->jheads[jhead].wbuf; c 82 fs/ubifs/log.c spin_unlock(&c->buds_lock); c 90 fs/ubifs/log.c static inline long long empty_log_bytes(const struct ubifs_info *c) c 94 fs/ubifs/log.c h = (long long)c->lhead_lnum * c->leb_size + c->lhead_offs; c 95 fs/ubifs/log.c t = (long long)c->ltail_lnum * c->leb_size; c 98 fs/ubifs/log.c return c->log_bytes - h + t; c 101 fs/ubifs/log.c else if (c->lhead_lnum != c->ltail_lnum) c 104 fs/ubifs/log.c return c->log_bytes; c 112 fs/ubifs/log.c void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud) c 118 fs/ubifs/log.c spin_lock(&c->buds_lock); c 119 fs/ubifs/log.c p = &c->buds.rb_node; c 123 fs/ubifs/log.c ubifs_assert(c, bud->lnum != b->lnum); c 131 fs/ubifs/log.c rb_insert_color(&bud->rb, &c->buds); c 132 fs/ubifs/log.c if (c->jheads) { c 133 fs/ubifs/log.c jhead = &c->jheads[bud->jhead]; c 136 fs/ubifs/log.c ubifs_assert(c, c->replaying && c->ro_mount); c 144 fs/ubifs/log.c c->bud_bytes += c->leb_size - bud->start; c 147 fs/ubifs/log.c bud->start, dbg_jhead(bud->jhead), c->bud_bytes); c 148 fs/ubifs/log.c spin_unlock(&c->buds_lock); c 164 fs/ubifs/log.c int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs) c 173 fs/ubifs/log.c ref = kzalloc(c->ref_node_alsz, 
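
empty_log_bytes() above derives the free space of the circular UBIFS log from the head position (lhead_lnum, lhead_offs) and the tail LEB. The standalone function below mirrors that arithmetic; the branch for a head strictly behind the tail is reconstructed from memory and should be treated as an assumption.

#include <stdio.h>

/* Free space in a circular log of total size log_bytes, head at
 * (head_lnum, head_offs), tail at the start of LEB tail_lnum.
 * A sketch shaped like empty_log_bytes(), not the kernel function. */
static long long log_free_bytes(long long log_bytes, int leb_size,
                                int head_lnum, int head_offs, int tail_lnum)
{
    long long h = (long long)head_lnum * leb_size + head_offs;
    long long t = (long long)tail_lnum * leb_size;

    if (h > t)                      /* head ahead of tail: wrap-around gap */
        return log_bytes - h + t;
    if (h != t)                     /* head behind tail (assumed branch) */
        return t - h;
    if (head_lnum != tail_lnum)     /* same offset, different LEBs: full */
        return 0;
    return log_bytes;               /* head meets tail: log is empty */
}

int main(void)
{
    long long total = 8LL * 131072; /* e.g. 8 log LEBs of 128 KiB */

    printf("%lld\n", log_free_bytes(total, 131072, 5, 4096, 2));
    return 0;
}

Working in absolute byte positions keeps the wrap-around case down to a single subtraction, since the LEB offsets cancel out in h - t.
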
GFP_NOFS); c 179 fs/ubifs/log.c mutex_lock(&c->log_mutex); c 180 fs/ubifs/log.c ubifs_assert(c, !c->ro_media && !c->ro_mount); c 181 fs/ubifs/log.c if (c->ro_error) { c 187 fs/ubifs/log.c if (empty_log_bytes(c) - c->ref_node_alsz < c->min_log_bytes) { c 189 fs/ubifs/log.c empty_log_bytes(c), c->min_log_bytes); c 190 fs/ubifs/log.c ubifs_commit_required(c); c 204 fs/ubifs/log.c if (c->bud_bytes + c->leb_size - offs > c->max_bud_bytes) { c 206 fs/ubifs/log.c c->bud_bytes, c->max_bud_bytes); c 207 fs/ubifs/log.c ubifs_commit_required(c); c 217 fs/ubifs/log.c if (c->bud_bytes >= c->bg_bud_bytes && c 218 fs/ubifs/log.c c->cmt_state == COMMIT_RESTING) { c 220 fs/ubifs/log.c c->bud_bytes, c->max_bud_bytes); c 221 fs/ubifs/log.c ubifs_request_bg_commit(c); c 234 fs/ubifs/log.c if (c->lhead_offs > c->leb_size - c->ref_node_alsz) { c 235 fs/ubifs/log.c c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum); c 236 fs/ubifs/log.c ubifs_assert(c, c->lhead_lnum != c->ltail_lnum); c 237 fs/ubifs/log.c c->lhead_offs = 0; c 240 fs/ubifs/log.c if (c->lhead_offs == 0) { c 242 fs/ubifs/log.c err = ubifs_leb_unmap(c, c->lhead_lnum); c 255 fs/ubifs/log.c err = ubifs_leb_map(c, bud->lnum); c 261 fs/ubifs/log.c c->lhead_lnum, c->lhead_offs); c 262 fs/ubifs/log.c err = ubifs_write_node(c, ref, UBIFS_REF_NODE_SZ, c->lhead_lnum, c 263 fs/ubifs/log.c c->lhead_offs); c 267 fs/ubifs/log.c err = ubifs_shash_update(c, c->log_hash, ref, UBIFS_REF_NODE_SZ); c 271 fs/ubifs/log.c err = ubifs_shash_copy_state(c, c->log_hash, c->jheads[jhead].log_hash); c 275 fs/ubifs/log.c c->lhead_offs += c->ref_node_alsz; c 277 fs/ubifs/log.c ubifs_add_bud(c, bud); c 279 fs/ubifs/log.c mutex_unlock(&c->log_mutex); c 284 fs/ubifs/log.c mutex_unlock(&c->log_mutex); c 297 fs/ubifs/log.c static void remove_buds(struct ubifs_info *c) c 301 fs/ubifs/log.c ubifs_assert(c, list_empty(&c->old_buds)); c 302 fs/ubifs/log.c c->cmt_bud_bytes = 0; c 303 fs/ubifs/log.c spin_lock(&c->buds_lock); c 304 fs/ubifs/log.c p = rb_first(&c->buds); c 312 fs/ubifs/log.c wbuf = &c->jheads[bud->jhead].wbuf; c 319 fs/ubifs/log.c c->cmt_bud_bytes += wbuf->offs - bud->start; c 322 fs/ubifs/log.c wbuf->offs - bud->start, c->cmt_bud_bytes); c 325 fs/ubifs/log.c c->cmt_bud_bytes += c->leb_size - bud->start; c 328 fs/ubifs/log.c c->leb_size - bud->start, c->cmt_bud_bytes); c 329 fs/ubifs/log.c rb_erase(p1, &c->buds); c 337 fs/ubifs/log.c list_move(&bud->list, &c->old_buds); c 340 fs/ubifs/log.c spin_unlock(&c->buds_lock); c 356 fs/ubifs/log.c int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum) c 363 fs/ubifs/log.c err = dbg_check_bud_bytes(c); c 367 fs/ubifs/log.c max_len = UBIFS_CS_NODE_SZ + c->jhead_cnt * UBIFS_REF_NODE_SZ; c 368 fs/ubifs/log.c max_len = ALIGN(max_len, c->min_io_size); c 374 fs/ubifs/log.c cs->cmt_no = cpu_to_le64(c->cmt_no); c 375 fs/ubifs/log.c ubifs_prepare_node(c, cs, UBIFS_CS_NODE_SZ, 0); c 377 fs/ubifs/log.c err = ubifs_shash_init(c, c->log_hash); c 381 fs/ubifs/log.c err = ubifs_shash_update(c, c->log_hash, cs, UBIFS_CS_NODE_SZ); c 393 fs/ubifs/log.c for (i = 0; i < c->jhead_cnt; i++) { c 394 fs/ubifs/log.c int lnum = c->jheads[i].wbuf.lnum; c 395 fs/ubifs/log.c int offs = c->jheads[i].wbuf.offs; c 397 fs/ubifs/log.c if (lnum == -1 || offs == c->leb_size) c 408 fs/ubifs/log.c ubifs_prepare_node(c, ref, UBIFS_REF_NODE_SZ, 0); c 411 fs/ubifs/log.c err = ubifs_shash_update(c, c->log_hash, ref, c 415 fs/ubifs/log.c ubifs_shash_copy_state(c, c->log_hash, c->jheads[i].log_hash); c 418 fs/ubifs/log.c ubifs_pad(c, buf + len, ALIGN(len, 
c->min_io_size) - len); c 421 fs/ubifs/log.c if (c->lhead_offs) { c 422 fs/ubifs/log.c c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum); c 423 fs/ubifs/log.c ubifs_assert(c, c->lhead_lnum != c->ltail_lnum); c 424 fs/ubifs/log.c c->lhead_offs = 0; c 428 fs/ubifs/log.c err = ubifs_leb_unmap(c, c->lhead_lnum); c 432 fs/ubifs/log.c len = ALIGN(len, c->min_io_size); c 433 fs/ubifs/log.c dbg_log("writing commit start at LEB %d:0, len %d", c->lhead_lnum, len); c 434 fs/ubifs/log.c err = ubifs_leb_write(c, c->lhead_lnum, cs, 0, len); c 438 fs/ubifs/log.c *ltail_lnum = c->lhead_lnum; c 440 fs/ubifs/log.c c->lhead_offs += len; c 441 fs/ubifs/log.c ubifs_assert(c, c->lhead_offs < c->leb_size); c 443 fs/ubifs/log.c remove_buds(c); c 449 fs/ubifs/log.c c->min_log_bytes = 0; c 466 fs/ubifs/log.c int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum) c 475 fs/ubifs/log.c mutex_lock(&c->log_mutex); c 478 fs/ubifs/log.c c->ltail_lnum, ltail_lnum); c 480 fs/ubifs/log.c c->ltail_lnum = ltail_lnum; c 485 fs/ubifs/log.c c->min_log_bytes = c->leb_size; c 487 fs/ubifs/log.c spin_lock(&c->buds_lock); c 488 fs/ubifs/log.c c->bud_bytes -= c->cmt_bud_bytes; c 489 fs/ubifs/log.c spin_unlock(&c->buds_lock); c 491 fs/ubifs/log.c err = dbg_check_bud_bytes(c); c 495 fs/ubifs/log.c err = ubifs_write_master(c); c 498 fs/ubifs/log.c mutex_unlock(&c->log_mutex); c 515 fs/ubifs/log.c int ubifs_log_post_commit(struct ubifs_info *c, int old_ltail_lnum) c 519 fs/ubifs/log.c while (!list_empty(&c->old_buds)) { c 522 fs/ubifs/log.c bud = list_entry(c->old_buds.next, struct ubifs_bud, list); c 523 fs/ubifs/log.c err = ubifs_return_leb(c, bud->lnum); c 530 fs/ubifs/log.c mutex_lock(&c->log_mutex); c 531 fs/ubifs/log.c for (lnum = old_ltail_lnum; lnum != c->ltail_lnum; c 532 fs/ubifs/log.c lnum = ubifs_next_log_lnum(c, lnum)) { c 534 fs/ubifs/log.c err = ubifs_leb_unmap(c, lnum); c 539 fs/ubifs/log.c mutex_unlock(&c->log_mutex); c 611 fs/ubifs/log.c static int add_node(struct ubifs_info *c, void *buf, int *lnum, int *offs, c 615 fs/ubifs/log.c int len = le32_to_cpu(ch->len), remains = c->leb_size - *offs; c 618 fs/ubifs/log.c int sz = ALIGN(*offs, c->min_io_size), err; c 620 fs/ubifs/log.c ubifs_pad(c, buf + *offs, sz - *offs); c 621 fs/ubifs/log.c err = ubifs_leb_change(c, *lnum, buf, sz); c 624 fs/ubifs/log.c *lnum = ubifs_next_log_lnum(c, *lnum); c 642 fs/ubifs/log.c int ubifs_consolidate_log(struct ubifs_info *c) c 650 fs/ubifs/log.c dbg_rcvry("log tail LEB %d, log head LEB %d", c->ltail_lnum, c 651 fs/ubifs/log.c c->lhead_lnum); c 652 fs/ubifs/log.c buf = vmalloc(c->leb_size); c 655 fs/ubifs/log.c lnum = c->ltail_lnum; c 658 fs/ubifs/log.c sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0); c 673 fs/ubifs/log.c err = add_node(c, buf, &write_lnum, c 683 fs/ubifs/log.c err = add_node(c, buf, &write_lnum, &offs, c 692 fs/ubifs/log.c if (lnum == c->lhead_lnum) c 694 fs/ubifs/log.c lnum = ubifs_next_log_lnum(c, lnum); c 697 fs/ubifs/log.c int sz = ALIGN(offs, c->min_io_size); c 699 fs/ubifs/log.c ubifs_pad(c, buf + offs, sz - offs); c 700 fs/ubifs/log.c err = ubifs_leb_change(c, write_lnum, buf, sz); c 703 fs/ubifs/log.c offs = ALIGN(offs, c->min_io_size); c 707 fs/ubifs/log.c if (write_lnum == c->lhead_lnum) { c 708 fs/ubifs/log.c ubifs_err(c, "log is too full"); c 714 fs/ubifs/log.c lnum = ubifs_next_log_lnum(c, lnum); c 715 fs/ubifs/log.c err = ubifs_leb_unmap(c, lnum); c 718 fs/ubifs/log.c } while (lnum != c->lhead_lnum); c 719 fs/ubifs/log.c c->lhead_lnum = write_lnum; c 720 fs/ubifs/log.c c->lhead_offs = offs; c 721 
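
ubifs_log_start_commit() and ubifs_consolidate_log() above repeatedly round a buffer length up to c->min_io_size with ALIGN() and fill the slack via ubifs_pad(). A small userspace sketch of that rounding, assuming the minimum I/O unit is a power of two; the real ubifs_pad() emits padding nodes rather than a plain fill byte.

#include <stdio.h>
#include <string.h>

/* Round len up to the next multiple of the power-of-two minimum I/O
 * unit and fill the slack, as the ALIGN(len, c->min_io_size) calls
 * above do before a LEB write. */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

static size_t pad_to_min_io(unsigned char *buf, size_t len, size_t min_io)
{
    size_t aligned = ALIGN_UP(len, min_io);

    memset(buf + len, 0xCE, aligned - len);   /* placeholder fill byte */
    return aligned;
}

int main(void)
{
    unsigned char buf[4096] = { 0 };
    size_t used = 100;

    printf("%zu -> %zu\n", used, pad_to_min_io(buf, used, 512));
    return 0;
}
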
fs/ubifs/log.c dbg_rcvry("new log head at %d:%d", c->lhead_lnum, c->lhead_offs); c 740 fs/ubifs/log.c static int dbg_check_bud_bytes(struct ubifs_info *c) c 746 fs/ubifs/log.c if (!dbg_is_chk_gen(c)) c 749 fs/ubifs/log.c spin_lock(&c->buds_lock); c 750 fs/ubifs/log.c for (i = 0; i < c->jhead_cnt; i++) c 751 fs/ubifs/log.c list_for_each_entry(bud, &c->jheads[i].buds_list, list) c 752 fs/ubifs/log.c bud_bytes += c->leb_size - bud->start; c 754 fs/ubifs/log.c if (c->bud_bytes != bud_bytes) { c 755 fs/ubifs/log.c ubifs_err(c, "bad bud_bytes %lld, calculated %lld", c 756 fs/ubifs/log.c c->bud_bytes, bud_bytes); c 759 fs/ubifs/log.c spin_unlock(&c->buds_lock); c 50 fs/ubifs/lprops.c static void move_up_lpt_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, c 87 fs/ubifs/lprops.c static void adjust_lpt_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, c 166 fs/ubifs/lprops.c static int add_to_lpt_heap(struct ubifs_info *c, struct ubifs_lprops *lprops, c 169 fs/ubifs/lprops.c struct ubifs_lpt_heap *heap = &c->lpt_heap[cat - 1]; c 178 fs/ubifs/lprops.c ubifs_assert(c, cpos >= b); c 179 fs/ubifs/lprops.c ubifs_assert(c, cpos < LPT_HEAP_SZ); c 180 fs/ubifs/lprops.c ubifs_assert(c, cpos < heap->cnt); c 190 fs/ubifs/lprops.c list_add(&lp->list, &c->uncat_list); c 193 fs/ubifs/lprops.c move_up_lpt_heap(c, heap, lprops, cat); c 194 fs/ubifs/lprops.c dbg_check_heap(c, heap, cat, lprops->hpos); c 197 fs/ubifs/lprops.c dbg_check_heap(c, heap, cat, -1); c 202 fs/ubifs/lprops.c move_up_lpt_heap(c, heap, lprops, cat); c 203 fs/ubifs/lprops.c dbg_check_heap(c, heap, cat, lprops->hpos); c 214 fs/ubifs/lprops.c static void remove_from_lpt_heap(struct ubifs_info *c, c 220 fs/ubifs/lprops.c heap = &c->lpt_heap[cat - 1]; c 221 fs/ubifs/lprops.c ubifs_assert(c, hpos >= 0 && hpos < heap->cnt); c 222 fs/ubifs/lprops.c ubifs_assert(c, heap->arr[hpos] == lprops); c 227 fs/ubifs/lprops.c adjust_lpt_heap(c, heap, heap->arr[hpos], hpos, cat); c 229 fs/ubifs/lprops.c dbg_check_heap(c, heap, cat, -1); c 243 fs/ubifs/lprops.c static void lpt_heap_replace(struct ubifs_info *c, c 249 fs/ubifs/lprops.c heap = &c->lpt_heap[cat - 1]; c 261 fs/ubifs/lprops.c void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops, c 268 fs/ubifs/lprops.c if (add_to_lpt_heap(c, lprops, cat)) c 274 fs/ubifs/lprops.c list_add(&lprops->list, &c->uncat_list); c 277 fs/ubifs/lprops.c list_add(&lprops->list, &c->empty_list); c 280 fs/ubifs/lprops.c list_add(&lprops->list, &c->freeable_list); c 281 fs/ubifs/lprops.c c->freeable_cnt += 1; c 284 fs/ubifs/lprops.c list_add(&lprops->list, &c->frdi_idx_list); c 287 fs/ubifs/lprops.c ubifs_assert(c, 0); c 292 fs/ubifs/lprops.c c->in_a_category_cnt += 1; c 293 fs/ubifs/lprops.c ubifs_assert(c, c->in_a_category_cnt <= c->main_lebs); c 304 fs/ubifs/lprops.c static void ubifs_remove_from_cat(struct ubifs_info *c, c 311 fs/ubifs/lprops.c remove_from_lpt_heap(c, lprops, cat); c 314 fs/ubifs/lprops.c c->freeable_cnt -= 1; c 315 fs/ubifs/lprops.c ubifs_assert(c, c->freeable_cnt >= 0); c 320 fs/ubifs/lprops.c ubifs_assert(c, !list_empty(&lprops->list)); c 324 fs/ubifs/lprops.c ubifs_assert(c, 0); c 327 fs/ubifs/lprops.c c->in_a_category_cnt -= 1; c 328 fs/ubifs/lprops.c ubifs_assert(c, c->in_a_category_cnt >= 0); c 341 fs/ubifs/lprops.c void ubifs_replace_cat(struct ubifs_info *c, struct ubifs_lprops *old_lprops, c 351 fs/ubifs/lprops.c lpt_heap_replace(c, new_lprops, cat); c 360 fs/ubifs/lprops.c ubifs_assert(c, 0); c 373 fs/ubifs/lprops.c void ubifs_ensure_cat(struct ubifs_info *c, struct 
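
add_to_lpt_heap(), move_up_lpt_heap() and the other lprops.c helpers above keep per-category heaps of LEB properties so that the best candidate LEB can be taken from the top in O(1). Below is a minimal binary max-heap keyed on free space showing the sift-up step; the real heaps also record each entry's position (hpos) inside the lprops and exist once per category.

#include <stdio.h>

struct leb { int lnum; int free; };

#define CAP 128
static struct leb heap[CAP];
static int cnt;

static void swap_entries(int a, int b)
{
    struct leb t = heap[a]; heap[a] = heap[b]; heap[b] = t;
}

/* Insert and sift up until the parent is no smaller, the same move
 * move_up_lpt_heap() performs on c->lpt_heap[]. */
static void heap_add(struct leb l)
{
    int i = cnt++;

    heap[i] = l;
    while (i > 0 && heap[(i - 1) / 2].free < heap[i].free) {
        swap_entries(i, (i - 1) / 2);
        i = (i - 1) / 2;
    }
}

static struct leb heap_peek(void) { return heap[0]; }   /* best candidate, O(1) */

int main(void)
{
    heap_add((struct leb){ 10, 2048 });
    heap_add((struct leb){ 11, 8192 });
    heap_add((struct leb){ 12, 512 });
    printf("LEB %d has the most free space (%d)\n",
           heap_peek().lnum, heap_peek().free);
    return 0;
}
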
ubifs_lprops *lprops) c 379 fs/ubifs/lprops.c cat = ubifs_categorize_lprops(c, lprops); c 382 fs/ubifs/lprops.c ubifs_remove_from_cat(c, lprops, LPROPS_UNCAT); c 383 fs/ubifs/lprops.c ubifs_add_to_cat(c, lprops, cat); c 396 fs/ubifs/lprops.c int ubifs_categorize_lprops(const struct ubifs_info *c, c 402 fs/ubifs/lprops.c if (lprops->free == c->leb_size) { c 403 fs/ubifs/lprops.c ubifs_assert(c, !(lprops->flags & LPROPS_INDEX)); c 407 fs/ubifs/lprops.c if (lprops->free + lprops->dirty == c->leb_size) { c 415 fs/ubifs/lprops.c if (lprops->dirty + lprops->free >= c->min_idx_node_sz) c 418 fs/ubifs/lprops.c if (lprops->dirty >= c->dead_wm && c 436 fs/ubifs/lprops.c static void change_category(struct ubifs_info *c, struct ubifs_lprops *lprops) c 439 fs/ubifs/lprops.c int new_cat = ubifs_categorize_lprops(c, lprops); c 447 fs/ubifs/lprops.c heap = &c->lpt_heap[new_cat - 1]; c 448 fs/ubifs/lprops.c adjust_lpt_heap(c, heap, lprops, lprops->hpos, new_cat); c 450 fs/ubifs/lprops.c ubifs_remove_from_cat(c, lprops, old_cat); c 451 fs/ubifs/lprops.c ubifs_add_to_cat(c, lprops, new_cat); c 467 fs/ubifs/lprops.c int ubifs_calc_dark(const struct ubifs_info *c, int spc) c 469 fs/ubifs/lprops.c ubifs_assert(c, !(spc & 7)); c 471 fs/ubifs/lprops.c if (spc < c->dark_wm) c 479 fs/ubifs/lprops.c if (spc - c->dark_wm < MIN_WRITE_SZ) c 482 fs/ubifs/lprops.c return c->dark_wm; c 490 fs/ubifs/lprops.c static int is_lprops_dirty(struct ubifs_info *c, struct ubifs_lprops *lprops) c 495 fs/ubifs/lprops.c pos = (lprops->lnum - c->main_first) & (UBIFS_LPT_FANOUT - 1); c 520 fs/ubifs/lprops.c const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c, c 534 fs/ubifs/lprops.c ubifs_assert(c, mutex_is_locked(&c->lp_mutex)); c 535 fs/ubifs/lprops.c ubifs_assert(c, c->lst.empty_lebs >= 0 && c 536 fs/ubifs/lprops.c c->lst.empty_lebs <= c->main_lebs); c 537 fs/ubifs/lprops.c ubifs_assert(c, c->freeable_cnt >= 0); c 538 fs/ubifs/lprops.c ubifs_assert(c, c->freeable_cnt <= c->main_lebs); c 539 fs/ubifs/lprops.c ubifs_assert(c, c->lst.taken_empty_lebs >= 0); c 540 fs/ubifs/lprops.c ubifs_assert(c, c->lst.taken_empty_lebs <= c->lst.empty_lebs); c 541 fs/ubifs/lprops.c ubifs_assert(c, !(c->lst.total_free & 7) && !(c->lst.total_dirty & 7)); c 542 fs/ubifs/lprops.c ubifs_assert(c, !(c->lst.total_dead & 7) && !(c->lst.total_dark & 7)); c 543 fs/ubifs/lprops.c ubifs_assert(c, !(c->lst.total_used & 7)); c 544 fs/ubifs/lprops.c ubifs_assert(c, free == LPROPS_NC || free >= 0); c 545 fs/ubifs/lprops.c ubifs_assert(c, dirty == LPROPS_NC || dirty >= 0); c 547 fs/ubifs/lprops.c if (!is_lprops_dirty(c, lprops)) { c 548 fs/ubifs/lprops.c lprops = ubifs_lpt_lookup_dirty(c, lprops->lnum); c 552 fs/ubifs/lprops.c ubifs_assert(c, lprops == ubifs_lpt_lookup_dirty(c, lprops->lnum)); c 554 fs/ubifs/lprops.c ubifs_assert(c, !(lprops->free & 7) && !(lprops->dirty & 7)); c 556 fs/ubifs/lprops.c spin_lock(&c->space_lock); c 557 fs/ubifs/lprops.c if ((lprops->flags & LPROPS_TAKEN) && lprops->free == c->leb_size) c 558 fs/ubifs/lprops.c c->lst.taken_empty_lebs -= 1; c 564 fs/ubifs/lprops.c if (old_spc < c->dead_wm) c 565 fs/ubifs/lprops.c c->lst.total_dead -= old_spc; c 567 fs/ubifs/lprops.c c->lst.total_dark -= ubifs_calc_dark(c, old_spc); c 569 fs/ubifs/lprops.c c->lst.total_used -= c->leb_size - old_spc; c 574 fs/ubifs/lprops.c c->lst.total_free += free - lprops->free; c 577 fs/ubifs/lprops.c if (free == c->leb_size) { c 578 fs/ubifs/lprops.c if (lprops->free != c->leb_size) c 579 fs/ubifs/lprops.c c->lst.empty_lebs += 1; c 580 fs/ubifs/lprops.c } 
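
ubifs_calc_dark() above estimates how much of a LEB's spare space is "dark", i.e. too small to hold a minimum-size write. A sketch of that rule with illustrative watermark values; the exact return values of the first two branches are reconstructed and should be read as assumptions.

#include <stdio.h>

/* Space below the dark watermark counts entirely as dark; once at least
 * one minimum write unit fits above the watermark, only the watermark
 * itself is counted.  dark_wm and min_write_sz are example values. */
static int calc_dark(int spc, int dark_wm, int min_write_sz)
{
    if (spc < dark_wm)
        return spc;
    if (spc - dark_wm < min_write_sz)
        return spc - min_write_sz;
    return dark_wm;
}

int main(void)
{
    int dark_wm = 2048, min_write = 512;
    int spc;

    for (spc = 0; spc <= 4096; spc += 1024)
        printf("spc %4d -> dark %4d\n", spc, calc_dark(spc, dark_wm, min_write));
    return 0;
}
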
else if (lprops->free == c->leb_size) c 581 fs/ubifs/lprops.c c->lst.empty_lebs -= 1; c 587 fs/ubifs/lprops.c c->lst.total_dirty += dirty - lprops->dirty; c 595 fs/ubifs/lprops.c c->lst.idx_lebs -= 1; c 597 fs/ubifs/lprops.c c->lst.idx_lebs += 1; c 605 fs/ubifs/lprops.c if (new_spc < c->dead_wm) c 606 fs/ubifs/lprops.c c->lst.total_dead += new_spc; c 608 fs/ubifs/lprops.c c->lst.total_dark += ubifs_calc_dark(c, new_spc); c 610 fs/ubifs/lprops.c c->lst.total_used += c->leb_size - new_spc; c 613 fs/ubifs/lprops.c if ((lprops->flags & LPROPS_TAKEN) && lprops->free == c->leb_size) c 614 fs/ubifs/lprops.c c->lst.taken_empty_lebs += 1; c 616 fs/ubifs/lprops.c change_category(c, lprops); c 617 fs/ubifs/lprops.c c->idx_gc_cnt += idx_gc_cnt; c 618 fs/ubifs/lprops.c spin_unlock(&c->space_lock); c 627 fs/ubifs/lprops.c void ubifs_get_lp_stats(struct ubifs_info *c, struct ubifs_lp_stats *lst) c 629 fs/ubifs/lprops.c spin_lock(&c->space_lock); c 630 fs/ubifs/lprops.c memcpy(lst, &c->lst, sizeof(struct ubifs_lp_stats)); c 631 fs/ubifs/lprops.c spin_unlock(&c->space_lock); c 649 fs/ubifs/lprops.c int ubifs_change_one_lp(struct ubifs_info *c, int lnum, int free, int dirty, c 655 fs/ubifs/lprops.c ubifs_get_lprops(c); c 657 fs/ubifs/lprops.c lp = ubifs_lpt_lookup_dirty(c, lnum); c 664 fs/ubifs/lprops.c lp = ubifs_change_lp(c, lp, free, dirty, flags, idx_gc_cnt); c 669 fs/ubifs/lprops.c ubifs_release_lprops(c); c 671 fs/ubifs/lprops.c ubifs_err(c, "cannot change properties of LEB %d, error %d", c 688 fs/ubifs/lprops.c int ubifs_update_one_lp(struct ubifs_info *c, int lnum, int free, int dirty, c 694 fs/ubifs/lprops.c ubifs_get_lprops(c); c 696 fs/ubifs/lprops.c lp = ubifs_lpt_lookup_dirty(c, lnum); c 703 fs/ubifs/lprops.c lp = ubifs_change_lp(c, lp, free, lp->dirty + dirty, flags, 0); c 708 fs/ubifs/lprops.c ubifs_release_lprops(c); c 710 fs/ubifs/lprops.c ubifs_err(c, "cannot update properties of LEB %d, error %d", c 725 fs/ubifs/lprops.c int ubifs_read_one_lp(struct ubifs_info *c, int lnum, struct ubifs_lprops *lp) c 730 fs/ubifs/lprops.c ubifs_get_lprops(c); c 732 fs/ubifs/lprops.c lpp = ubifs_lpt_lookup(c, lnum); c 735 fs/ubifs/lprops.c ubifs_err(c, "cannot read properties of LEB %d, error %d", c 743 fs/ubifs/lprops.c ubifs_release_lprops(c); c 754 fs/ubifs/lprops.c const struct ubifs_lprops *ubifs_fast_find_free(struct ubifs_info *c) c 759 fs/ubifs/lprops.c ubifs_assert(c, mutex_is_locked(&c->lp_mutex)); c 761 fs/ubifs/lprops.c heap = &c->lpt_heap[LPROPS_FREE - 1]; c 766 fs/ubifs/lprops.c ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN)); c 767 fs/ubifs/lprops.c ubifs_assert(c, !(lprops->flags & LPROPS_INDEX)); c 778 fs/ubifs/lprops.c const struct ubifs_lprops *ubifs_fast_find_empty(struct ubifs_info *c) c 782 fs/ubifs/lprops.c ubifs_assert(c, mutex_is_locked(&c->lp_mutex)); c 784 fs/ubifs/lprops.c if (list_empty(&c->empty_list)) c 787 fs/ubifs/lprops.c lprops = list_entry(c->empty_list.next, struct ubifs_lprops, list); c 788 fs/ubifs/lprops.c ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN)); c 789 fs/ubifs/lprops.c ubifs_assert(c, !(lprops->flags & LPROPS_INDEX)); c 790 fs/ubifs/lprops.c ubifs_assert(c, lprops->free == c->leb_size); c 801 fs/ubifs/lprops.c const struct ubifs_lprops *ubifs_fast_find_freeable(struct ubifs_info *c) c 805 fs/ubifs/lprops.c ubifs_assert(c, mutex_is_locked(&c->lp_mutex)); c 807 fs/ubifs/lprops.c if (list_empty(&c->freeable_list)) c 810 fs/ubifs/lprops.c lprops = list_entry(c->freeable_list.next, struct ubifs_lprops, list); c 811 fs/ubifs/lprops.c ubifs_assert(c, 
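
ubifs_change_lp() above keeps the global ubifs_lp_stats consistent by subtracting a LEB's old contribution before a change and adding its new contribution afterwards. The sketch below shows that subtract-then-add pattern on a single counter pair; it is a simplification, not the full accounting.

#include <stdio.h>

struct stats { long long total_free; int empty_lebs; };

/* Update the aggregates for one LEB whose free space changes, in the
 * same shape as the c->lst updates in ubifs_change_lp(). */
static void change_lp(struct stats *st, int leb_size,
                      int *free_now, int new_free)
{
    st->total_free -= *free_now;          /* remove old contribution */
    if (*free_now == leb_size)
        st->empty_lebs -= 1;

    *free_now = new_free;                 /* apply the change */

    st->total_free += *free_now;          /* add new contribution */
    if (*free_now == leb_size)
        st->empty_lebs += 1;
}

int main(void)
{
    struct stats st = { .total_free = 131072, .empty_lebs = 1 };
    int leb_free = 131072;                /* one empty 128 KiB LEB */

    change_lp(&st, 131072, &leb_free, 4096);
    printf("total_free %lld, empty_lebs %d\n", st.total_free, st.empty_lebs);
    return 0;
}
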
!(lprops->flags & LPROPS_TAKEN)); c 812 fs/ubifs/lprops.c ubifs_assert(c, !(lprops->flags & LPROPS_INDEX)); c 813 fs/ubifs/lprops.c ubifs_assert(c, lprops->free + lprops->dirty == c->leb_size); c 814 fs/ubifs/lprops.c ubifs_assert(c, c->freeable_cnt > 0); c 825 fs/ubifs/lprops.c const struct ubifs_lprops *ubifs_fast_find_frdi_idx(struct ubifs_info *c) c 829 fs/ubifs/lprops.c ubifs_assert(c, mutex_is_locked(&c->lp_mutex)); c 831 fs/ubifs/lprops.c if (list_empty(&c->frdi_idx_list)) c 834 fs/ubifs/lprops.c lprops = list_entry(c->frdi_idx_list.next, struct ubifs_lprops, list); c 835 fs/ubifs/lprops.c ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN)); c 836 fs/ubifs/lprops.c ubifs_assert(c, (lprops->flags & LPROPS_INDEX)); c 837 fs/ubifs/lprops.c ubifs_assert(c, lprops->free + lprops->dirty == c->leb_size); c 851 fs/ubifs/lprops.c int dbg_check_cats(struct ubifs_info *c) c 857 fs/ubifs/lprops.c if (!dbg_is_chk_gen(c) && !dbg_is_chk_lprops(c)) c 860 fs/ubifs/lprops.c list_for_each_entry(lprops, &c->empty_list, list) { c 861 fs/ubifs/lprops.c if (lprops->free != c->leb_size) { c 862 fs/ubifs/lprops.c ubifs_err(c, "non-empty LEB %d on empty list (free %d dirty %d flags %d)", c 868 fs/ubifs/lprops.c ubifs_err(c, "taken LEB %d on empty list (free %d dirty %d flags %d)", c 876 fs/ubifs/lprops.c list_for_each_entry(lprops, &c->freeable_list, list) { c 877 fs/ubifs/lprops.c if (lprops->free + lprops->dirty != c->leb_size) { c 878 fs/ubifs/lprops.c ubifs_err(c, "non-freeable LEB %d on freeable list (free %d dirty %d flags %d)", c 884 fs/ubifs/lprops.c ubifs_err(c, "taken LEB %d on freeable list (free %d dirty %d flags %d)", c 891 fs/ubifs/lprops.c if (i != c->freeable_cnt) { c 892 fs/ubifs/lprops.c ubifs_err(c, "freeable list count %d expected %d", i, c 893 fs/ubifs/lprops.c c->freeable_cnt); c 898 fs/ubifs/lprops.c list_for_each(pos, &c->idx_gc) c 900 fs/ubifs/lprops.c if (i != c->idx_gc_cnt) { c 901 fs/ubifs/lprops.c ubifs_err(c, "idx_gc list count %d expected %d", i, c 902 fs/ubifs/lprops.c c->idx_gc_cnt); c 906 fs/ubifs/lprops.c list_for_each_entry(lprops, &c->frdi_idx_list, list) { c 907 fs/ubifs/lprops.c if (lprops->free + lprops->dirty != c->leb_size) { c 908 fs/ubifs/lprops.c ubifs_err(c, "non-freeable LEB %d on frdi_idx list (free %d dirty %d flags %d)", c 914 fs/ubifs/lprops.c ubifs_err(c, "taken LEB %d on frdi_idx list (free %d dirty %d flags %d)", c 920 fs/ubifs/lprops.c ubifs_err(c, "non-index LEB %d on frdi_idx list (free %d dirty %d flags %d)", c 928 fs/ubifs/lprops.c struct ubifs_lpt_heap *heap = &c->lpt_heap[cat - 1]; c 933 fs/ubifs/lprops.c ubifs_err(c, "null ptr in LPT heap cat %d", cat); c 937 fs/ubifs/lprops.c ubifs_err(c, "bad ptr in LPT heap cat %d", cat); c 941 fs/ubifs/lprops.c ubifs_err(c, "taken LEB in LPT heap cat %d", cat); c 950 fs/ubifs/lprops.c void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat, c 955 fs/ubifs/lprops.c if (!dbg_is_chk_gen(c) && !dbg_is_chk_lprops(c)) c 971 fs/ubifs/lprops.c lp = ubifs_lpt_lookup(c, lprops->lnum); c 977 fs/ubifs/lprops.c ubifs_err(c, "lprops %zx lp %zx lprops->lnum %d lp->lnum %d", c 997 fs/ubifs/lprops.c ubifs_err(c, "failed cat %d hpos %d err %d", cat, i, err); c 999 fs/ubifs/lprops.c ubifs_dump_heap(c, heap, cat); c 1015 fs/ubifs/lprops.c static int scan_check_cb(struct ubifs_info *c, c 1026 fs/ubifs/lprops.c cat = ubifs_categorize_lprops(c, lp); c 1028 fs/ubifs/lprops.c ubifs_err(c, "bad LEB category %d expected %d", c 1040 fs/ubifs/lprops.c list = &c->empty_list; c 1043 fs/ubifs/lprops.c list = 
&c->freeable_list; c 1046 fs/ubifs/lprops.c list = &c->frdi_idx_list; c 1049 fs/ubifs/lprops.c list = &c->uncat_list; c 1063 fs/ubifs/lprops.c ubifs_err(c, "bad LPT list (category %d)", cat); c 1071 fs/ubifs/lprops.c struct ubifs_lpt_heap *heap = &c->lpt_heap[cat - 1]; c 1075 fs/ubifs/lprops.c ubifs_err(c, "bad LPT heap (category %d)", cat); c 1084 fs/ubifs/lprops.c if (lp->free == c->leb_size) { c 1086 fs/ubifs/lprops.c lst->total_free += c->leb_size; c 1087 fs/ubifs/lprops.c lst->total_dark += ubifs_calc_dark(c, c->leb_size); c 1090 fs/ubifs/lprops.c if (lp->free + lp->dirty == c->leb_size && c 1094 fs/ubifs/lprops.c lst->total_dark += ubifs_calc_dark(c, c->leb_size); c 1098 fs/ubifs/lprops.c buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); c 1102 fs/ubifs/lprops.c sleb = ubifs_scan(c, lnum, 0, buf, 0); c 1106 fs/ubifs/lprops.c ubifs_dump_lprops(c); c 1107 fs/ubifs/lprops.c ubifs_dump_budg(c, &c->bi); c 1122 fs/ubifs/lprops.c ubifs_err(c, "indexing node in data LEB %d:%d", c 1130 fs/ubifs/lprops.c key_read(c, ubifs_idx_key(c, idx), &snod->key); c 1134 fs/ubifs/lprops.c found = ubifs_tnc_has_node(c, &snod->key, level, lnum, c 1143 fs/ubifs/lprops.c free = c->leb_size - sleb->endpt; c 1146 fs/ubifs/lprops.c if (free > c->leb_size || free < 0 || dirty > c->leb_size || c 1148 fs/ubifs/lprops.c ubifs_err(c, "bad calculated accounting for LEB %d: free %d, dirty %d", c 1153 fs/ubifs/lprops.c if (lp->free + lp->dirty == c->leb_size && c 1154 fs/ubifs/lprops.c free + dirty == c->leb_size) c 1156 fs/ubifs/lprops.c (!is_idx && free == c->leb_size) || c 1157 fs/ubifs/lprops.c lp->free == c->leb_size) { c 1171 fs/ubifs/lprops.c lnum != c->ihead_lnum) { c 1191 fs/ubifs/lprops.c if (free == c->leb_size) c 1195 fs/ubifs/lprops.c ubifs_err(c, "indexing node without indexing flag"); c 1201 fs/ubifs/lprops.c ubifs_err(c, "data node with indexing flag"); c 1205 fs/ubifs/lprops.c if (free == c->leb_size) c 1212 fs/ubifs/lprops.c lst->total_used += c->leb_size - free - dirty; c 1219 fs/ubifs/lprops.c if (spc < c->dead_wm) c 1222 fs/ubifs/lprops.c lst->total_dark += ubifs_calc_dark(c, spc); c 1230 fs/ubifs/lprops.c ubifs_err(c, "bad accounting of LEB %d: free %d, dirty %d flags %#x, should be free %d, dirty %d", c 1232 fs/ubifs/lprops.c ubifs_dump_leb(c, lnum); c 1252 fs/ubifs/lprops.c int dbg_check_lprops(struct ubifs_info *c) c 1257 fs/ubifs/lprops.c if (!dbg_is_chk_lprops(c)) c 1264 fs/ubifs/lprops.c for (i = 0; i < c->jhead_cnt; i++) { c 1265 fs/ubifs/lprops.c err = ubifs_wbuf_sync(&c->jheads[i].wbuf); c 1271 fs/ubifs/lprops.c err = ubifs_lpt_scan_nolock(c, c->main_first, c->leb_cnt - 1, c 1277 fs/ubifs/lprops.c if (lst.empty_lebs != c->lst.empty_lebs || c 1278 fs/ubifs/lprops.c lst.idx_lebs != c->lst.idx_lebs || c 1279 fs/ubifs/lprops.c lst.total_free != c->lst.total_free || c 1280 fs/ubifs/lprops.c lst.total_dirty != c->lst.total_dirty || c 1281 fs/ubifs/lprops.c lst.total_used != c->lst.total_used) { c 1282 fs/ubifs/lprops.c ubifs_err(c, "bad overall accounting"); c 1283 fs/ubifs/lprops.c ubifs_err(c, "calculated: empty_lebs %d, idx_lebs %d, total_free %lld, total_dirty %lld, total_used %lld", c 1286 fs/ubifs/lprops.c ubifs_err(c, "read from lprops: empty_lebs %d, idx_lebs %d, total_free %lld, total_dirty %lld, total_used %lld", c 1287 fs/ubifs/lprops.c c->lst.empty_lebs, c->lst.idx_lebs, c->lst.total_free, c 1288 fs/ubifs/lprops.c c->lst.total_dirty, c->lst.total_used); c 1293 fs/ubifs/lprops.c if (lst.total_dead != c->lst.total_dead || c 1294 fs/ubifs/lprops.c lst.total_dark != c->lst.total_dark) 
{ c 1295 fs/ubifs/lprops.c ubifs_err(c, "bad dead/dark space accounting"); c 1296 fs/ubifs/lprops.c ubifs_err(c, "calculated: total_dead %lld, total_dark %lld", c 1298 fs/ubifs/lprops.c ubifs_err(c, "read from lprops: total_dead %lld, total_dark %lld", c 1299 fs/ubifs/lprops.c c->lst.total_dead, c->lst.total_dark); c 1304 fs/ubifs/lprops.c err = dbg_check_cats(c); c 46 fs/ubifs/lpt.c static void do_calc_lpt_geom(struct ubifs_info *c) c 51 fs/ubifs/lpt.c n = c->main_lebs + c->max_leb_cnt - c->leb_cnt; c 54 fs/ubifs/lpt.c c->lpt_hght = 1; c 57 fs/ubifs/lpt.c c->lpt_hght += 1; c 61 fs/ubifs/lpt.c c->pnode_cnt = DIV_ROUND_UP(c->main_lebs, UBIFS_LPT_FANOUT); c 63 fs/ubifs/lpt.c n = DIV_ROUND_UP(c->pnode_cnt, UBIFS_LPT_FANOUT); c 64 fs/ubifs/lpt.c c->nnode_cnt = n; c 65 fs/ubifs/lpt.c for (i = 1; i < c->lpt_hght; i++) { c 67 fs/ubifs/lpt.c c->nnode_cnt += n; c 70 fs/ubifs/lpt.c c->space_bits = fls(c->leb_size) - 3; c 71 fs/ubifs/lpt.c c->lpt_lnum_bits = fls(c->lpt_lebs); c 72 fs/ubifs/lpt.c c->lpt_offs_bits = fls(c->leb_size - 1); c 73 fs/ubifs/lpt.c c->lpt_spc_bits = fls(c->leb_size); c 75 fs/ubifs/lpt.c n = DIV_ROUND_UP(c->max_leb_cnt, UBIFS_LPT_FANOUT); c 76 fs/ubifs/lpt.c c->pcnt_bits = fls(n - 1); c 78 fs/ubifs/lpt.c c->lnum_bits = fls(c->max_leb_cnt - 1); c 81 fs/ubifs/lpt.c (c->big_lpt ? c->pcnt_bits : 0) + c 82 fs/ubifs/lpt.c (c->space_bits * 2 + 1) * UBIFS_LPT_FANOUT; c 83 fs/ubifs/lpt.c c->pnode_sz = (bits + 7) / 8; c 86 fs/ubifs/lpt.c (c->big_lpt ? c->pcnt_bits : 0) + c 87 fs/ubifs/lpt.c (c->lpt_lnum_bits + c->lpt_offs_bits) * UBIFS_LPT_FANOUT; c 88 fs/ubifs/lpt.c c->nnode_sz = (bits + 7) / 8; c 91 fs/ubifs/lpt.c c->lpt_lebs * c->lpt_spc_bits * 2; c 92 fs/ubifs/lpt.c c->ltab_sz = (bits + 7) / 8; c 95 fs/ubifs/lpt.c c->lnum_bits * c->lsave_cnt; c 96 fs/ubifs/lpt.c c->lsave_sz = (bits + 7) / 8; c 99 fs/ubifs/lpt.c c->lpt_sz = (long long)c->pnode_cnt * c->pnode_sz; c 100 fs/ubifs/lpt.c c->lpt_sz += (long long)c->nnode_cnt * c->nnode_sz; c 101 fs/ubifs/lpt.c c->lpt_sz += c->ltab_sz; c 102 fs/ubifs/lpt.c if (c->big_lpt) c 103 fs/ubifs/lpt.c c->lpt_sz += c->lsave_sz; c 106 fs/ubifs/lpt.c sz = c->lpt_sz; c 107 fs/ubifs/lpt.c per_leb_wastage = max_t(int, c->pnode_sz, c->nnode_sz); c 110 fs/ubifs/lpt.c while (sz > c->leb_size) { c 112 fs/ubifs/lpt.c sz -= c->leb_size; c 115 fs/ubifs/lpt.c tot_wastage += ALIGN(sz, c->min_io_size) - sz; c 116 fs/ubifs/lpt.c c->lpt_sz += tot_wastage; c 125 fs/ubifs/lpt.c int ubifs_calc_lpt_geom(struct ubifs_info *c) c 130 fs/ubifs/lpt.c do_calc_lpt_geom(c); c 133 fs/ubifs/lpt.c sz = c->lpt_sz * 2; /* Must have at least 2 times the size */ c 134 fs/ubifs/lpt.c lebs_needed = div_u64(sz + c->leb_size - 1, c->leb_size); c 135 fs/ubifs/lpt.c if (lebs_needed > c->lpt_lebs) { c 136 fs/ubifs/lpt.c ubifs_err(c, "too few LPT LEBs"); c 141 fs/ubifs/lpt.c if (c->ltab_sz > c->leb_size) { c 142 fs/ubifs/lpt.c ubifs_err(c, "LPT ltab too big"); c 146 fs/ubifs/lpt.c c->check_lpt_free = c->big_lpt; c 162 fs/ubifs/lpt.c static int calc_dflt_lpt_geom(struct ubifs_info *c, int *main_lebs, c 169 fs/ubifs/lpt.c c->lpt_lebs = UBIFS_MIN_LPT_LEBS; c 170 fs/ubifs/lpt.c c->main_lebs = *main_lebs - c->lpt_lebs; c 171 fs/ubifs/lpt.c if (c->main_lebs <= 0) c 175 fs/ubifs/lpt.c c->big_lpt = 0; c 181 fs/ubifs/lpt.c do_calc_lpt_geom(c); c 184 fs/ubifs/lpt.c if (c->lpt_sz > c->leb_size) { c 186 fs/ubifs/lpt.c c->big_lpt = 1; c 187 fs/ubifs/lpt.c do_calc_lpt_geom(c); c 192 fs/ubifs/lpt.c sz = c->lpt_sz * 4; /* Allow 4 times the size */ c 193 fs/ubifs/lpt.c lebs_needed = div_u64(sz + c->leb_size - 
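
do_calc_lpt_geom() above sizes the packed LPT nodes by adding up bit widths (fls() of the LEB size for the space fields, CRC and type fields, one free/dirty/index triple per covered LEB) and rounding up to whole bytes. A standalone sketch of that arithmetic; the 16-bit CRC, 4-bit type field and fan-out of 4 are assumptions about the on-flash format.

#include <stdio.h>

#define LPT_CRC_BITS   16     /* assumed */
#define LPT_TYPE_BITS  4      /* assumed */
#define LPT_FANOUT     4      /* assumed */

/* "find last set": 1-based index of the highest set bit, like fls(). */
static int fls_(unsigned int x)
{
    int n = 0;

    while (x) {
        n++;
        x >>= 1;
    }
    return n;
}

int main(void)
{
    int leb_size = 131072;                   /* example LEB size */
    int space_bits = fls_(leb_size) - 3;     /* free/dirty stored in 8-byte units */
    int bits, pnode_sz;

    /* per-pnode payload: (free, dirty, index flag) for each covered LEB */
    bits = LPT_CRC_BITS + LPT_TYPE_BITS + (space_bits * 2 + 1) * LPT_FANOUT;
    pnode_sz = (bits + 7) / 8;
    printf("space_bits %d, pnode_sz %d bytes\n", space_bits, pnode_sz);
    return 0;
}
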
1, c->leb_size); c 194 fs/ubifs/lpt.c if (lebs_needed > c->lpt_lebs) { c 196 fs/ubifs/lpt.c c->lpt_lebs = lebs_needed; c 197 fs/ubifs/lpt.c c->main_lebs = *main_lebs - c->lpt_lebs; c 198 fs/ubifs/lpt.c if (c->main_lebs <= 0) c 200 fs/ubifs/lpt.c do_calc_lpt_geom(c); c 203 fs/ubifs/lpt.c if (c->ltab_sz > c->leb_size) { c 204 fs/ubifs/lpt.c ubifs_err(c, "LPT ltab too big"); c 207 fs/ubifs/lpt.c *main_lebs = c->main_lebs; c 208 fs/ubifs/lpt.c *big_lpt = c->big_lpt; c 222 fs/ubifs/lpt.c static void pack_bits(const struct ubifs_info *c, uint8_t **addr, int *pos, uint32_t val, int nrbits) c 227 fs/ubifs/lpt.c ubifs_assert(c, nrbits > 0); c 228 fs/ubifs/lpt.c ubifs_assert(c, nrbits <= 32); c 229 fs/ubifs/lpt.c ubifs_assert(c, *pos >= 0); c 230 fs/ubifs/lpt.c ubifs_assert(c, *pos < 8); c 231 fs/ubifs/lpt.c ubifs_assert(c, (val >> nrbits) == 0 || nrbits == 32); c 273 fs/ubifs/lpt.c uint32_t ubifs_unpack_bits(const struct ubifs_info *c, uint8_t **addr, int *pos, int nrbits) c 281 fs/ubifs/lpt.c ubifs_assert(c, nrbits > 0); c 282 fs/ubifs/lpt.c ubifs_assert(c, nrbits <= 32); c 283 fs/ubifs/lpt.c ubifs_assert(c, *pos >= 0); c 284 fs/ubifs/lpt.c ubifs_assert(c, *pos < 8); c 330 fs/ubifs/lpt.c ubifs_assert(c, (val >> nrbits) == 0 || nrbits - b == 32); c 340 fs/ubifs/lpt.c void ubifs_pack_pnode(struct ubifs_info *c, void *buf, c 347 fs/ubifs/lpt.c pack_bits(c, &addr, &pos, UBIFS_LPT_PNODE, UBIFS_LPT_TYPE_BITS); c 348 fs/ubifs/lpt.c if (c->big_lpt) c 349 fs/ubifs/lpt.c pack_bits(c, &addr, &pos, pnode->num, c->pcnt_bits); c 351 fs/ubifs/lpt.c pack_bits(c, &addr, &pos, pnode->lprops[i].free >> 3, c 352 fs/ubifs/lpt.c c->space_bits); c 353 fs/ubifs/lpt.c pack_bits(c, &addr, &pos, pnode->lprops[i].dirty >> 3, c 354 fs/ubifs/lpt.c c->space_bits); c 356 fs/ubifs/lpt.c pack_bits(c, &addr, &pos, 1, 1); c 358 fs/ubifs/lpt.c pack_bits(c, &addr, &pos, 0, 1); c 361 fs/ubifs/lpt.c c->pnode_sz - UBIFS_LPT_CRC_BYTES); c 364 fs/ubifs/lpt.c pack_bits(c, &addr, &pos, crc, UBIFS_LPT_CRC_BITS); c 373 fs/ubifs/lpt.c void ubifs_pack_nnode(struct ubifs_info *c, void *buf, c 380 fs/ubifs/lpt.c pack_bits(c, &addr, &pos, UBIFS_LPT_NNODE, UBIFS_LPT_TYPE_BITS); c 381 fs/ubifs/lpt.c if (c->big_lpt) c 382 fs/ubifs/lpt.c pack_bits(c, &addr, &pos, nnode->num, c->pcnt_bits); c 387 fs/ubifs/lpt.c lnum = c->lpt_last + 1; c 388 fs/ubifs/lpt.c pack_bits(c, &addr, &pos, lnum - c->lpt_first, c->lpt_lnum_bits); c 389 fs/ubifs/lpt.c pack_bits(c, &addr, &pos, nnode->nbranch[i].offs, c 390 fs/ubifs/lpt.c c->lpt_offs_bits); c 393 fs/ubifs/lpt.c c->nnode_sz - UBIFS_LPT_CRC_BYTES); c 396 fs/ubifs/lpt.c pack_bits(c, &addr, &pos, crc, UBIFS_LPT_CRC_BITS); c 405 fs/ubifs/lpt.c void ubifs_pack_ltab(struct ubifs_info *c, void *buf, c 412 fs/ubifs/lpt.c pack_bits(c, &addr, &pos, UBIFS_LPT_LTAB, UBIFS_LPT_TYPE_BITS); c 413 fs/ubifs/lpt.c for (i = 0; i < c->lpt_lebs; i++) { c 414 fs/ubifs/lpt.c pack_bits(c, &addr, &pos, ltab[i].free, c->lpt_spc_bits); c 415 fs/ubifs/lpt.c pack_bits(c, &addr, &pos, ltab[i].dirty, c->lpt_spc_bits); c 418 fs/ubifs/lpt.c c->ltab_sz - UBIFS_LPT_CRC_BYTES); c 421 fs/ubifs/lpt.c pack_bits(c, &addr, &pos, crc, UBIFS_LPT_CRC_BITS); c 430 fs/ubifs/lpt.c void ubifs_pack_lsave(struct ubifs_info *c, void *buf, int *lsave) c 436 fs/ubifs/lpt.c pack_bits(c, &addr, &pos, UBIFS_LPT_LSAVE, UBIFS_LPT_TYPE_BITS); c 437 fs/ubifs/lpt.c for (i = 0; i < c->lsave_cnt; i++) c 438 fs/ubifs/lpt.c pack_bits(c, &addr, &pos, lsave[i], c->lnum_bits); c 440 fs/ubifs/lpt.c c->lsave_sz - UBIFS_LPT_CRC_BYTES); c 443 fs/ubifs/lpt.c pack_bits(c, &addr, &pos, 
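
pack_bits() and ubifs_unpack_bits() above stream arbitrary-width fields through an (addr, pos) cursor, where pos is the bit offset within the current byte. The pair below reproduces that interface in userspace, packing least-significant bit first; the exact bit order of the on-flash format is an assumption here.

#include <stdio.h>
#include <stdint.h>

/* Write nrbits of val at the cursor, advancing byte pointer and bit
 * offset; the destination buffer must start zeroed. */
static void put_bits(uint8_t **addr, int *pos, uint32_t val, int nrbits)
{
    int i;

    for (i = 0; i < nrbits; i++) {
        if ((val >> i) & 1)
            **addr |= 1u << *pos;
        if (++*pos == 8) {
            *pos = 0;
            (*addr)++;
        }
    }
}

static uint32_t get_bits(uint8_t **addr, int *pos, int nrbits)
{
    uint32_t val = 0;
    int i;

    for (i = 0; i < nrbits; i++) {
        if ((**addr >> *pos) & 1)
            val |= 1u << i;
        if (++*pos == 8) {
            *pos = 0;
            (*addr)++;
        }
    }
    return val;
}

int main(void)
{
    uint8_t buf[8] = { 0 }, *w = buf, *r = buf;
    int wpos = 0, rpos = 0;

    put_bits(&w, &wpos, 5, 3);       /* a 3-bit field */
    put_bits(&w, &wpos, 300, 10);    /* a 10-bit field */
    printf("%u %u\n", get_bits(&r, &rpos, 3), get_bits(&r, &rpos, 10));
    return 0;
}

Round-tripping a 3-bit and a 10-bit field back to back shows why the cursor must carry both a byte pointer and a bit offset, exactly as the kernel helpers do.
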
crc, UBIFS_LPT_CRC_BITS); c 452 fs/ubifs/lpt.c void ubifs_add_lpt_dirt(struct ubifs_info *c, int lnum, int dirty) c 457 fs/ubifs/lpt.c lnum, dirty, c->ltab[lnum - c->lpt_first].dirty); c 458 fs/ubifs/lpt.c ubifs_assert(c, lnum >= c->lpt_first && lnum <= c->lpt_last); c 459 fs/ubifs/lpt.c c->ltab[lnum - c->lpt_first].dirty += dirty; c 469 fs/ubifs/lpt.c static void set_ltab(struct ubifs_info *c, int lnum, int free, int dirty) c 472 fs/ubifs/lpt.c lnum, c->ltab[lnum - c->lpt_first].free, c 473 fs/ubifs/lpt.c c->ltab[lnum - c->lpt_first].dirty, free, dirty); c 474 fs/ubifs/lpt.c ubifs_assert(c, lnum >= c->lpt_first && lnum <= c->lpt_last); c 475 fs/ubifs/lpt.c c->ltab[lnum - c->lpt_first].free = free; c 476 fs/ubifs/lpt.c c->ltab[lnum - c->lpt_first].dirty = dirty; c 484 fs/ubifs/lpt.c void ubifs_add_nnode_dirt(struct ubifs_info *c, struct ubifs_nnode *nnode) c 489 fs/ubifs/lpt.c ubifs_add_lpt_dirt(c, np->nbranch[nnode->iip].lnum, c 490 fs/ubifs/lpt.c c->nnode_sz); c 492 fs/ubifs/lpt.c ubifs_add_lpt_dirt(c, c->lpt_lnum, c->nnode_sz); c 493 fs/ubifs/lpt.c if (!(c->lpt_drty_flgs & LTAB_DIRTY)) { c 494 fs/ubifs/lpt.c c->lpt_drty_flgs |= LTAB_DIRTY; c 495 fs/ubifs/lpt.c ubifs_add_lpt_dirt(c, c->ltab_lnum, c->ltab_sz); c 505 fs/ubifs/lpt.c static void add_pnode_dirt(struct ubifs_info *c, struct ubifs_pnode *pnode) c 507 fs/ubifs/lpt.c ubifs_add_lpt_dirt(c, pnode->parent->nbranch[pnode->iip].lnum, c 508 fs/ubifs/lpt.c c->pnode_sz); c 548 fs/ubifs/lpt.c static int calc_nnode_num_from_parent(const struct ubifs_info *c, c 555 fs/ubifs/lpt.c shft = (c->lpt_hght - parent->level) * UBIFS_LPT_FANOUT_SHIFT; c 573 fs/ubifs/lpt.c static int calc_pnode_num_from_parent(const struct ubifs_info *c, c 576 fs/ubifs/lpt.c int i, n = c->lpt_hght - 1, pnum = parent->num, num = 0; c 599 fs/ubifs/lpt.c int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first, c 611 fs/ubifs/lpt.c err = calc_dflt_lpt_geom(c, main_lebs, big_lpt); c 614 fs/ubifs/lpt.c *lpt_lebs = c->lpt_lebs; c 617 fs/ubifs/lpt.c c->lpt_first = lpt_first; c 619 fs/ubifs/lpt.c c->lpt_last = lpt_first + c->lpt_lebs - 1; c 621 fs/ubifs/lpt.c c->main_first = c->leb_cnt - *main_lebs; c 623 fs/ubifs/lpt.c desc = ubifs_hash_get_desc(c); c 627 fs/ubifs/lpt.c lsave = kmalloc_array(c->lsave_cnt, sizeof(int), GFP_KERNEL); c 630 fs/ubifs/lpt.c buf = vmalloc(c->leb_size); c 632 fs/ubifs/lpt.c c->lpt_lebs)); c 638 fs/ubifs/lpt.c ubifs_assert(c, !c->ltab); c 639 fs/ubifs/lpt.c c->ltab = ltab; /* Needed by set_ltab */ c 642 fs/ubifs/lpt.c for (i = 0; i < c->lpt_lebs; i++) { c 643 fs/ubifs/lpt.c ltab[i].free = c->leb_size; c 652 fs/ubifs/lpt.c cnt = c->pnode_cnt; c 658 fs/ubifs/lpt.c node_sz = ALIGN(ubifs_idx_node_sz(c, 1), 8); c 659 fs/ubifs/lpt.c iopos = ALIGN(node_sz, c->min_io_size); c 660 fs/ubifs/lpt.c pnode->lprops[0].free = c->leb_size - iopos; c 665 fs/ubifs/lpt.c iopos = ALIGN(node_sz, c->min_io_size); c 666 fs/ubifs/lpt.c pnode->lprops[1].free = c->leb_size - iopos; c 670 fs/ubifs/lpt.c pnode->lprops[i].free = c->leb_size; c 673 fs/ubifs/lpt.c ubifs_pack_pnode(c, p, pnode); c 674 fs/ubifs/lpt.c err = ubifs_shash_update(c, desc, p, c->pnode_sz); c 678 fs/ubifs/lpt.c p += c->pnode_sz; c 679 fs/ubifs/lpt.c len = c->pnode_sz; c 683 fs/ubifs/lpt.c pnode->lprops[0].free = c->leb_size; c 687 fs/ubifs/lpt.c pnode->lprops[1].free = c->leb_size; c 697 fs/ubifs/lpt.c bsz = c->pnode_sz; /* Size of nodes in level below */ c 701 fs/ubifs/lpt.c if (len + c->pnode_sz > c->leb_size) { c 702 fs/ubifs/lpt.c alen = ALIGN(len, c->min_io_size); c 703 
fs/ubifs/lpt.c set_ltab(c, lnum, c->leb_size - alen, alen - len); c 705 fs/ubifs/lpt.c err = ubifs_leb_change(c, lnum++, buf, alen); c 711 fs/ubifs/lpt.c ubifs_pack_pnode(c, p, pnode); c 712 fs/ubifs/lpt.c err = ubifs_shash_update(c, desc, p, c->pnode_sz); c 716 fs/ubifs/lpt.c p += c->pnode_sz; c 717 fs/ubifs/lpt.c len += c->pnode_sz; c 734 fs/ubifs/lpt.c if (len + c->nnode_sz > c->leb_size) { c 735 fs/ubifs/lpt.c alen = ALIGN(len, c->min_io_size); c 736 fs/ubifs/lpt.c set_ltab(c, lnum, c->leb_size - alen, c 739 fs/ubifs/lpt.c err = ubifs_leb_change(c, lnum++, buf, alen); c 747 fs/ubifs/lpt.c c->lpt_lnum = lnum; c 748 fs/ubifs/lpt.c c->lpt_offs = len; c 753 fs/ubifs/lpt.c if (boffs + bsz > c->leb_size) { c 767 fs/ubifs/lpt.c ubifs_pack_nnode(c, p, nnode); c 768 fs/ubifs/lpt.c p += c->nnode_sz; c 769 fs/ubifs/lpt.c len += c->nnode_sz; c 776 fs/ubifs/lpt.c bsz = c->nnode_sz; c 782 fs/ubifs/lpt.c if (len + c->lsave_sz > c->leb_size) { c 783 fs/ubifs/lpt.c alen = ALIGN(len, c->min_io_size); c 784 fs/ubifs/lpt.c set_ltab(c, lnum, c->leb_size - alen, alen - len); c 786 fs/ubifs/lpt.c err = ubifs_leb_change(c, lnum++, buf, alen); c 793 fs/ubifs/lpt.c c->lsave_lnum = lnum; c 794 fs/ubifs/lpt.c c->lsave_offs = len; c 796 fs/ubifs/lpt.c for (i = 0; i < c->lsave_cnt && i < *main_lebs; i++) c 797 fs/ubifs/lpt.c lsave[i] = c->main_first + i; c 798 fs/ubifs/lpt.c for (; i < c->lsave_cnt; i++) c 799 fs/ubifs/lpt.c lsave[i] = c->main_first; c 801 fs/ubifs/lpt.c ubifs_pack_lsave(c, p, lsave); c 802 fs/ubifs/lpt.c p += c->lsave_sz; c 803 fs/ubifs/lpt.c len += c->lsave_sz; c 807 fs/ubifs/lpt.c if (len + c->ltab_sz > c->leb_size) { c 808 fs/ubifs/lpt.c alen = ALIGN(len, c->min_io_size); c 809 fs/ubifs/lpt.c set_ltab(c, lnum, c->leb_size - alen, alen - len); c 811 fs/ubifs/lpt.c err = ubifs_leb_change(c, lnum++, buf, alen); c 818 fs/ubifs/lpt.c c->ltab_lnum = lnum; c 819 fs/ubifs/lpt.c c->ltab_offs = len; c 822 fs/ubifs/lpt.c len += c->ltab_sz; c 823 fs/ubifs/lpt.c alen = ALIGN(len, c->min_io_size); c 824 fs/ubifs/lpt.c set_ltab(c, lnum, c->leb_size - alen, alen - len); c 826 fs/ubifs/lpt.c ubifs_pack_ltab(c, p, ltab); c 827 fs/ubifs/lpt.c p += c->ltab_sz; c 831 fs/ubifs/lpt.c err = ubifs_leb_change(c, lnum, buf, alen); c 835 fs/ubifs/lpt.c err = ubifs_shash_final(c, desc, hash); c 839 fs/ubifs/lpt.c c->nhead_lnum = lnum; c 840 fs/ubifs/lpt.c c->nhead_offs = ALIGN(len, c->min_io_size); c 842 fs/ubifs/lpt.c dbg_lp("space_bits %d", c->space_bits); c 843 fs/ubifs/lpt.c dbg_lp("lpt_lnum_bits %d", c->lpt_lnum_bits); c 844 fs/ubifs/lpt.c dbg_lp("lpt_offs_bits %d", c->lpt_offs_bits); c 845 fs/ubifs/lpt.c dbg_lp("lpt_spc_bits %d", c->lpt_spc_bits); c 846 fs/ubifs/lpt.c dbg_lp("pcnt_bits %d", c->pcnt_bits); c 847 fs/ubifs/lpt.c dbg_lp("lnum_bits %d", c->lnum_bits); c 848 fs/ubifs/lpt.c dbg_lp("pnode_sz %d", c->pnode_sz); c 849 fs/ubifs/lpt.c dbg_lp("nnode_sz %d", c->nnode_sz); c 850 fs/ubifs/lpt.c dbg_lp("ltab_sz %d", c->ltab_sz); c 851 fs/ubifs/lpt.c dbg_lp("lsave_sz %d", c->lsave_sz); c 852 fs/ubifs/lpt.c dbg_lp("lsave_cnt %d", c->lsave_cnt); c 853 fs/ubifs/lpt.c dbg_lp("lpt_hght %d", c->lpt_hght); c 854 fs/ubifs/lpt.c dbg_lp("big_lpt %d", c->big_lpt); c 855 fs/ubifs/lpt.c dbg_lp("LPT root is at %d:%d", c->lpt_lnum, c->lpt_offs); c 856 fs/ubifs/lpt.c dbg_lp("LPT head is at %d:%d", c->nhead_lnum, c->nhead_offs); c 857 fs/ubifs/lpt.c dbg_lp("LPT ltab is at %d:%d", c->ltab_lnum, c->ltab_offs); c 858 fs/ubifs/lpt.c if (c->big_lpt) c 859 fs/ubifs/lpt.c dbg_lp("LPT lsave is at %d:%d", c->lsave_lnum, c->lsave_offs); c 861 
fs/ubifs/lpt.c c->ltab = NULL; c 879 fs/ubifs/lpt.c static void update_cats(struct ubifs_info *c, struct ubifs_pnode *pnode) c 889 fs/ubifs/lpt.c ubifs_add_to_cat(c, &pnode->lprops[i], cat); c 903 fs/ubifs/lpt.c static void replace_cats(struct ubifs_info *c, struct ubifs_pnode *old_pnode, c 911 fs/ubifs/lpt.c ubifs_replace_cat(c, &old_pnode->lprops[i], c 924 fs/ubifs/lpt.c static int check_lpt_crc(const struct ubifs_info *c, void *buf, int len) c 930 fs/ubifs/lpt.c crc = ubifs_unpack_bits(c, &addr, &pos, UBIFS_LPT_CRC_BITS); c 934 fs/ubifs/lpt.c ubifs_err(c, "invalid crc in LPT node: crc %hx calc %hx", c 951 fs/ubifs/lpt.c static int check_lpt_type(const struct ubifs_info *c, uint8_t **addr, c 956 fs/ubifs/lpt.c node_type = ubifs_unpack_bits(c, addr, pos, UBIFS_LPT_TYPE_BITS); c 958 fs/ubifs/lpt.c ubifs_err(c, "invalid type (%d) in LPT node type %d", c 974 fs/ubifs/lpt.c static int unpack_pnode(const struct ubifs_info *c, void *buf, c 980 fs/ubifs/lpt.c err = check_lpt_type(c, &addr, &pos, UBIFS_LPT_PNODE); c 983 fs/ubifs/lpt.c if (c->big_lpt) c 984 fs/ubifs/lpt.c pnode->num = ubifs_unpack_bits(c, &addr, &pos, c->pcnt_bits); c 988 fs/ubifs/lpt.c lprops->free = ubifs_unpack_bits(c, &addr, &pos, c->space_bits); c 990 fs/ubifs/lpt.c lprops->dirty = ubifs_unpack_bits(c, &addr, &pos, c->space_bits); c 993 fs/ubifs/lpt.c if (ubifs_unpack_bits(c, &addr, &pos, 1)) c 997 fs/ubifs/lpt.c lprops->flags |= ubifs_categorize_lprops(c, lprops); c 999 fs/ubifs/lpt.c err = check_lpt_crc(c, buf, c->pnode_sz); c 1011 fs/ubifs/lpt.c int ubifs_unpack_nnode(const struct ubifs_info *c, void *buf, c 1017 fs/ubifs/lpt.c err = check_lpt_type(c, &addr, &pos, UBIFS_LPT_NNODE); c 1020 fs/ubifs/lpt.c if (c->big_lpt) c 1021 fs/ubifs/lpt.c nnode->num = ubifs_unpack_bits(c, &addr, &pos, c->pcnt_bits); c 1025 fs/ubifs/lpt.c lnum = ubifs_unpack_bits(c, &addr, &pos, c->lpt_lnum_bits) + c 1026 fs/ubifs/lpt.c c->lpt_first; c 1027 fs/ubifs/lpt.c if (lnum == c->lpt_last + 1) c 1030 fs/ubifs/lpt.c nnode->nbranch[i].offs = ubifs_unpack_bits(c, &addr, &pos, c 1031 fs/ubifs/lpt.c c->lpt_offs_bits); c 1033 fs/ubifs/lpt.c err = check_lpt_crc(c, buf, c->nnode_sz); c 1044 fs/ubifs/lpt.c static int unpack_ltab(const struct ubifs_info *c, void *buf) c 1049 fs/ubifs/lpt.c err = check_lpt_type(c, &addr, &pos, UBIFS_LPT_LTAB); c 1052 fs/ubifs/lpt.c for (i = 0; i < c->lpt_lebs; i++) { c 1053 fs/ubifs/lpt.c int free = ubifs_unpack_bits(c, &addr, &pos, c->lpt_spc_bits); c 1054 fs/ubifs/lpt.c int dirty = ubifs_unpack_bits(c, &addr, &pos, c->lpt_spc_bits); c 1056 fs/ubifs/lpt.c if (free < 0 || free > c->leb_size || dirty < 0 || c 1057 fs/ubifs/lpt.c dirty > c->leb_size || free + dirty > c->leb_size) c 1060 fs/ubifs/lpt.c c->ltab[i].free = free; c 1061 fs/ubifs/lpt.c c->ltab[i].dirty = dirty; c 1062 fs/ubifs/lpt.c c->ltab[i].tgc = 0; c 1063 fs/ubifs/lpt.c c->ltab[i].cmt = 0; c 1065 fs/ubifs/lpt.c err = check_lpt_crc(c, buf, c->ltab_sz); c 1076 fs/ubifs/lpt.c static int unpack_lsave(const struct ubifs_info *c, void *buf) c 1081 fs/ubifs/lpt.c err = check_lpt_type(c, &addr, &pos, UBIFS_LPT_LSAVE); c 1084 fs/ubifs/lpt.c for (i = 0; i < c->lsave_cnt; i++) { c 1085 fs/ubifs/lpt.c int lnum = ubifs_unpack_bits(c, &addr, &pos, c->lnum_bits); c 1087 fs/ubifs/lpt.c if (lnum < c->main_first || lnum >= c->leb_cnt) c 1089 fs/ubifs/lpt.c c->lsave[i] = lnum; c 1091 fs/ubifs/lpt.c err = check_lpt_crc(c, buf, c->lsave_sz); c 1104 fs/ubifs/lpt.c static int validate_nnode(const struct ubifs_info *c, struct ubifs_nnode *nnode, c 1109 fs/ubifs/lpt.c if (c->big_lpt) { c 
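
check_lpt_crc() above recomputes a 16-bit CRC over a packed LPT node, skipping the leading CRC bytes, and compares it with the stored value. The sketch below uses a bitwise CRC-16 with the reflected polynomial 0xA001 (the polynomial of the kernel's crc16()); the seed, byte order and exact framing are assumptions, not the on-flash definition.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define LPT_CRC_BYTES 2

static uint16_t crc16_calc(uint16_t crc, const uint8_t *buf, size_t len)
{
    size_t i;
    int b;

    for (i = 0; i < len; i++) {
        crc ^= buf[i];
        for (b = 0; b < 8; b++)
            crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
    }
    return crc;
}

/* Compare the stored CRC (assumed little-endian in the first two bytes)
 * against a CRC computed over the rest of the node. */
static int check_node_crc(const uint8_t *node, size_t len)
{
    uint16_t stored = (uint16_t)(node[0] | (node[1] << 8));
    uint16_t calc = crc16_calc(0xFFFF, node + LPT_CRC_BYTES,
                               len - LPT_CRC_BYTES);

    return stored == calc;
}

int main(void)
{
    uint8_t node[16] = { 0 };
    uint16_t crc;

    memset(node + LPT_CRC_BYTES, 0xAB, sizeof(node) - LPT_CRC_BYTES);
    crc = crc16_calc(0xFFFF, node + LPT_CRC_BYTES, sizeof(node) - LPT_CRC_BYTES);
    node[0] = crc & 0xff;
    node[1] = crc >> 8;
    printf("crc ok: %d\n", check_node_crc(node, sizeof(node)));
    return 0;
}
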
1110 fs/ubifs/lpt.c int num = calc_nnode_num_from_parent(c, parent, iip); c 1115 fs/ubifs/lpt.c lvl = parent ? parent->level - 1 : c->lpt_hght; c 1119 fs/ubifs/lpt.c max_offs = c->leb_size - c->pnode_sz; c 1121 fs/ubifs/lpt.c max_offs = c->leb_size - c->nnode_sz; c 1131 fs/ubifs/lpt.c if (lnum < c->lpt_first || lnum > c->lpt_last) c 1148 fs/ubifs/lpt.c static int validate_pnode(const struct ubifs_info *c, struct ubifs_pnode *pnode, c 1153 fs/ubifs/lpt.c if (c->big_lpt) { c 1154 fs/ubifs/lpt.c int num = calc_pnode_num_from_parent(c, parent, iip); c 1163 fs/ubifs/lpt.c if (free < 0 || free > c->leb_size || free % c->min_io_size || c 1166 fs/ubifs/lpt.c if (dirty < 0 || dirty > c->leb_size || (dirty & 7)) c 1168 fs/ubifs/lpt.c if (dirty + free > c->leb_size) c 1182 fs/ubifs/lpt.c static void set_pnode_lnum(const struct ubifs_info *c, c 1187 fs/ubifs/lpt.c lnum = (pnode->num << UBIFS_LPT_FANOUT_SHIFT) + c->main_first; c 1189 fs/ubifs/lpt.c if (lnum >= c->leb_cnt) c 1203 fs/ubifs/lpt.c int ubifs_read_nnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip) c 1207 fs/ubifs/lpt.c void *buf = c->lpt_nod_buf; c 1215 fs/ubifs/lpt.c lnum = c->lpt_lnum; c 1216 fs/ubifs/lpt.c offs = c->lpt_offs; c 1230 fs/ubifs/lpt.c if (c->big_lpt) c 1231 fs/ubifs/lpt.c nnode->num = calc_nnode_num_from_parent(c, parent, iip); c 1233 fs/ubifs/lpt.c err = ubifs_leb_read(c, lnum, buf, offs, c->nnode_sz, 1); c 1236 fs/ubifs/lpt.c err = ubifs_unpack_nnode(c, buf, nnode); c 1240 fs/ubifs/lpt.c err = validate_nnode(c, nnode, parent, iip); c 1243 fs/ubifs/lpt.c if (!c->big_lpt) c 1244 fs/ubifs/lpt.c nnode->num = calc_nnode_num_from_parent(c, parent, iip); c 1249 fs/ubifs/lpt.c c->nroot = nnode; c 1250 fs/ubifs/lpt.c nnode->level = c->lpt_hght; c 1257 fs/ubifs/lpt.c ubifs_err(c, "error %d reading nnode at %d:%d", err, lnum, offs); c 1271 fs/ubifs/lpt.c static int read_pnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip) c 1275 fs/ubifs/lpt.c void *buf = c->lpt_nod_buf; c 1293 fs/ubifs/lpt.c if (c->big_lpt) c 1294 fs/ubifs/lpt.c pnode->num = calc_pnode_num_from_parent(c, parent, iip); c 1298 fs/ubifs/lpt.c lprops->free = c->leb_size; c 1299 fs/ubifs/lpt.c lprops->flags = ubifs_categorize_lprops(c, lprops); c 1302 fs/ubifs/lpt.c err = ubifs_leb_read(c, lnum, buf, offs, c->pnode_sz, 1); c 1305 fs/ubifs/lpt.c err = unpack_pnode(c, buf, pnode); c 1309 fs/ubifs/lpt.c err = validate_pnode(c, pnode, parent, iip); c 1312 fs/ubifs/lpt.c if (!c->big_lpt) c 1313 fs/ubifs/lpt.c pnode->num = calc_pnode_num_from_parent(c, parent, iip); c 1317 fs/ubifs/lpt.c set_pnode_lnum(c, pnode); c 1318 fs/ubifs/lpt.c c->pnodes_have += 1; c 1322 fs/ubifs/lpt.c ubifs_err(c, "error %d reading pnode at %d:%d", err, lnum, offs); c 1323 fs/ubifs/lpt.c ubifs_dump_pnode(c, pnode, parent, iip); c 1325 fs/ubifs/lpt.c ubifs_err(c, "calc num: %d", calc_pnode_num_from_parent(c, parent, iip)); c 1336 fs/ubifs/lpt.c static int read_ltab(struct ubifs_info *c) c 1341 fs/ubifs/lpt.c buf = vmalloc(c->ltab_sz); c 1344 fs/ubifs/lpt.c err = ubifs_leb_read(c, c->ltab_lnum, buf, c->ltab_offs, c->ltab_sz, 1); c 1347 fs/ubifs/lpt.c err = unpack_ltab(c, buf); c 1359 fs/ubifs/lpt.c static int read_lsave(struct ubifs_info *c) c 1364 fs/ubifs/lpt.c buf = vmalloc(c->lsave_sz); c 1367 fs/ubifs/lpt.c err = ubifs_leb_read(c, c->lsave_lnum, buf, c->lsave_offs, c 1368 fs/ubifs/lpt.c c->lsave_sz, 1); c 1371 fs/ubifs/lpt.c err = unpack_lsave(c, buf); c 1374 fs/ubifs/lpt.c for (i = 0; i < c->lsave_cnt; i++) { c 1375 fs/ubifs/lpt.c int lnum = c->lsave[i]; c 1382 
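
set_pnode_lnum() above (and the ubifs_lpt_lookup() entries that follow) convert between a main-area LEB number and a (pnode number, slot) pair with a shift and a mask, because each pnode covers a fixed fan-out of consecutive LEBs. A tiny sketch of both directions, assuming the usual fan-out of 4.

#include <stdio.h>

#define FANOUT_SHIFT 2             /* assumed UBIFS_LPT_FANOUT_SHIFT */
#define FANOUT       (1 << FANOUT_SHIFT)

int main(void)
{
    int main_first = 20;           /* first main-area LEB (example) */
    int lnum = 27;

    int i = lnum - main_first;     /* index within the main area */
    int pnum = i >> FANOUT_SHIFT;  /* which pnode */
    int slot = i & (FANOUT - 1);   /* which lprops slot inside it */

    /* ...and back again, as set_pnode_lnum() does for slot 0 onwards */
    int back = ((pnum << FANOUT_SHIFT) + slot) + main_first;

    printf("LEB %d -> pnode %d slot %d -> LEB %d\n", lnum, pnum, slot, back);
    return 0;
}
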
fs/ubifs/lpt.c if (lnum >= c->leb_cnt) c 1384 fs/ubifs/lpt.c lprops = ubifs_lpt_lookup(c, lnum); c 1404 fs/ubifs/lpt.c struct ubifs_nnode *ubifs_get_nnode(struct ubifs_info *c, c 1415 fs/ubifs/lpt.c err = ubifs_read_nnode(c, parent, iip); c 1430 fs/ubifs/lpt.c struct ubifs_pnode *ubifs_get_pnode(struct ubifs_info *c, c 1441 fs/ubifs/lpt.c err = read_pnode(c, parent, iip); c 1444 fs/ubifs/lpt.c update_cats(c, branch->pnode); c 1456 fs/ubifs/lpt.c struct ubifs_pnode *ubifs_pnode_lookup(struct ubifs_info *c, int i) c 1461 fs/ubifs/lpt.c if (!c->nroot) { c 1462 fs/ubifs/lpt.c err = ubifs_read_nnode(c, NULL, 0); c 1467 fs/ubifs/lpt.c nnode = c->nroot; c 1468 fs/ubifs/lpt.c shft = c->lpt_hght * UBIFS_LPT_FANOUT_SHIFT; c 1469 fs/ubifs/lpt.c for (h = 1; h < c->lpt_hght; h++) { c 1472 fs/ubifs/lpt.c nnode = ubifs_get_nnode(c, nnode, iip); c 1477 fs/ubifs/lpt.c return ubifs_get_pnode(c, nnode, iip); c 1488 fs/ubifs/lpt.c struct ubifs_lprops *ubifs_lpt_lookup(struct ubifs_info *c, int lnum) c 1493 fs/ubifs/lpt.c i = lnum - c->main_first; c 1494 fs/ubifs/lpt.c pnode = ubifs_pnode_lookup(c, i >> UBIFS_LPT_FANOUT_SHIFT); c 1511 fs/ubifs/lpt.c static struct ubifs_nnode *dirty_cow_nnode(struct ubifs_info *c, c 1520 fs/ubifs/lpt.c c->dirty_nn_cnt += 1; c 1521 fs/ubifs/lpt.c ubifs_add_nnode_dirt(c, nnode); c 1543 fs/ubifs/lpt.c ubifs_assert(c, !test_bit(OBSOLETE_CNODE, &nnode->flags)); c 1546 fs/ubifs/lpt.c c->dirty_nn_cnt += 1; c 1547 fs/ubifs/lpt.c ubifs_add_nnode_dirt(c, nnode); c 1551 fs/ubifs/lpt.c c->nroot = n; c 1562 fs/ubifs/lpt.c static struct ubifs_pnode *dirty_cow_pnode(struct ubifs_info *c, c 1570 fs/ubifs/lpt.c c->dirty_pn_cnt += 1; c 1571 fs/ubifs/lpt.c add_pnode_dirt(c, pnode); c 1584 fs/ubifs/lpt.c replace_cats(c, pnode, p); c 1586 fs/ubifs/lpt.c ubifs_assert(c, !test_bit(OBSOLETE_CNODE, &pnode->flags)); c 1589 fs/ubifs/lpt.c c->dirty_pn_cnt += 1; c 1590 fs/ubifs/lpt.c add_pnode_dirt(c, pnode); c 1603 fs/ubifs/lpt.c struct ubifs_lprops *ubifs_lpt_lookup_dirty(struct ubifs_info *c, int lnum) c 1609 fs/ubifs/lpt.c if (!c->nroot) { c 1610 fs/ubifs/lpt.c err = ubifs_read_nnode(c, NULL, 0); c 1614 fs/ubifs/lpt.c nnode = c->nroot; c 1615 fs/ubifs/lpt.c nnode = dirty_cow_nnode(c, nnode); c 1618 fs/ubifs/lpt.c i = lnum - c->main_first; c 1619 fs/ubifs/lpt.c shft = c->lpt_hght * UBIFS_LPT_FANOUT_SHIFT; c 1620 fs/ubifs/lpt.c for (h = 1; h < c->lpt_hght; h++) { c 1623 fs/ubifs/lpt.c nnode = ubifs_get_nnode(c, nnode, iip); c 1626 fs/ubifs/lpt.c nnode = dirty_cow_nnode(c, nnode); c 1631 fs/ubifs/lpt.c pnode = ubifs_get_pnode(c, nnode, iip); c 1634 fs/ubifs/lpt.c pnode = dirty_cow_pnode(c, pnode); c 1641 fs/ubifs/lpt.c ubifs_assert(c, test_bit(DIRTY_CNODE, &pnode->flags)); c 1653 fs/ubifs/lpt.c int ubifs_lpt_calc_hash(struct ubifs_info *c, u8 *hash) c 1659 fs/ubifs/lpt.c int bufsiz = max_t(int, c->nnode_sz, c->pnode_sz); c 1663 fs/ubifs/lpt.c if (!ubifs_authenticated(c)) c 1666 fs/ubifs/lpt.c if (!c->nroot) { c 1667 fs/ubifs/lpt.c err = ubifs_read_nnode(c, NULL, 0); c 1672 fs/ubifs/lpt.c desc = ubifs_hash_get_desc(c); c 1682 fs/ubifs/lpt.c cnode = (struct ubifs_cnode *)c->nroot; c 1695 fs/ubifs/lpt.c nnode = ubifs_get_nnode(c, nn, iip); c 1714 fs/ubifs/lpt.c pnode = ubifs_get_pnode(c, nn, i); c 1720 fs/ubifs/lpt.c ubifs_pack_pnode(c, buf, pnode); c 1721 fs/ubifs/lpt.c err = ubifs_shash_update(c, desc, buf, c 1722 fs/ubifs/lpt.c c->pnode_sz); c 1732 fs/ubifs/lpt.c err = ubifs_shash_final(c, desc, hash); c 1748 fs/ubifs/lpt.c static int lpt_check_hash(struct ubifs_info *c) c 1753 fs/ubifs/lpt.c if 
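
dirty_cow_nnode() and dirty_cow_pnode() above apply copy-on-write before an LPT node is modified: an already-dirty node is changed in place, while a clean one is duplicated, marked dirty, accounted, and hooked back into its parent (or c->nroot). The sketch below shows that pattern on a toy node type.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct node {
    int dirty;
    int payload;
};

/* Return a node that is safe to modify, duplicating it first if it is
 * still clean; *slot is the parent's pointer to the node. */
static struct node *dirty_cow(struct node **slot, int *dirty_cnt)
{
    struct node *n = *slot;
    struct node *copy;

    if (n->dirty)
        return n;                  /* already ours to modify */

    copy = malloc(sizeof(*copy));
    if (!copy)
        return NULL;
    memcpy(copy, n, sizeof(*copy));
    copy->dirty = 1;
    *slot = copy;                  /* parent now points at the dirty copy */
    (*dirty_cnt)++;                /* like c->dirty_nn_cnt / dirty_pn_cnt */
    return copy;
}

int main(void)
{
    struct node clean = { 0, 42 };
    struct node *root = &clean;
    int dirty_cnt = 0;
    struct node *n = dirty_cow(&root, &dirty_cnt);

    if (!n)
        return 1;
    n->payload = 43;
    printf("clean copy still %d, dirty copy %d (dirty_cnt %d)\n",
           clean.payload, root->payload, dirty_cnt);
    free(root);
    return 0;
}
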
(!ubifs_authenticated(c)) c 1756 fs/ubifs/lpt.c err = ubifs_lpt_calc_hash(c, hash); c 1760 fs/ubifs/lpt.c if (ubifs_check_hash(c, c->mst_node->hash_lpt, hash)) { c 1762 fs/ubifs/lpt.c ubifs_err(c, "Failed to authenticate LPT"); c 1776 fs/ubifs/lpt.c static int lpt_init_rd(struct ubifs_info *c) c 1780 fs/ubifs/lpt.c c->ltab = vmalloc(array_size(sizeof(struct ubifs_lpt_lprops), c 1781 fs/ubifs/lpt.c c->lpt_lebs)); c 1782 fs/ubifs/lpt.c if (!c->ltab) c 1785 fs/ubifs/lpt.c i = max_t(int, c->nnode_sz, c->pnode_sz); c 1786 fs/ubifs/lpt.c c->lpt_nod_buf = kmalloc(i, GFP_KERNEL); c 1787 fs/ubifs/lpt.c if (!c->lpt_nod_buf) c 1791 fs/ubifs/lpt.c c->lpt_heap[i].arr = kmalloc_array(LPT_HEAP_SZ, c 1794 fs/ubifs/lpt.c if (!c->lpt_heap[i].arr) c 1796 fs/ubifs/lpt.c c->lpt_heap[i].cnt = 0; c 1797 fs/ubifs/lpt.c c->lpt_heap[i].max_cnt = LPT_HEAP_SZ; c 1800 fs/ubifs/lpt.c c->dirty_idx.arr = kmalloc_array(LPT_HEAP_SZ, sizeof(void *), c 1802 fs/ubifs/lpt.c if (!c->dirty_idx.arr) c 1804 fs/ubifs/lpt.c c->dirty_idx.cnt = 0; c 1805 fs/ubifs/lpt.c c->dirty_idx.max_cnt = LPT_HEAP_SZ; c 1807 fs/ubifs/lpt.c err = read_ltab(c); c 1811 fs/ubifs/lpt.c err = lpt_check_hash(c); c 1815 fs/ubifs/lpt.c dbg_lp("space_bits %d", c->space_bits); c 1816 fs/ubifs/lpt.c dbg_lp("lpt_lnum_bits %d", c->lpt_lnum_bits); c 1817 fs/ubifs/lpt.c dbg_lp("lpt_offs_bits %d", c->lpt_offs_bits); c 1818 fs/ubifs/lpt.c dbg_lp("lpt_spc_bits %d", c->lpt_spc_bits); c 1819 fs/ubifs/lpt.c dbg_lp("pcnt_bits %d", c->pcnt_bits); c 1820 fs/ubifs/lpt.c dbg_lp("lnum_bits %d", c->lnum_bits); c 1821 fs/ubifs/lpt.c dbg_lp("pnode_sz %d", c->pnode_sz); c 1822 fs/ubifs/lpt.c dbg_lp("nnode_sz %d", c->nnode_sz); c 1823 fs/ubifs/lpt.c dbg_lp("ltab_sz %d", c->ltab_sz); c 1824 fs/ubifs/lpt.c dbg_lp("lsave_sz %d", c->lsave_sz); c 1825 fs/ubifs/lpt.c dbg_lp("lsave_cnt %d", c->lsave_cnt); c 1826 fs/ubifs/lpt.c dbg_lp("lpt_hght %d", c->lpt_hght); c 1827 fs/ubifs/lpt.c dbg_lp("big_lpt %d", c->big_lpt); c 1828 fs/ubifs/lpt.c dbg_lp("LPT root is at %d:%d", c->lpt_lnum, c->lpt_offs); c 1829 fs/ubifs/lpt.c dbg_lp("LPT head is at %d:%d", c->nhead_lnum, c->nhead_offs); c 1830 fs/ubifs/lpt.c dbg_lp("LPT ltab is at %d:%d", c->ltab_lnum, c->ltab_offs); c 1831 fs/ubifs/lpt.c if (c->big_lpt) c 1832 fs/ubifs/lpt.c dbg_lp("LPT lsave is at %d:%d", c->lsave_lnum, c->lsave_offs); c 1845 fs/ubifs/lpt.c static int lpt_init_wr(struct ubifs_info *c) c 1849 fs/ubifs/lpt.c c->ltab_cmt = vmalloc(array_size(sizeof(struct ubifs_lpt_lprops), c 1850 fs/ubifs/lpt.c c->lpt_lebs)); c 1851 fs/ubifs/lpt.c if (!c->ltab_cmt) c 1854 fs/ubifs/lpt.c c->lpt_buf = vmalloc(c->leb_size); c 1855 fs/ubifs/lpt.c if (!c->lpt_buf) c 1858 fs/ubifs/lpt.c if (c->big_lpt) { c 1859 fs/ubifs/lpt.c c->lsave = kmalloc_array(c->lsave_cnt, sizeof(int), GFP_NOFS); c 1860 fs/ubifs/lpt.c if (!c->lsave) c 1862 fs/ubifs/lpt.c err = read_lsave(c); c 1867 fs/ubifs/lpt.c for (i = 0; i < c->lpt_lebs; i++) c 1868 fs/ubifs/lpt.c if (c->ltab[i].free == c->leb_size) { c 1869 fs/ubifs/lpt.c err = ubifs_leb_unmap(c, i + c->lpt_first); c 1889 fs/ubifs/lpt.c int ubifs_lpt_init(struct ubifs_info *c, int rd, int wr) c 1894 fs/ubifs/lpt.c err = lpt_init_rd(c); c 1900 fs/ubifs/lpt.c err = lpt_init_wr(c); c 1909 fs/ubifs/lpt.c ubifs_lpt_free(c, 1); c 1911 fs/ubifs/lpt.c ubifs_lpt_free(c, 0); c 1950 fs/ubifs/lpt.c static struct ubifs_nnode *scan_get_nnode(struct ubifs_info *c, c 1956 fs/ubifs/lpt.c void *buf = c->lpt_nod_buf; c 1977 fs/ubifs/lpt.c if (c->big_lpt) c 1978 fs/ubifs/lpt.c nnode->num = calc_nnode_num_from_parent(c, parent, iip); c 1980 
fs/ubifs/lpt.c err = ubifs_leb_read(c, branch->lnum, buf, branch->offs, c 1981 fs/ubifs/lpt.c c->nnode_sz, 1); c 1984 fs/ubifs/lpt.c err = ubifs_unpack_nnode(c, buf, nnode); c 1988 fs/ubifs/lpt.c err = validate_nnode(c, nnode, parent, iip); c 1991 fs/ubifs/lpt.c if (!c->big_lpt) c 1992 fs/ubifs/lpt.c nnode->num = calc_nnode_num_from_parent(c, parent, iip); c 2009 fs/ubifs/lpt.c static struct ubifs_pnode *scan_get_pnode(struct ubifs_info *c, c 2015 fs/ubifs/lpt.c void *buf = c->lpt_nod_buf; c 2037 fs/ubifs/lpt.c if (c->big_lpt) c 2038 fs/ubifs/lpt.c pnode->num = calc_pnode_num_from_parent(c, parent, iip); c 2042 fs/ubifs/lpt.c lprops->free = c->leb_size; c 2043 fs/ubifs/lpt.c lprops->flags = ubifs_categorize_lprops(c, lprops); c 2046 fs/ubifs/lpt.c ubifs_assert(c, branch->lnum >= c->lpt_first && c 2047 fs/ubifs/lpt.c branch->lnum <= c->lpt_last); c 2048 fs/ubifs/lpt.c ubifs_assert(c, branch->offs >= 0 && branch->offs < c->leb_size); c 2049 fs/ubifs/lpt.c err = ubifs_leb_read(c, branch->lnum, buf, branch->offs, c 2050 fs/ubifs/lpt.c c->pnode_sz, 1); c 2053 fs/ubifs/lpt.c err = unpack_pnode(c, buf, pnode); c 2057 fs/ubifs/lpt.c err = validate_pnode(c, pnode, parent, iip); c 2060 fs/ubifs/lpt.c if (!c->big_lpt) c 2061 fs/ubifs/lpt.c pnode->num = calc_pnode_num_from_parent(c, parent, iip); c 2064 fs/ubifs/lpt.c set_pnode_lnum(c, pnode); c 2078 fs/ubifs/lpt.c int ubifs_lpt_scan_nolock(struct ubifs_info *c, int start_lnum, int end_lnum, c 2088 fs/ubifs/lpt.c if (start_lnum >= c->leb_cnt) c 2089 fs/ubifs/lpt.c start_lnum = c->main_first; c 2092 fs/ubifs/lpt.c ubifs_assert(c, start_lnum >= c->main_first && start_lnum < c->leb_cnt); c 2093 fs/ubifs/lpt.c ubifs_assert(c, end_lnum >= c->main_first && end_lnum < c->leb_cnt); c 2095 fs/ubifs/lpt.c if (!c->nroot) { c 2096 fs/ubifs/lpt.c err = ubifs_read_nnode(c, NULL, 0); c 2101 fs/ubifs/lpt.c path = kmalloc_array(c->lpt_hght + 1, sizeof(struct lpt_scan_node), c 2106 fs/ubifs/lpt.c path[0].ptr.nnode = c->nroot; c 2110 fs/ubifs/lpt.c nnode = c->nroot; c 2111 fs/ubifs/lpt.c i = start_lnum - c->main_first; c 2112 fs/ubifs/lpt.c shft = c->lpt_hght * UBIFS_LPT_FANOUT_SHIFT; c 2113 fs/ubifs/lpt.c for (h = 1; h < c->lpt_hght; h++) { c 2116 fs/ubifs/lpt.c nnode = scan_get_nnode(c, path + h, nnode, iip); c 2123 fs/ubifs/lpt.c pnode = scan_get_pnode(c, path + h, nnode, iip); c 2135 fs/ubifs/lpt.c ret = scan_cb(c, lprops, path[h].in_tree, data); c 2142 fs/ubifs/lpt.c for (h = 1; h < c->lpt_hght; h++) { c 2160 fs/ubifs/lpt.c ubifs_ensure_cat(c, lprops); c 2174 fs/ubifs/lpt.c update_cats(c, pnode); c 2175 fs/ubifs/lpt.c c->pnodes_have += 1; c 2177 fs/ubifs/lpt.c err = dbg_check_lpt_nodes(c, (struct ubifs_cnode *) c 2178 fs/ubifs/lpt.c c->nroot, 0, 0); c 2181 fs/ubifs/lpt.c err = dbg_check_cats(c); c 2198 fs/ubifs/lpt.c if (lnum + 1 >= c->leb_cnt) { c 2200 fs/ubifs/lpt.c start_lnum = c->main_first; c 2212 fs/ubifs/lpt.c ubifs_assert(c, h >= 0); c 2222 fs/ubifs/lpt.c for (; h < c->lpt_hght; h++) { c 2223 fs/ubifs/lpt.c nnode = scan_get_nnode(c, path + h, nnode, iip); c 2230 fs/ubifs/lpt.c pnode = scan_get_pnode(c, path + h, nnode, iip); c 2250 fs/ubifs/lpt.c static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, c 2256 fs/ubifs/lpt.c ubifs_err(c, "pnode num %d expected %d parent num %d iip %d", c 2263 fs/ubifs/lpt.c c->main_first; c 2268 fs/ubifs/lpt.c if (lnum >= c->leb_cnt) c 2271 fs/ubifs/lpt.c ubifs_err(c, "bad LEB number %d expected %d", c 2277 fs/ubifs/lpt.c ubifs_err(c, "LEB %d taken but not uncat %d", c 2290 fs/ubifs/lpt.c ubifs_err(c, "LEB 
%d index but cat %d", c 2303 fs/ubifs/lpt.c ubifs_err(c, "LEB %d not index but cat %d", c 2310 fs/ubifs/lpt.c list = &c->uncat_list; c 2313 fs/ubifs/lpt.c list = &c->empty_list; c 2316 fs/ubifs/lpt.c list = &c->freeable_list; c 2319 fs/ubifs/lpt.c list = &c->frdi_idx_list; c 2327 fs/ubifs/lpt.c heap = &c->lpt_heap[cat - 1]; c 2344 fs/ubifs/lpt.c ubifs_err(c, "LEB %d cat %d not found in cat heap/list", c 2350 fs/ubifs/lpt.c if (lprops->free != c->leb_size) { c 2351 fs/ubifs/lpt.c ubifs_err(c, "LEB %d cat %d free %d dirty %d", c 2359 fs/ubifs/lpt.c if (lprops->free + lprops->dirty != c->leb_size) { c 2360 fs/ubifs/lpt.c ubifs_err(c, "LEB %d cat %d free %d dirty %d", c 2380 fs/ubifs/lpt.c int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode, c 2387 fs/ubifs/lpt.c if (!dbg_is_chk_lprops(c)) c 2391 fs/ubifs/lpt.c ubifs_assert(c, row >= 0); c 2397 fs/ubifs/lpt.c ubifs_err(c, "nnode num %d expected %d parent num %d iip %d", c 2424 fs/ubifs/lpt.c err = dbg_chk_pnode(c, pnode, col); c 21 fs/ubifs/lpt_commit.c static int dbg_populate_lsave(struct ubifs_info *c); c 30 fs/ubifs/lpt_commit.c static struct ubifs_cnode *first_dirty_cnode(const struct ubifs_info *c, struct ubifs_nnode *nnode) c 32 fs/ubifs/lpt_commit.c ubifs_assert(c, nnode); c 61 fs/ubifs/lpt_commit.c static struct ubifs_cnode *next_dirty_cnode(const struct ubifs_info *c, struct ubifs_cnode *cnode) c 66 fs/ubifs/lpt_commit.c ubifs_assert(c, cnode); c 76 fs/ubifs/lpt_commit.c return first_dirty_cnode(c, (struct ubifs_nnode *)cnode); c 88 fs/ubifs/lpt_commit.c static int get_cnodes_to_commit(struct ubifs_info *c) c 93 fs/ubifs/lpt_commit.c if (!c->nroot) c 96 fs/ubifs/lpt_commit.c if (!test_bit(DIRTY_CNODE, &c->nroot->flags)) c 99 fs/ubifs/lpt_commit.c c->lpt_cnext = first_dirty_cnode(c, c->nroot); c 100 fs/ubifs/lpt_commit.c cnode = c->lpt_cnext; c 105 fs/ubifs/lpt_commit.c ubifs_assert(c, !test_bit(COW_CNODE, &cnode->flags)); c 107 fs/ubifs/lpt_commit.c cnext = next_dirty_cnode(c, cnode); c 109 fs/ubifs/lpt_commit.c cnode->cnext = c->lpt_cnext; c 118 fs/ubifs/lpt_commit.c ubifs_assert(c, cnt == c->dirty_nn_cnt + c->dirty_pn_cnt); c 129 fs/ubifs/lpt_commit.c static void upd_ltab(struct ubifs_info *c, int lnum, int free, int dirty) c 132 fs/ubifs/lpt_commit.c lnum, c->ltab[lnum - c->lpt_first].free, c 133 fs/ubifs/lpt_commit.c c->ltab[lnum - c->lpt_first].dirty, free, dirty); c 134 fs/ubifs/lpt_commit.c ubifs_assert(c, lnum >= c->lpt_first && lnum <= c->lpt_last); c 135 fs/ubifs/lpt_commit.c c->ltab[lnum - c->lpt_first].free = free; c 136 fs/ubifs/lpt_commit.c c->ltab[lnum - c->lpt_first].dirty += dirty; c 149 fs/ubifs/lpt_commit.c static int alloc_lpt_leb(struct ubifs_info *c, int *lnum) c 153 fs/ubifs/lpt_commit.c n = *lnum - c->lpt_first + 1; c 154 fs/ubifs/lpt_commit.c for (i = n; i < c->lpt_lebs; i++) { c 155 fs/ubifs/lpt_commit.c if (c->ltab[i].tgc || c->ltab[i].cmt) c 157 fs/ubifs/lpt_commit.c if (c->ltab[i].free == c->leb_size) { c 158 fs/ubifs/lpt_commit.c c->ltab[i].cmt = 1; c 159 fs/ubifs/lpt_commit.c *lnum = i + c->lpt_first; c 165 fs/ubifs/lpt_commit.c if (c->ltab[i].tgc || c->ltab[i].cmt) c 167 fs/ubifs/lpt_commit.c if (c->ltab[i].free == c->leb_size) { c 168 fs/ubifs/lpt_commit.c c->ltab[i].cmt = 1; c 169 fs/ubifs/lpt_commit.c *lnum = i + c->lpt_first; c 182 fs/ubifs/lpt_commit.c static int layout_cnodes(struct ubifs_info *c) c 187 fs/ubifs/lpt_commit.c err = dbg_chk_lpt_sz(c, 0, 0); c 190 fs/ubifs/lpt_commit.c cnode = c->lpt_cnext; c 193 fs/ubifs/lpt_commit.c lnum = c->nhead_lnum; c 194 
fs/ubifs/lpt_commit.c offs = c->nhead_offs; c 196 fs/ubifs/lpt_commit.c done_lsave = !c->big_lpt; c 198 fs/ubifs/lpt_commit.c if (!done_lsave && offs + c->lsave_sz <= c->leb_size) { c 200 fs/ubifs/lpt_commit.c c->lsave_lnum = lnum; c 201 fs/ubifs/lpt_commit.c c->lsave_offs = offs; c 202 fs/ubifs/lpt_commit.c offs += c->lsave_sz; c 203 fs/ubifs/lpt_commit.c dbg_chk_lpt_sz(c, 1, c->lsave_sz); c 206 fs/ubifs/lpt_commit.c if (offs + c->ltab_sz <= c->leb_size) { c 208 fs/ubifs/lpt_commit.c c->ltab_lnum = lnum; c 209 fs/ubifs/lpt_commit.c c->ltab_offs = offs; c 210 fs/ubifs/lpt_commit.c offs += c->ltab_sz; c 211 fs/ubifs/lpt_commit.c dbg_chk_lpt_sz(c, 1, c->ltab_sz); c 216 fs/ubifs/lpt_commit.c len = c->nnode_sz; c 217 fs/ubifs/lpt_commit.c c->dirty_nn_cnt -= 1; c 219 fs/ubifs/lpt_commit.c len = c->pnode_sz; c 220 fs/ubifs/lpt_commit.c c->dirty_pn_cnt -= 1; c 222 fs/ubifs/lpt_commit.c while (offs + len > c->leb_size) { c 223 fs/ubifs/lpt_commit.c alen = ALIGN(offs, c->min_io_size); c 224 fs/ubifs/lpt_commit.c upd_ltab(c, lnum, c->leb_size - alen, alen - offs); c 225 fs/ubifs/lpt_commit.c dbg_chk_lpt_sz(c, 2, c->leb_size - offs); c 226 fs/ubifs/lpt_commit.c err = alloc_lpt_leb(c, &lnum); c 230 fs/ubifs/lpt_commit.c ubifs_assert(c, lnum >= c->lpt_first && c 231 fs/ubifs/lpt_commit.c lnum <= c->lpt_last); c 235 fs/ubifs/lpt_commit.c c->lsave_lnum = lnum; c 236 fs/ubifs/lpt_commit.c c->lsave_offs = offs; c 237 fs/ubifs/lpt_commit.c offs += c->lsave_sz; c 238 fs/ubifs/lpt_commit.c dbg_chk_lpt_sz(c, 1, c->lsave_sz); c 243 fs/ubifs/lpt_commit.c c->ltab_lnum = lnum; c 244 fs/ubifs/lpt_commit.c c->ltab_offs = offs; c 245 fs/ubifs/lpt_commit.c offs += c->ltab_sz; c 246 fs/ubifs/lpt_commit.c dbg_chk_lpt_sz(c, 1, c->ltab_sz); c 255 fs/ubifs/lpt_commit.c c->lpt_lnum = lnum; c 256 fs/ubifs/lpt_commit.c c->lpt_offs = offs; c 259 fs/ubifs/lpt_commit.c dbg_chk_lpt_sz(c, 1, len); c 261 fs/ubifs/lpt_commit.c } while (cnode && cnode != c->lpt_cnext); c 265 fs/ubifs/lpt_commit.c if (offs + c->lsave_sz > c->leb_size) { c 266 fs/ubifs/lpt_commit.c alen = ALIGN(offs, c->min_io_size); c 267 fs/ubifs/lpt_commit.c upd_ltab(c, lnum, c->leb_size - alen, alen - offs); c 268 fs/ubifs/lpt_commit.c dbg_chk_lpt_sz(c, 2, c->leb_size - offs); c 269 fs/ubifs/lpt_commit.c err = alloc_lpt_leb(c, &lnum); c 273 fs/ubifs/lpt_commit.c ubifs_assert(c, lnum >= c->lpt_first && c 274 fs/ubifs/lpt_commit.c lnum <= c->lpt_last); c 277 fs/ubifs/lpt_commit.c c->lsave_lnum = lnum; c 278 fs/ubifs/lpt_commit.c c->lsave_offs = offs; c 279 fs/ubifs/lpt_commit.c offs += c->lsave_sz; c 280 fs/ubifs/lpt_commit.c dbg_chk_lpt_sz(c, 1, c->lsave_sz); c 285 fs/ubifs/lpt_commit.c if (offs + c->ltab_sz > c->leb_size) { c 286 fs/ubifs/lpt_commit.c alen = ALIGN(offs, c->min_io_size); c 287 fs/ubifs/lpt_commit.c upd_ltab(c, lnum, c->leb_size - alen, alen - offs); c 288 fs/ubifs/lpt_commit.c dbg_chk_lpt_sz(c, 2, c->leb_size - offs); c 289 fs/ubifs/lpt_commit.c err = alloc_lpt_leb(c, &lnum); c 293 fs/ubifs/lpt_commit.c ubifs_assert(c, lnum >= c->lpt_first && c 294 fs/ubifs/lpt_commit.c lnum <= c->lpt_last); c 296 fs/ubifs/lpt_commit.c c->ltab_lnum = lnum; c 297 fs/ubifs/lpt_commit.c c->ltab_offs = offs; c 298 fs/ubifs/lpt_commit.c offs += c->ltab_sz; c 299 fs/ubifs/lpt_commit.c dbg_chk_lpt_sz(c, 1, c->ltab_sz); c 302 fs/ubifs/lpt_commit.c alen = ALIGN(offs, c->min_io_size); c 303 fs/ubifs/lpt_commit.c upd_ltab(c, lnum, c->leb_size - alen, alen - offs); c 304 fs/ubifs/lpt_commit.c dbg_chk_lpt_sz(c, 4, alen - offs); c 305 fs/ubifs/lpt_commit.c err = dbg_chk_lpt_sz(c, 
3, alen); c 311 fs/ubifs/lpt_commit.c ubifs_err(c, "LPT out of space at LEB %d:%d needing %d, done_ltab %d, done_lsave %d", c 313 fs/ubifs/lpt_commit.c ubifs_dump_lpt_info(c); c 314 fs/ubifs/lpt_commit.c ubifs_dump_lpt_lebs(c); c 333 fs/ubifs/lpt_commit.c static int realloc_lpt_leb(struct ubifs_info *c, int *lnum) c 337 fs/ubifs/lpt_commit.c n = *lnum - c->lpt_first + 1; c 338 fs/ubifs/lpt_commit.c for (i = n; i < c->lpt_lebs; i++) c 339 fs/ubifs/lpt_commit.c if (c->ltab[i].cmt) { c 340 fs/ubifs/lpt_commit.c c->ltab[i].cmt = 0; c 341 fs/ubifs/lpt_commit.c *lnum = i + c->lpt_first; c 346 fs/ubifs/lpt_commit.c if (c->ltab[i].cmt) { c 347 fs/ubifs/lpt_commit.c c->ltab[i].cmt = 0; c 348 fs/ubifs/lpt_commit.c *lnum = i + c->lpt_first; c 360 fs/ubifs/lpt_commit.c static int write_cnodes(struct ubifs_info *c) c 364 fs/ubifs/lpt_commit.c void *buf = c->lpt_buf; c 366 fs/ubifs/lpt_commit.c cnode = c->lpt_cnext; c 369 fs/ubifs/lpt_commit.c lnum = c->nhead_lnum; c 370 fs/ubifs/lpt_commit.c offs = c->nhead_offs; c 374 fs/ubifs/lpt_commit.c err = ubifs_leb_unmap(c, lnum); c 379 fs/ubifs/lpt_commit.c done_lsave = !c->big_lpt; c 381 fs/ubifs/lpt_commit.c if (!done_lsave && offs + c->lsave_sz <= c->leb_size) { c 383 fs/ubifs/lpt_commit.c ubifs_pack_lsave(c, buf + offs, c->lsave); c 384 fs/ubifs/lpt_commit.c offs += c->lsave_sz; c 385 fs/ubifs/lpt_commit.c dbg_chk_lpt_sz(c, 1, c->lsave_sz); c 388 fs/ubifs/lpt_commit.c if (offs + c->ltab_sz <= c->leb_size) { c 390 fs/ubifs/lpt_commit.c ubifs_pack_ltab(c, buf + offs, c->ltab_cmt); c 391 fs/ubifs/lpt_commit.c offs += c->ltab_sz; c 392 fs/ubifs/lpt_commit.c dbg_chk_lpt_sz(c, 1, c->ltab_sz); c 398 fs/ubifs/lpt_commit.c len = c->nnode_sz; c 400 fs/ubifs/lpt_commit.c len = c->pnode_sz; c 401 fs/ubifs/lpt_commit.c while (offs + len > c->leb_size) { c 404 fs/ubifs/lpt_commit.c alen = ALIGN(wlen, c->min_io_size); c 406 fs/ubifs/lpt_commit.c err = ubifs_leb_write(c, lnum, buf + from, from, c 411 fs/ubifs/lpt_commit.c dbg_chk_lpt_sz(c, 2, c->leb_size - offs); c 412 fs/ubifs/lpt_commit.c err = realloc_lpt_leb(c, &lnum); c 416 fs/ubifs/lpt_commit.c ubifs_assert(c, lnum >= c->lpt_first && c 417 fs/ubifs/lpt_commit.c lnum <= c->lpt_last); c 418 fs/ubifs/lpt_commit.c err = ubifs_leb_unmap(c, lnum); c 424 fs/ubifs/lpt_commit.c ubifs_pack_lsave(c, buf + offs, c->lsave); c 425 fs/ubifs/lpt_commit.c offs += c->lsave_sz; c 426 fs/ubifs/lpt_commit.c dbg_chk_lpt_sz(c, 1, c->lsave_sz); c 431 fs/ubifs/lpt_commit.c ubifs_pack_ltab(c, buf + offs, c->ltab_cmt); c 432 fs/ubifs/lpt_commit.c offs += c->ltab_sz; c 433 fs/ubifs/lpt_commit.c dbg_chk_lpt_sz(c, 1, c->ltab_sz); c 439 fs/ubifs/lpt_commit.c ubifs_pack_nnode(c, buf + offs, c 442 fs/ubifs/lpt_commit.c ubifs_pack_pnode(c, buf + offs, c 455 fs/ubifs/lpt_commit.c dbg_chk_lpt_sz(c, 1, len); c 457 fs/ubifs/lpt_commit.c } while (cnode && cnode != c->lpt_cnext); c 461 fs/ubifs/lpt_commit.c if (offs + c->lsave_sz > c->leb_size) { c 463 fs/ubifs/lpt_commit.c alen = ALIGN(wlen, c->min_io_size); c 465 fs/ubifs/lpt_commit.c err = ubifs_leb_write(c, lnum, buf + from, from, alen); c 468 fs/ubifs/lpt_commit.c dbg_chk_lpt_sz(c, 2, c->leb_size - offs); c 469 fs/ubifs/lpt_commit.c err = realloc_lpt_leb(c, &lnum); c 473 fs/ubifs/lpt_commit.c ubifs_assert(c, lnum >= c->lpt_first && c 474 fs/ubifs/lpt_commit.c lnum <= c->lpt_last); c 475 fs/ubifs/lpt_commit.c err = ubifs_leb_unmap(c, lnum); c 480 fs/ubifs/lpt_commit.c ubifs_pack_lsave(c, buf + offs, c->lsave); c 481 fs/ubifs/lpt_commit.c offs += c->lsave_sz; c 482 fs/ubifs/lpt_commit.c 
dbg_chk_lpt_sz(c, 1, c->lsave_sz); c 487 fs/ubifs/lpt_commit.c if (offs + c->ltab_sz > c->leb_size) { c 489 fs/ubifs/lpt_commit.c alen = ALIGN(wlen, c->min_io_size); c 491 fs/ubifs/lpt_commit.c err = ubifs_leb_write(c, lnum, buf + from, from, alen); c 494 fs/ubifs/lpt_commit.c dbg_chk_lpt_sz(c, 2, c->leb_size - offs); c 495 fs/ubifs/lpt_commit.c err = realloc_lpt_leb(c, &lnum); c 499 fs/ubifs/lpt_commit.c ubifs_assert(c, lnum >= c->lpt_first && c 500 fs/ubifs/lpt_commit.c lnum <= c->lpt_last); c 501 fs/ubifs/lpt_commit.c err = ubifs_leb_unmap(c, lnum); c 505 fs/ubifs/lpt_commit.c ubifs_pack_ltab(c, buf + offs, c->ltab_cmt); c 506 fs/ubifs/lpt_commit.c offs += c->ltab_sz; c 507 fs/ubifs/lpt_commit.c dbg_chk_lpt_sz(c, 1, c->ltab_sz); c 512 fs/ubifs/lpt_commit.c alen = ALIGN(wlen, c->min_io_size); c 514 fs/ubifs/lpt_commit.c err = ubifs_leb_write(c, lnum, buf + from, from, alen); c 518 fs/ubifs/lpt_commit.c dbg_chk_lpt_sz(c, 4, alen - wlen); c 519 fs/ubifs/lpt_commit.c err = dbg_chk_lpt_sz(c, 3, ALIGN(offs, c->min_io_size)); c 523 fs/ubifs/lpt_commit.c c->nhead_lnum = lnum; c 524 fs/ubifs/lpt_commit.c c->nhead_offs = ALIGN(offs, c->min_io_size); c 526 fs/ubifs/lpt_commit.c dbg_lp("LPT root is at %d:%d", c->lpt_lnum, c->lpt_offs); c 527 fs/ubifs/lpt_commit.c dbg_lp("LPT head is at %d:%d", c->nhead_lnum, c->nhead_offs); c 528 fs/ubifs/lpt_commit.c dbg_lp("LPT ltab is at %d:%d", c->ltab_lnum, c->ltab_offs); c 529 fs/ubifs/lpt_commit.c if (c->big_lpt) c 530 fs/ubifs/lpt_commit.c dbg_lp("LPT lsave is at %d:%d", c->lsave_lnum, c->lsave_offs); c 535 fs/ubifs/lpt_commit.c ubifs_err(c, "LPT out of space mismatch at LEB %d:%d needing %d, done_ltab %d, done_lsave %d", c 537 fs/ubifs/lpt_commit.c ubifs_dump_lpt_info(c); c 538 fs/ubifs/lpt_commit.c ubifs_dump_lpt_lebs(c); c 552 fs/ubifs/lpt_commit.c static struct ubifs_pnode *next_pnode_to_dirty(struct ubifs_info *c, c 562 fs/ubifs/lpt_commit.c return ubifs_get_pnode(c, nnode, iip); c 578 fs/ubifs/lpt_commit.c nnode = ubifs_get_nnode(c, nnode, iip); c 595 fs/ubifs/lpt_commit.c nnode = ubifs_get_nnode(c, nnode, iip); c 606 fs/ubifs/lpt_commit.c return ubifs_get_pnode(c, nnode, iip); c 614 fs/ubifs/lpt_commit.c static void add_pnode_dirt(struct ubifs_info *c, struct ubifs_pnode *pnode) c 616 fs/ubifs/lpt_commit.c ubifs_add_lpt_dirt(c, pnode->parent->nbranch[pnode->iip].lnum, c 617 fs/ubifs/lpt_commit.c c->pnode_sz); c 625 fs/ubifs/lpt_commit.c static void do_make_pnode_dirty(struct ubifs_info *c, struct ubifs_pnode *pnode) c 631 fs/ubifs/lpt_commit.c c->dirty_pn_cnt += 1; c 632 fs/ubifs/lpt_commit.c add_pnode_dirt(c, pnode); c 637 fs/ubifs/lpt_commit.c c->dirty_nn_cnt += 1; c 638 fs/ubifs/lpt_commit.c ubifs_add_nnode_dirt(c, nnode); c 657 fs/ubifs/lpt_commit.c static int make_tree_dirty(struct ubifs_info *c) c 661 fs/ubifs/lpt_commit.c pnode = ubifs_pnode_lookup(c, 0); c 666 fs/ubifs/lpt_commit.c do_make_pnode_dirty(c, pnode); c 667 fs/ubifs/lpt_commit.c pnode = next_pnode_to_dirty(c, pnode); c 681 fs/ubifs/lpt_commit.c static int need_write_all(struct ubifs_info *c) c 686 fs/ubifs/lpt_commit.c for (i = 0; i < c->lpt_lebs; i++) { c 687 fs/ubifs/lpt_commit.c if (i + c->lpt_first == c->nhead_lnum) c 688 fs/ubifs/lpt_commit.c free += c->leb_size - c->nhead_offs; c 689 fs/ubifs/lpt_commit.c else if (c->ltab[i].free == c->leb_size) c 690 fs/ubifs/lpt_commit.c free += c->leb_size; c 691 fs/ubifs/lpt_commit.c else if (c->ltab[i].free + c->ltab[i].dirty == c->leb_size) c 692 fs/ubifs/lpt_commit.c free += c->leb_size; c 695 fs/ubifs/lpt_commit.c if (free <= 
c->lpt_sz * 2) c 708 fs/ubifs/lpt_commit.c static void lpt_tgc_start(struct ubifs_info *c) c 712 fs/ubifs/lpt_commit.c for (i = 0; i < c->lpt_lebs; i++) { c 713 fs/ubifs/lpt_commit.c if (i + c->lpt_first == c->nhead_lnum) c 715 fs/ubifs/lpt_commit.c if (c->ltab[i].dirty > 0 && c 716 fs/ubifs/lpt_commit.c c->ltab[i].free + c->ltab[i].dirty == c->leb_size) { c 717 fs/ubifs/lpt_commit.c c->ltab[i].tgc = 1; c 718 fs/ubifs/lpt_commit.c c->ltab[i].free = c->leb_size; c 719 fs/ubifs/lpt_commit.c c->ltab[i].dirty = 0; c 720 fs/ubifs/lpt_commit.c dbg_lp("LEB %d", i + c->lpt_first); c 734 fs/ubifs/lpt_commit.c static int lpt_tgc_end(struct ubifs_info *c) c 738 fs/ubifs/lpt_commit.c for (i = 0; i < c->lpt_lebs; i++) c 739 fs/ubifs/lpt_commit.c if (c->ltab[i].tgc) { c 740 fs/ubifs/lpt_commit.c err = ubifs_leb_unmap(c, i + c->lpt_first); c 743 fs/ubifs/lpt_commit.c c->ltab[i].tgc = 0; c 744 fs/ubifs/lpt_commit.c dbg_lp("LEB %d", i + c->lpt_first); c 761 fs/ubifs/lpt_commit.c static void populate_lsave(struct ubifs_info *c) c 767 fs/ubifs/lpt_commit.c ubifs_assert(c, c->big_lpt); c 768 fs/ubifs/lpt_commit.c if (!(c->lpt_drty_flgs & LSAVE_DIRTY)) { c 769 fs/ubifs/lpt_commit.c c->lpt_drty_flgs |= LSAVE_DIRTY; c 770 fs/ubifs/lpt_commit.c ubifs_add_lpt_dirt(c, c->lsave_lnum, c->lsave_sz); c 773 fs/ubifs/lpt_commit.c if (dbg_populate_lsave(c)) c 776 fs/ubifs/lpt_commit.c list_for_each_entry(lprops, &c->empty_list, list) { c 777 fs/ubifs/lpt_commit.c c->lsave[cnt++] = lprops->lnum; c 778 fs/ubifs/lpt_commit.c if (cnt >= c->lsave_cnt) c 781 fs/ubifs/lpt_commit.c list_for_each_entry(lprops, &c->freeable_list, list) { c 782 fs/ubifs/lpt_commit.c c->lsave[cnt++] = lprops->lnum; c 783 fs/ubifs/lpt_commit.c if (cnt >= c->lsave_cnt) c 786 fs/ubifs/lpt_commit.c list_for_each_entry(lprops, &c->frdi_idx_list, list) { c 787 fs/ubifs/lpt_commit.c c->lsave[cnt++] = lprops->lnum; c 788 fs/ubifs/lpt_commit.c if (cnt >= c->lsave_cnt) c 791 fs/ubifs/lpt_commit.c heap = &c->lpt_heap[LPROPS_DIRTY_IDX - 1]; c 793 fs/ubifs/lpt_commit.c c->lsave[cnt++] = heap->arr[i]->lnum; c 794 fs/ubifs/lpt_commit.c if (cnt >= c->lsave_cnt) c 797 fs/ubifs/lpt_commit.c heap = &c->lpt_heap[LPROPS_DIRTY - 1]; c 799 fs/ubifs/lpt_commit.c c->lsave[cnt++] = heap->arr[i]->lnum; c 800 fs/ubifs/lpt_commit.c if (cnt >= c->lsave_cnt) c 803 fs/ubifs/lpt_commit.c heap = &c->lpt_heap[LPROPS_FREE - 1]; c 805 fs/ubifs/lpt_commit.c c->lsave[cnt++] = heap->arr[i]->lnum; c 806 fs/ubifs/lpt_commit.c if (cnt >= c->lsave_cnt) c 810 fs/ubifs/lpt_commit.c while (cnt < c->lsave_cnt) c 811 fs/ubifs/lpt_commit.c c->lsave[cnt++] = c->main_first; c 822 fs/ubifs/lpt_commit.c static struct ubifs_nnode *nnode_lookup(struct ubifs_info *c, int i) c 827 fs/ubifs/lpt_commit.c if (!c->nroot) { c 828 fs/ubifs/lpt_commit.c err = ubifs_read_nnode(c, NULL, 0); c 832 fs/ubifs/lpt_commit.c nnode = c->nroot; c 838 fs/ubifs/lpt_commit.c nnode = ubifs_get_nnode(c, nnode, iip); c 860 fs/ubifs/lpt_commit.c static int make_nnode_dirty(struct ubifs_info *c, int node_num, int lnum, c 865 fs/ubifs/lpt_commit.c nnode = nnode_lookup(c, node_num); c 874 fs/ubifs/lpt_commit.c } else if (c->lpt_lnum != lnum || c->lpt_offs != offs) c 878 fs/ubifs/lpt_commit.c c->dirty_nn_cnt += 1; c 879 fs/ubifs/lpt_commit.c ubifs_add_nnode_dirt(c, nnode); c 884 fs/ubifs/lpt_commit.c c->dirty_nn_cnt += 1; c 885 fs/ubifs/lpt_commit.c ubifs_add_nnode_dirt(c, nnode); c 909 fs/ubifs/lpt_commit.c static int make_pnode_dirty(struct ubifs_info *c, int node_num, int lnum, c 915 fs/ubifs/lpt_commit.c pnode = 
ubifs_pnode_lookup(c, node_num); c 921 fs/ubifs/lpt_commit.c do_make_pnode_dirty(c, pnode); c 939 fs/ubifs/lpt_commit.c static int make_ltab_dirty(struct ubifs_info *c, int lnum, int offs) c 941 fs/ubifs/lpt_commit.c if (lnum != c->ltab_lnum || offs != c->ltab_offs) c 943 fs/ubifs/lpt_commit.c if (!(c->lpt_drty_flgs & LTAB_DIRTY)) { c 944 fs/ubifs/lpt_commit.c c->lpt_drty_flgs |= LTAB_DIRTY; c 945 fs/ubifs/lpt_commit.c ubifs_add_lpt_dirt(c, c->ltab_lnum, c->ltab_sz); c 964 fs/ubifs/lpt_commit.c static int make_lsave_dirty(struct ubifs_info *c, int lnum, int offs) c 966 fs/ubifs/lpt_commit.c if (lnum != c->lsave_lnum || offs != c->lsave_offs) c 968 fs/ubifs/lpt_commit.c if (!(c->lpt_drty_flgs & LSAVE_DIRTY)) { c 969 fs/ubifs/lpt_commit.c c->lpt_drty_flgs |= LSAVE_DIRTY; c 970 fs/ubifs/lpt_commit.c ubifs_add_lpt_dirt(c, c->lsave_lnum, c->lsave_sz); c 991 fs/ubifs/lpt_commit.c static int make_node_dirty(struct ubifs_info *c, int node_type, int node_num, c 996 fs/ubifs/lpt_commit.c return make_nnode_dirty(c, node_num, lnum, offs); c 998 fs/ubifs/lpt_commit.c return make_pnode_dirty(c, node_num, lnum, offs); c 1000 fs/ubifs/lpt_commit.c return make_ltab_dirty(c, lnum, offs); c 1002 fs/ubifs/lpt_commit.c return make_lsave_dirty(c, lnum, offs); c 1012 fs/ubifs/lpt_commit.c static int get_lpt_node_len(const struct ubifs_info *c, int node_type) c 1016 fs/ubifs/lpt_commit.c return c->nnode_sz; c 1018 fs/ubifs/lpt_commit.c return c->pnode_sz; c 1020 fs/ubifs/lpt_commit.c return c->ltab_sz; c 1022 fs/ubifs/lpt_commit.c return c->lsave_sz; c 1033 fs/ubifs/lpt_commit.c static int get_pad_len(const struct ubifs_info *c, uint8_t *buf, int len) c 1037 fs/ubifs/lpt_commit.c if (c->min_io_size == 1) c 1039 fs/ubifs/lpt_commit.c offs = c->leb_size - len; c 1040 fs/ubifs/lpt_commit.c pad_len = ALIGN(offs, c->min_io_size) - offs; c 1050 fs/ubifs/lpt_commit.c static int get_lpt_node_type(const struct ubifs_info *c, uint8_t *buf, c 1056 fs/ubifs/lpt_commit.c node_type = ubifs_unpack_bits(c, &addr, &pos, UBIFS_LPT_TYPE_BITS); c 1057 fs/ubifs/lpt_commit.c *node_num = ubifs_unpack_bits(c, &addr, &pos, c->pcnt_bits); c 1069 fs/ubifs/lpt_commit.c static int is_a_node(const struct ubifs_info *c, uint8_t *buf, int len) c 1077 fs/ubifs/lpt_commit.c node_type = ubifs_unpack_bits(c, &addr, &pos, UBIFS_LPT_TYPE_BITS); c 1080 fs/ubifs/lpt_commit.c node_len = get_lpt_node_len(c, node_type); c 1085 fs/ubifs/lpt_commit.c crc = ubifs_unpack_bits(c, &addr, &pos, UBIFS_LPT_CRC_BITS); c 1105 fs/ubifs/lpt_commit.c static int lpt_gc_lnum(struct ubifs_info *c, int lnum) c 1107 fs/ubifs/lpt_commit.c int err, len = c->leb_size, node_type, node_num, node_len, offs; c 1108 fs/ubifs/lpt_commit.c void *buf = c->lpt_buf; c 1112 fs/ubifs/lpt_commit.c err = ubifs_leb_read(c, lnum, buf, 0, c->leb_size, 1); c 1117 fs/ubifs/lpt_commit.c if (!is_a_node(c, buf, len)) { c 1120 fs/ubifs/lpt_commit.c pad_len = get_pad_len(c, buf, len); c 1128 fs/ubifs/lpt_commit.c node_type = get_lpt_node_type(c, buf, &node_num); c 1129 fs/ubifs/lpt_commit.c node_len = get_lpt_node_len(c, node_type); c 1130 fs/ubifs/lpt_commit.c offs = c->leb_size - len; c 1131 fs/ubifs/lpt_commit.c ubifs_assert(c, node_len != 0); c 1132 fs/ubifs/lpt_commit.c mutex_lock(&c->lp_mutex); c 1133 fs/ubifs/lpt_commit.c err = make_node_dirty(c, node_type, node_num, lnum, offs); c 1134 fs/ubifs/lpt_commit.c mutex_unlock(&c->lp_mutex); c 1150 fs/ubifs/lpt_commit.c static int lpt_gc(struct ubifs_info *c) c 1154 fs/ubifs/lpt_commit.c mutex_lock(&c->lp_mutex); c 1155 fs/ubifs/lpt_commit.c for (i 
= 0; i < c->lpt_lebs; i++) { c 1156 fs/ubifs/lpt_commit.c ubifs_assert(c, !c->ltab[i].tgc); c 1157 fs/ubifs/lpt_commit.c if (i + c->lpt_first == c->nhead_lnum || c 1158 fs/ubifs/lpt_commit.c c->ltab[i].free + c->ltab[i].dirty == c->leb_size) c 1160 fs/ubifs/lpt_commit.c if (c->ltab[i].dirty > dirty) { c 1161 fs/ubifs/lpt_commit.c dirty = c->ltab[i].dirty; c 1162 fs/ubifs/lpt_commit.c lnum = i + c->lpt_first; c 1165 fs/ubifs/lpt_commit.c mutex_unlock(&c->lp_mutex); c 1168 fs/ubifs/lpt_commit.c return lpt_gc_lnum(c, lnum); c 1181 fs/ubifs/lpt_commit.c int ubifs_lpt_start_commit(struct ubifs_info *c) c 1187 fs/ubifs/lpt_commit.c mutex_lock(&c->lp_mutex); c 1188 fs/ubifs/lpt_commit.c err = dbg_chk_lpt_free_spc(c); c 1191 fs/ubifs/lpt_commit.c err = dbg_check_ltab(c); c 1195 fs/ubifs/lpt_commit.c if (c->check_lpt_free) { c 1202 fs/ubifs/lpt_commit.c c->check_lpt_free = 0; c 1203 fs/ubifs/lpt_commit.c while (need_write_all(c)) { c 1204 fs/ubifs/lpt_commit.c mutex_unlock(&c->lp_mutex); c 1205 fs/ubifs/lpt_commit.c err = lpt_gc(c); c 1208 fs/ubifs/lpt_commit.c mutex_lock(&c->lp_mutex); c 1212 fs/ubifs/lpt_commit.c lpt_tgc_start(c); c 1214 fs/ubifs/lpt_commit.c if (!c->dirty_pn_cnt) { c 1220 fs/ubifs/lpt_commit.c if (!c->big_lpt && need_write_all(c)) { c 1222 fs/ubifs/lpt_commit.c err = make_tree_dirty(c); c 1225 fs/ubifs/lpt_commit.c lpt_tgc_start(c); c 1228 fs/ubifs/lpt_commit.c if (c->big_lpt) c 1229 fs/ubifs/lpt_commit.c populate_lsave(c); c 1231 fs/ubifs/lpt_commit.c cnt = get_cnodes_to_commit(c); c 1232 fs/ubifs/lpt_commit.c ubifs_assert(c, cnt != 0); c 1234 fs/ubifs/lpt_commit.c err = layout_cnodes(c); c 1238 fs/ubifs/lpt_commit.c err = ubifs_lpt_calc_hash(c, c->mst_node->hash_lpt); c 1243 fs/ubifs/lpt_commit.c memcpy(c->ltab_cmt, c->ltab, c 1244 fs/ubifs/lpt_commit.c sizeof(struct ubifs_lpt_lprops) * c->lpt_lebs); c 1245 fs/ubifs/lpt_commit.c c->lpt_drty_flgs &= ~(LTAB_DIRTY | LSAVE_DIRTY); c 1248 fs/ubifs/lpt_commit.c mutex_unlock(&c->lp_mutex); c 1256 fs/ubifs/lpt_commit.c static void free_obsolete_cnodes(struct ubifs_info *c) c 1260 fs/ubifs/lpt_commit.c cnext = c->lpt_cnext; c 1270 fs/ubifs/lpt_commit.c } while (cnext != c->lpt_cnext); c 1271 fs/ubifs/lpt_commit.c c->lpt_cnext = NULL; c 1283 fs/ubifs/lpt_commit.c int ubifs_lpt_end_commit(struct ubifs_info *c) c 1289 fs/ubifs/lpt_commit.c if (!c->lpt_cnext) c 1292 fs/ubifs/lpt_commit.c err = write_cnodes(c); c 1296 fs/ubifs/lpt_commit.c mutex_lock(&c->lp_mutex); c 1297 fs/ubifs/lpt_commit.c free_obsolete_cnodes(c); c 1298 fs/ubifs/lpt_commit.c mutex_unlock(&c->lp_mutex); c 1310 fs/ubifs/lpt_commit.c int ubifs_lpt_post_commit(struct ubifs_info *c) c 1314 fs/ubifs/lpt_commit.c mutex_lock(&c->lp_mutex); c 1315 fs/ubifs/lpt_commit.c err = lpt_tgc_end(c); c 1318 fs/ubifs/lpt_commit.c if (c->big_lpt) c 1319 fs/ubifs/lpt_commit.c while (need_write_all(c)) { c 1320 fs/ubifs/lpt_commit.c mutex_unlock(&c->lp_mutex); c 1321 fs/ubifs/lpt_commit.c err = lpt_gc(c); c 1324 fs/ubifs/lpt_commit.c mutex_lock(&c->lp_mutex); c 1327 fs/ubifs/lpt_commit.c mutex_unlock(&c->lp_mutex); c 1339 fs/ubifs/lpt_commit.c static struct ubifs_nnode *first_nnode(struct ubifs_info *c, int *hght) c 1344 fs/ubifs/lpt_commit.c nnode = c->nroot; c 1348 fs/ubifs/lpt_commit.c for (h = 1; h < c->lpt_hght; h++) { c 1373 fs/ubifs/lpt_commit.c static struct ubifs_nnode *next_nnode(struct ubifs_info *c, c 1395 fs/ubifs/lpt_commit.c for (h = *hght + 1; h < c->lpt_hght; h++) { c 1416 fs/ubifs/lpt_commit.c void ubifs_lpt_free(struct ubifs_info *c, int wr_only) c 1423 
fs/ubifs/lpt_commit.c free_obsolete_cnodes(c); /* Leftover from a failed commit */ c 1425 fs/ubifs/lpt_commit.c vfree(c->ltab_cmt); c 1426 fs/ubifs/lpt_commit.c c->ltab_cmt = NULL; c 1427 fs/ubifs/lpt_commit.c vfree(c->lpt_buf); c 1428 fs/ubifs/lpt_commit.c c->lpt_buf = NULL; c 1429 fs/ubifs/lpt_commit.c kfree(c->lsave); c 1430 fs/ubifs/lpt_commit.c c->lsave = NULL; c 1437 fs/ubifs/lpt_commit.c nnode = first_nnode(c, &hght); c 1441 fs/ubifs/lpt_commit.c nnode = next_nnode(c, nnode, &hght); c 1444 fs/ubifs/lpt_commit.c kfree(c->lpt_heap[i].arr); c 1445 fs/ubifs/lpt_commit.c kfree(c->dirty_idx.arr); c 1446 fs/ubifs/lpt_commit.c kfree(c->nroot); c 1447 fs/ubifs/lpt_commit.c vfree(c->ltab); c 1448 fs/ubifs/lpt_commit.c kfree(c->lpt_nod_buf); c 1476 fs/ubifs/lpt_commit.c static int dbg_is_nnode_dirty(struct ubifs_info *c, int lnum, int offs) c 1482 fs/ubifs/lpt_commit.c nnode = first_nnode(c, &hght); c 1483 fs/ubifs/lpt_commit.c for (; nnode; nnode = next_nnode(c, nnode, &hght)) { c 1495 fs/ubifs/lpt_commit.c if (c->lpt_lnum != lnum || c->lpt_offs != offs) c 1511 fs/ubifs/lpt_commit.c static int dbg_is_pnode_dirty(struct ubifs_info *c, int lnum, int offs) c 1515 fs/ubifs/lpt_commit.c cnt = DIV_ROUND_UP(c->main_lebs, UBIFS_LPT_FANOUT); c 1521 fs/ubifs/lpt_commit.c pnode = ubifs_pnode_lookup(c, i); c 1540 fs/ubifs/lpt_commit.c static int dbg_is_ltab_dirty(struct ubifs_info *c, int lnum, int offs) c 1542 fs/ubifs/lpt_commit.c if (lnum != c->ltab_lnum || offs != c->ltab_offs) c 1544 fs/ubifs/lpt_commit.c return (c->lpt_drty_flgs & LTAB_DIRTY) != 0; c 1553 fs/ubifs/lpt_commit.c static int dbg_is_lsave_dirty(struct ubifs_info *c, int lnum, int offs) c 1555 fs/ubifs/lpt_commit.c if (lnum != c->lsave_lnum || offs != c->lsave_offs) c 1557 fs/ubifs/lpt_commit.c return (c->lpt_drty_flgs & LSAVE_DIRTY) != 0; c 1567 fs/ubifs/lpt_commit.c static int dbg_is_node_dirty(struct ubifs_info *c, int node_type, int lnum, c 1572 fs/ubifs/lpt_commit.c return dbg_is_nnode_dirty(c, lnum, offs); c 1574 fs/ubifs/lpt_commit.c return dbg_is_pnode_dirty(c, lnum, offs); c 1576 fs/ubifs/lpt_commit.c return dbg_is_ltab_dirty(c, lnum, offs); c 1578 fs/ubifs/lpt_commit.c return dbg_is_lsave_dirty(c, lnum, offs); c 1590 fs/ubifs/lpt_commit.c static int dbg_check_ltab_lnum(struct ubifs_info *c, int lnum) c 1592 fs/ubifs/lpt_commit.c int err, len = c->leb_size, dirty = 0, node_type, node_num, node_len; c 1596 fs/ubifs/lpt_commit.c if (!dbg_is_chk_lprops(c)) c 1599 fs/ubifs/lpt_commit.c buf = p = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); c 1601 fs/ubifs/lpt_commit.c ubifs_err(c, "cannot allocate memory for ltab checking"); c 1607 fs/ubifs/lpt_commit.c err = ubifs_leb_read(c, lnum, buf, 0, c->leb_size, 1); c 1612 fs/ubifs/lpt_commit.c if (!is_a_node(c, p, len)) { c 1615 fs/ubifs/lpt_commit.c pad_len = get_pad_len(c, p, len); c 1623 fs/ubifs/lpt_commit.c ubifs_err(c, "invalid empty space in LEB %d at %d", c 1624 fs/ubifs/lpt_commit.c lnum, c->leb_size - len); c 1627 fs/ubifs/lpt_commit.c i = lnum - c->lpt_first; c 1628 fs/ubifs/lpt_commit.c if (len != c->ltab[i].free) { c 1629 fs/ubifs/lpt_commit.c ubifs_err(c, "invalid free space in LEB %d (free %d, expected %d)", c 1630 fs/ubifs/lpt_commit.c lnum, len, c->ltab[i].free); c 1633 fs/ubifs/lpt_commit.c if (dirty != c->ltab[i].dirty) { c 1634 fs/ubifs/lpt_commit.c ubifs_err(c, "invalid dirty space in LEB %d (dirty %d, expected %d)", c 1635 fs/ubifs/lpt_commit.c lnum, dirty, c->ltab[i].dirty); c 1640 fs/ubifs/lpt_commit.c node_type = get_lpt_node_type(c, p, &node_num); c 1641 
fs/ubifs/lpt_commit.c node_len = get_lpt_node_len(c, node_type); c 1642 fs/ubifs/lpt_commit.c ret = dbg_is_node_dirty(c, node_type, lnum, c->leb_size - len); c 1661 fs/ubifs/lpt_commit.c int dbg_check_ltab(struct ubifs_info *c) c 1665 fs/ubifs/lpt_commit.c if (!dbg_is_chk_lprops(c)) c 1669 fs/ubifs/lpt_commit.c cnt = DIV_ROUND_UP(c->main_lebs, UBIFS_LPT_FANOUT); c 1673 fs/ubifs/lpt_commit.c pnode = ubifs_pnode_lookup(c, i); c 1680 fs/ubifs/lpt_commit.c err = dbg_check_lpt_nodes(c, (struct ubifs_cnode *)c->nroot, 0, 0); c 1685 fs/ubifs/lpt_commit.c for (lnum = c->lpt_first; lnum <= c->lpt_last; lnum++) { c 1686 fs/ubifs/lpt_commit.c err = dbg_check_ltab_lnum(c, lnum); c 1688 fs/ubifs/lpt_commit.c ubifs_err(c, "failed at LEB %d", lnum); c 1703 fs/ubifs/lpt_commit.c int dbg_chk_lpt_free_spc(struct ubifs_info *c) c 1708 fs/ubifs/lpt_commit.c if (!dbg_is_chk_lprops(c)) c 1711 fs/ubifs/lpt_commit.c for (i = 0; i < c->lpt_lebs; i++) { c 1712 fs/ubifs/lpt_commit.c if (c->ltab[i].tgc || c->ltab[i].cmt) c 1714 fs/ubifs/lpt_commit.c if (i + c->lpt_first == c->nhead_lnum) c 1715 fs/ubifs/lpt_commit.c free += c->leb_size - c->nhead_offs; c 1716 fs/ubifs/lpt_commit.c else if (c->ltab[i].free == c->leb_size) c 1717 fs/ubifs/lpt_commit.c free += c->leb_size; c 1719 fs/ubifs/lpt_commit.c if (free < c->lpt_sz) { c 1720 fs/ubifs/lpt_commit.c ubifs_err(c, "LPT space error: free %lld lpt_sz %lld", c 1721 fs/ubifs/lpt_commit.c free, c->lpt_sz); c 1722 fs/ubifs/lpt_commit.c ubifs_dump_lpt_info(c); c 1723 fs/ubifs/lpt_commit.c ubifs_dump_lpt_lebs(c); c 1744 fs/ubifs/lpt_commit.c int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) c 1746 fs/ubifs/lpt_commit.c struct ubifs_debug_info *d = c->dbg; c 1750 fs/ubifs/lpt_commit.c if (!dbg_is_chk_lprops(c)) c 1759 fs/ubifs/lpt_commit.c if (c->dirty_pn_cnt > c->pnode_cnt) { c 1760 fs/ubifs/lpt_commit.c ubifs_err(c, "dirty pnodes %d exceed max %d", c 1761 fs/ubifs/lpt_commit.c c->dirty_pn_cnt, c->pnode_cnt); c 1764 fs/ubifs/lpt_commit.c if (c->dirty_nn_cnt > c->nnode_cnt) { c 1765 fs/ubifs/lpt_commit.c ubifs_err(c, "dirty nnodes %d exceed max %d", c 1766 fs/ubifs/lpt_commit.c c->dirty_nn_cnt, c->nnode_cnt); c 1779 fs/ubifs/lpt_commit.c chk_lpt_sz = c->leb_size; c 1781 fs/ubifs/lpt_commit.c chk_lpt_sz += len - c->nhead_offs; c 1783 fs/ubifs/lpt_commit.c ubifs_err(c, "LPT wrote %lld but space used was %lld", c 1787 fs/ubifs/lpt_commit.c if (d->chk_lpt_sz > c->lpt_sz) { c 1788 fs/ubifs/lpt_commit.c ubifs_err(c, "LPT wrote %lld but lpt_sz is %lld", c 1789 fs/ubifs/lpt_commit.c d->chk_lpt_sz, c->lpt_sz); c 1793 fs/ubifs/lpt_commit.c ubifs_err(c, "LPT layout size %lld but wrote %lld", c 1798 fs/ubifs/lpt_commit.c ubifs_err(c, "LPT new nhead offs: expected %d was %d", c 1802 fs/ubifs/lpt_commit.c lpt_sz = (long long)c->pnode_cnt * c->pnode_sz; c 1803 fs/ubifs/lpt_commit.c lpt_sz += (long long)c->nnode_cnt * c->nnode_sz; c 1804 fs/ubifs/lpt_commit.c lpt_sz += c->ltab_sz; c 1805 fs/ubifs/lpt_commit.c if (c->big_lpt) c 1806 fs/ubifs/lpt_commit.c lpt_sz += c->lsave_sz; c 1808 fs/ubifs/lpt_commit.c ubifs_err(c, "LPT chk_lpt_sz %lld + waste %lld exceeds %lld", c 1813 fs/ubifs/lpt_commit.c ubifs_dump_lpt_info(c); c 1814 fs/ubifs/lpt_commit.c ubifs_dump_lpt_lebs(c); c 1842 fs/ubifs/lpt_commit.c static void dump_lpt_leb(const struct ubifs_info *c, int lnum) c 1844 fs/ubifs/lpt_commit.c int err, len = c->leb_size, node_type, node_num, node_len, offs; c 1848 fs/ubifs/lpt_commit.c buf = p = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); c 1850 fs/ubifs/lpt_commit.c ubifs_err(c, 
"cannot allocate memory to dump LPT"); c 1854 fs/ubifs/lpt_commit.c err = ubifs_leb_read(c, lnum, buf, 0, c->leb_size, 1); c 1859 fs/ubifs/lpt_commit.c offs = c->leb_size - len; c 1860 fs/ubifs/lpt_commit.c if (!is_a_node(c, p, len)) { c 1863 fs/ubifs/lpt_commit.c pad_len = get_pad_len(c, p, len); c 1877 fs/ubifs/lpt_commit.c node_type = get_lpt_node_type(c, p, &node_num); c 1881 fs/ubifs/lpt_commit.c node_len = c->pnode_sz; c 1882 fs/ubifs/lpt_commit.c if (c->big_lpt) c 1894 fs/ubifs/lpt_commit.c node_len = c->nnode_sz; c 1895 fs/ubifs/lpt_commit.c if (c->big_lpt) c 1901 fs/ubifs/lpt_commit.c err = ubifs_unpack_nnode(c, p, &nnode); c 1917 fs/ubifs/lpt_commit.c node_len = c->ltab_sz; c 1921 fs/ubifs/lpt_commit.c node_len = c->lsave_sz; c 1925 fs/ubifs/lpt_commit.c ubifs_err(c, "LPT node type %d not recognized", node_type); c 1946 fs/ubifs/lpt_commit.c void ubifs_dump_lpt_lebs(const struct ubifs_info *c) c 1951 fs/ubifs/lpt_commit.c for (i = 0; i < c->lpt_lebs; i++) c 1952 fs/ubifs/lpt_commit.c dump_lpt_leb(c, i + c->lpt_first); c 1965 fs/ubifs/lpt_commit.c static int dbg_populate_lsave(struct ubifs_info *c) c 1971 fs/ubifs/lpt_commit.c if (!dbg_is_chk_gen(c)) c 1976 fs/ubifs/lpt_commit.c for (i = 0; i < c->lsave_cnt; i++) c 1977 fs/ubifs/lpt_commit.c c->lsave[i] = c->main_first; c 1979 fs/ubifs/lpt_commit.c list_for_each_entry(lprops, &c->empty_list, list) c 1980 fs/ubifs/lpt_commit.c c->lsave[prandom_u32() % c->lsave_cnt] = lprops->lnum; c 1981 fs/ubifs/lpt_commit.c list_for_each_entry(lprops, &c->freeable_list, list) c 1982 fs/ubifs/lpt_commit.c c->lsave[prandom_u32() % c->lsave_cnt] = lprops->lnum; c 1983 fs/ubifs/lpt_commit.c list_for_each_entry(lprops, &c->frdi_idx_list, list) c 1984 fs/ubifs/lpt_commit.c c->lsave[prandom_u32() % c->lsave_cnt] = lprops->lnum; c 1986 fs/ubifs/lpt_commit.c heap = &c->lpt_heap[LPROPS_DIRTY_IDX - 1]; c 1988 fs/ubifs/lpt_commit.c c->lsave[prandom_u32() % c->lsave_cnt] = heap->arr[i]->lnum; c 1989 fs/ubifs/lpt_commit.c heap = &c->lpt_heap[LPROPS_DIRTY - 1]; c 1991 fs/ubifs/lpt_commit.c c->lsave[prandom_u32() % c->lsave_cnt] = heap->arr[i]->lnum; c 1992 fs/ubifs/lpt_commit.c heap = &c->lpt_heap[LPROPS_FREE - 1]; c 1994 fs/ubifs/lpt_commit.c c->lsave[prandom_u32() % c->lsave_cnt] = heap->arr[i]->lnum; c 24 fs/ubifs/master.c int ubifs_compare_master_node(struct ubifs_info *c, void *m1, void *m2) c 64 fs/ubifs/master.c static int mst_node_check_hash(const struct ubifs_info *c, c 71 fs/ubifs/master.c SHASH_DESC_ON_STACK(shash, c->hash_tfm); c 73 fs/ubifs/master.c shash->tfm = c->hash_tfm; c 78 fs/ubifs/master.c if (ubifs_check_hash(c, expected, calc)) c 93 fs/ubifs/master.c static int scan_for_master(struct ubifs_info *c) c 101 fs/ubifs/master.c sleb = ubifs_scan(c, lnum, 0, c->sbuf, 1); c 110 fs/ubifs/master.c memcpy(c->mst_node, snod->node, snod->len); c 117 fs/ubifs/master.c sleb = ubifs_scan(c, lnum, 0, c->sbuf, 1); c 129 fs/ubifs/master.c if (ubifs_compare_master_node(c, c->mst_node, snod->node)) c 132 fs/ubifs/master.c c->mst_offs = offs; c 135 fs/ubifs/master.c if (!ubifs_authenticated(c)) c 138 fs/ubifs/master.c if (ubifs_hmac_zero(c, c->mst_node->hmac)) { c 139 fs/ubifs/master.c err = mst_node_check_hash(c, c->mst_node, c 140 fs/ubifs/master.c c->sup_node->hash_mst); c 142 fs/ubifs/master.c ubifs_err(c, "Failed to verify master node hash"); c 144 fs/ubifs/master.c err = ubifs_node_verify_hmac(c, c->mst_node, c 148 fs/ubifs/master.c ubifs_err(c, "Failed to verify master node HMAC"); c 161 fs/ubifs/master.c ubifs_err(c, "unexpected node type %d master LEB 
%d:%d", c 174 fs/ubifs/master.c static int validate_master(const struct ubifs_info *c) c 179 fs/ubifs/master.c if (c->max_sqnum >= SQNUM_WATERMARK) { c 184 fs/ubifs/master.c if (c->cmt_no >= c->max_sqnum) { c 189 fs/ubifs/master.c if (c->highest_inum >= INUM_WATERMARK) { c 194 fs/ubifs/master.c if (c->lhead_lnum < UBIFS_LOG_LNUM || c 195 fs/ubifs/master.c c->lhead_lnum >= UBIFS_LOG_LNUM + c->log_lebs || c 196 fs/ubifs/master.c c->lhead_offs < 0 || c->lhead_offs >= c->leb_size || c 197 fs/ubifs/master.c c->lhead_offs & (c->min_io_size - 1)) { c 202 fs/ubifs/master.c if (c->zroot.lnum >= c->leb_cnt || c->zroot.lnum < c->main_first || c 203 fs/ubifs/master.c c->zroot.offs >= c->leb_size || c->zroot.offs & 7) { c 208 fs/ubifs/master.c if (c->zroot.len < c->ranges[UBIFS_IDX_NODE].min_len || c 209 fs/ubifs/master.c c->zroot.len > c->ranges[UBIFS_IDX_NODE].max_len) { c 214 fs/ubifs/master.c if (c->gc_lnum >= c->leb_cnt || c->gc_lnum < c->main_first) { c 219 fs/ubifs/master.c if (c->ihead_lnum >= c->leb_cnt || c->ihead_lnum < c->main_first || c 220 fs/ubifs/master.c c->ihead_offs % c->min_io_size || c->ihead_offs < 0 || c 221 fs/ubifs/master.c c->ihead_offs > c->leb_size || c->ihead_offs & 7) { c 226 fs/ubifs/master.c main_sz = (long long)c->main_lebs * c->leb_size; c 227 fs/ubifs/master.c if (c->bi.old_idx_sz & 7 || c->bi.old_idx_sz >= main_sz) { c 232 fs/ubifs/master.c if (c->lpt_lnum < c->lpt_first || c->lpt_lnum > c->lpt_last || c 233 fs/ubifs/master.c c->lpt_offs < 0 || c->lpt_offs + c->nnode_sz > c->leb_size) { c 238 fs/ubifs/master.c if (c->nhead_lnum < c->lpt_first || c->nhead_lnum > c->lpt_last || c 239 fs/ubifs/master.c c->nhead_offs < 0 || c->nhead_offs % c->min_io_size || c 240 fs/ubifs/master.c c->nhead_offs > c->leb_size) { c 245 fs/ubifs/master.c if (c->ltab_lnum < c->lpt_first || c->ltab_lnum > c->lpt_last || c 246 fs/ubifs/master.c c->ltab_offs < 0 || c 247 fs/ubifs/master.c c->ltab_offs + c->ltab_sz > c->leb_size) { c 252 fs/ubifs/master.c if (c->big_lpt && (c->lsave_lnum < c->lpt_first || c 253 fs/ubifs/master.c c->lsave_lnum > c->lpt_last || c->lsave_offs < 0 || c 254 fs/ubifs/master.c c->lsave_offs + c->lsave_sz > c->leb_size)) { c 259 fs/ubifs/master.c if (c->lscan_lnum < c->main_first || c->lscan_lnum >= c->leb_cnt) { c 264 fs/ubifs/master.c if (c->lst.empty_lebs < 0 || c->lst.empty_lebs > c->main_lebs - 2) { c 269 fs/ubifs/master.c if (c->lst.idx_lebs < 0 || c->lst.idx_lebs > c->main_lebs - 1) { c 274 fs/ubifs/master.c if (c->lst.total_free < 0 || c->lst.total_free > main_sz || c 275 fs/ubifs/master.c c->lst.total_free & 7) { c 280 fs/ubifs/master.c if (c->lst.total_dirty < 0 || (c->lst.total_dirty & 7)) { c 285 fs/ubifs/master.c if (c->lst.total_used < 0 || (c->lst.total_used & 7)) { c 290 fs/ubifs/master.c if (c->lst.total_free + c->lst.total_dirty + c 291 fs/ubifs/master.c c->lst.total_used > main_sz) { c 296 fs/ubifs/master.c if (c->lst.total_dead + c->lst.total_dark + c 297 fs/ubifs/master.c c->lst.total_used + c->bi.old_idx_sz > main_sz) { c 302 fs/ubifs/master.c if (c->lst.total_dead < 0 || c 303 fs/ubifs/master.c c->lst.total_dead > c->lst.total_free + c->lst.total_dirty || c 304 fs/ubifs/master.c c->lst.total_dead & 7) { c 309 fs/ubifs/master.c if (c->lst.total_dark < 0 || c 310 fs/ubifs/master.c c->lst.total_dark > c->lst.total_free + c->lst.total_dirty || c 311 fs/ubifs/master.c c->lst.total_dark & 7) { c 319 fs/ubifs/master.c ubifs_err(c, "bad master node at offset %d error %d", c->mst_offs, err); c 320 fs/ubifs/master.c ubifs_dump_node(c, c->mst_node); c 332 
fs/ubifs/master.c int ubifs_read_master(struct ubifs_info *c) c 336 fs/ubifs/master.c c->mst_node = kzalloc(c->mst_node_alsz, GFP_KERNEL); c 337 fs/ubifs/master.c if (!c->mst_node) c 340 fs/ubifs/master.c err = scan_for_master(c); c 343 fs/ubifs/master.c err = ubifs_recover_master_node(c); c 353 fs/ubifs/master.c c->mst_node->flags &= cpu_to_le32(~UBIFS_MST_RCVRY); c 355 fs/ubifs/master.c c->max_sqnum = le64_to_cpu(c->mst_node->ch.sqnum); c 356 fs/ubifs/master.c c->highest_inum = le64_to_cpu(c->mst_node->highest_inum); c 357 fs/ubifs/master.c c->cmt_no = le64_to_cpu(c->mst_node->cmt_no); c 358 fs/ubifs/master.c c->zroot.lnum = le32_to_cpu(c->mst_node->root_lnum); c 359 fs/ubifs/master.c c->zroot.offs = le32_to_cpu(c->mst_node->root_offs); c 360 fs/ubifs/master.c c->zroot.len = le32_to_cpu(c->mst_node->root_len); c 361 fs/ubifs/master.c c->lhead_lnum = le32_to_cpu(c->mst_node->log_lnum); c 362 fs/ubifs/master.c c->gc_lnum = le32_to_cpu(c->mst_node->gc_lnum); c 363 fs/ubifs/master.c c->ihead_lnum = le32_to_cpu(c->mst_node->ihead_lnum); c 364 fs/ubifs/master.c c->ihead_offs = le32_to_cpu(c->mst_node->ihead_offs); c 365 fs/ubifs/master.c c->bi.old_idx_sz = le64_to_cpu(c->mst_node->index_size); c 366 fs/ubifs/master.c c->lpt_lnum = le32_to_cpu(c->mst_node->lpt_lnum); c 367 fs/ubifs/master.c c->lpt_offs = le32_to_cpu(c->mst_node->lpt_offs); c 368 fs/ubifs/master.c c->nhead_lnum = le32_to_cpu(c->mst_node->nhead_lnum); c 369 fs/ubifs/master.c c->nhead_offs = le32_to_cpu(c->mst_node->nhead_offs); c 370 fs/ubifs/master.c c->ltab_lnum = le32_to_cpu(c->mst_node->ltab_lnum); c 371 fs/ubifs/master.c c->ltab_offs = le32_to_cpu(c->mst_node->ltab_offs); c 372 fs/ubifs/master.c c->lsave_lnum = le32_to_cpu(c->mst_node->lsave_lnum); c 373 fs/ubifs/master.c c->lsave_offs = le32_to_cpu(c->mst_node->lsave_offs); c 374 fs/ubifs/master.c c->lscan_lnum = le32_to_cpu(c->mst_node->lscan_lnum); c 375 fs/ubifs/master.c c->lst.empty_lebs = le32_to_cpu(c->mst_node->empty_lebs); c 376 fs/ubifs/master.c c->lst.idx_lebs = le32_to_cpu(c->mst_node->idx_lebs); c 377 fs/ubifs/master.c old_leb_cnt = le32_to_cpu(c->mst_node->leb_cnt); c 378 fs/ubifs/master.c c->lst.total_free = le64_to_cpu(c->mst_node->total_free); c 379 fs/ubifs/master.c c->lst.total_dirty = le64_to_cpu(c->mst_node->total_dirty); c 380 fs/ubifs/master.c c->lst.total_used = le64_to_cpu(c->mst_node->total_used); c 381 fs/ubifs/master.c c->lst.total_dead = le64_to_cpu(c->mst_node->total_dead); c 382 fs/ubifs/master.c c->lst.total_dark = le64_to_cpu(c->mst_node->total_dark); c 384 fs/ubifs/master.c ubifs_copy_hash(c, c->mst_node->hash_root_idx, c->zroot.hash); c 386 fs/ubifs/master.c c->calc_idx_sz = c->bi.old_idx_sz; c 388 fs/ubifs/master.c if (c->mst_node->flags & cpu_to_le32(UBIFS_MST_NO_ORPHS)) c 389 fs/ubifs/master.c c->no_orphs = 1; c 391 fs/ubifs/master.c if (old_leb_cnt != c->leb_cnt) { c 393 fs/ubifs/master.c int growth = c->leb_cnt - old_leb_cnt; c 395 fs/ubifs/master.c if (c->leb_cnt < old_leb_cnt || c 396 fs/ubifs/master.c c->leb_cnt < UBIFS_MIN_LEB_CNT) { c 397 fs/ubifs/master.c ubifs_err(c, "bad leb_cnt on master node"); c 398 fs/ubifs/master.c ubifs_dump_node(c, c->mst_node); c 403 fs/ubifs/master.c old_leb_cnt, c->leb_cnt); c 404 fs/ubifs/master.c c->lst.empty_lebs += growth; c 405 fs/ubifs/master.c c->lst.total_free += growth * (long long)c->leb_size; c 406 fs/ubifs/master.c c->lst.total_dark += growth * (long long)c->dark_wm; c 414 fs/ubifs/master.c c->mst_node->leb_cnt = cpu_to_le32(c->leb_cnt); c 415 fs/ubifs/master.c c->mst_node->empty_lebs = 
cpu_to_le32(c->lst.empty_lebs); c 416 fs/ubifs/master.c c->mst_node->total_free = cpu_to_le64(c->lst.total_free); c 417 fs/ubifs/master.c c->mst_node->total_dark = cpu_to_le64(c->lst.total_dark); c 420 fs/ubifs/master.c err = validate_master(c); c 424 fs/ubifs/master.c err = dbg_old_index_check_init(c, &c->zroot); c 437 fs/ubifs/master.c int ubifs_write_master(struct ubifs_info *c) c 441 fs/ubifs/master.c ubifs_assert(c, !c->ro_media && !c->ro_mount); c 442 fs/ubifs/master.c if (c->ro_error) c 446 fs/ubifs/master.c offs = c->mst_offs + c->mst_node_alsz; c 449 fs/ubifs/master.c if (offs + UBIFS_MST_NODE_SZ > c->leb_size) { c 450 fs/ubifs/master.c err = ubifs_leb_unmap(c, lnum); c 456 fs/ubifs/master.c c->mst_offs = offs; c 457 fs/ubifs/master.c c->mst_node->highest_inum = cpu_to_le64(c->highest_inum); c 459 fs/ubifs/master.c ubifs_copy_hash(c, c->zroot.hash, c->mst_node->hash_root_idx); c 460 fs/ubifs/master.c err = ubifs_write_node_hmac(c, c->mst_node, len, lnum, offs, c 468 fs/ubifs/master.c err = ubifs_leb_unmap(c, lnum); c 472 fs/ubifs/master.c err = ubifs_write_node_hmac(c, c->mst_node, len, lnum, offs, c 6 fs/ubifs/misc.c void ubifs_msg(const struct ubifs_info *c, const char *fmt, ...) c 17 fs/ubifs/misc.c c->vi.ubi_num, c->vi.vol_id, &vaf); c 23 fs/ubifs/misc.c void ubifs_err(const struct ubifs_info *c, const char *fmt, ...) c 34 fs/ubifs/misc.c c->vi.ubi_num, c->vi.vol_id, current->pid, c 42 fs/ubifs/misc.c void ubifs_warn(const struct ubifs_info *c, const char *fmt, ...) c 53 fs/ubifs/misc.c c->vi.ubi_num, c->vi.vol_id, current->pid, c 66 fs/ubifs/misc.c const char *ubifs_assert_action_name(struct ubifs_info *c) c 68 fs/ubifs/misc.c return assert_names[c->assert_action]; c 56 fs/ubifs/misc.h static inline void ubifs_wake_up_bgt(struct ubifs_info *c) c 58 fs/ubifs/misc.h if (c->bgt && !c->need_bgt) { c 59 fs/ubifs/misc.h c->need_bgt = 1; c 60 fs/ubifs/misc.h wake_up_process(c->bgt); c 101 fs/ubifs/misc.h static inline int ubifs_compr_present(struct ubifs_info *c, int compr_type) c 103 fs/ubifs/misc.h ubifs_assert(c, compr_type >= 0 && compr_type < UBIFS_COMPR_TYPES_CNT); c 114 fs/ubifs/misc.h static inline const char *ubifs_compr_name(struct ubifs_info *c, int compr_type) c 116 fs/ubifs/misc.h ubifs_assert(c, compr_type >= 0 && compr_type < UBIFS_COMPR_TYPES_CNT); c 161 fs/ubifs/misc.h static inline int ubifs_add_dirt(struct ubifs_info *c, int lnum, int dirty) c 163 fs/ubifs/misc.h return ubifs_update_one_lp(c, lnum, LPROPS_NC, dirty, 0, 0); c 175 fs/ubifs/misc.h static inline int ubifs_return_leb(struct ubifs_info *c, int lnum) c 177 fs/ubifs/misc.h return ubifs_change_one_lp(c, lnum, LPROPS_NC, LPROPS_NC, 0, c 186 fs/ubifs/misc.h static inline int ubifs_idx_node_sz(const struct ubifs_info *c, int child_cnt) c 188 fs/ubifs/misc.h return UBIFS_IDX_NODE_SZ + (UBIFS_BRANCH_SZ + c->key_len + c->hash_len) c 199 fs/ubifs/misc.h struct ubifs_branch *ubifs_idx_branch(const struct ubifs_info *c, c 204 fs/ubifs/misc.h (UBIFS_BRANCH_SZ + c->key_len + c->hash_len) * bnum); c 212 fs/ubifs/misc.h static inline void *ubifs_idx_key(const struct ubifs_info *c, c 229 fs/ubifs/misc.h static inline int ubifs_tnc_lookup(struct ubifs_info *c, c 232 fs/ubifs/misc.h return ubifs_tnc_locate(c, key, node, NULL, NULL); c 242 fs/ubifs/misc.h static inline void ubifs_get_lprops(struct ubifs_info *c) c 244 fs/ubifs/misc.h mutex_lock(&c->lp_mutex); c 254 fs/ubifs/misc.h static inline void ubifs_release_lprops(struct ubifs_info *c) c 256 fs/ubifs/misc.h ubifs_assert(c, mutex_is_locked(&c->lp_mutex)); c 257 
fs/ubifs/misc.h ubifs_assert(c, c->lst.empty_lebs >= 0 && c 258 fs/ubifs/misc.h c->lst.empty_lebs <= c->main_lebs); c 259 fs/ubifs/misc.h mutex_unlock(&c->lp_mutex); c 270 fs/ubifs/misc.h static inline int ubifs_next_log_lnum(const struct ubifs_info *c, int lnum) c 273 fs/ubifs/misc.h if (lnum > c->log_last) c 279 fs/ubifs/misc.h static inline int ubifs_xattr_max_cnt(struct ubifs_info *c) c 281 fs/ubifs/misc.h int max_xattrs = (c->leb_size / 2) / UBIFS_INO_NODE_SZ; c 283 fs/ubifs/misc.h ubifs_assert(c, max_xattrs < c->max_orphans); c 287 fs/ubifs/misc.h const char *ubifs_assert_action_name(struct ubifs_info *c); c 43 fs/ubifs/orphan.c static int dbg_check_orphans(struct ubifs_info *c); c 45 fs/ubifs/orphan.c static struct ubifs_orphan *orphan_add(struct ubifs_info *c, ino_t inum, c 58 fs/ubifs/orphan.c spin_lock(&c->orphan_lock); c 59 fs/ubifs/orphan.c if (c->tot_orphans >= c->max_orphans) { c 60 fs/ubifs/orphan.c spin_unlock(&c->orphan_lock); c 64 fs/ubifs/orphan.c p = &c->orph_tree.rb_node; c 73 fs/ubifs/orphan.c ubifs_err(c, "orphaned twice"); c 74 fs/ubifs/orphan.c spin_unlock(&c->orphan_lock); c 79 fs/ubifs/orphan.c c->tot_orphans += 1; c 80 fs/ubifs/orphan.c c->new_orphans += 1; c 82 fs/ubifs/orphan.c rb_insert_color(&orphan->rb, &c->orph_tree); c 83 fs/ubifs/orphan.c list_add_tail(&orphan->list, &c->orph_list); c 84 fs/ubifs/orphan.c list_add_tail(&orphan->new_list, &c->orph_new); c 91 fs/ubifs/orphan.c spin_unlock(&c->orphan_lock); c 96 fs/ubifs/orphan.c static struct ubifs_orphan *lookup_orphan(struct ubifs_info *c, ino_t inum) c 101 fs/ubifs/orphan.c p = c->orph_tree.rb_node; c 115 fs/ubifs/orphan.c static void __orphan_drop(struct ubifs_info *c, struct ubifs_orphan *o) c 117 fs/ubifs/orphan.c rb_erase(&o->rb, &c->orph_tree); c 119 fs/ubifs/orphan.c c->tot_orphans -= 1; c 123 fs/ubifs/orphan.c c->new_orphans -= 1; c 129 fs/ubifs/orphan.c static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph) c 138 fs/ubifs/orphan.c orph->dnext = c->orph_dnext; c 139 fs/ubifs/orphan.c c->orph_dnext = orph; c 144 fs/ubifs/orphan.c __orphan_drop(c, orph); c 155 fs/ubifs/orphan.c int ubifs_add_orphan(struct ubifs_info *c, ino_t inum) c 165 fs/ubifs/orphan.c orphan = orphan_add(c, inum, NULL); c 169 fs/ubifs/orphan.c lowest_xent_key(c, &key, inum); c 171 fs/ubifs/orphan.c xent = ubifs_tnc_next_ent(c, &key, &nm); c 183 fs/ubifs/orphan.c xattr_orphan = orphan_add(c, xattr_inum, orphan); c 187 fs/ubifs/orphan.c key_read(c, &xent->key, &key); c 200 fs/ubifs/orphan.c void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum) c 204 fs/ubifs/orphan.c spin_lock(&c->orphan_lock); c 206 fs/ubifs/orphan.c orph = lookup_orphan(c, inum); c 208 fs/ubifs/orphan.c spin_unlock(&c->orphan_lock); c 209 fs/ubifs/orphan.c ubifs_err(c, "missing orphan ino %lu", (unsigned long)inum); c 217 fs/ubifs/orphan.c orphan_delete(c, child_orph); c 220 fs/ubifs/orphan.c orphan_delete(c, orph); c 222 fs/ubifs/orphan.c spin_unlock(&c->orphan_lock); c 231 fs/ubifs/orphan.c int ubifs_orphan_start_commit(struct ubifs_info *c) c 235 fs/ubifs/orphan.c spin_lock(&c->orphan_lock); c 236 fs/ubifs/orphan.c last = &c->orph_cnext; c 237 fs/ubifs/orphan.c list_for_each_entry(orphan, &c->orph_new, new_list) { c 238 fs/ubifs/orphan.c ubifs_assert(c, orphan->new); c 239 fs/ubifs/orphan.c ubifs_assert(c, !orphan->cmt); c 246 fs/ubifs/orphan.c c->cmt_orphans = c->new_orphans; c 247 fs/ubifs/orphan.c c->new_orphans = 0; c 248 fs/ubifs/orphan.c dbg_cmt("%d orphans to commit", c->cmt_orphans); c 249 fs/ubifs/orphan.c 
INIT_LIST_HEAD(&c->orph_new); c 250 fs/ubifs/orphan.c if (c->tot_orphans == 0) c 251 fs/ubifs/orphan.c c->no_orphs = 1; c 253 fs/ubifs/orphan.c c->no_orphs = 0; c 254 fs/ubifs/orphan.c spin_unlock(&c->orphan_lock); c 265 fs/ubifs/orphan.c static int avail_orphs(struct ubifs_info *c) c 269 fs/ubifs/orphan.c avail_lebs = c->orph_lebs - (c->ohead_lnum - c->orph_first) - 1; c 271 fs/ubifs/orphan.c ((c->leb_size - UBIFS_ORPH_NODE_SZ) / sizeof(__le64)); c 272 fs/ubifs/orphan.c gap = c->leb_size - c->ohead_offs; c 285 fs/ubifs/orphan.c static int tot_avail_orphs(struct ubifs_info *c) c 289 fs/ubifs/orphan.c avail_lebs = c->orph_lebs; c 291 fs/ubifs/orphan.c ((c->leb_size - UBIFS_ORPH_NODE_SZ) / sizeof(__le64)); c 305 fs/ubifs/orphan.c static int do_write_orph_node(struct ubifs_info *c, int len, int atomic) c 310 fs/ubifs/orphan.c ubifs_assert(c, c->ohead_offs == 0); c 311 fs/ubifs/orphan.c ubifs_prepare_node(c, c->orph_buf, len, 1); c 312 fs/ubifs/orphan.c len = ALIGN(len, c->min_io_size); c 313 fs/ubifs/orphan.c err = ubifs_leb_change(c, c->ohead_lnum, c->orph_buf, len); c 315 fs/ubifs/orphan.c if (c->ohead_offs == 0) { c 317 fs/ubifs/orphan.c err = ubifs_leb_unmap(c, c->ohead_lnum); c 321 fs/ubifs/orphan.c err = ubifs_write_node(c, c->orph_buf, len, c->ohead_lnum, c 322 fs/ubifs/orphan.c c->ohead_offs); c 336 fs/ubifs/orphan.c static int write_orph_node(struct ubifs_info *c, int atomic) c 342 fs/ubifs/orphan.c ubifs_assert(c, c->cmt_orphans > 0); c 343 fs/ubifs/orphan.c gap = c->leb_size - c->ohead_offs; c 345 fs/ubifs/orphan.c c->ohead_lnum += 1; c 346 fs/ubifs/orphan.c c->ohead_offs = 0; c 347 fs/ubifs/orphan.c gap = c->leb_size; c 348 fs/ubifs/orphan.c if (c->ohead_lnum > c->orph_last) { c 353 fs/ubifs/orphan.c ubifs_err(c, "out of space in orphan area"); c 358 fs/ubifs/orphan.c if (cnt > c->cmt_orphans) c 359 fs/ubifs/orphan.c cnt = c->cmt_orphans; c 361 fs/ubifs/orphan.c ubifs_assert(c, c->orph_buf); c 362 fs/ubifs/orphan.c orph = c->orph_buf; c 364 fs/ubifs/orphan.c spin_lock(&c->orphan_lock); c 365 fs/ubifs/orphan.c cnext = c->orph_cnext; c 368 fs/ubifs/orphan.c ubifs_assert(c, orphan->cmt); c 374 fs/ubifs/orphan.c c->orph_cnext = cnext; c 375 fs/ubifs/orphan.c c->cmt_orphans -= cnt; c 376 fs/ubifs/orphan.c spin_unlock(&c->orphan_lock); c 377 fs/ubifs/orphan.c if (c->cmt_orphans) c 378 fs/ubifs/orphan.c orph->cmt_no = cpu_to_le64(c->cmt_no); c 381 fs/ubifs/orphan.c orph->cmt_no = cpu_to_le64((c->cmt_no) | (1ULL << 63)); c 382 fs/ubifs/orphan.c ubifs_assert(c, c->ohead_offs + len <= c->leb_size); c 383 fs/ubifs/orphan.c ubifs_assert(c, c->ohead_lnum >= c->orph_first); c 384 fs/ubifs/orphan.c ubifs_assert(c, c->ohead_lnum <= c->orph_last); c 385 fs/ubifs/orphan.c err = do_write_orph_node(c, len, atomic); c 386 fs/ubifs/orphan.c c->ohead_offs += ALIGN(len, c->min_io_size); c 387 fs/ubifs/orphan.c c->ohead_offs = ALIGN(c->ohead_offs, 8); c 399 fs/ubifs/orphan.c static int write_orph_nodes(struct ubifs_info *c, int atomic) c 403 fs/ubifs/orphan.c while (c->cmt_orphans > 0) { c 404 fs/ubifs/orphan.c err = write_orph_node(c, atomic); c 412 fs/ubifs/orphan.c for (lnum = c->ohead_lnum + 1; lnum <= c->orph_last; lnum++) { c 413 fs/ubifs/orphan.c err = ubifs_leb_unmap(c, lnum); c 432 fs/ubifs/orphan.c static int consolidate(struct ubifs_info *c) c 434 fs/ubifs/orphan.c int tot_avail = tot_avail_orphs(c), err = 0; c 436 fs/ubifs/orphan.c spin_lock(&c->orphan_lock); c 438 fs/ubifs/orphan.c tot_avail, c->tot_orphans); c 439 fs/ubifs/orphan.c if (c->tot_orphans - c->new_orphans <= tot_avail) { c 444 
fs/ubifs/orphan.c last = &c->orph_cnext; c 445 fs/ubifs/orphan.c list_for_each_entry(orphan, &c->orph_list, list) { c 454 fs/ubifs/orphan.c ubifs_assert(c, cnt == c->tot_orphans - c->new_orphans); c 455 fs/ubifs/orphan.c c->cmt_orphans = cnt; c 456 fs/ubifs/orphan.c c->ohead_lnum = c->orph_first; c 457 fs/ubifs/orphan.c c->ohead_offs = 0; c 463 fs/ubifs/orphan.c ubifs_err(c, "out of space in orphan area"); c 466 fs/ubifs/orphan.c spin_unlock(&c->orphan_lock); c 477 fs/ubifs/orphan.c static int commit_orphans(struct ubifs_info *c) c 481 fs/ubifs/orphan.c ubifs_assert(c, c->cmt_orphans > 0); c 482 fs/ubifs/orphan.c avail = avail_orphs(c); c 483 fs/ubifs/orphan.c if (avail < c->cmt_orphans) { c 485 fs/ubifs/orphan.c err = consolidate(c); c 490 fs/ubifs/orphan.c err = write_orph_nodes(c, atomic); c 503 fs/ubifs/orphan.c static void erase_deleted(struct ubifs_info *c) c 507 fs/ubifs/orphan.c spin_lock(&c->orphan_lock); c 508 fs/ubifs/orphan.c dnext = c->orph_dnext; c 512 fs/ubifs/orphan.c ubifs_assert(c, !orphan->new); c 513 fs/ubifs/orphan.c ubifs_assert(c, orphan->del); c 514 fs/ubifs/orphan.c rb_erase(&orphan->rb, &c->orph_tree); c 516 fs/ubifs/orphan.c c->tot_orphans -= 1; c 520 fs/ubifs/orphan.c c->orph_dnext = NULL; c 521 fs/ubifs/orphan.c spin_unlock(&c->orphan_lock); c 530 fs/ubifs/orphan.c int ubifs_orphan_end_commit(struct ubifs_info *c) c 534 fs/ubifs/orphan.c if (c->cmt_orphans != 0) { c 535 fs/ubifs/orphan.c err = commit_orphans(c); c 539 fs/ubifs/orphan.c erase_deleted(c); c 540 fs/ubifs/orphan.c err = dbg_check_orphans(c); c 552 fs/ubifs/orphan.c int ubifs_clear_orphans(struct ubifs_info *c) c 556 fs/ubifs/orphan.c for (lnum = c->orph_first; lnum <= c->orph_last; lnum++) { c 557 fs/ubifs/orphan.c err = ubifs_leb_unmap(c, lnum); c 561 fs/ubifs/orphan.c c->ohead_lnum = c->orph_first; c 562 fs/ubifs/orphan.c c->ohead_offs = 0; c 575 fs/ubifs/orphan.c static int insert_dead_orphan(struct ubifs_info *c, ino_t inum) c 585 fs/ubifs/orphan.c p = &c->orph_tree.rb_node; c 599 fs/ubifs/orphan.c c->tot_orphans += 1; c 601 fs/ubifs/orphan.c rb_insert_color(&orphan->rb, &c->orph_tree); c 602 fs/ubifs/orphan.c list_add_tail(&orphan->list, &c->orph_list); c 604 fs/ubifs/orphan.c orphan->dnext = c->orph_dnext; c 605 fs/ubifs/orphan.c c->orph_dnext = orphan; c 607 fs/ubifs/orphan.c c->new_orphans, c->tot_orphans); c 623 fs/ubifs/orphan.c static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb, c 640 fs/ubifs/orphan.c ubifs_err(c, "invalid node type %d in orphan area at %d:%d", c 642 fs/ubifs/orphan.c ubifs_dump_node(c, snod->node); c 659 fs/ubifs/orphan.c if (cmt_no > c->cmt_no) c 660 fs/ubifs/orphan.c c->cmt_no = cmt_no; c 668 fs/ubifs/orphan.c ubifs_err(c, "out of order commit number %llu in orphan node at %d:%d", c 670 fs/ubifs/orphan.c ubifs_dump_node(c, snod->node); c 689 fs/ubifs/orphan.c ino_key_init(c, &key1, inum); c 690 fs/ubifs/orphan.c err = ubifs_tnc_lookup(c, &key1, ino); c 702 fs/ubifs/orphan.c lowest_ino_key(c, &key1, inum); c 703 fs/ubifs/orphan.c highest_ino_key(c, &key2, inum); c 705 fs/ubifs/orphan.c err = ubifs_tnc_remove_range(c, &key1, &key2); c 710 fs/ubifs/orphan.c err = insert_dead_orphan(c, inum); c 730 fs/ubifs/orphan.c ubifs_ro_mode(c, err); c 745 fs/ubifs/orphan.c static int kill_orphans(struct ubifs_info *c) c 750 fs/ubifs/orphan.c c->ohead_lnum = c->orph_first; c 751 fs/ubifs/orphan.c c->ohead_offs = 0; c 753 fs/ubifs/orphan.c if (c->no_orphs) { c 768 fs/ubifs/orphan.c for (lnum = c->orph_first; lnum <= c->orph_last; lnum++) { c 772 
fs/ubifs/orphan.c sleb = ubifs_scan(c, lnum, 0, c->sbuf, 1); c 775 fs/ubifs/orphan.c sleb = ubifs_recover_leb(c, lnum, 0, c 776 fs/ubifs/orphan.c c->sbuf, -1); c 782 fs/ubifs/orphan.c err = do_kill_orphans(c, sleb, &last_cmt_no, &outofdate, c 789 fs/ubifs/orphan.c c->ohead_lnum = lnum; c 790 fs/ubifs/orphan.c c->ohead_offs = sleb->endpt; c 807 fs/ubifs/orphan.c int ubifs_mount_orphans(struct ubifs_info *c, int unclean, int read_only) c 811 fs/ubifs/orphan.c c->max_orphans = tot_avail_orphs(c); c 814 fs/ubifs/orphan.c c->orph_buf = vmalloc(c->leb_size); c 815 fs/ubifs/orphan.c if (!c->orph_buf) c 820 fs/ubifs/orphan.c err = kill_orphans(c); c 822 fs/ubifs/orphan.c err = ubifs_clear_orphans(c); c 845 fs/ubifs/orphan.c static bool dbg_find_orphan(struct ubifs_info *c, ino_t inum) c 849 fs/ubifs/orphan.c spin_lock(&c->orphan_lock); c 850 fs/ubifs/orphan.c found = !!lookup_orphan(c, inum); c 851 fs/ubifs/orphan.c spin_unlock(&c->orphan_lock); c 910 fs/ubifs/orphan.c static int dbg_orphan_check(struct ubifs_info *c, struct ubifs_zbranch *zbr, c 917 fs/ubifs/orphan.c inum = key_inum(c, &zbr->key); c 920 fs/ubifs/orphan.c if (key_type(c, &zbr->key) != UBIFS_INO_KEY) c 921 fs/ubifs/orphan.c ubifs_err(c, "found orphan node ino %lu, type %d", c 922 fs/ubifs/orphan.c (unsigned long)inum, key_type(c, &zbr->key)); c 925 fs/ubifs/orphan.c err = ubifs_tnc_read_node(c, zbr, ci->node); c 927 fs/ubifs/orphan.c ubifs_err(c, "node read failed, error %d", err); c 933 fs/ubifs/orphan.c !dbg_find_orphan(c, inum)) { c 934 fs/ubifs/orphan.c ubifs_err(c, "missing orphan, ino %lu", c 966 fs/ubifs/orphan.c static int dbg_scan_orphans(struct ubifs_info *c, struct check_info *ci) c 972 fs/ubifs/orphan.c if (c->no_orphs) c 975 fs/ubifs/orphan.c buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); c 977 fs/ubifs/orphan.c ubifs_err(c, "cannot allocate memory to check orphans"); c 981 fs/ubifs/orphan.c for (lnum = c->orph_first; lnum <= c->orph_last; lnum++) { c 984 fs/ubifs/orphan.c sleb = ubifs_scan(c, lnum, 0, buf, 0); c 1000 fs/ubifs/orphan.c static int dbg_check_orphans(struct ubifs_info *c) c 1005 fs/ubifs/orphan.c if (!dbg_is_chk_orph(c)) c 1015 fs/ubifs/orphan.c ubifs_err(c, "out of memory"); c 1019 fs/ubifs/orphan.c err = dbg_scan_orphans(c, &ci); c 1023 fs/ubifs/orphan.c err = dbg_walk_index(c, &dbg_orphan_check, NULL, &ci); c 1025 fs/ubifs/orphan.c ubifs_err(c, "cannot scan TNC, error %d", err); c 1030 fs/ubifs/orphan.c ubifs_err(c, "%lu missing orphan(s)", ci.missing); c 97 fs/ubifs/recovery.c static int get_master_node(const struct ubifs_info *c, int lnum, void **pbuf, c 100 fs/ubifs/recovery.c const int sz = c->mst_node_alsz; c 104 fs/ubifs/recovery.c sbuf = vmalloc(c->leb_size); c 108 fs/ubifs/recovery.c err = ubifs_leb_read(c, lnum, sbuf, 0, c->leb_size, 0); c 115 fs/ubifs/recovery.c len = c->leb_size; c 116 fs/ubifs/recovery.c while (offs + UBIFS_MST_NODE_SZ <= c->leb_size) { c 132 fs/ubifs/recovery.c ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1); c 138 fs/ubifs/recovery.c ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1); c 160 fs/ubifs/recovery.c if (offs < c->leb_size) { c 170 fs/ubifs/recovery.c if (offs < c->leb_size) c 192 fs/ubifs/recovery.c static int write_rcvrd_mst_node(struct ubifs_info *c, c 195 fs/ubifs/recovery.c int err = 0, lnum = UBIFS_MST_LNUM, sz = c->mst_node_alsz; c 203 fs/ubifs/recovery.c err = ubifs_prepare_node_hmac(c, mst, UBIFS_MST_NODE_SZ, c 207 fs/ubifs/recovery.c err = ubifs_leb_change(c, lnum, mst, sz); c 210 fs/ubifs/recovery.c err = ubifs_leb_change(c, lnum + 1, mst, 
sz); c 227 fs/ubifs/recovery.c int ubifs_recover_master_node(struct ubifs_info *c) c 231 fs/ubifs/recovery.c const int sz = c->mst_node_alsz; c 236 fs/ubifs/recovery.c err = get_master_node(c, UBIFS_MST_LNUM, &buf1, &mst1, &cor1); c 240 fs/ubifs/recovery.c err = get_master_node(c, UBIFS_MST_LNUM + 1, &buf2, &mst2, &cor2); c 258 fs/ubifs/recovery.c if (ubifs_compare_master_node(c, mst1, mst2)) c 267 fs/ubifs/recovery.c c->leb_size - offs2 - sz < sz) { c 292 fs/ubifs/recovery.c if (offs2 + sz + sz <= c->leb_size) c 297 fs/ubifs/recovery.c ubifs_msg(c, "recovered master node from LEB %d", c 300 fs/ubifs/recovery.c memcpy(c->mst_node, mst, UBIFS_MST_NODE_SZ); c 302 fs/ubifs/recovery.c if (c->ro_mount) { c 304 fs/ubifs/recovery.c c->rcvrd_mst_node = kmalloc(sz, GFP_KERNEL); c 305 fs/ubifs/recovery.c if (!c->rcvrd_mst_node) { c 309 fs/ubifs/recovery.c memcpy(c->rcvrd_mst_node, c->mst_node, UBIFS_MST_NODE_SZ); c 335 fs/ubifs/recovery.c c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY); c 338 fs/ubifs/recovery.c c->max_sqnum = le64_to_cpu(mst->ch.sqnum) - 1; c 339 fs/ubifs/recovery.c err = write_rcvrd_mst_node(c, c->mst_node); c 352 fs/ubifs/recovery.c ubifs_err(c, "failed to recover master node"); c 354 fs/ubifs/recovery.c ubifs_err(c, "dumping first master node"); c 355 fs/ubifs/recovery.c ubifs_dump_node(c, mst1); c 358 fs/ubifs/recovery.c ubifs_err(c, "dumping second master node"); c 359 fs/ubifs/recovery.c ubifs_dump_node(c, mst2); c 375 fs/ubifs/recovery.c int ubifs_write_rcvrd_mst_node(struct ubifs_info *c) c 379 fs/ubifs/recovery.c if (!c->rcvrd_mst_node) c 381 fs/ubifs/recovery.c c->rcvrd_mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY); c 382 fs/ubifs/recovery.c c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY); c 383 fs/ubifs/recovery.c err = write_rcvrd_mst_node(c, c->rcvrd_mst_node); c 386 fs/ubifs/recovery.c kfree(c->rcvrd_mst_node); c 387 fs/ubifs/recovery.c c->rcvrd_mst_node = NULL; c 402 fs/ubifs/recovery.c static int is_last_write(const struct ubifs_info *c, void *buf, int offs) c 411 fs/ubifs/recovery.c empty_offs = ALIGN(offs + 1, c->max_write_size); c 412 fs/ubifs/recovery.c check_len = c->leb_size - empty_offs; c 429 fs/ubifs/recovery.c static void clean_buf(const struct ubifs_info *c, void **buf, int lnum, c 436 fs/ubifs/recovery.c ubifs_assert(c, !(*offs & 7)); c 437 fs/ubifs/recovery.c empty_offs = ALIGN(*offs, c->min_io_size); c 439 fs/ubifs/recovery.c ubifs_pad(c, *buf, pad_len); c 443 fs/ubifs/recovery.c memset(*buf, 0xff, c->leb_size - empty_offs); c 458 fs/ubifs/recovery.c static int no_more_nodes(const struct ubifs_info *c, void *buf, int len, c 465 fs/ubifs/recovery.c skip = ALIGN(offs + UBIFS_CH_SZ, c->max_write_size) - offs; c 472 fs/ubifs/recovery.c if (ubifs_check_node(c, buf, lnum, offs, 1, 0) != -EUCLEAN) { c 477 fs/ubifs/recovery.c skip = ALIGN(offs + dlen, c->max_write_size) - offs; c 491 fs/ubifs/recovery.c static int fix_unclean_leb(struct ubifs_info *c, struct ubifs_scan_leb *sleb, c 505 fs/ubifs/recovery.c if (c->ro_mount && !c->remounting_rw) { c 516 fs/ubifs/recovery.c list_add_tail(&ucleb->list, &c->unclean_leb_list); c 524 fs/ubifs/recovery.c err = ubifs_leb_unmap(c, lnum); c 528 fs/ubifs/recovery.c int len = ALIGN(endpt, c->min_io_size); c 531 fs/ubifs/recovery.c err = ubifs_leb_read(c, lnum, sleb->buf, 0, c 543 fs/ubifs/recovery.c ubifs_pad(c, buf, pad_len); c 546 fs/ubifs/recovery.c err = ubifs_leb_change(c, lnum, sleb->buf, len); c 622 fs/ubifs/recovery.c struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, c 625 
fs/ubifs/recovery.c int ret = 0, err, len = c->leb_size - offs, start = offs, min_io_unit; c 626 fs/ubifs/recovery.c int grouped = jhead == -1 ? 0 : c->jheads[jhead].grouped; c 632 fs/ubifs/recovery.c sleb = ubifs_start_scan(c, lnum, offs, sbuf); c 636 fs/ubifs/recovery.c ubifs_assert(c, len >= 8); c 647 fs/ubifs/recovery.c ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1); c 653 fs/ubifs/recovery.c err = ubifs_add_snod(c, sleb, buf, offs); c 673 fs/ubifs/recovery.c ubifs_err(c, "unexpected return value %d", ret); c 680 fs/ubifs/recovery.c if (!is_last_write(c, buf, offs)) c 683 fs/ubifs/recovery.c if (!no_more_nodes(c, buf, len, lnum, offs)) c 686 fs/ubifs/recovery.c if (!is_last_write(c, buf, offs)) { c 693 fs/ubifs/recovery.c ubifs_err(c, "corrupt empty space LEB %d:%d, corruption starts at %d", c 702 fs/ubifs/recovery.c min_io_unit = round_down(offs, c->min_io_size); c 766 fs/ubifs/recovery.c len = c->leb_size - offs; c 768 fs/ubifs/recovery.c clean_buf(c, &buf, lnum, &offs, &len); c 769 fs/ubifs/recovery.c ubifs_end_scan(c, sleb, lnum, offs); c 771 fs/ubifs/recovery.c err = fix_unclean_leb(c, sleb, start); c 779 fs/ubifs/recovery.c ubifs_err(c, "corruption %d", ret); c 780 fs/ubifs/recovery.c ubifs_scan_a_node(c, buf, len, lnum, offs, 0); c 782 fs/ubifs/recovery.c ubifs_scanned_corruption(c, lnum, offs, buf); c 785 fs/ubifs/recovery.c ubifs_err(c, "LEB %d scanning failed", lnum); c 799 fs/ubifs/recovery.c static int get_cs_sqnum(struct ubifs_info *c, int lnum, int offs, c 809 fs/ubifs/recovery.c if (c->leb_size - offs < UBIFS_CS_NODE_SZ) c 811 fs/ubifs/recovery.c err = ubifs_leb_read(c, lnum, (void *)cs_node, offs, c 815 fs/ubifs/recovery.c ret = ubifs_scan_a_node(c, cs_node, UBIFS_CS_NODE_SZ, lnum, offs, 0); c 817 fs/ubifs/recovery.c ubifs_err(c, "Not a valid node"); c 821 fs/ubifs/recovery.c ubifs_err(c, "Not a CS node, type is %d", cs_node->ch.node_type); c 824 fs/ubifs/recovery.c if (le64_to_cpu(cs_node->cmt_no) != c->cmt_no) { c 825 fs/ubifs/recovery.c ubifs_err(c, "CS node cmt_no %llu != current cmt_no %llu", c 827 fs/ubifs/recovery.c c->cmt_no); c 838 fs/ubifs/recovery.c ubifs_err(c, "failed to get CS sqnum"); c 856 fs/ubifs/recovery.c struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum, c 864 fs/ubifs/recovery.c if (next_lnum >= UBIFS_LOG_LNUM + c->log_lebs) c 866 fs/ubifs/recovery.c if (next_lnum != c->ltail_lnum) { c 871 fs/ubifs/recovery.c sleb = ubifs_scan(c, next_lnum, 0, sbuf, 0); c 876 fs/ubifs/recovery.c unsigned long long cs_sqnum = c->cs_sqnum; c 883 fs/ubifs/recovery.c err = get_cs_sqnum(c, lnum, offs, &cs_sqnum); c 890 fs/ubifs/recovery.c ubifs_err(c, "unrecoverable log corruption in LEB %d", c 898 fs/ubifs/recovery.c return ubifs_recover_leb(c, lnum, offs, sbuf, -1); c 912 fs/ubifs/recovery.c static int recover_head(struct ubifs_info *c, int lnum, int offs, void *sbuf) c 914 fs/ubifs/recovery.c int len = c->max_write_size, err; c 916 fs/ubifs/recovery.c if (offs + len > c->leb_size) c 917 fs/ubifs/recovery.c len = c->leb_size - offs; c 923 fs/ubifs/recovery.c err = ubifs_leb_read(c, lnum, sbuf, offs, len, 1); c 927 fs/ubifs/recovery.c return ubifs_leb_unmap(c, lnum); c 928 fs/ubifs/recovery.c err = ubifs_leb_read(c, lnum, sbuf, 0, offs, 1); c 931 fs/ubifs/recovery.c return ubifs_leb_change(c, lnum, sbuf, offs); c 954 fs/ubifs/recovery.c int ubifs_recover_inl_heads(struct ubifs_info *c, void *sbuf) c 958 fs/ubifs/recovery.c ubifs_assert(c, !c->ro_mount || c->remounting_rw); c 960 fs/ubifs/recovery.c dbg_rcvry("checking index head at 
%d:%d", c->ihead_lnum, c->ihead_offs); c 961 fs/ubifs/recovery.c err = recover_head(c, c->ihead_lnum, c->ihead_offs, sbuf); c 965 fs/ubifs/recovery.c dbg_rcvry("checking LPT head at %d:%d", c->nhead_lnum, c->nhead_offs); c 967 fs/ubifs/recovery.c return recover_head(c, c->nhead_lnum, c->nhead_offs, sbuf); c 982 fs/ubifs/recovery.c static int clean_an_unclean_leb(struct ubifs_info *c, c 992 fs/ubifs/recovery.c return ubifs_leb_unmap(c, lnum); c 995 fs/ubifs/recovery.c err = ubifs_leb_read(c, lnum, buf, offs, len, 0); c 1005 fs/ubifs/recovery.c ret = ubifs_scan_a_node(c, buf, len, lnum, offs, quiet); c 1028 fs/ubifs/recovery.c ubifs_err(c, "unexpected empty space at %d:%d", c 1039 fs/ubifs/recovery.c ubifs_scanned_corruption(c, lnum, offs, buf); c 1044 fs/ubifs/recovery.c len = ALIGN(ucleb->endpt, c->min_io_size); c 1049 fs/ubifs/recovery.c buf = c->sbuf + len - pad_len; c 1050 fs/ubifs/recovery.c ubifs_pad(c, buf, pad_len); c 1055 fs/ubifs/recovery.c err = ubifs_leb_change(c, lnum, sbuf, len); c 1075 fs/ubifs/recovery.c int ubifs_clean_lebs(struct ubifs_info *c, void *sbuf) c 1078 fs/ubifs/recovery.c while (!list_empty(&c->unclean_leb_list)) { c 1082 fs/ubifs/recovery.c ucleb = list_entry(c->unclean_leb_list.next, c 1084 fs/ubifs/recovery.c err = clean_an_unclean_leb(c, ucleb, sbuf); c 1101 fs/ubifs/recovery.c static int grab_empty_leb(struct ubifs_info *c) c 1120 fs/ubifs/recovery.c lnum = ubifs_find_free_leb_for_idx(c); c 1122 fs/ubifs/recovery.c ubifs_err(c, "could not find an empty LEB"); c 1123 fs/ubifs/recovery.c ubifs_dump_lprops(c); c 1124 fs/ubifs/recovery.c ubifs_dump_budg(c, &c->bi); c 1129 fs/ubifs/recovery.c err = ubifs_change_one_lp(c, lnum, LPROPS_NC, LPROPS_NC, 0, c 1134 fs/ubifs/recovery.c c->gc_lnum = lnum; c 1137 fs/ubifs/recovery.c return ubifs_run_commit(c); c 1158 fs/ubifs/recovery.c int ubifs_rcvry_gc_commit(struct ubifs_info *c) c 1160 fs/ubifs/recovery.c struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf; c 1166 fs/ubifs/recovery.c c->gc_lnum = -1; c 1167 fs/ubifs/recovery.c if (wbuf->lnum == -1 || wbuf->offs == c->leb_size) c 1168 fs/ubifs/recovery.c return grab_empty_leb(c); c 1170 fs/ubifs/recovery.c err = ubifs_find_dirty_leb(c, &lp, wbuf->offs, 2); c 1176 fs/ubifs/recovery.c return grab_empty_leb(c); c 1179 fs/ubifs/recovery.c ubifs_assert(c, !(lp.flags & LPROPS_INDEX)); c 1180 fs/ubifs/recovery.c ubifs_assert(c, lp.free + lp.dirty >= wbuf->offs); c 1187 fs/ubifs/recovery.c err = ubifs_run_commit(c); c 1193 fs/ubifs/recovery.c err = ubifs_garbage_collect_leb(c, &lp); c 1202 fs/ubifs/recovery.c ubifs_err(c, "GC failed, error %d", err); c 1208 fs/ubifs/recovery.c ubifs_assert(c, err == LEB_RETAINED); c 1212 fs/ubifs/recovery.c err = ubifs_leb_unmap(c, c->gc_lnum); c 1246 fs/ubifs/recovery.c static int add_ino(struct ubifs_info *c, ino_t inum, loff_t i_size, c 1249 fs/ubifs/recovery.c struct rb_node **p = &c->size_tree.rb_node, *parent = NULL; c 1271 fs/ubifs/recovery.c rb_insert_color(&e->rb, &c->size_tree); c 1281 fs/ubifs/recovery.c static struct size_entry *find_ino(struct ubifs_info *c, ino_t inum) c 1283 fs/ubifs/recovery.c struct rb_node *p = c->size_tree.rb_node; c 1303 fs/ubifs/recovery.c static void remove_ino(struct ubifs_info *c, ino_t inum) c 1305 fs/ubifs/recovery.c struct size_entry *e = find_ino(c, inum); c 1309 fs/ubifs/recovery.c rb_erase(&e->rb, &c->size_tree); c 1317 fs/ubifs/recovery.c void ubifs_destroy_size_tree(struct ubifs_info *c) c 1321 fs/ubifs/recovery.c rbtree_postorder_for_each_entry_safe(e, n, &c->size_tree, rb) { c 1326 
fs/ubifs/recovery.c c->size_tree = RB_ROOT; c 1354 fs/ubifs/recovery.c int ubifs_recover_size_accum(struct ubifs_info *c, union ubifs_key *key, c 1357 fs/ubifs/recovery.c ino_t inum = key_inum(c, key); c 1361 fs/ubifs/recovery.c switch (key_type(c, key)) { c 1364 fs/ubifs/recovery.c remove_ino(c, inum); c 1366 fs/ubifs/recovery.c e = find_ino(c, inum); c 1371 fs/ubifs/recovery.c err = add_ino(c, inum, new_size, 0, 1); c 1378 fs/ubifs/recovery.c e = find_ino(c, inum); c 1383 fs/ubifs/recovery.c err = add_ino(c, inum, 0, new_size, 0); c 1389 fs/ubifs/recovery.c e = find_ino(c, inum); c 1402 fs/ubifs/recovery.c static int fix_size_in_place(struct ubifs_info *c, struct size_entry *e) c 1404 fs/ubifs/recovery.c struct ubifs_ino_node *ino = c->sbuf; c 1412 fs/ubifs/recovery.c ino_key_init(c, &key, e->inum); c 1413 fs/ubifs/recovery.c err = ubifs_tnc_locate(c, &key, ino, &lnum, &offs); c 1424 fs/ubifs/recovery.c err = ubifs_leb_read(c, lnum, c->sbuf, 0, c->leb_size, 1); c 1428 fs/ubifs/recovery.c ino = c->sbuf + offs; c 1434 fs/ubifs/recovery.c p = c->sbuf; c 1435 fs/ubifs/recovery.c len = c->leb_size - 1; c 1438 fs/ubifs/recovery.c len = ALIGN(len + 1, c->min_io_size); c 1440 fs/ubifs/recovery.c err = ubifs_leb_change(c, lnum, c->sbuf, len); c 1448 fs/ubifs/recovery.c ubifs_warn(c, "inode %lu failed to fix size %lld -> %lld error %d", c 1458 fs/ubifs/recovery.c static int inode_fix_size(struct ubifs_info *c, struct size_entry *e) c 1464 fs/ubifs/recovery.c if (c->ro_mount) c 1465 fs/ubifs/recovery.c ubifs_assert(c, !e->inode); c 1471 fs/ubifs/recovery.c inode = ubifs_iget(c->vfs_sb, e->inum); c 1502 fs/ubifs/recovery.c if (c->ro_mount) c 1505 fs/ubifs/recovery.c err = ubifs_jnl_write_inode(c, inode); c 1512 fs/ubifs/recovery.c rb_erase(&e->rb, &c->size_tree); c 1528 fs/ubifs/recovery.c int ubifs_recover_size(struct ubifs_info *c, bool in_place) c 1530 fs/ubifs/recovery.c struct rb_node *this = rb_first(&c->size_tree); c 1543 fs/ubifs/recovery.c ino_key_init(c, &key, e->inum); c 1544 fs/ubifs/recovery.c err = ubifs_tnc_lookup(c, &key, c->sbuf); c 1551 fs/ubifs/recovery.c err = ubifs_tnc_remove_ino(c, e->inum); c 1555 fs/ubifs/recovery.c struct ubifs_ino_node *ino = c->sbuf; c 1563 fs/ubifs/recovery.c ubifs_assert(c, !(c->ro_mount && in_place)); c 1571 fs/ubifs/recovery.c err = fix_size_in_place(c, e); c 1576 fs/ubifs/recovery.c err = inode_fix_size(c, e); c 1583 fs/ubifs/recovery.c rb_erase(&e->rb, &c->size_tree); c 88 fs/ubifs/replay.c static int set_bud_lprops(struct ubifs_info *c, struct bud_entry *b) c 93 fs/ubifs/replay.c ubifs_get_lprops(c); c 95 fs/ubifs/replay.c lp = ubifs_lpt_lookup_dirty(c, b->bud->lnum); c 102 fs/ubifs/replay.c if (b->bud->start == 0 && (lp->free != c->leb_size || lp->dirty != 0)) { c 126 fs/ubifs/replay.c dirty -= c->leb_size - lp->free; c 139 fs/ubifs/replay.c lp = ubifs_change_lp(c, lp, b->free, dirty + b->dirty, c 147 fs/ubifs/replay.c err = ubifs_wbuf_seek_nolock(&c->jheads[b->bud->jhead].wbuf, c 148 fs/ubifs/replay.c b->bud->lnum, c->leb_size - b->free); c 151 fs/ubifs/replay.c ubifs_release_lprops(c); c 162 fs/ubifs/replay.c static int set_buds_lprops(struct ubifs_info *c) c 167 fs/ubifs/replay.c list_for_each_entry(b, &c->replay_buds, list) { c 168 fs/ubifs/replay.c err = set_bud_lprops(c, b); c 181 fs/ubifs/replay.c static int trun_remove_range(struct ubifs_info *c, struct replay_entry *r) c 195 fs/ubifs/replay.c ino = key_inum(c, &r->key); c 197 fs/ubifs/replay.c data_key_init(c, &min_key, ino, min_blk); c 198 fs/ubifs/replay.c data_key_init(c, &max_key, ino, 
max_blk); c 200 fs/ubifs/replay.c return ubifs_tnc_remove_range(c, &min_key, &max_key); c 213 fs/ubifs/replay.c static bool inode_still_linked(struct ubifs_info *c, struct replay_entry *rino) c 217 fs/ubifs/replay.c ubifs_assert(c, rino->deletion); c 218 fs/ubifs/replay.c ubifs_assert(c, key_type(c, &rino->key) == UBIFS_INO_KEY); c 224 fs/ubifs/replay.c list_for_each_entry_reverse(r, &c->replay_list, list) { c 225 fs/ubifs/replay.c ubifs_assert(c, r->sqnum >= rino->sqnum); c 226 fs/ubifs/replay.c if (key_inum(c, &r->key) == key_inum(c, &rino->key)) c 231 fs/ubifs/replay.c ubifs_assert(c, 0); c 242 fs/ubifs/replay.c static int apply_replay_entry(struct ubifs_info *c, struct replay_entry *r) c 249 fs/ubifs/replay.c if (is_hash_key(c, &r->key)) { c 251 fs/ubifs/replay.c err = ubifs_tnc_remove_nm(c, &r->key, &r->nm); c 253 fs/ubifs/replay.c err = ubifs_tnc_add_nm(c, &r->key, r->lnum, r->offs, c 257 fs/ubifs/replay.c switch (key_type(c, &r->key)) { c 260 fs/ubifs/replay.c ino_t inum = key_inum(c, &r->key); c 262 fs/ubifs/replay.c if (inode_still_linked(c, r)) { c 267 fs/ubifs/replay.c err = ubifs_tnc_remove_ino(c, inum); c 271 fs/ubifs/replay.c err = trun_remove_range(c, r); c 274 fs/ubifs/replay.c err = ubifs_tnc_remove(c, &r->key); c 278 fs/ubifs/replay.c err = ubifs_tnc_add(c, &r->key, r->lnum, r->offs, c 283 fs/ubifs/replay.c if (c->need_recovery) c 284 fs/ubifs/replay.c err = ubifs_recover_size_accum(c, &r->key, r->deletion, c 304 fs/ubifs/replay.c struct ubifs_info *c = priv; c 313 fs/ubifs/replay.c ubifs_assert(c, ra->sqnum != rb->sqnum); c 326 fs/ubifs/replay.c static int apply_replay_list(struct ubifs_info *c) c 331 fs/ubifs/replay.c list_sort(c, &c->replay_list, &replay_entries_cmp); c 333 fs/ubifs/replay.c list_for_each_entry(r, &c->replay_list, list) { c 336 fs/ubifs/replay.c err = apply_replay_entry(c, r); c 350 fs/ubifs/replay.c static void destroy_replay_list(struct ubifs_info *c) c 354 fs/ubifs/replay.c list_for_each_entry_safe(r, tmp, &c->replay_list, list) { c 355 fs/ubifs/replay.c if (is_hash_key(c, &r->key)) c 382 fs/ubifs/replay.c static int insert_node(struct ubifs_info *c, int lnum, int offs, int len, c 391 fs/ubifs/replay.c if (key_inum(c, key) >= c->highest_inum) c 392 fs/ubifs/replay.c c->highest_inum = key_inum(c, key); c 403 fs/ubifs/replay.c ubifs_copy_hash(c, hash, r->hash); c 406 fs/ubifs/replay.c key_copy(c, key, &r->key); c 410 fs/ubifs/replay.c list_add_tail(&r->list, &c->replay_list); c 431 fs/ubifs/replay.c static int insert_dent(struct ubifs_info *c, int lnum, int offs, int len, c 440 fs/ubifs/replay.c if (key_inum(c, key) >= c->highest_inum) c 441 fs/ubifs/replay.c c->highest_inum = key_inum(c, key); c 458 fs/ubifs/replay.c ubifs_copy_hash(c, hash, r->hash); c 461 fs/ubifs/replay.c key_copy(c, key, &r->key); c 467 fs/ubifs/replay.c list_add_tail(&r->list, &c->replay_list); c 479 fs/ubifs/replay.c int ubifs_validate_entry(struct ubifs_info *c, c 482 fs/ubifs/replay.c int key_type = key_type_flash(c, dent->key); c 490 fs/ubifs/replay.c ubifs_err(c, "bad %s node", key_type == UBIFS_DENT_KEY ? 
c 496 fs/ubifs/replay.c ubifs_err(c, "bad key type %d", key_type); c 513 fs/ubifs/replay.c static int is_last_bud(struct ubifs_info *c, struct ubifs_bud *bud) c 515 fs/ubifs/replay.c struct ubifs_jhead *jh = &c->jheads[bud->jhead]; c 554 fs/ubifs/replay.c err = ubifs_leb_read(c, next->lnum, (char *)&data, next->start, 4, 1); c 562 fs/ubifs/replay.c static int authenticate_sleb_hash(struct ubifs_info *c, struct shash_desc *log_hash, u8 *hash) c 564 fs/ubifs/replay.c SHASH_DESC_ON_STACK(hash_desc, c->hash_tfm); c 566 fs/ubifs/replay.c hash_desc->tfm = c->hash_tfm; c 568 fs/ubifs/replay.c ubifs_shash_copy_state(c, log_hash, hash_desc); c 572 fs/ubifs/replay.c static int authenticate_sleb_hmac(struct ubifs_info *c, u8 *hash, u8 *hmac) c 574 fs/ubifs/replay.c SHASH_DESC_ON_STACK(hmac_desc, c->hmac_tfm); c 576 fs/ubifs/replay.c hmac_desc->tfm = c->hmac_tfm; c 578 fs/ubifs/replay.c return crypto_shash_digest(hmac_desc, hash, c->hash_len, hmac); c 597 fs/ubifs/replay.c static int authenticate_sleb(struct ubifs_info *c, struct ubifs_scan_leb *sleb, c 607 fs/ubifs/replay.c if (!ubifs_authenticated(c)) c 617 fs/ubifs/replay.c err = authenticate_sleb_hash(c, log_hash, hash); c 621 fs/ubifs/replay.c err = authenticate_sleb_hmac(c, hash, hmac); c 625 fs/ubifs/replay.c err = ubifs_check_hmac(c, auth->hmac, hmac); c 671 fs/ubifs/replay.c static int replay_bud(struct ubifs_info *c, struct bud_entry *b) c 673 fs/ubifs/replay.c int is_last = is_last_bud(c, b->bud); c 682 fs/ubifs/replay.c if (c->need_recovery && is_last) c 689 fs/ubifs/replay.c sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf, b->bud->jhead); c 691 fs/ubifs/replay.c sleb = ubifs_scan(c, lnum, offs, c->sbuf, 0); c 695 fs/ubifs/replay.c n_nodes = authenticate_sleb(c, sleb, b->bud->log_hash, is_last); c 701 fs/ubifs/replay.c ubifs_shash_copy_state(c, b->bud->log_hash, c 702 fs/ubifs/replay.c c->jheads[b->bud->jhead].log_hash); c 733 fs/ubifs/replay.c ubifs_err(c, "file system's life ended"); c 737 fs/ubifs/replay.c ubifs_node_calc_hash(c, snod->node, hash); c 739 fs/ubifs/replay.c if (snod->sqnum > c->max_sqnum) c 740 fs/ubifs/replay.c c->max_sqnum = snod->sqnum; c 750 fs/ubifs/replay.c err = insert_node(c, lnum, snod->offs, snod->len, hash, c 759 fs/ubifs/replay.c key_block(c, &snod->key) * c 762 fs/ubifs/replay.c err = insert_node(c, lnum, snod->offs, snod->len, hash, c 772 fs/ubifs/replay.c err = ubifs_validate_entry(c, dent); c 776 fs/ubifs/replay.c err = insert_dent(c, lnum, snod->offs, snod->len, hash, c 790 fs/ubifs/replay.c if (old_size < 0 || old_size > c->max_inode_sz || c 791 fs/ubifs/replay.c new_size < 0 || new_size > c->max_inode_sz || c 793 fs/ubifs/replay.c ubifs_err(c, "bad truncation node"); c 801 fs/ubifs/replay.c trun_key_init(c, &key, le32_to_cpu(trun->inum)); c 802 fs/ubifs/replay.c err = insert_node(c, lnum, snod->offs, snod->len, hash, c 810 fs/ubifs/replay.c ubifs_err(c, "unexpected node type %d in bud LEB %d:%d", c 823 fs/ubifs/replay.c ubifs_assert(c, ubifs_search_bud(c, lnum)); c 824 fs/ubifs/replay.c ubifs_assert(c, sleb->endpt - offs >= used); c 825 fs/ubifs/replay.c ubifs_assert(c, sleb->endpt % c->min_io_size == 0); c 828 fs/ubifs/replay.c b->free = c->leb_size - sleb->endpt; c 837 fs/ubifs/replay.c ubifs_err(c, "bad node is at LEB %d:%d", lnum, snod->offs); c 838 fs/ubifs/replay.c ubifs_dump_node(c, snod->node); c 850 fs/ubifs/replay.c static int replay_buds(struct ubifs_info *c) c 856 fs/ubifs/replay.c list_for_each_entry(b, &c->replay_buds, list) { c 857 fs/ubifs/replay.c err = replay_bud(c, b); c 861 
fs/ubifs/replay.c ubifs_assert(c, b->sqnum > prev_sqnum); c 872 fs/ubifs/replay.c static void destroy_bud_list(struct ubifs_info *c) c 876 fs/ubifs/replay.c while (!list_empty(&c->replay_buds)) { c 877 fs/ubifs/replay.c b = list_entry(c->replay_buds.next, struct bud_entry, list); c 894 fs/ubifs/replay.c static int add_replay_bud(struct ubifs_info *c, int lnum, int offs, int jhead, c 916 fs/ubifs/replay.c bud->log_hash = ubifs_hash_get_desc(c); c 922 fs/ubifs/replay.c ubifs_shash_copy_state(c, c->log_hash, bud->log_hash); c 924 fs/ubifs/replay.c ubifs_add_bud(c, bud); c 928 fs/ubifs/replay.c list_add_tail(&b->list, &c->replay_buds); c 949 fs/ubifs/replay.c static int validate_ref(struct ubifs_info *c, const struct ubifs_ref_node *ref) c 961 fs/ubifs/replay.c if (jhead >= c->jhead_cnt || lnum >= c->leb_cnt || c 962 fs/ubifs/replay.c lnum < c->main_first || offs > c->leb_size || c 963 fs/ubifs/replay.c offs & (c->min_io_size - 1)) c 967 fs/ubifs/replay.c bud = ubifs_search_bud(c, lnum); c 971 fs/ubifs/replay.c ubifs_err(c, "bud at LEB %d:%d was already referred", lnum, offs); c 989 fs/ubifs/replay.c static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf) c 997 fs/ubifs/replay.c sleb = ubifs_scan(c, lnum, offs, sbuf, c->need_recovery); c 999 fs/ubifs/replay.c if (PTR_ERR(sleb) != -EUCLEAN || !c->need_recovery) c 1006 fs/ubifs/replay.c sleb = ubifs_recover_log_leb(c, lnum, offs, sbuf); c 1018 fs/ubifs/replay.c if (c->cs_sqnum == 0) { c 1027 fs/ubifs/replay.c ubifs_err(c, "first log node at LEB %d:%d is not CS node", c 1031 fs/ubifs/replay.c if (le64_to_cpu(node->cmt_no) != c->cmt_no) { c 1032 fs/ubifs/replay.c ubifs_err(c, "first CS node at LEB %d:%d has wrong commit number %llu expected %llu", c 1035 fs/ubifs/replay.c c->cmt_no); c 1039 fs/ubifs/replay.c c->cs_sqnum = le64_to_cpu(node->ch.sqnum); c 1040 fs/ubifs/replay.c dbg_mnt("commit start sqnum %llu", c->cs_sqnum); c 1042 fs/ubifs/replay.c err = ubifs_shash_init(c, c->log_hash); c 1046 fs/ubifs/replay.c err = ubifs_shash_update(c, c->log_hash, node, UBIFS_CS_NODE_SZ); c 1051 fs/ubifs/replay.c if (snod->sqnum < c->cs_sqnum) { c 1065 fs/ubifs/replay.c ubifs_err(c, "first node is not at zero offset"); c 1073 fs/ubifs/replay.c ubifs_err(c, "file system's life ended"); c 1077 fs/ubifs/replay.c if (snod->sqnum < c->cs_sqnum) { c 1078 fs/ubifs/replay.c ubifs_err(c, "bad sqnum %llu, commit sqnum %llu", c 1079 fs/ubifs/replay.c snod->sqnum, c->cs_sqnum); c 1083 fs/ubifs/replay.c if (snod->sqnum > c->max_sqnum) c 1084 fs/ubifs/replay.c c->max_sqnum = snod->sqnum; c 1090 fs/ubifs/replay.c err = validate_ref(c, ref); c 1096 fs/ubifs/replay.c err = ubifs_shash_update(c, c->log_hash, ref, c 1101 fs/ubifs/replay.c err = add_replay_bud(c, le32_to_cpu(ref->lnum), c 1113 fs/ubifs/replay.c ubifs_err(c, "unexpected node in log"); c 1118 fs/ubifs/replay.c ubifs_err(c, "unexpected node in log"); c 1123 fs/ubifs/replay.c if (sleb->endpt || c->lhead_offs >= c->leb_size) { c 1124 fs/ubifs/replay.c c->lhead_lnum = lnum; c 1125 fs/ubifs/replay.c c->lhead_offs = sleb->endpt; c 1134 fs/ubifs/replay.c ubifs_err(c, "log error detected while replaying the log at LEB %d:%d", c 1136 fs/ubifs/replay.c ubifs_dump_node(c, snod->node); c 1148 fs/ubifs/replay.c static int take_ihead(struct ubifs_info *c) c 1153 fs/ubifs/replay.c ubifs_get_lprops(c); c 1155 fs/ubifs/replay.c lp = ubifs_lpt_lookup_dirty(c, c->ihead_lnum); c 1163 fs/ubifs/replay.c lp = ubifs_change_lp(c, lp, LPROPS_NC, LPROPS_NC, c 1172 fs/ubifs/replay.c ubifs_release_lprops(c); c 1184 
fs/ubifs/replay.c int ubifs_replay_journal(struct ubifs_info *c) c 1191 fs/ubifs/replay.c free = take_ihead(c); c 1195 fs/ubifs/replay.c if (c->ihead_offs != c->leb_size - free) { c 1196 fs/ubifs/replay.c ubifs_err(c, "bad index head LEB %d:%d", c->ihead_lnum, c 1197 fs/ubifs/replay.c c->ihead_offs); c 1202 fs/ubifs/replay.c c->replaying = 1; c 1203 fs/ubifs/replay.c lnum = c->ltail_lnum = c->lhead_lnum; c 1206 fs/ubifs/replay.c err = replay_log_leb(c, lnum, 0, c->sbuf); c 1208 fs/ubifs/replay.c if (lnum != c->lhead_lnum) c 1219 fs/ubifs/replay.c ubifs_err(c, "no UBIFS nodes found at the log head LEB %d:%d, possibly corrupted", c 1225 fs/ubifs/replay.c lnum = ubifs_next_log_lnum(c, lnum); c 1226 fs/ubifs/replay.c } while (lnum != c->ltail_lnum); c 1228 fs/ubifs/replay.c err = replay_buds(c); c 1232 fs/ubifs/replay.c err = apply_replay_list(c); c 1236 fs/ubifs/replay.c err = set_buds_lprops(c); c 1246 fs/ubifs/replay.c c->bi.uncommitted_idx = atomic_long_read(&c->dirty_zn_cnt); c 1247 fs/ubifs/replay.c c->bi.uncommitted_idx *= c->max_idx_node_sz; c 1249 fs/ubifs/replay.c ubifs_assert(c, c->bud_bytes <= c->max_bud_bytes || c->need_recovery); c 1251 fs/ubifs/replay.c c->lhead_lnum, c->lhead_offs, c->max_sqnum, c 1252 fs/ubifs/replay.c (unsigned long)c->highest_inum); c 1254 fs/ubifs/replay.c destroy_replay_list(c); c 1255 fs/ubifs/replay.c destroy_bud_list(c); c 1256 fs/ubifs/replay.c c->replaying = 0; c 54 fs/ubifs/sb.c static int get_default_compressor(struct ubifs_info *c) c 56 fs/ubifs/sb.c if (ubifs_compr_present(c, UBIFS_COMPR_LZO)) c 59 fs/ubifs/sb.c if (ubifs_compr_present(c, UBIFS_COMPR_ZLIB)) c 72 fs/ubifs/sb.c static int create_default_filesystem(struct ubifs_info *c) c 93 fs/ubifs/sb.c c->key_len = UBIFS_SK_LEN; c 99 fs/ubifs/sb.c if (c->leb_cnt < 0x7FFFFFFF / DEFAULT_JNL_PERCENT) c 101 fs/ubifs/sb.c jnl_lebs = c->leb_cnt * DEFAULT_JNL_PERCENT / 100; c 103 fs/ubifs/sb.c jnl_lebs = (c->leb_cnt / 100) * DEFAULT_JNL_PERCENT; c 107 fs/ubifs/sb.c if (jnl_lebs * c->leb_size > DEFAULT_MAX_JNL) c 108 fs/ubifs/sb.c jnl_lebs = DEFAULT_MAX_JNL / c->leb_size; c 116 fs/ubifs/sb.c tmp = 2 * (c->ref_node_alsz * jnl_lebs) + c->leb_size - 1; c 117 fs/ubifs/sb.c log_lebs = tmp / c->leb_size; c 120 fs/ubifs/sb.c if (c->leb_cnt - min_leb_cnt > 8) { c 137 fs/ubifs/sb.c if (c->leb_cnt - min_leb_cnt > 1) c 145 fs/ubifs/sb.c main_lebs = c->leb_cnt - UBIFS_SB_LEBS - UBIFS_MST_LEBS - log_lebs; c 149 fs/ubifs/sb.c c->lsave_cnt = DEFAULT_LSAVE_CNT; c 150 fs/ubifs/sb.c c->max_leb_cnt = c->leb_cnt; c 151 fs/ubifs/sb.c err = ubifs_create_dflt_lpt(c, &main_lebs, lpt_first, &lpt_lebs, c 159 fs/ubifs/sb.c main_first = c->leb_cnt - main_lebs; c 161 fs/ubifs/sb.c sup = kzalloc(ALIGN(UBIFS_SB_NODE_SZ, c->min_io_size), GFP_KERNEL); c 162 fs/ubifs/sb.c mst = kzalloc(c->mst_node_alsz, GFP_KERNEL); c 163 fs/ubifs/sb.c idx_node_size = ubifs_idx_node_sz(c, 1); c 164 fs/ubifs/sb.c idx = kzalloc(ALIGN(idx_node_size, c->min_io_size), GFP_KERNEL); c 165 fs/ubifs/sb.c ino = kzalloc(ALIGN(UBIFS_INO_NODE_SZ, c->min_io_size), GFP_KERNEL); c 166 fs/ubifs/sb.c cs = kzalloc(ALIGN(UBIFS_CS_NODE_SZ, c->min_io_size), GFP_KERNEL); c 175 fs/ubifs/sb.c tmp64 = (long long)max_buds * c->leb_size; c 180 fs/ubifs/sb.c if (ubifs_authenticated(c)) { c 182 fs/ubifs/sb.c sup->hash_algo = cpu_to_le16(c->auth_hash_algo); c 183 fs/ubifs/sb.c err = ubifs_hmac_wkm(c, sup->hmac_wkm); c 193 fs/ubifs/sb.c sup->min_io_size = cpu_to_le32(c->min_io_size); c 194 fs/ubifs/sb.c sup->leb_size = cpu_to_le32(c->leb_size); c 195 fs/ubifs/sb.c sup->leb_cnt = 
cpu_to_le32(c->leb_cnt); c 196 fs/ubifs/sb.c sup->max_leb_cnt = cpu_to_le32(c->max_leb_cnt); c 203 fs/ubifs/sb.c sup->lsave_cnt = cpu_to_le32(c->lsave_cnt); c 206 fs/ubifs/sb.c if (c->mount_opts.override_compr) c 207 fs/ubifs/sb.c sup->default_compr = cpu_to_le16(c->mount_opts.compr_type); c 209 fs/ubifs/sb.c sup->default_compr = cpu_to_le16(get_default_compressor(c)); c 213 fs/ubifs/sb.c main_bytes = (long long)main_lebs * c->leb_size; c 230 fs/ubifs/sb.c tmp = ubifs_idx_node_sz(c, 1); c 234 fs/ubifs/sb.c mst->ihead_offs = cpu_to_le32(ALIGN(tmp, c->min_io_size)); c 236 fs/ubifs/sb.c mst->lpt_lnum = cpu_to_le32(c->lpt_lnum); c 237 fs/ubifs/sb.c mst->lpt_offs = cpu_to_le32(c->lpt_offs); c 238 fs/ubifs/sb.c mst->nhead_lnum = cpu_to_le32(c->nhead_lnum); c 239 fs/ubifs/sb.c mst->nhead_offs = cpu_to_le32(c->nhead_offs); c 240 fs/ubifs/sb.c mst->ltab_lnum = cpu_to_le32(c->ltab_lnum); c 241 fs/ubifs/sb.c mst->ltab_offs = cpu_to_le32(c->ltab_offs); c 242 fs/ubifs/sb.c mst->lsave_lnum = cpu_to_le32(c->lsave_lnum); c 243 fs/ubifs/sb.c mst->lsave_offs = cpu_to_le32(c->lsave_offs); c 247 fs/ubifs/sb.c mst->leb_cnt = cpu_to_le32(c->leb_cnt); c 248 fs/ubifs/sb.c ubifs_copy_hash(c, hash_lpt, mst->hash_lpt); c 252 fs/ubifs/sb.c tmp64 -= ALIGN(ubifs_idx_node_sz(c, 1), c->min_io_size); c 253 fs/ubifs/sb.c tmp64 -= ALIGN(UBIFS_INO_NODE_SZ, c->min_io_size); c 256 fs/ubifs/sb.c tmp64 = ALIGN(ubifs_idx_node_sz(c, 1), c->min_io_size); c 257 fs/ubifs/sb.c ino_waste = ALIGN(UBIFS_INO_NODE_SZ, c->min_io_size) - c 260 fs/ubifs/sb.c tmp64 -= ALIGN(ubifs_idx_node_sz(c, 1), 8); c 264 fs/ubifs/sb.c tmp64 = ((long long)(c->main_lebs - 1) * c->dark_wm); c 273 fs/ubifs/sb.c c->key_fmt = UBIFS_SIMPLE_KEY_FMT; c 274 fs/ubifs/sb.c c->key_hash = key_r5_hash; c 278 fs/ubifs/sb.c ino_key_init(c, &key, UBIFS_ROOT_INO); c 279 fs/ubifs/sb.c br = ubifs_idx_branch(c, idx, 0); c 280 fs/ubifs/sb.c key_write_idx(c, &key, &br->key); c 289 fs/ubifs/sb.c ino_key_init_flash(c, &ino->key, UBIFS_ROOT_INO); c 291 fs/ubifs/sb.c ino->creat_sqnum = cpu_to_le64(++c->max_sqnum); c 321 fs/ubifs/sb.c err = ubifs_write_node_hmac(c, sup, UBIFS_SB_NODE_SZ, 0, 0, c 326 fs/ubifs/sb.c err = ubifs_write_node(c, ino, UBIFS_INO_NODE_SZ, c 331 fs/ubifs/sb.c ubifs_node_calc_hash(c, ino, hash); c 332 fs/ubifs/sb.c ubifs_copy_hash(c, hash, ubifs_branch_hash(c, br)); c 334 fs/ubifs/sb.c err = ubifs_write_node(c, idx, idx_node_size, main_first + DEFAULT_IDX_LEB, 0); c 338 fs/ubifs/sb.c ubifs_node_calc_hash(c, idx, hash); c 339 fs/ubifs/sb.c ubifs_copy_hash(c, hash, mst->hash_root_idx); c 341 fs/ubifs/sb.c err = ubifs_write_node_hmac(c, mst, UBIFS_MST_NODE_SZ, UBIFS_MST_LNUM, 0, c 346 fs/ubifs/sb.c err = ubifs_write_node_hmac(c, mst, UBIFS_MST_NODE_SZ, UBIFS_MST_LNUM + 1, c 351 fs/ubifs/sb.c err = ubifs_write_node(c, cs, UBIFS_CS_NODE_SZ, UBIFS_LOG_LNUM, 0); c 355 fs/ubifs/sb.c ubifs_msg(c, "default file-system created"); c 378 fs/ubifs/sb.c static int validate_sb(struct ubifs_info *c, struct ubifs_sb_node *sup) c 383 fs/ubifs/sb.c if (!c->key_hash) { c 393 fs/ubifs/sb.c if (le32_to_cpu(sup->min_io_size) != c->min_io_size) { c 394 fs/ubifs/sb.c ubifs_err(c, "min. 
I/O unit mismatch: %d in superblock, %d real", c 395 fs/ubifs/sb.c le32_to_cpu(sup->min_io_size), c->min_io_size); c 399 fs/ubifs/sb.c if (le32_to_cpu(sup->leb_size) != c->leb_size) { c 400 fs/ubifs/sb.c ubifs_err(c, "LEB size mismatch: %d in superblock, %d real", c 401 fs/ubifs/sb.c le32_to_cpu(sup->leb_size), c->leb_size); c 405 fs/ubifs/sb.c if (c->log_lebs < UBIFS_MIN_LOG_LEBS || c 406 fs/ubifs/sb.c c->lpt_lebs < UBIFS_MIN_LPT_LEBS || c 407 fs/ubifs/sb.c c->orph_lebs < UBIFS_MIN_ORPH_LEBS || c 408 fs/ubifs/sb.c c->main_lebs < UBIFS_MIN_MAIN_LEBS) { c 418 fs/ubifs/sb.c min_leb_cnt = UBIFS_SB_LEBS + UBIFS_MST_LEBS + c->log_lebs; c 419 fs/ubifs/sb.c min_leb_cnt += c->lpt_lebs + c->orph_lebs + c->jhead_cnt + 6; c 421 fs/ubifs/sb.c if (c->leb_cnt < min_leb_cnt || c->leb_cnt > c->vi.size) { c 422 fs/ubifs/sb.c ubifs_err(c, "bad LEB count: %d in superblock, %d on UBI volume, %d minimum required", c 423 fs/ubifs/sb.c c->leb_cnt, c->vi.size, min_leb_cnt); c 427 fs/ubifs/sb.c if (c->max_leb_cnt < c->leb_cnt) { c 428 fs/ubifs/sb.c ubifs_err(c, "max. LEB count %d less than LEB count %d", c 429 fs/ubifs/sb.c c->max_leb_cnt, c->leb_cnt); c 433 fs/ubifs/sb.c if (c->main_lebs < UBIFS_MIN_MAIN_LEBS) { c 434 fs/ubifs/sb.c ubifs_err(c, "too few main LEBs count %d, must be at least %d", c 435 fs/ubifs/sb.c c->main_lebs, UBIFS_MIN_MAIN_LEBS); c 439 fs/ubifs/sb.c max_bytes = (long long)c->leb_size * UBIFS_MIN_BUD_LEBS; c 440 fs/ubifs/sb.c if (c->max_bud_bytes < max_bytes) { c 441 fs/ubifs/sb.c ubifs_err(c, "too small journal (%lld bytes), must be at least %lld bytes", c 442 fs/ubifs/sb.c c->max_bud_bytes, max_bytes); c 446 fs/ubifs/sb.c max_bytes = (long long)c->leb_size * c->main_lebs; c 447 fs/ubifs/sb.c if (c->max_bud_bytes > max_bytes) { c 448 fs/ubifs/sb.c ubifs_err(c, "too large journal size (%lld bytes), only %lld bytes available in the main area", c 449 fs/ubifs/sb.c c->max_bud_bytes, max_bytes); c 453 fs/ubifs/sb.c if (c->jhead_cnt < NONDATA_JHEADS_CNT + 1 || c 454 fs/ubifs/sb.c c->jhead_cnt > NONDATA_JHEADS_CNT + UBIFS_MAX_JHEADS) { c 459 fs/ubifs/sb.c if (c->fanout < UBIFS_MIN_FANOUT || c 460 fs/ubifs/sb.c ubifs_idx_node_sz(c, c->fanout) > c->leb_size) { c 465 fs/ubifs/sb.c if (c->lsave_cnt < 0 || (c->lsave_cnt > DEFAULT_LSAVE_CNT && c 466 fs/ubifs/sb.c c->lsave_cnt > c->max_leb_cnt - UBIFS_SB_LEBS - UBIFS_MST_LEBS - c 467 fs/ubifs/sb.c c->log_lebs - c->lpt_lebs - c->orph_lebs)) { c 472 fs/ubifs/sb.c if (UBIFS_SB_LEBS + UBIFS_MST_LEBS + c->log_lebs + c->lpt_lebs + c 473 fs/ubifs/sb.c c->orph_lebs + c->main_lebs != c->leb_cnt) { c 478 fs/ubifs/sb.c if (c->default_compr >= UBIFS_COMPR_TYPES_CNT) { c 483 fs/ubifs/sb.c if (c->rp_size < 0 || max_bytes < c->rp_size) { c 494 fs/ubifs/sb.c if (!c->double_hash && c->fmt_version >= 5) { c 499 fs/ubifs/sb.c if (c->encrypted && c->fmt_version < 5) { c 507 fs/ubifs/sb.c ubifs_err(c, "bad superblock, error %d", err); c 508 fs/ubifs/sb.c ubifs_dump_node(c, sup); c 520 fs/ubifs/sb.c static struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c) c 525 fs/ubifs/sb.c sup = kmalloc(ALIGN(UBIFS_SB_NODE_SZ, c->min_io_size), GFP_NOFS); c 529 fs/ubifs/sb.c err = ubifs_read_node(c, sup, UBIFS_SB_NODE, UBIFS_SB_NODE_SZ, c 539 fs/ubifs/sb.c static int authenticate_sb_node(struct ubifs_info *c, c 548 fs/ubifs/sb.c if (c->authenticated && !authenticated) { c 549 fs/ubifs/sb.c ubifs_err(c, "authenticated FS forced, but found FS without authentication"); c 553 fs/ubifs/sb.c if (!c->authenticated && authenticated) { c 554 fs/ubifs/sb.c ubifs_err(c, "authenticated FS 
found, but no key given"); c 558 fs/ubifs/sb.c ubifs_msg(c, "Mounting in %sauthenticated mode", c 559 fs/ubifs/sb.c c->authenticated ? "" : "un"); c 561 fs/ubifs/sb.c if (!c->authenticated) c 569 fs/ubifs/sb.c ubifs_err(c, "superblock uses unknown hash algo %d", c 574 fs/ubifs/sb.c if (strcmp(hash_algo_name[hash_algo], c->auth_hash_name)) { c 575 fs/ubifs/sb.c ubifs_err(c, "This filesystem uses %s for hashing," c 577 fs/ubifs/sb.c c->auth_hash_name); c 586 fs/ubifs/sb.c if (ubifs_hmac_zero(c, sup->hmac)) { c 587 fs/ubifs/sb.c err = ubifs_sb_verify_signature(c, sup); c 589 fs/ubifs/sb.c err = ubifs_hmac_wkm(c, hmac_wkm); c 592 fs/ubifs/sb.c if (ubifs_check_hmac(c, hmac_wkm, sup->hmac_wkm)) { c 593 fs/ubifs/sb.c ubifs_err(c, "provided key does not fit"); c 596 fs/ubifs/sb.c err = ubifs_node_verify_hmac(c, sup, sizeof(*sup), c 602 fs/ubifs/sb.c ubifs_err(c, "Failed to authenticate superblock: %d", err); c 614 fs/ubifs/sb.c int ubifs_write_sb_node(struct ubifs_info *c, struct ubifs_sb_node *sup) c 616 fs/ubifs/sb.c int len = ALIGN(UBIFS_SB_NODE_SZ, c->min_io_size); c 619 fs/ubifs/sb.c err = ubifs_prepare_node_hmac(c, sup, UBIFS_SB_NODE_SZ, c 624 fs/ubifs/sb.c return ubifs_leb_change(c, UBIFS_SB_LNUM, sup, len); c 635 fs/ubifs/sb.c int ubifs_read_superblock(struct ubifs_info *c) c 640 fs/ubifs/sb.c if (c->empty) { c 641 fs/ubifs/sb.c err = create_default_filesystem(c); c 646 fs/ubifs/sb.c sup = ubifs_read_sb_node(c); c 650 fs/ubifs/sb.c c->sup_node = sup; c 652 fs/ubifs/sb.c c->fmt_version = le32_to_cpu(sup->fmt_version); c 653 fs/ubifs/sb.c c->ro_compat_version = le32_to_cpu(sup->ro_compat_version); c 659 fs/ubifs/sb.c if (c->fmt_version > UBIFS_FORMAT_VERSION) { c 660 fs/ubifs/sb.c ubifs_assert(c, !c->ro_media || c->ro_mount); c 661 fs/ubifs/sb.c if (!c->ro_mount || c 662 fs/ubifs/sb.c c->ro_compat_version > UBIFS_RO_COMPAT_VERSION) { c 663 fs/ubifs/sb.c ubifs_err(c, "on-flash format version is w%d/r%d, but software only supports up to version w%d/r%d", c 664 fs/ubifs/sb.c c->fmt_version, c->ro_compat_version, c 667 fs/ubifs/sb.c if (c->ro_compat_version <= UBIFS_RO_COMPAT_VERSION) { c 668 fs/ubifs/sb.c ubifs_msg(c, "only R/O mounting is possible"); c 680 fs/ubifs/sb.c c->rw_incompat = 1; c 683 fs/ubifs/sb.c if (c->fmt_version < 3) { c 684 fs/ubifs/sb.c ubifs_err(c, "on-flash format version %d is not supported", c 685 fs/ubifs/sb.c c->fmt_version); c 692 fs/ubifs/sb.c c->key_hash = key_r5_hash; c 693 fs/ubifs/sb.c c->key_hash_type = UBIFS_KEY_HASH_R5; c 697 fs/ubifs/sb.c c->key_hash = key_test_hash; c 698 fs/ubifs/sb.c c->key_hash_type = UBIFS_KEY_HASH_TEST; c 702 fs/ubifs/sb.c c->key_fmt = sup->key_fmt; c 704 fs/ubifs/sb.c switch (c->key_fmt) { c 706 fs/ubifs/sb.c c->key_len = UBIFS_SK_LEN; c 709 fs/ubifs/sb.c ubifs_err(c, "unsupported key format"); c 714 fs/ubifs/sb.c c->leb_cnt = le32_to_cpu(sup->leb_cnt); c 715 fs/ubifs/sb.c c->max_leb_cnt = le32_to_cpu(sup->max_leb_cnt); c 716 fs/ubifs/sb.c c->max_bud_bytes = le64_to_cpu(sup->max_bud_bytes); c 717 fs/ubifs/sb.c c->log_lebs = le32_to_cpu(sup->log_lebs); c 718 fs/ubifs/sb.c c->lpt_lebs = le32_to_cpu(sup->lpt_lebs); c 719 fs/ubifs/sb.c c->orph_lebs = le32_to_cpu(sup->orph_lebs); c 720 fs/ubifs/sb.c c->jhead_cnt = le32_to_cpu(sup->jhead_cnt) + NONDATA_JHEADS_CNT; c 721 fs/ubifs/sb.c c->fanout = le32_to_cpu(sup->fanout); c 722 fs/ubifs/sb.c c->lsave_cnt = le32_to_cpu(sup->lsave_cnt); c 723 fs/ubifs/sb.c c->rp_size = le64_to_cpu(sup->rp_size); c 724 fs/ubifs/sb.c c->rp_uid = make_kuid(&init_user_ns, le32_to_cpu(sup->rp_uid)); c 725 
fs/ubifs/sb.c c->rp_gid = make_kgid(&init_user_ns, le32_to_cpu(sup->rp_gid)); c 727 fs/ubifs/sb.c if (!c->mount_opts.override_compr) c 728 fs/ubifs/sb.c c->default_compr = le16_to_cpu(sup->default_compr); c 730 fs/ubifs/sb.c c->vfs_sb->s_time_gran = le32_to_cpu(sup->time_gran); c 731 fs/ubifs/sb.c memcpy(&c->uuid, &sup->uuid, 16); c 732 fs/ubifs/sb.c c->big_lpt = !!(sup_flags & UBIFS_FLG_BIGLPT); c 733 fs/ubifs/sb.c c->space_fixup = !!(sup_flags & UBIFS_FLG_SPACE_FIXUP); c 734 fs/ubifs/sb.c c->double_hash = !!(sup_flags & UBIFS_FLG_DOUBLE_HASH); c 735 fs/ubifs/sb.c c->encrypted = !!(sup_flags & UBIFS_FLG_ENCRYPTION); c 737 fs/ubifs/sb.c err = authenticate_sb_node(c, sup); c 742 fs/ubifs/sb.c ubifs_err(c, "Unknown feature flags found: %#x", c 748 fs/ubifs/sb.c if (!IS_ENABLED(CONFIG_FS_ENCRYPTION) && c->encrypted) { c 749 fs/ubifs/sb.c ubifs_err(c, "file system contains encrypted files but UBIFS" c 756 fs/ubifs/sb.c if (c->leb_cnt < c->vi.size && c->leb_cnt < c->max_leb_cnt) { c 757 fs/ubifs/sb.c int old_leb_cnt = c->leb_cnt; c 759 fs/ubifs/sb.c c->leb_cnt = min_t(int, c->max_leb_cnt, c->vi.size); c 760 fs/ubifs/sb.c sup->leb_cnt = cpu_to_le32(c->leb_cnt); c 762 fs/ubifs/sb.c c->superblock_need_write = 1; c 765 fs/ubifs/sb.c old_leb_cnt, c->leb_cnt); c 768 fs/ubifs/sb.c c->log_bytes = (long long)c->log_lebs * c->leb_size; c 769 fs/ubifs/sb.c c->log_last = UBIFS_LOG_LNUM + c->log_lebs - 1; c 770 fs/ubifs/sb.c c->lpt_first = UBIFS_LOG_LNUM + c->log_lebs; c 771 fs/ubifs/sb.c c->lpt_last = c->lpt_first + c->lpt_lebs - 1; c 772 fs/ubifs/sb.c c->orph_first = c->lpt_last + 1; c 773 fs/ubifs/sb.c c->orph_last = c->orph_first + c->orph_lebs - 1; c 774 fs/ubifs/sb.c c->main_lebs = c->leb_cnt - UBIFS_SB_LEBS - UBIFS_MST_LEBS; c 775 fs/ubifs/sb.c c->main_lebs -= c->log_lebs + c->lpt_lebs + c->orph_lebs; c 776 fs/ubifs/sb.c c->main_first = c->leb_cnt - c->main_lebs; c 778 fs/ubifs/sb.c err = validate_sb(c, sup); c 794 fs/ubifs/sb.c static int fixup_leb(struct ubifs_info *c, int lnum, int len) c 798 fs/ubifs/sb.c ubifs_assert(c, len >= 0); c 799 fs/ubifs/sb.c ubifs_assert(c, len % c->min_io_size == 0); c 800 fs/ubifs/sb.c ubifs_assert(c, len < c->leb_size); c 804 fs/ubifs/sb.c return ubifs_leb_unmap(c, lnum); c 808 fs/ubifs/sb.c err = ubifs_leb_read(c, lnum, c->sbuf, 0, len, 1); c 812 fs/ubifs/sb.c return ubifs_leb_change(c, lnum, c->sbuf, len); c 822 fs/ubifs/sb.c static int fixup_free_space(struct ubifs_info *c) c 827 fs/ubifs/sb.c ubifs_get_lprops(c); c 831 fs/ubifs/sb.c err = fixup_leb(c, lnum, c->mst_offs + c->mst_node_alsz); c 837 fs/ubifs/sb.c lnum = ubifs_next_log_lnum(c, c->lhead_lnum); c 838 fs/ubifs/sb.c while (lnum != c->ltail_lnum) { c 839 fs/ubifs/sb.c err = fixup_leb(c, lnum, 0); c 842 fs/ubifs/sb.c lnum = ubifs_next_log_lnum(c, lnum); c 849 fs/ubifs/sb.c err = fixup_leb(c, c->lhead_lnum, c 850 fs/ubifs/sb.c ALIGN(UBIFS_CS_NODE_SZ, c->min_io_size)); c 855 fs/ubifs/sb.c for (lnum = c->lpt_first; lnum <= c->lpt_last; lnum++) { c 856 fs/ubifs/sb.c int free = c->ltab[lnum - c->lpt_first].free; c 859 fs/ubifs/sb.c err = fixup_leb(c, lnum, c->leb_size - free); c 866 fs/ubifs/sb.c for (lnum = c->orph_first; lnum <= c->orph_last; lnum++) { c 867 fs/ubifs/sb.c err = fixup_leb(c, lnum, 0); c 873 fs/ubifs/sb.c for (lnum = c->main_first; lnum < c->leb_cnt; lnum++) { c 874 fs/ubifs/sb.c lprops = ubifs_lpt_lookup(c, lnum); c 881 fs/ubifs/sb.c err = fixup_leb(c, lnum, c->leb_size - lprops->free); c 888 fs/ubifs/sb.c ubifs_release_lprops(c); c 905 fs/ubifs/sb.c int ubifs_fixup_free_space(struct ubifs_info 
*c) c 908 fs/ubifs/sb.c struct ubifs_sb_node *sup = c->sup_node; c 910 fs/ubifs/sb.c ubifs_assert(c, c->space_fixup); c 911 fs/ubifs/sb.c ubifs_assert(c, !c->ro_mount); c 913 fs/ubifs/sb.c ubifs_msg(c, "start fixing up free space"); c 915 fs/ubifs/sb.c err = fixup_free_space(c); c 920 fs/ubifs/sb.c c->space_fixup = 0; c 923 fs/ubifs/sb.c c->superblock_need_write = 1; c 925 fs/ubifs/sb.c ubifs_msg(c, "free space fixup complete"); c 929 fs/ubifs/sb.c int ubifs_enable_encryption(struct ubifs_info *c) c 932 fs/ubifs/sb.c struct ubifs_sb_node *sup = c->sup_node; c 937 fs/ubifs/sb.c if (c->encrypted) c 940 fs/ubifs/sb.c if (c->ro_mount || c->ro_media) c 943 fs/ubifs/sb.c if (c->fmt_version < 5) { c 944 fs/ubifs/sb.c ubifs_err(c, "on-flash format version 5 is needed for encryption"); c 950 fs/ubifs/sb.c err = ubifs_write_sb_node(c, sup); c 952 fs/ubifs/sb.c c->encrypted = 1; c 57 fs/ubifs/scan.c int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum, c 79 fs/ubifs/scan.c if (ubifs_check_node(c, buf, lnum, offs, quiet, 1)) c 89 fs/ubifs/scan.c offs + node_len + pad_len > c->leb_size) { c 91 fs/ubifs/scan.c ubifs_err(c, "bad pad node at LEB %d:%d", c 93 fs/ubifs/scan.c ubifs_dump_node(c, pad); c 101 fs/ubifs/scan.c ubifs_err(c, "bad padding length %d - %d", c 125 fs/ubifs/scan.c struct ubifs_scan_leb *ubifs_start_scan(const struct ubifs_info *c, int lnum, c 141 fs/ubifs/scan.c err = ubifs_leb_read(c, lnum, sbuf + offs, offs, c->leb_size - offs, 0); c 143 fs/ubifs/scan.c ubifs_err(c, "cannot read %d bytes from LEB %d:%d, error %d", c 144 fs/ubifs/scan.c c->leb_size - offs, lnum, offs, err); c 163 fs/ubifs/scan.c void ubifs_end_scan(const struct ubifs_info *c, struct ubifs_scan_leb *sleb, c 167 fs/ubifs/scan.c ubifs_assert(c, offs % c->min_io_size == 0); c 169 fs/ubifs/scan.c sleb->endpt = ALIGN(offs, c->min_io_size); c 181 fs/ubifs/scan.c int ubifs_add_snod(const struct ubifs_info *c, struct ubifs_scan_leb *sleb, c 207 fs/ubifs/scan.c key_read(c, &ino->key, &snod->key); c 210 fs/ubifs/scan.c invalid_key_init(c, &snod->key); c 225 fs/ubifs/scan.c void ubifs_scanned_corruption(const struct ubifs_info *c, int lnum, int offs, c 230 fs/ubifs/scan.c ubifs_err(c, "corruption at LEB %d:%d", lnum, offs); c 231 fs/ubifs/scan.c len = c->leb_size - offs; c 234 fs/ubifs/scan.c ubifs_err(c, "first %d bytes from LEB %d:%d", len, lnum, offs); c 254 fs/ubifs/scan.c struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum, c 258 fs/ubifs/scan.c int err, len = c->leb_size - offs; c 261 fs/ubifs/scan.c sleb = ubifs_start_scan(c, lnum, offs, sbuf); c 274 fs/ubifs/scan.c ret = ubifs_scan_a_node(c, buf, len, lnum, offs, quiet); c 289 fs/ubifs/scan.c ubifs_err(c, "garbage"); c 295 fs/ubifs/scan.c ubifs_err(c, "bad node"); c 298 fs/ubifs/scan.c ubifs_err(c, "unknown"); c 303 fs/ubifs/scan.c err = ubifs_add_snod(c, sleb, buf, offs); c 313 fs/ubifs/scan.c if (offs % c->min_io_size) { c 315 fs/ubifs/scan.c ubifs_err(c, "empty space starts at non-aligned offset %d", c 320 fs/ubifs/scan.c ubifs_end_scan(c, sleb, lnum, offs); c 328 fs/ubifs/scan.c ubifs_err(c, "corrupt empty space at LEB %d:%d", c 337 fs/ubifs/scan.c ubifs_scanned_corruption(c, lnum, offs, buf); c 338 fs/ubifs/scan.c ubifs_err(c, "LEB %d scanning failed", lnum); c 345 fs/ubifs/scan.c ubifs_err(c, "LEB %d scanning failed, error %d", lnum, err); c 58 fs/ubifs/shrinker.c static int shrink_tnc(struct ubifs_info *c, int nr, int age, int *contention) c 64 fs/ubifs/shrinker.c ubifs_assert(c, mutex_is_locked(&c->umount_mutex)); c 65 
fs/ubifs/shrinker.c ubifs_assert(c, mutex_is_locked(&c->tnc_mutex)); c 67 fs/ubifs/shrinker.c if (!c->zroot.znode || atomic_long_read(&c->clean_zn_cnt) == 0) c 80 fs/ubifs/shrinker.c znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, NULL); c 82 fs/ubifs/shrinker.c atomic_long_read(&c->clean_zn_cnt) > 0) { c 114 fs/ubifs/shrinker.c c->zroot.znode = NULL; c 116 fs/ubifs/shrinker.c freed = ubifs_destroy_tnc_subtree(c, znode); c 118 fs/ubifs/shrinker.c atomic_long_sub(freed, &c->clean_zn_cnt); c 123 fs/ubifs/shrinker.c if (unlikely(!c->zroot.znode)) c 127 fs/ubifs/shrinker.c znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, znode); c 146 fs/ubifs/shrinker.c struct ubifs_info *c; c 158 fs/ubifs/shrinker.c c = list_entry(p, struct ubifs_info, infos_list); c 163 fs/ubifs/shrinker.c if (c->shrinker_run_no == run_no) c 165 fs/ubifs/shrinker.c if (!mutex_trylock(&c->umount_mutex)) { c 175 fs/ubifs/shrinker.c if (!mutex_trylock(&c->tnc_mutex)) { c 176 fs/ubifs/shrinker.c mutex_unlock(&c->umount_mutex); c 186 fs/ubifs/shrinker.c c->shrinker_run_no = run_no; c 187 fs/ubifs/shrinker.c freed += shrink_tnc(c, nr, age, contention); c 188 fs/ubifs/shrinker.c mutex_unlock(&c->tnc_mutex); c 196 fs/ubifs/shrinker.c list_move_tail(&c->infos_list, &ubifs_infos); c 197 fs/ubifs/shrinker.c mutex_unlock(&c->umount_mutex); c 216 fs/ubifs/shrinker.c struct ubifs_info *c; c 225 fs/ubifs/shrinker.c list_for_each_entry(c, &ubifs_infos, infos_list) { c 228 fs/ubifs/shrinker.c if (!mutex_trylock(&c->umount_mutex)) { c 237 fs/ubifs/shrinker.c dirty_zn_cnt = atomic_long_read(&c->dirty_zn_cnt); c 239 fs/ubifs/shrinker.c if (!dirty_zn_cnt || c->cmt_state == COMMIT_BROKEN || c 240 fs/ubifs/shrinker.c c->ro_mount || c->ro_error) { c 241 fs/ubifs/shrinker.c mutex_unlock(&c->umount_mutex); c 245 fs/ubifs/shrinker.c if (c->cmt_state != COMMIT_RESTING) { c 247 fs/ubifs/shrinker.c mutex_unlock(&c->umount_mutex); c 252 fs/ubifs/shrinker.c list_move_tail(&c->infos_list, &ubifs_infos); c 255 fs/ubifs/shrinker.c ubifs_request_bg_commit(c); c 256 fs/ubifs/shrinker.c mutex_unlock(&c->umount_mutex); c 259 fs/ubifs/shrinker.c mutex_unlock(&c->umount_mutex); c 55 fs/ubifs/super.c static int validate_inode(struct ubifs_info *c, const struct inode *inode) c 60 fs/ubifs/super.c if (inode->i_size > c->max_inode_sz) { c 61 fs/ubifs/super.c ubifs_err(c, "inode is too large (%lld)", c 67 fs/ubifs/super.c ubifs_err(c, "unknown compression type %d", ui->compr_type); c 80 fs/ubifs/super.c if (!ubifs_compr_present(c, ui->compr_type)) { c 81 fs/ubifs/super.c ubifs_warn(c, "inode %lu uses '%s' compression, but it was not compiled in", c 82 fs/ubifs/super.c inode->i_ino, ubifs_compr_name(c, ui->compr_type)); c 85 fs/ubifs/super.c err = dbg_check_dir(c, inode); c 94 fs/ubifs/super.c struct ubifs_info *c = sb->s_fs_info; c 113 fs/ubifs/super.c ino_key_init(c, &key, inode->i_ino); c 115 fs/ubifs/super.c err = ubifs_tnc_lookup(c, &key, ino); c 147 fs/ubifs/super.c err = validate_inode(c, inode); c 237 fs/ubifs/super.c ubifs_err(c, "inode %lu validation failed, error %d", inode->i_ino, err); c 238 fs/ubifs/super.c ubifs_dump_node(c, ino); c 239 fs/ubifs/super.c ubifs_dump_inode(c, inode); c 244 fs/ubifs/super.c ubifs_err(c, "failed to read inode %lu, error %d", inode->i_ino, err); c 280 fs/ubifs/super.c struct ubifs_info *c = inode->i_sb->s_fs_info; c 283 fs/ubifs/super.c ubifs_assert(c, !ui->xattr); c 307 fs/ubifs/super.c err = ubifs_jnl_write_inode(c, inode); c 309 fs/ubifs/super.c ubifs_err(c, "can't write inode %lu, error %d", c 312 fs/ubifs/super.c 
err = dbg_check_inode_size(c, inode, ui->ui_size); c 317 fs/ubifs/super.c ubifs_release_dirty_inode_budget(c, ui); c 334 fs/ubifs/super.c struct ubifs_info *c = inode->i_sb->s_fs_info; c 346 fs/ubifs/super.c ubifs_assert(c, !atomic_read(&inode->i_count)); c 357 fs/ubifs/super.c err = ubifs_jnl_delete_inode(c, inode); c 363 fs/ubifs/super.c ubifs_err(c, "can't delete inode %lu, error %d", c 368 fs/ubifs/super.c ubifs_release_dirty_inode_budget(c, ui); c 371 fs/ubifs/super.c c->bi.nospace = c->bi.nospace_rp = 0; c 381 fs/ubifs/super.c struct ubifs_info *c = inode->i_sb->s_fs_info; c 384 fs/ubifs/super.c ubifs_assert(c, mutex_is_locked(&ui->ui_mutex)); c 393 fs/ubifs/super.c struct ubifs_info *c = dentry->d_sb->s_fs_info; c 395 fs/ubifs/super.c __le32 *uuid = (__le32 *)c->uuid; c 397 fs/ubifs/super.c free = ubifs_get_free_space(c); c 403 fs/ubifs/super.c buf->f_blocks = c->block_cnt; c 405 fs/ubifs/super.c if (free > c->report_rp_size) c 406 fs/ubifs/super.c buf->f_bavail = (free - c->report_rp_size) >> UBIFS_BLOCK_SHIFT; c 414 fs/ubifs/super.c ubifs_assert(c, buf->f_bfree <= c->block_cnt); c 420 fs/ubifs/super.c struct ubifs_info *c = root->d_sb->s_fs_info; c 422 fs/ubifs/super.c if (c->mount_opts.unmount_mode == 2) c 424 fs/ubifs/super.c else if (c->mount_opts.unmount_mode == 1) c 427 fs/ubifs/super.c if (c->mount_opts.bulk_read == 2) c 429 fs/ubifs/super.c else if (c->mount_opts.bulk_read == 1) c 432 fs/ubifs/super.c if (c->mount_opts.chk_data_crc == 2) c 434 fs/ubifs/super.c else if (c->mount_opts.chk_data_crc == 1) c 437 fs/ubifs/super.c if (c->mount_opts.override_compr) { c 439 fs/ubifs/super.c ubifs_compr_name(c, c->mount_opts.compr_type)); c 442 fs/ubifs/super.c seq_printf(s, ",assert=%s", ubifs_assert_action_name(c)); c 443 fs/ubifs/super.c seq_printf(s, ",ubi=%d,vol=%d", c->vi.ubi_num, c->vi.vol_id); c 451 fs/ubifs/super.c struct ubifs_info *c = sb->s_fs_info; c 465 fs/ubifs/super.c for (i = 0; i < c->jhead_cnt; i++) { c 466 fs/ubifs/super.c err = ubifs_wbuf_sync(&c->jheads[i].wbuf); c 478 fs/ubifs/super.c err = ubifs_run_commit(c); c 482 fs/ubifs/super.c return ubi_sync(c->vi.ubi_num); c 494 fs/ubifs/super.c static int init_constants_early(struct ubifs_info *c) c 496 fs/ubifs/super.c if (c->vi.corrupted) { c 497 fs/ubifs/super.c ubifs_warn(c, "UBI volume is corrupted - read-only mode"); c 498 fs/ubifs/super.c c->ro_media = 1; c 501 fs/ubifs/super.c if (c->di.ro_mode) { c 502 fs/ubifs/super.c ubifs_msg(c, "read-only UBI device"); c 503 fs/ubifs/super.c c->ro_media = 1; c 506 fs/ubifs/super.c if (c->vi.vol_type == UBI_STATIC_VOLUME) { c 507 fs/ubifs/super.c ubifs_msg(c, "static UBI volume - read-only mode"); c 508 fs/ubifs/super.c c->ro_media = 1; c 511 fs/ubifs/super.c c->leb_cnt = c->vi.size; c 512 fs/ubifs/super.c c->leb_size = c->vi.usable_leb_size; c 513 fs/ubifs/super.c c->leb_start = c->di.leb_start; c 514 fs/ubifs/super.c c->half_leb_size = c->leb_size / 2; c 515 fs/ubifs/super.c c->min_io_size = c->di.min_io_size; c 516 fs/ubifs/super.c c->min_io_shift = fls(c->min_io_size) - 1; c 517 fs/ubifs/super.c c->max_write_size = c->di.max_write_size; c 518 fs/ubifs/super.c c->max_write_shift = fls(c->max_write_size) - 1; c 520 fs/ubifs/super.c if (c->leb_size < UBIFS_MIN_LEB_SZ) { c 521 fs/ubifs/super.c ubifs_errc(c, "too small LEBs (%d bytes), min. is %d bytes", c 522 fs/ubifs/super.c c->leb_size, UBIFS_MIN_LEB_SZ); c 526 fs/ubifs/super.c if (c->leb_cnt < UBIFS_MIN_LEB_CNT) { c 527 fs/ubifs/super.c ubifs_errc(c, "too few LEBs (%d), min. 
is %d", c 528 fs/ubifs/super.c c->leb_cnt, UBIFS_MIN_LEB_CNT); c 532 fs/ubifs/super.c if (!is_power_of_2(c->min_io_size)) { c 533 fs/ubifs/super.c ubifs_errc(c, "bad min. I/O size %d", c->min_io_size); c 541 fs/ubifs/super.c if (c->max_write_size < c->min_io_size || c 542 fs/ubifs/super.c c->max_write_size % c->min_io_size || c 543 fs/ubifs/super.c !is_power_of_2(c->max_write_size)) { c 544 fs/ubifs/super.c ubifs_errc(c, "bad write buffer size %d for %d min. I/O unit", c 545 fs/ubifs/super.c c->max_write_size, c->min_io_size); c 554 fs/ubifs/super.c if (c->min_io_size < 8) { c 555 fs/ubifs/super.c c->min_io_size = 8; c 556 fs/ubifs/super.c c->min_io_shift = 3; c 557 fs/ubifs/super.c if (c->max_write_size < c->min_io_size) { c 558 fs/ubifs/super.c c->max_write_size = c->min_io_size; c 559 fs/ubifs/super.c c->max_write_shift = c->min_io_shift; c 563 fs/ubifs/super.c c->ref_node_alsz = ALIGN(UBIFS_REF_NODE_SZ, c->min_io_size); c 564 fs/ubifs/super.c c->mst_node_alsz = ALIGN(UBIFS_MST_NODE_SZ, c->min_io_size); c 570 fs/ubifs/super.c c->ranges[UBIFS_PAD_NODE].len = UBIFS_PAD_NODE_SZ; c 571 fs/ubifs/super.c c->ranges[UBIFS_SB_NODE].len = UBIFS_SB_NODE_SZ; c 572 fs/ubifs/super.c c->ranges[UBIFS_MST_NODE].len = UBIFS_MST_NODE_SZ; c 573 fs/ubifs/super.c c->ranges[UBIFS_REF_NODE].len = UBIFS_REF_NODE_SZ; c 574 fs/ubifs/super.c c->ranges[UBIFS_TRUN_NODE].len = UBIFS_TRUN_NODE_SZ; c 575 fs/ubifs/super.c c->ranges[UBIFS_CS_NODE].len = UBIFS_CS_NODE_SZ; c 576 fs/ubifs/super.c c->ranges[UBIFS_AUTH_NODE].min_len = UBIFS_AUTH_NODE_SZ; c 577 fs/ubifs/super.c c->ranges[UBIFS_AUTH_NODE].max_len = UBIFS_AUTH_NODE_SZ + c 579 fs/ubifs/super.c c->ranges[UBIFS_SIG_NODE].min_len = UBIFS_SIG_NODE_SZ; c 580 fs/ubifs/super.c c->ranges[UBIFS_SIG_NODE].max_len = c->leb_size - UBIFS_SB_NODE_SZ; c 582 fs/ubifs/super.c c->ranges[UBIFS_INO_NODE].min_len = UBIFS_INO_NODE_SZ; c 583 fs/ubifs/super.c c->ranges[UBIFS_INO_NODE].max_len = UBIFS_MAX_INO_NODE_SZ; c 584 fs/ubifs/super.c c->ranges[UBIFS_ORPH_NODE].min_len = c 586 fs/ubifs/super.c c->ranges[UBIFS_ORPH_NODE].max_len = c->leb_size; c 587 fs/ubifs/super.c c->ranges[UBIFS_DENT_NODE].min_len = UBIFS_DENT_NODE_SZ; c 588 fs/ubifs/super.c c->ranges[UBIFS_DENT_NODE].max_len = UBIFS_MAX_DENT_NODE_SZ; c 589 fs/ubifs/super.c c->ranges[UBIFS_XENT_NODE].min_len = UBIFS_XENT_NODE_SZ; c 590 fs/ubifs/super.c c->ranges[UBIFS_XENT_NODE].max_len = UBIFS_MAX_XENT_NODE_SZ; c 591 fs/ubifs/super.c c->ranges[UBIFS_DATA_NODE].min_len = UBIFS_DATA_NODE_SZ; c 592 fs/ubifs/super.c c->ranges[UBIFS_DATA_NODE].max_len = UBIFS_MAX_DATA_NODE_SZ; c 597 fs/ubifs/super.c c->ranges[UBIFS_IDX_NODE].min_len = UBIFS_IDX_NODE_SZ + UBIFS_BRANCH_SZ; c 602 fs/ubifs/super.c c->ranges[UBIFS_IDX_NODE].max_len = INT_MAX; c 608 fs/ubifs/super.c c->dead_wm = ALIGN(MIN_WRITE_SZ, c->min_io_size); c 609 fs/ubifs/super.c c->dark_wm = ALIGN(UBIFS_MAX_NODE_SZ, c->min_io_size); c 616 fs/ubifs/super.c c->leb_overhead = c->leb_size % UBIFS_MAX_DATA_NODE_SZ; c 619 fs/ubifs/super.c c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ; c 620 fs/ubifs/super.c if (c->max_bu_buf_len > c->leb_size) c 621 fs/ubifs/super.c c->max_bu_buf_len = c->leb_size; c 624 fs/ubifs/super.c c->min_log_bytes = c->leb_size; c 644 fs/ubifs/super.c static int bud_wbuf_callback(struct ubifs_info *c, int lnum, int free, int pad) c 646 fs/ubifs/super.c return ubifs_update_one_lp(c, lnum, free, pad, 0, 0); c 658 fs/ubifs/super.c static int init_constants_sb(struct ubifs_info *c) c 663 fs/ubifs/super.c c->main_bytes = (long long)c->main_lebs * 
c->leb_size; c 664 fs/ubifs/super.c c->max_znode_sz = sizeof(struct ubifs_znode) + c 665 fs/ubifs/super.c c->fanout * sizeof(struct ubifs_zbranch); c 667 fs/ubifs/super.c tmp = ubifs_idx_node_sz(c, 1); c 668 fs/ubifs/super.c c->ranges[UBIFS_IDX_NODE].min_len = tmp; c 669 fs/ubifs/super.c c->min_idx_node_sz = ALIGN(tmp, 8); c 671 fs/ubifs/super.c tmp = ubifs_idx_node_sz(c, c->fanout); c 672 fs/ubifs/super.c c->ranges[UBIFS_IDX_NODE].max_len = tmp; c 673 fs/ubifs/super.c c->max_idx_node_sz = ALIGN(tmp, 8); c 676 fs/ubifs/super.c tmp = UBIFS_CS_NODE_SZ + UBIFS_REF_NODE_SZ * c->jhead_cnt; c 677 fs/ubifs/super.c tmp = ALIGN(tmp, c->min_io_size); c 678 fs/ubifs/super.c if (tmp > c->leb_size) { c 679 fs/ubifs/super.c ubifs_err(c, "too small LEB size %d, at least %d needed", c 680 fs/ubifs/super.c c->leb_size, tmp); c 688 fs/ubifs/super.c tmp64 = c->max_bud_bytes + c->leb_size - 1; c 689 fs/ubifs/super.c c->max_bud_cnt = div_u64(tmp64, c->leb_size); c 690 fs/ubifs/super.c tmp = (c->ref_node_alsz * c->max_bud_cnt + c->leb_size - 1); c 691 fs/ubifs/super.c tmp /= c->leb_size; c 693 fs/ubifs/super.c if (c->log_lebs < tmp) { c 694 fs/ubifs/super.c ubifs_err(c, "too small log %d LEBs, required min. %d LEBs", c 695 fs/ubifs/super.c c->log_lebs, tmp); c 706 fs/ubifs/super.c c->bi.page_budget = UBIFS_MAX_DATA_NODE_SZ * UBIFS_BLOCKS_PER_PAGE; c 707 fs/ubifs/super.c c->bi.inode_budget = UBIFS_INO_NODE_SZ; c 708 fs/ubifs/super.c c->bi.dent_budget = UBIFS_MAX_DENT_NODE_SZ; c 717 fs/ubifs/super.c c->bg_bud_bytes = (c->max_bud_bytes * 13) >> 4; c 725 fs/ubifs/super.c tmp64 = (long long)(c->jhead_cnt + 1) * c->leb_size + 1; c 726 fs/ubifs/super.c if (c->bg_bud_bytes < tmp64) c 727 fs/ubifs/super.c c->bg_bud_bytes = tmp64; c 728 fs/ubifs/super.c if (c->max_bud_bytes < tmp64 + c->leb_size) c 729 fs/ubifs/super.c c->max_bud_bytes = tmp64 + c->leb_size; c 731 fs/ubifs/super.c err = ubifs_calc_lpt_geom(c); c 736 fs/ubifs/super.c c->idx_leb_size = c->leb_size - c->max_idx_node_sz; c 748 fs/ubifs/super.c static void init_constants_master(struct ubifs_info *c) c 752 fs/ubifs/super.c c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c); c 753 fs/ubifs/super.c c->report_rp_size = ubifs_reported_space(c, c->rp_size); c 764 fs/ubifs/super.c tmp64 = c->main_lebs - 1 - 1 - MIN_INDEX_LEBS - c->jhead_cnt + 1; c 765 fs/ubifs/super.c tmp64 *= (long long)c->leb_size - c->leb_overhead; c 766 fs/ubifs/super.c tmp64 = ubifs_reported_space(c, tmp64); c 767 fs/ubifs/super.c c->block_cnt = tmp64 >> UBIFS_BLOCK_SHIFT; c 781 fs/ubifs/super.c static int take_gc_lnum(struct ubifs_info *c) c 785 fs/ubifs/super.c if (c->gc_lnum == -1) { c 786 fs/ubifs/super.c ubifs_err(c, "no LEB for GC"); c 791 fs/ubifs/super.c err = ubifs_change_one_lp(c, c->gc_lnum, c->leb_size, 0, c 803 fs/ubifs/super.c static int alloc_wbufs(struct ubifs_info *c) c 807 fs/ubifs/super.c c->jheads = kcalloc(c->jhead_cnt, sizeof(struct ubifs_jhead), c 809 fs/ubifs/super.c if (!c->jheads) c 813 fs/ubifs/super.c for (i = 0; i < c->jhead_cnt; i++) { c 814 fs/ubifs/super.c INIT_LIST_HEAD(&c->jheads[i].buds_list); c 815 fs/ubifs/super.c err = ubifs_wbuf_init(c, &c->jheads[i].wbuf); c 819 fs/ubifs/super.c c->jheads[i].wbuf.sync_callback = &bud_wbuf_callback; c 820 fs/ubifs/super.c c->jheads[i].wbuf.jhead = i; c 821 fs/ubifs/super.c c->jheads[i].grouped = 1; c 822 fs/ubifs/super.c c->jheads[i].log_hash = ubifs_hash_get_desc(c); c 823 fs/ubifs/super.c if (IS_ERR(c->jheads[i].log_hash)) c 831 fs/ubifs/super.c c->jheads[GCHD].wbuf.no_timer = 1; c 832 fs/ubifs/super.c 
c->jheads[GCHD].grouped = 0; c 838 fs/ubifs/super.c kfree(c->jheads[i].log_hash); c 847 fs/ubifs/super.c static void free_wbufs(struct ubifs_info *c) c 851 fs/ubifs/super.c if (c->jheads) { c 852 fs/ubifs/super.c for (i = 0; i < c->jhead_cnt; i++) { c 853 fs/ubifs/super.c kfree(c->jheads[i].wbuf.buf); c 854 fs/ubifs/super.c kfree(c->jheads[i].wbuf.inodes); c 855 fs/ubifs/super.c kfree(c->jheads[i].log_hash); c 857 fs/ubifs/super.c kfree(c->jheads); c 858 fs/ubifs/super.c c->jheads = NULL; c 866 fs/ubifs/super.c static void free_orphans(struct ubifs_info *c) c 870 fs/ubifs/super.c while (c->orph_dnext) { c 871 fs/ubifs/super.c orph = c->orph_dnext; c 872 fs/ubifs/super.c c->orph_dnext = orph->dnext; c 877 fs/ubifs/super.c while (!list_empty(&c->orph_list)) { c 878 fs/ubifs/super.c orph = list_entry(c->orph_list.next, struct ubifs_orphan, list); c 881 fs/ubifs/super.c ubifs_err(c, "orphan list not empty at unmount"); c 884 fs/ubifs/super.c vfree(c->orph_buf); c 885 fs/ubifs/super.c c->orph_buf = NULL; c 892 fs/ubifs/super.c static void free_buds(struct ubifs_info *c) c 896 fs/ubifs/super.c rbtree_postorder_for_each_entry_safe(bud, n, &c->buds, rb) c 909 fs/ubifs/super.c static int check_volume_empty(struct ubifs_info *c) c 913 fs/ubifs/super.c c->empty = 1; c 914 fs/ubifs/super.c for (lnum = 0; lnum < c->leb_cnt; lnum++) { c 915 fs/ubifs/super.c err = ubifs_is_mapped(c, lnum); c 919 fs/ubifs/super.c c->empty = 0; c 1006 fs/ubifs/super.c static int ubifs_parse_options(struct ubifs_info *c, char *options, c 1029 fs/ubifs/super.c c->mount_opts.unmount_mode = 2; c 1032 fs/ubifs/super.c c->mount_opts.unmount_mode = 1; c 1035 fs/ubifs/super.c c->mount_opts.bulk_read = 2; c 1036 fs/ubifs/super.c c->bulk_read = 1; c 1039 fs/ubifs/super.c c->mount_opts.bulk_read = 1; c 1040 fs/ubifs/super.c c->bulk_read = 0; c 1043 fs/ubifs/super.c c->mount_opts.chk_data_crc = 2; c 1044 fs/ubifs/super.c c->no_chk_data_crc = 0; c 1047 fs/ubifs/super.c c->mount_opts.chk_data_crc = 1; c 1048 fs/ubifs/super.c c->no_chk_data_crc = 1; c 1057 fs/ubifs/super.c c->mount_opts.compr_type = UBIFS_COMPR_NONE; c 1059 fs/ubifs/super.c c->mount_opts.compr_type = UBIFS_COMPR_LZO; c 1061 fs/ubifs/super.c c->mount_opts.compr_type = UBIFS_COMPR_ZLIB; c 1063 fs/ubifs/super.c c->mount_opts.compr_type = UBIFS_COMPR_ZSTD; c 1065 fs/ubifs/super.c ubifs_err(c, "unknown compressor \"%s\"", name); //FIXME: is c ready? 
c 1070 fs/ubifs/super.c c->mount_opts.override_compr = 1; c 1071 fs/ubifs/super.c c->default_compr = c->mount_opts.compr_type; c 1081 fs/ubifs/super.c c->assert_action = ASSACT_REPORT; c 1083 fs/ubifs/super.c c->assert_action = ASSACT_RO; c 1085 fs/ubifs/super.c c->assert_action = ASSACT_PANIC; c 1087 fs/ubifs/super.c ubifs_err(c, "unknown assert action \"%s\"", act); c 1095 fs/ubifs/super.c c->auth_key_name = kstrdup(args[0].from, GFP_KERNEL); c 1096 fs/ubifs/super.c if (!c->auth_key_name) c 1100 fs/ubifs/super.c c->auth_hash_name = kstrdup(args[0].from, GFP_KERNEL); c 1101 fs/ubifs/super.c if (!c->auth_hash_name) c 1109 fs/ubifs/super.c struct super_block *sb = c->vfs_sb; c 1113 fs/ubifs/super.c ubifs_err(c, "unrecognized mount option \"%s\" or missing value", c 1133 fs/ubifs/super.c static void destroy_journal(struct ubifs_info *c) c 1135 fs/ubifs/super.c while (!list_empty(&c->unclean_leb_list)) { c 1138 fs/ubifs/super.c ucleb = list_entry(c->unclean_leb_list.next, c 1143 fs/ubifs/super.c while (!list_empty(&c->old_buds)) { c 1146 fs/ubifs/super.c bud = list_entry(c->old_buds.next, struct ubifs_bud, list); c 1150 fs/ubifs/super.c ubifs_destroy_idx_gc(c); c 1151 fs/ubifs/super.c ubifs_destroy_size_tree(c); c 1152 fs/ubifs/super.c ubifs_tnc_close(c); c 1153 fs/ubifs/super.c free_buds(c); c 1160 fs/ubifs/super.c static void bu_init(struct ubifs_info *c) c 1162 fs/ubifs/super.c ubifs_assert(c, c->bulk_read == 1); c 1164 fs/ubifs/super.c if (c->bu.buf) c 1168 fs/ubifs/super.c c->bu.buf = kmalloc(c->max_bu_buf_len, GFP_KERNEL | __GFP_NOWARN); c 1169 fs/ubifs/super.c if (!c->bu.buf) { c 1170 fs/ubifs/super.c if (c->max_bu_buf_len > UBIFS_KMALLOC_OK) { c 1171 fs/ubifs/super.c c->max_bu_buf_len = UBIFS_KMALLOC_OK; c 1176 fs/ubifs/super.c ubifs_warn(c, "cannot allocate %d bytes of memory for bulk-read, disabling it", c 1177 fs/ubifs/super.c c->max_bu_buf_len); c 1178 fs/ubifs/super.c c->mount_opts.bulk_read = 1; c 1179 fs/ubifs/super.c c->bulk_read = 0; c 1191 fs/ubifs/super.c static int check_free_space(struct ubifs_info *c) c 1193 fs/ubifs/super.c ubifs_assert(c, c->dark_wm > 0); c 1194 fs/ubifs/super.c if (c->lst.total_free + c->lst.total_dirty < c->dark_wm) { c 1195 fs/ubifs/super.c ubifs_err(c, "insufficient free space to mount in R/W mode"); c 1196 fs/ubifs/super.c ubifs_dump_budg(c, &c->bi); c 1197 fs/ubifs/super.c ubifs_dump_lprops(c); c 1210 fs/ubifs/super.c static int mount_ubifs(struct ubifs_info *c) c 1216 fs/ubifs/super.c c->ro_mount = !!sb_rdonly(c->vfs_sb); c 1218 fs/ubifs/super.c c->probing = !!(c->vfs_sb->s_flags & SB_SILENT); c 1220 fs/ubifs/super.c err = init_constants_early(c); c 1224 fs/ubifs/super.c err = ubifs_debugging_init(c); c 1228 fs/ubifs/super.c err = check_volume_empty(c); c 1232 fs/ubifs/super.c if (c->empty && (c->ro_mount || c->ro_media)) { c 1237 fs/ubifs/super.c ubifs_err(c, "can't format empty UBI volume: read-only %s", c 1238 fs/ubifs/super.c c->ro_media ? 
"UBI volume" : "mount"); c 1243 fs/ubifs/super.c if (c->ro_media && !c->ro_mount) { c 1244 fs/ubifs/super.c ubifs_err(c, "cannot mount read-write - read-only media"); c 1255 fs/ubifs/super.c c->bottom_up_buf = kmalloc_array(BOTTOM_UP_HEIGHT, sizeof(int), c 1257 fs/ubifs/super.c if (!c->bottom_up_buf) c 1260 fs/ubifs/super.c c->sbuf = vmalloc(c->leb_size); c 1261 fs/ubifs/super.c if (!c->sbuf) c 1264 fs/ubifs/super.c if (!c->ro_mount) { c 1265 fs/ubifs/super.c c->ileb_buf = vmalloc(c->leb_size); c 1266 fs/ubifs/super.c if (!c->ileb_buf) c 1270 fs/ubifs/super.c if (c->bulk_read == 1) c 1271 fs/ubifs/super.c bu_init(c); c 1273 fs/ubifs/super.c if (!c->ro_mount) { c 1274 fs/ubifs/super.c c->write_reserve_buf = kmalloc(COMPRESSED_DATA_NODE_BUF_SZ + \ c 1277 fs/ubifs/super.c if (!c->write_reserve_buf) c 1281 fs/ubifs/super.c c->mounting = 1; c 1283 fs/ubifs/super.c if (c->auth_key_name) { c 1285 fs/ubifs/super.c err = ubifs_init_authentication(c); c 1289 fs/ubifs/super.c ubifs_err(c, "auth_key_name, but UBIFS is built without" c 1296 fs/ubifs/super.c err = ubifs_read_superblock(c); c 1300 fs/ubifs/super.c c->probing = 0; c 1306 fs/ubifs/super.c if (!ubifs_compr_present(c, c->default_compr)) { c 1307 fs/ubifs/super.c ubifs_err(c, "'compressor \"%s\" is not compiled in", c 1308 fs/ubifs/super.c ubifs_compr_name(c, c->default_compr)); c 1313 fs/ubifs/super.c err = init_constants_sb(c); c 1317 fs/ubifs/super.c sz = ALIGN(c->max_idx_node_sz, c->min_io_size) * 2; c 1318 fs/ubifs/super.c c->cbuf = kmalloc(sz, GFP_NOFS); c 1319 fs/ubifs/super.c if (!c->cbuf) { c 1324 fs/ubifs/super.c err = alloc_wbufs(c); c 1328 fs/ubifs/super.c sprintf(c->bgt_name, BGT_NAME_PATTERN, c->vi.ubi_num, c->vi.vol_id); c 1329 fs/ubifs/super.c if (!c->ro_mount) { c 1331 fs/ubifs/super.c c->bgt = kthread_create(ubifs_bg_thread, c, "%s", c->bgt_name); c 1332 fs/ubifs/super.c if (IS_ERR(c->bgt)) { c 1333 fs/ubifs/super.c err = PTR_ERR(c->bgt); c 1334 fs/ubifs/super.c c->bgt = NULL; c 1335 fs/ubifs/super.c ubifs_err(c, "cannot spawn \"%s\", error %d", c 1336 fs/ubifs/super.c c->bgt_name, err); c 1339 fs/ubifs/super.c wake_up_process(c->bgt); c 1342 fs/ubifs/super.c err = ubifs_read_master(c); c 1346 fs/ubifs/super.c init_constants_master(c); c 1348 fs/ubifs/super.c if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) { c 1349 fs/ubifs/super.c ubifs_msg(c, "recovery needed"); c 1350 fs/ubifs/super.c c->need_recovery = 1; c 1353 fs/ubifs/super.c if (c->need_recovery && !c->ro_mount) { c 1354 fs/ubifs/super.c err = ubifs_recover_inl_heads(c, c->sbuf); c 1359 fs/ubifs/super.c err = ubifs_lpt_init(c, 1, !c->ro_mount); c 1363 fs/ubifs/super.c if (!c->ro_mount && c->space_fixup) { c 1364 fs/ubifs/super.c err = ubifs_fixup_free_space(c); c 1369 fs/ubifs/super.c if (!c->ro_mount && !c->need_recovery) { c 1374 fs/ubifs/super.c c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY); c 1375 fs/ubifs/super.c err = ubifs_write_master(c); c 1386 fs/ubifs/super.c if (ubifs_authenticated(c) && ubifs_hmac_zero(c, c->sup_node->hmac)) { c 1387 fs/ubifs/super.c err = ubifs_hmac_wkm(c, c->sup_node->hmac_wkm); c 1390 fs/ubifs/super.c c->superblock_need_write = 1; c 1393 fs/ubifs/super.c if (!c->ro_mount && c->superblock_need_write) { c 1394 fs/ubifs/super.c err = ubifs_write_sb_node(c, c->sup_node); c 1397 fs/ubifs/super.c c->superblock_need_write = 0; c 1400 fs/ubifs/super.c err = dbg_check_idx_size(c, c->bi.old_idx_sz); c 1404 fs/ubifs/super.c err = ubifs_replay_journal(c); c 1409 fs/ubifs/super.c c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c); 
c 1411 fs/ubifs/super.c err = ubifs_mount_orphans(c, c->need_recovery, c->ro_mount); c 1415 fs/ubifs/super.c if (!c->ro_mount) { c 1418 fs/ubifs/super.c err = check_free_space(c); c 1423 fs/ubifs/super.c lnum = c->lhead_lnum + 1; c 1424 fs/ubifs/super.c if (lnum >= UBIFS_LOG_LNUM + c->log_lebs) c 1426 fs/ubifs/super.c if (lnum == c->ltail_lnum) { c 1427 fs/ubifs/super.c err = ubifs_consolidate_log(c); c 1432 fs/ubifs/super.c if (c->need_recovery) { c 1433 fs/ubifs/super.c if (!ubifs_authenticated(c)) { c 1434 fs/ubifs/super.c err = ubifs_recover_size(c, true); c 1439 fs/ubifs/super.c err = ubifs_rcvry_gc_commit(c); c 1443 fs/ubifs/super.c if (ubifs_authenticated(c)) { c 1444 fs/ubifs/super.c err = ubifs_recover_size(c, false); c 1449 fs/ubifs/super.c err = take_gc_lnum(c); c 1457 fs/ubifs/super.c err = ubifs_leb_unmap(c, c->gc_lnum); c 1462 fs/ubifs/super.c err = dbg_check_lprops(c); c 1465 fs/ubifs/super.c } else if (c->need_recovery) { c 1466 fs/ubifs/super.c err = ubifs_recover_size(c, false); c 1476 fs/ubifs/super.c err = take_gc_lnum(c); c 1482 fs/ubifs/super.c list_add_tail(&c->infos_list, &ubifs_infos); c 1485 fs/ubifs/super.c if (c->need_recovery) { c 1486 fs/ubifs/super.c if (c->ro_mount) c 1487 fs/ubifs/super.c ubifs_msg(c, "recovery deferred"); c 1489 fs/ubifs/super.c c->need_recovery = 0; c 1490 fs/ubifs/super.c ubifs_msg(c, "recovery completed"); c 1496 fs/ubifs/super.c ubifs_assert(c, c->lst.taken_empty_lebs > 0); c 1499 fs/ubifs/super.c ubifs_assert(c, c->lst.taken_empty_lebs > 0); c 1501 fs/ubifs/super.c err = dbg_check_filesystem(c); c 1505 fs/ubifs/super.c dbg_debugfs_init_fs(c); c 1507 fs/ubifs/super.c c->mounting = 0; c 1509 fs/ubifs/super.c ubifs_msg(c, "UBIFS: mounted UBI device %d, volume %d, name \"%s\"%s", c 1510 fs/ubifs/super.c c->vi.ubi_num, c->vi.vol_id, c->vi.name, c 1511 fs/ubifs/super.c c->ro_mount ? ", R/O mode" : ""); c 1512 fs/ubifs/super.c x = (long long)c->main_lebs * c->leb_size; c 1513 fs/ubifs/super.c y = (long long)c->log_lebs * c->leb_size + c->max_bud_bytes; c 1514 fs/ubifs/super.c ubifs_msg(c, "LEB size: %d bytes (%d KiB), min./max. I/O unit sizes: %d bytes/%d bytes", c 1515 fs/ubifs/super.c c->leb_size, c->leb_size >> 10, c->min_io_size, c 1516 fs/ubifs/super.c c->max_write_size); c 1517 fs/ubifs/super.c ubifs_msg(c, "FS size: %lld bytes (%lld MiB, %d LEBs), journal size %lld bytes (%lld MiB, %d LEBs)", c 1518 fs/ubifs/super.c x, x >> 20, c->main_lebs, c 1519 fs/ubifs/super.c y, y >> 20, c->log_lebs + c->max_bud_cnt); c 1520 fs/ubifs/super.c ubifs_msg(c, "reserved for root: %llu bytes (%llu KiB)", c 1521 fs/ubifs/super.c c->report_rp_size, c->report_rp_size >> 10); c 1522 fs/ubifs/super.c ubifs_msg(c, "media format: w%d/r%d (latest is w%d/r%d), UUID %pUB%s", c 1523 fs/ubifs/super.c c->fmt_version, c->ro_compat_version, c 1524 fs/ubifs/super.c UBIFS_FORMAT_VERSION, UBIFS_RO_COMPAT_VERSION, c->uuid, c 1525 fs/ubifs/super.c c->big_lpt ? 
", big LPT model" : ", small LPT model"); c 1527 fs/ubifs/super.c dbg_gen("default compressor: %s", ubifs_compr_name(c, c->default_compr)); c 1529 fs/ubifs/super.c c->jhead_cnt - NONDATA_JHEADS_CNT); c 1531 fs/ubifs/super.c c->log_lebs, UBIFS_LOG_LNUM, c->log_last); c 1533 fs/ubifs/super.c c->lpt_lebs, c->lpt_first, c->lpt_last); c 1535 fs/ubifs/super.c c->orph_lebs, c->orph_first, c->orph_last); c 1537 fs/ubifs/super.c c->main_lebs, c->main_first, c->leb_cnt - 1); c 1538 fs/ubifs/super.c dbg_gen("index LEBs: %d", c->lst.idx_lebs); c 1540 fs/ubifs/super.c c->bi.old_idx_sz, c->bi.old_idx_sz >> 10, c 1541 fs/ubifs/super.c c->bi.old_idx_sz >> 20); c 1542 fs/ubifs/super.c dbg_gen("key hash type: %d", c->key_hash_type); c 1543 fs/ubifs/super.c dbg_gen("tree fanout: %d", c->fanout); c 1544 fs/ubifs/super.c dbg_gen("reserved GC LEB: %d", c->gc_lnum); c 1545 fs/ubifs/super.c dbg_gen("max. znode size %d", c->max_znode_sz); c 1546 fs/ubifs/super.c dbg_gen("max. index node size %d", c->max_idx_node_sz); c 1555 fs/ubifs/super.c UBIFS_MAX_DENT_NODE_SZ, ubifs_idx_node_sz(c, c->fanout)); c 1556 fs/ubifs/super.c dbg_gen("dead watermark: %d", c->dead_wm); c 1557 fs/ubifs/super.c dbg_gen("dark watermark: %d", c->dark_wm); c 1558 fs/ubifs/super.c dbg_gen("LEB overhead: %d", c->leb_overhead); c 1559 fs/ubifs/super.c x = (long long)c->main_lebs * c->dark_wm; c 1563 fs/ubifs/super.c c->max_bud_bytes, c->max_bud_bytes >> 10, c 1564 fs/ubifs/super.c c->max_bud_bytes >> 20); c 1566 fs/ubifs/super.c c->bg_bud_bytes, c->bg_bud_bytes >> 10, c 1567 fs/ubifs/super.c c->bg_bud_bytes >> 20); c 1569 fs/ubifs/super.c c->bud_bytes, c->bud_bytes >> 10, c->bud_bytes >> 20); c 1570 fs/ubifs/super.c dbg_gen("max. seq. number: %llu", c->max_sqnum); c 1571 fs/ubifs/super.c dbg_gen("commit number: %llu", c->cmt_no); c 1572 fs/ubifs/super.c dbg_gen("max. 
xattrs per inode: %d", ubifs_xattr_max_cnt(c)); c 1573 fs/ubifs/super.c dbg_gen("max orphans: %d", c->max_orphans); c 1579 fs/ubifs/super.c list_del(&c->infos_list); c 1582 fs/ubifs/super.c free_orphans(c); c 1584 fs/ubifs/super.c destroy_journal(c); c 1586 fs/ubifs/super.c ubifs_lpt_free(c, 0); c 1588 fs/ubifs/super.c kfree(c->mst_node); c 1589 fs/ubifs/super.c kfree(c->rcvrd_mst_node); c 1590 fs/ubifs/super.c if (c->bgt) c 1591 fs/ubifs/super.c kthread_stop(c->bgt); c 1593 fs/ubifs/super.c free_wbufs(c); c 1595 fs/ubifs/super.c kfree(c->cbuf); c 1597 fs/ubifs/super.c kfree(c->write_reserve_buf); c 1598 fs/ubifs/super.c kfree(c->bu.buf); c 1599 fs/ubifs/super.c vfree(c->ileb_buf); c 1600 fs/ubifs/super.c vfree(c->sbuf); c 1601 fs/ubifs/super.c kfree(c->bottom_up_buf); c 1602 fs/ubifs/super.c kfree(c->sup_node); c 1603 fs/ubifs/super.c ubifs_debugging_exit(c); c 1616 fs/ubifs/super.c static void ubifs_umount(struct ubifs_info *c) c 1618 fs/ubifs/super.c dbg_gen("un-mounting UBI device %d, volume %d", c->vi.ubi_num, c 1619 fs/ubifs/super.c c->vi.vol_id); c 1621 fs/ubifs/super.c dbg_debugfs_exit_fs(c); c 1623 fs/ubifs/super.c list_del(&c->infos_list); c 1626 fs/ubifs/super.c if (c->bgt) c 1627 fs/ubifs/super.c kthread_stop(c->bgt); c 1629 fs/ubifs/super.c destroy_journal(c); c 1630 fs/ubifs/super.c free_wbufs(c); c 1631 fs/ubifs/super.c free_orphans(c); c 1632 fs/ubifs/super.c ubifs_lpt_free(c, 0); c 1633 fs/ubifs/super.c ubifs_exit_authentication(c); c 1635 fs/ubifs/super.c kfree(c->auth_key_name); c 1636 fs/ubifs/super.c kfree(c->auth_hash_name); c 1637 fs/ubifs/super.c kfree(c->cbuf); c 1638 fs/ubifs/super.c kfree(c->rcvrd_mst_node); c 1639 fs/ubifs/super.c kfree(c->mst_node); c 1640 fs/ubifs/super.c kfree(c->write_reserve_buf); c 1641 fs/ubifs/super.c kfree(c->bu.buf); c 1642 fs/ubifs/super.c vfree(c->ileb_buf); c 1643 fs/ubifs/super.c vfree(c->sbuf); c 1644 fs/ubifs/super.c kfree(c->bottom_up_buf); c 1645 fs/ubifs/super.c kfree(c->sup_node); c 1646 fs/ubifs/super.c ubifs_debugging_exit(c); c 1657 fs/ubifs/super.c static int ubifs_remount_rw(struct ubifs_info *c) c 1661 fs/ubifs/super.c if (c->rw_incompat) { c 1662 fs/ubifs/super.c ubifs_err(c, "the file-system is not R/W-compatible"); c 1663 fs/ubifs/super.c ubifs_msg(c, "on-flash format version is w%d/r%d, but software only supports up to version w%d/r%d", c 1664 fs/ubifs/super.c c->fmt_version, c->ro_compat_version, c 1669 fs/ubifs/super.c mutex_lock(&c->umount_mutex); c 1670 fs/ubifs/super.c dbg_save_space_info(c); c 1671 fs/ubifs/super.c c->remounting_rw = 1; c 1672 fs/ubifs/super.c c->ro_mount = 0; c 1674 fs/ubifs/super.c if (c->space_fixup) { c 1675 fs/ubifs/super.c err = ubifs_fixup_free_space(c); c 1680 fs/ubifs/super.c err = check_free_space(c); c 1684 fs/ubifs/super.c if (c->need_recovery) { c 1685 fs/ubifs/super.c ubifs_msg(c, "completing deferred recovery"); c 1686 fs/ubifs/super.c err = ubifs_write_rcvrd_mst_node(c); c 1689 fs/ubifs/super.c if (!ubifs_authenticated(c)) { c 1690 fs/ubifs/super.c err = ubifs_recover_size(c, true); c 1694 fs/ubifs/super.c err = ubifs_clean_lebs(c, c->sbuf); c 1697 fs/ubifs/super.c err = ubifs_recover_inl_heads(c, c->sbuf); c 1702 fs/ubifs/super.c ubifs_assert(c, c->tot_orphans == 0); c 1703 fs/ubifs/super.c err = ubifs_clear_orphans(c); c 1708 fs/ubifs/super.c if (!(c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY))) { c 1709 fs/ubifs/super.c c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY); c 1710 fs/ubifs/super.c err = ubifs_write_master(c); c 1715 fs/ubifs/super.c if 
(c->superblock_need_write) { c 1716 fs/ubifs/super.c struct ubifs_sb_node *sup = c->sup_node; c 1718 fs/ubifs/super.c err = ubifs_write_sb_node(c, sup); c 1722 fs/ubifs/super.c c->superblock_need_write = 0; c 1725 fs/ubifs/super.c c->ileb_buf = vmalloc(c->leb_size); c 1726 fs/ubifs/super.c if (!c->ileb_buf) { c 1731 fs/ubifs/super.c c->write_reserve_buf = kmalloc(COMPRESSED_DATA_NODE_BUF_SZ + \ c 1733 fs/ubifs/super.c if (!c->write_reserve_buf) { c 1738 fs/ubifs/super.c err = ubifs_lpt_init(c, 0, 1); c 1743 fs/ubifs/super.c c->bgt = kthread_create(ubifs_bg_thread, c, "%s", c->bgt_name); c 1744 fs/ubifs/super.c if (IS_ERR(c->bgt)) { c 1745 fs/ubifs/super.c err = PTR_ERR(c->bgt); c 1746 fs/ubifs/super.c c->bgt = NULL; c 1747 fs/ubifs/super.c ubifs_err(c, "cannot spawn \"%s\", error %d", c 1748 fs/ubifs/super.c c->bgt_name, err); c 1751 fs/ubifs/super.c wake_up_process(c->bgt); c 1753 fs/ubifs/super.c c->orph_buf = vmalloc(c->leb_size); c 1754 fs/ubifs/super.c if (!c->orph_buf) { c 1760 fs/ubifs/super.c lnum = c->lhead_lnum + 1; c 1761 fs/ubifs/super.c if (lnum >= UBIFS_LOG_LNUM + c->log_lebs) c 1763 fs/ubifs/super.c if (lnum == c->ltail_lnum) { c 1764 fs/ubifs/super.c err = ubifs_consolidate_log(c); c 1769 fs/ubifs/super.c if (c->need_recovery) { c 1770 fs/ubifs/super.c err = ubifs_rcvry_gc_commit(c); c 1774 fs/ubifs/super.c if (ubifs_authenticated(c)) { c 1775 fs/ubifs/super.c err = ubifs_recover_size(c, false); c 1780 fs/ubifs/super.c err = ubifs_leb_unmap(c, c->gc_lnum); c 1786 fs/ubifs/super.c c->remounting_rw = 0; c 1788 fs/ubifs/super.c if (c->need_recovery) { c 1789 fs/ubifs/super.c c->need_recovery = 0; c 1790 fs/ubifs/super.c ubifs_msg(c, "deferred recovery completed"); c 1802 fs/ubifs/super.c err = dbg_check_space_info(c); c 1805 fs/ubifs/super.c mutex_unlock(&c->umount_mutex); c 1809 fs/ubifs/super.c c->ro_mount = 1; c 1810 fs/ubifs/super.c vfree(c->orph_buf); c 1811 fs/ubifs/super.c c->orph_buf = NULL; c 1812 fs/ubifs/super.c if (c->bgt) { c 1813 fs/ubifs/super.c kthread_stop(c->bgt); c 1814 fs/ubifs/super.c c->bgt = NULL; c 1816 fs/ubifs/super.c free_wbufs(c); c 1817 fs/ubifs/super.c kfree(c->write_reserve_buf); c 1818 fs/ubifs/super.c c->write_reserve_buf = NULL; c 1819 fs/ubifs/super.c vfree(c->ileb_buf); c 1820 fs/ubifs/super.c c->ileb_buf = NULL; c 1821 fs/ubifs/super.c ubifs_lpt_free(c, 1); c 1822 fs/ubifs/super.c c->remounting_rw = 0; c 1823 fs/ubifs/super.c mutex_unlock(&c->umount_mutex); c 1834 fs/ubifs/super.c static void ubifs_remount_ro(struct ubifs_info *c) c 1838 fs/ubifs/super.c ubifs_assert(c, !c->need_recovery); c 1839 fs/ubifs/super.c ubifs_assert(c, !c->ro_mount); c 1841 fs/ubifs/super.c mutex_lock(&c->umount_mutex); c 1842 fs/ubifs/super.c if (c->bgt) { c 1843 fs/ubifs/super.c kthread_stop(c->bgt); c 1844 fs/ubifs/super.c c->bgt = NULL; c 1847 fs/ubifs/super.c dbg_save_space_info(c); c 1849 fs/ubifs/super.c for (i = 0; i < c->jhead_cnt; i++) { c 1850 fs/ubifs/super.c err = ubifs_wbuf_sync(&c->jheads[i].wbuf); c 1852 fs/ubifs/super.c ubifs_ro_mode(c, err); c 1855 fs/ubifs/super.c c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY); c 1856 fs/ubifs/super.c c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS); c 1857 fs/ubifs/super.c c->mst_node->gc_lnum = cpu_to_le32(c->gc_lnum); c 1858 fs/ubifs/super.c err = ubifs_write_master(c); c 1860 fs/ubifs/super.c ubifs_ro_mode(c, err); c 1862 fs/ubifs/super.c vfree(c->orph_buf); c 1863 fs/ubifs/super.c c->orph_buf = NULL; c 1864 fs/ubifs/super.c kfree(c->write_reserve_buf); c 1865 fs/ubifs/super.c c->write_reserve_buf 
= NULL; c 1866 fs/ubifs/super.c vfree(c->ileb_buf); c 1867 fs/ubifs/super.c c->ileb_buf = NULL; c 1868 fs/ubifs/super.c ubifs_lpt_free(c, 1); c 1869 fs/ubifs/super.c c->ro_mount = 1; c 1870 fs/ubifs/super.c err = dbg_check_space_info(c); c 1872 fs/ubifs/super.c ubifs_ro_mode(c, err); c 1873 fs/ubifs/super.c mutex_unlock(&c->umount_mutex); c 1879 fs/ubifs/super.c struct ubifs_info *c = sb->s_fs_info; c 1881 fs/ubifs/super.c ubifs_msg(c, "un-mount UBI device %d", c->vi.ubi_num); c 1888 fs/ubifs/super.c if (!c->ro_error) { c 1889 fs/ubifs/super.c ubifs_assert(c, c->bi.idx_growth == 0); c 1890 fs/ubifs/super.c ubifs_assert(c, c->bi.dd_growth == 0); c 1891 fs/ubifs/super.c ubifs_assert(c, c->bi.data_growth == 0); c 1900 fs/ubifs/super.c mutex_lock(&c->umount_mutex); c 1901 fs/ubifs/super.c if (!c->ro_mount) { c 1906 fs/ubifs/super.c if (c->bgt) { c 1907 fs/ubifs/super.c kthread_stop(c->bgt); c 1908 fs/ubifs/super.c c->bgt = NULL; c 1915 fs/ubifs/super.c if (!c->ro_error) { c 1919 fs/ubifs/super.c for (i = 0; i < c->jhead_cnt; i++) { c 1920 fs/ubifs/super.c err = ubifs_wbuf_sync(&c->jheads[i].wbuf); c 1922 fs/ubifs/super.c ubifs_ro_mode(c, err); c 1930 fs/ubifs/super.c c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY); c 1931 fs/ubifs/super.c c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS); c 1932 fs/ubifs/super.c c->mst_node->gc_lnum = cpu_to_le32(c->gc_lnum); c 1933 fs/ubifs/super.c err = ubifs_write_master(c); c 1940 fs/ubifs/super.c ubifs_err(c, "failed to write master node, error %d", c 1943 fs/ubifs/super.c for (i = 0; i < c->jhead_cnt; i++) c 1945 fs/ubifs/super.c hrtimer_cancel(&c->jheads[i].wbuf.timer); c 1949 fs/ubifs/super.c ubifs_umount(c); c 1950 fs/ubifs/super.c ubi_close_volume(c->ubi); c 1951 fs/ubifs/super.c mutex_unlock(&c->umount_mutex); c 1957 fs/ubifs/super.c struct ubifs_info *c = sb->s_fs_info; c 1962 fs/ubifs/super.c err = ubifs_parse_options(c, data, 1); c 1964 fs/ubifs/super.c ubifs_err(c, "invalid or unknown remount parameter"); c 1968 fs/ubifs/super.c if (c->ro_mount && !(*flags & SB_RDONLY)) { c 1969 fs/ubifs/super.c if (c->ro_error) { c 1970 fs/ubifs/super.c ubifs_msg(c, "cannot re-mount R/W due to prior errors"); c 1973 fs/ubifs/super.c if (c->ro_media) { c 1974 fs/ubifs/super.c ubifs_msg(c, "cannot re-mount R/W - UBI volume is R/O"); c 1977 fs/ubifs/super.c err = ubifs_remount_rw(c); c 1980 fs/ubifs/super.c } else if (!c->ro_mount && (*flags & SB_RDONLY)) { c 1981 fs/ubifs/super.c if (c->ro_error) { c 1982 fs/ubifs/super.c ubifs_msg(c, "cannot re-mount R/O due to prior errors"); c 1985 fs/ubifs/super.c ubifs_remount_ro(c); c 1988 fs/ubifs/super.c if (c->bulk_read == 1) c 1989 fs/ubifs/super.c bu_init(c); c 1992 fs/ubifs/super.c mutex_lock(&c->bu_mutex); c 1993 fs/ubifs/super.c kfree(c->bu.buf); c 1994 fs/ubifs/super.c c->bu.buf = NULL; c 1995 fs/ubifs/super.c mutex_unlock(&c->bu_mutex); c 1998 fs/ubifs/super.c if (!c->need_recovery) c 1999 fs/ubifs/super.c ubifs_assert(c, c->lst.taken_empty_lebs > 0); c 2085 fs/ubifs/super.c struct ubifs_info *c; c 2087 fs/ubifs/super.c c = kzalloc(sizeof(struct ubifs_info), GFP_KERNEL); c 2088 fs/ubifs/super.c if (c) { c 2089 fs/ubifs/super.c spin_lock_init(&c->cnt_lock); c 2090 fs/ubifs/super.c spin_lock_init(&c->cs_lock); c 2091 fs/ubifs/super.c spin_lock_init(&c->buds_lock); c 2092 fs/ubifs/super.c spin_lock_init(&c->space_lock); c 2093 fs/ubifs/super.c spin_lock_init(&c->orphan_lock); c 2094 fs/ubifs/super.c init_rwsem(&c->commit_sem); c 2095 fs/ubifs/super.c mutex_init(&c->lp_mutex); c 2096 fs/ubifs/super.c 
mutex_init(&c->tnc_mutex); c 2097 fs/ubifs/super.c mutex_init(&c->log_mutex); c 2098 fs/ubifs/super.c mutex_init(&c->umount_mutex); c 2099 fs/ubifs/super.c mutex_init(&c->bu_mutex); c 2100 fs/ubifs/super.c mutex_init(&c->write_reserve_mutex); c 2101 fs/ubifs/super.c init_waitqueue_head(&c->cmt_wq); c 2102 fs/ubifs/super.c c->buds = RB_ROOT; c 2103 fs/ubifs/super.c c->old_idx = RB_ROOT; c 2104 fs/ubifs/super.c c->size_tree = RB_ROOT; c 2105 fs/ubifs/super.c c->orph_tree = RB_ROOT; c 2106 fs/ubifs/super.c INIT_LIST_HEAD(&c->infos_list); c 2107 fs/ubifs/super.c INIT_LIST_HEAD(&c->idx_gc); c 2108 fs/ubifs/super.c INIT_LIST_HEAD(&c->replay_list); c 2109 fs/ubifs/super.c INIT_LIST_HEAD(&c->replay_buds); c 2110 fs/ubifs/super.c INIT_LIST_HEAD(&c->uncat_list); c 2111 fs/ubifs/super.c INIT_LIST_HEAD(&c->empty_list); c 2112 fs/ubifs/super.c INIT_LIST_HEAD(&c->freeable_list); c 2113 fs/ubifs/super.c INIT_LIST_HEAD(&c->frdi_idx_list); c 2114 fs/ubifs/super.c INIT_LIST_HEAD(&c->unclean_leb_list); c 2115 fs/ubifs/super.c INIT_LIST_HEAD(&c->old_buds); c 2116 fs/ubifs/super.c INIT_LIST_HEAD(&c->orph_list); c 2117 fs/ubifs/super.c INIT_LIST_HEAD(&c->orph_new); c 2118 fs/ubifs/super.c c->no_chk_data_crc = 1; c 2119 fs/ubifs/super.c c->assert_action = ASSACT_RO; c 2121 fs/ubifs/super.c c->highest_inum = UBIFS_FIRST_INO; c 2122 fs/ubifs/super.c c->lhead_lnum = c->ltail_lnum = UBIFS_LOG_LNUM; c 2124 fs/ubifs/super.c ubi_get_volume_info(ubi, &c->vi); c 2125 fs/ubifs/super.c ubi_get_device_info(c->vi.ubi_num, &c->di); c 2127 fs/ubifs/super.c return c; c 2132 fs/ubifs/super.c struct ubifs_info *c = sb->s_fs_info; c 2136 fs/ubifs/super.c c->vfs_sb = sb; c 2138 fs/ubifs/super.c c->ubi = ubi_open_volume(c->vi.ubi_num, c->vi.vol_id, UBI_READWRITE); c 2139 fs/ubifs/super.c if (IS_ERR(c->ubi)) { c 2140 fs/ubifs/super.c err = PTR_ERR(c->ubi); c 2144 fs/ubifs/super.c err = ubifs_parse_options(c, data, 0); c 2158 fs/ubifs/super.c err = super_setup_bdi_name(sb, "ubifs_%d_%d", c->vi.ubi_num, c 2159 fs/ubifs/super.c c->vi.vol_id); c 2163 fs/ubifs/super.c sb->s_fs_info = c; c 2167 fs/ubifs/super.c sb->s_maxbytes = c->max_inode_sz = key_max_inode_size(c); c 2168 fs/ubifs/super.c if (c->max_inode_sz > MAX_LFS_FILESIZE) c 2169 fs/ubifs/super.c sb->s_maxbytes = c->max_inode_sz = MAX_LFS_FILESIZE; c 2176 fs/ubifs/super.c mutex_lock(&c->umount_mutex); c 2177 fs/ubifs/super.c err = mount_ubifs(c); c 2179 fs/ubifs/super.c ubifs_assert(c, err < 0); c 2196 fs/ubifs/super.c mutex_unlock(&c->umount_mutex); c 2200 fs/ubifs/super.c ubifs_umount(c); c 2202 fs/ubifs/super.c mutex_unlock(&c->umount_mutex); c 2204 fs/ubifs/super.c ubi_close_volume(c->ubi); c 2212 fs/ubifs/super.c struct ubifs_info *c = sb->s_fs_info; c 2214 fs/ubifs/super.c return c->vi.cdev == c1->vi.cdev; c 2227 fs/ubifs/super.c struct ubifs_info *c; c 2246 fs/ubifs/super.c c = alloc_ubifs_info(ubi); c 2247 fs/ubifs/super.c if (!c) { c 2252 fs/ubifs/super.c dbg_gen("opened ubi%d_%d", c->vi.ubi_num, c->vi.vol_id); c 2254 fs/ubifs/super.c sb = sget(fs_type, sb_test, sb_set, flags, c); c 2257 fs/ubifs/super.c kfree(c); c 2263 fs/ubifs/super.c kfree(c); c 2277 fs/ubifs/super.c ubifs_msg(c, "full atime support is enabled."); c 2296 fs/ubifs/super.c struct ubifs_info *c = s->s_fs_info; c 2298 fs/ubifs/super.c kfree(c); c 25 fs/ubifs/tnc.c static int try_read_node(const struct ubifs_info *c, void *buf, int type, c 27 fs/ubifs/tnc.c static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key, c 70 fs/ubifs/tnc.c static int insert_old_idx(struct ubifs_info *c, int 
lnum, int offs) c 81 fs/ubifs/tnc.c p = &c->old_idx.rb_node; c 94 fs/ubifs/tnc.c ubifs_err(c, "old idx added twice!"); c 100 fs/ubifs/tnc.c rb_insert_color(&old_idx->rb, &c->old_idx); c 111 fs/ubifs/tnc.c int insert_old_idx_znode(struct ubifs_info *c, struct ubifs_znode *znode) c 118 fs/ubifs/tnc.c return insert_old_idx(c, zbr->lnum, zbr->offs); c 120 fs/ubifs/tnc.c if (c->zroot.len) c 121 fs/ubifs/tnc.c return insert_old_idx(c, c->zroot.lnum, c 122 fs/ubifs/tnc.c c->zroot.offs); c 133 fs/ubifs/tnc.c static int ins_clr_old_idx_znode(struct ubifs_info *c, c 143 fs/ubifs/tnc.c err = insert_old_idx(c, zbr->lnum, zbr->offs); c 151 fs/ubifs/tnc.c if (c->zroot.len) { c 152 fs/ubifs/tnc.c err = insert_old_idx(c, c->zroot.lnum, c->zroot.offs); c 155 fs/ubifs/tnc.c c->zroot.lnum = 0; c 156 fs/ubifs/tnc.c c->zroot.offs = 0; c 157 fs/ubifs/tnc.c c->zroot.len = 0; c 172 fs/ubifs/tnc.c void destroy_old_idx(struct ubifs_info *c) c 176 fs/ubifs/tnc.c rbtree_postorder_for_each_entry_safe(old_idx, n, &c->old_idx, rb) c 179 fs/ubifs/tnc.c c->old_idx = RB_ROOT; c 189 fs/ubifs/tnc.c static struct ubifs_znode *copy_znode(struct ubifs_info *c, c 194 fs/ubifs/tnc.c zn = kmemdup(znode, c->max_znode_sz, GFP_NOFS); c 202 fs/ubifs/tnc.c ubifs_assert(c, !ubifs_zn_obsolete(znode)); c 218 fs/ubifs/tnc.c atomic_long_inc(&c->dirty_zn_cnt); c 230 fs/ubifs/tnc.c static int add_idx_dirt(struct ubifs_info *c, int lnum, int dirt) c 232 fs/ubifs/tnc.c c->calc_idx_sz -= ALIGN(dirt, 8); c 233 fs/ubifs/tnc.c return ubifs_add_dirt(c, lnum, dirt); c 243 fs/ubifs/tnc.c static struct ubifs_znode *dirty_cow_znode(struct ubifs_info *c, c 253 fs/ubifs/tnc.c atomic_long_inc(&c->dirty_zn_cnt); c 254 fs/ubifs/tnc.c atomic_long_dec(&c->clean_zn_cnt); c 256 fs/ubifs/tnc.c err = add_idx_dirt(c, zbr->lnum, zbr->len); c 263 fs/ubifs/tnc.c zn = copy_znode(c, znode); c 268 fs/ubifs/tnc.c err = insert_old_idx(c, zbr->lnum, zbr->offs); c 271 fs/ubifs/tnc.c err = add_idx_dirt(c, zbr->lnum, zbr->len); c 305 fs/ubifs/tnc.c static int lnc_add(struct ubifs_info *c, struct ubifs_zbranch *zbr, c 312 fs/ubifs/tnc.c ubifs_assert(c, !zbr->leaf); c 313 fs/ubifs/tnc.c ubifs_assert(c, zbr->len != 0); c 314 fs/ubifs/tnc.c ubifs_assert(c, is_hash_key(c, &zbr->key)); c 316 fs/ubifs/tnc.c err = ubifs_validate_entry(c, dent); c 319 fs/ubifs/tnc.c ubifs_dump_node(c, dent); c 341 fs/ubifs/tnc.c static int lnc_add_directly(struct ubifs_info *c, struct ubifs_zbranch *zbr, c 346 fs/ubifs/tnc.c ubifs_assert(c, !zbr->leaf); c 347 fs/ubifs/tnc.c ubifs_assert(c, zbr->len != 0); c 349 fs/ubifs/tnc.c err = ubifs_validate_entry(c, node); c 352 fs/ubifs/tnc.c ubifs_dump_node(c, node); c 384 fs/ubifs/tnc.c static int tnc_read_hashed_node(struct ubifs_info *c, struct ubifs_zbranch *zbr, c 389 fs/ubifs/tnc.c ubifs_assert(c, is_hash_key(c, &zbr->key)); c 393 fs/ubifs/tnc.c ubifs_assert(c, zbr->len != 0); c 398 fs/ubifs/tnc.c if (c->replaying) { c 399 fs/ubifs/tnc.c err = fallible_read_node(c, &zbr->key, zbr, node); c 409 fs/ubifs/tnc.c err = ubifs_tnc_read_node(c, zbr, node); c 415 fs/ubifs/tnc.c err = lnc_add(c, zbr, node); c 441 fs/ubifs/tnc.c static int try_read_node(const struct ubifs_info *c, void *buf, int type, c 453 fs/ubifs/tnc.c err = ubifs_leb_read(c, lnum, buf, offs, len, 1); c 455 fs/ubifs/tnc.c ubifs_err(c, "cannot read node type %d from LEB %d:%d, error %d", c 470 fs/ubifs/tnc.c if (type != UBIFS_DATA_NODE || !c->no_chk_data_crc || c->mounting || c 471 fs/ubifs/tnc.c c->remounting_rw) { c 478 fs/ubifs/tnc.c err = ubifs_node_check_hash(c, buf, zbr->hash); c 480 
fs/ubifs/tnc.c ubifs_bad_hash(c, buf, zbr->hash, lnum, offs); c 497 fs/ubifs/tnc.c static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key, c 504 fs/ubifs/tnc.c ret = try_read_node(c, node, key_type(c, key), zbr); c 510 fs/ubifs/tnc.c key_read(c, &dent->key, &node_key); c 511 fs/ubifs/tnc.c if (keys_cmp(c, key, &node_key) != 0) c 514 fs/ubifs/tnc.c if (ret == 0 && c->replaying) c 531 fs/ubifs/tnc.c static int matches_name(struct ubifs_info *c, struct ubifs_zbranch *zbr, c 543 fs/ubifs/tnc.c err = ubifs_tnc_read_node(c, zbr, dent); c 548 fs/ubifs/tnc.c err = lnc_add_directly(c, zbr, dent); c 581 fs/ubifs/tnc.c static struct ubifs_znode *get_znode(struct ubifs_info *c, c 590 fs/ubifs/tnc.c znode = ubifs_load_znode(c, zbr, znode, n); c 603 fs/ubifs/tnc.c static int tnc_next(struct ubifs_info *c, struct ubifs_znode **zn, int *n) c 622 fs/ubifs/tnc.c znode = get_znode(c, znode, nn); c 626 fs/ubifs/tnc.c znode = get_znode(c, znode, 0); c 648 fs/ubifs/tnc.c static int tnc_prev(struct ubifs_info *c, struct ubifs_znode **zn, int *n) c 666 fs/ubifs/tnc.c znode = get_znode(c, znode, nn); c 671 fs/ubifs/tnc.c znode = get_znode(c, znode, nn); c 700 fs/ubifs/tnc.c static int resolve_collision(struct ubifs_info *c, const union ubifs_key *key, c 706 fs/ubifs/tnc.c err = matches_name(c, &(*zn)->zbranch[*n], nm); c 715 fs/ubifs/tnc.c err = tnc_prev(c, zn, n); c 717 fs/ubifs/tnc.c ubifs_assert(c, *n == 0); c 723 fs/ubifs/tnc.c if (keys_cmp(c, &(*zn)->zbranch[*n].key, key)) { c 754 fs/ubifs/tnc.c err = tnc_next(c, zn, n); c 757 fs/ubifs/tnc.c ubifs_assert(c, 0); c 762 fs/ubifs/tnc.c ubifs_assert(c, *n == 0); c 767 fs/ubifs/tnc.c err = matches_name(c, &(*zn)->zbranch[*n], nm); c 774 fs/ubifs/tnc.c ubifs_assert(c, err == NAME_GREATER); c 782 fs/ubifs/tnc.c err = tnc_next(c, &znode, &nn); c 787 fs/ubifs/tnc.c if (keys_cmp(c, &znode->zbranch[nn].key, key)) c 789 fs/ubifs/tnc.c err = matches_name(c, &znode->zbranch[nn], nm); c 798 fs/ubifs/tnc.c ubifs_assert(c, err == NAME_LESS); c 818 fs/ubifs/tnc.c static int fallible_matches_name(struct ubifs_info *c, c 831 fs/ubifs/tnc.c err = fallible_read_node(c, &zbr->key, zbr, dent); c 839 fs/ubifs/tnc.c ubifs_assert(c, err == 1); c 841 fs/ubifs/tnc.c err = lnc_add_directly(c, zbr, dent); c 888 fs/ubifs/tnc.c static int fallible_resolve_collision(struct ubifs_info *c, c 897 fs/ubifs/tnc.c cmp = fallible_matches_name(c, &znode->zbranch[nn], nm); c 917 fs/ubifs/tnc.c err = tnc_prev(c, zn, n); c 919 fs/ubifs/tnc.c ubifs_assert(c, *n == 0); c 925 fs/ubifs/tnc.c if (keys_cmp(c, &(*zn)->zbranch[*n].key, key)) { c 928 fs/ubifs/tnc.c err = tnc_next(c, zn, n); c 931 fs/ubifs/tnc.c ubifs_assert(c, 0); c 936 fs/ubifs/tnc.c ubifs_assert(c, *n == 0); c 941 fs/ubifs/tnc.c err = fallible_matches_name(c, &(*zn)->zbranch[*n], nm); c 965 fs/ubifs/tnc.c err = tnc_next(c, &znode, &nn); c 970 fs/ubifs/tnc.c if (keys_cmp(c, &znode->zbranch[nn].key, key)) c 972 fs/ubifs/tnc.c err = fallible_matches_name(c, &znode->zbranch[nn], nm); c 1033 fs/ubifs/tnc.c static int resolve_collision_directly(struct ubifs_info *c, c 1048 fs/ubifs/tnc.c err = tnc_prev(c, &znode, &nn); c 1053 fs/ubifs/tnc.c if (keys_cmp(c, &znode->zbranch[nn].key, key)) c 1066 fs/ubifs/tnc.c err = tnc_next(c, &znode, &nn); c 1071 fs/ubifs/tnc.c if (keys_cmp(c, &znode->zbranch[nn].key, key)) c 1090 fs/ubifs/tnc.c static struct ubifs_znode *dirty_cow_bottom_up(struct ubifs_info *c, c 1094 fs/ubifs/tnc.c int *path = c->bottom_up_buf, p = 0; c 1096 fs/ubifs/tnc.c ubifs_assert(c, c->zroot.znode); c 1097 
fs/ubifs/tnc.c ubifs_assert(c, znode); c 1098 fs/ubifs/tnc.c if (c->zroot.znode->level > BOTTOM_UP_HEIGHT) { c 1099 fs/ubifs/tnc.c kfree(c->bottom_up_buf); c 1100 fs/ubifs/tnc.c c->bottom_up_buf = kmalloc_array(c->zroot.znode->level, c 1103 fs/ubifs/tnc.c if (!c->bottom_up_buf) c 1105 fs/ubifs/tnc.c path = c->bottom_up_buf; c 1107 fs/ubifs/tnc.c if (c->zroot.znode->level) { c 1116 fs/ubifs/tnc.c ubifs_assert(c, p < c->zroot.znode->level); c 1130 fs/ubifs/tnc.c ubifs_assert(c, path[p - 1] >= 0); c 1131 fs/ubifs/tnc.c ubifs_assert(c, path[p - 1] < zp->child_cnt); c 1133 fs/ubifs/tnc.c znode = dirty_cow_znode(c, zbr); c 1135 fs/ubifs/tnc.c ubifs_assert(c, znode == c->zroot.znode); c 1136 fs/ubifs/tnc.c znode = dirty_cow_znode(c, &c->zroot); c 1140 fs/ubifs/tnc.c ubifs_assert(c, path[p - 1] >= 0); c 1141 fs/ubifs/tnc.c ubifs_assert(c, path[p - 1] < znode->child_cnt); c 1170 fs/ubifs/tnc.c int ubifs_lookup_level0(struct ubifs_info *c, const union ubifs_key *key, c 1178 fs/ubifs/tnc.c ubifs_assert(c, key_type(c, key) < UBIFS_INVALID_KEY); c 1180 fs/ubifs/tnc.c znode = c->zroot.znode; c 1182 fs/ubifs/tnc.c znode = ubifs_load_znode(c, &c->zroot, NULL, 0); c 1192 fs/ubifs/tnc.c exact = ubifs_search_zbranch(c, znode, key, n); c 1208 fs/ubifs/tnc.c znode = ubifs_load_znode(c, zbr, znode, *n); c 1214 fs/ubifs/tnc.c if (exact || !is_hash_key(c, key) || *n != -1) { c 1262 fs/ubifs/tnc.c err = tnc_prev(c, &znode, n); c 1270 fs/ubifs/tnc.c if (keys_cmp(c, key, &znode->zbranch[*n].key)) { c 1306 fs/ubifs/tnc.c static int lookup_level0_dirty(struct ubifs_info *c, const union ubifs_key *key, c 1315 fs/ubifs/tnc.c znode = c->zroot.znode; c 1317 fs/ubifs/tnc.c znode = ubifs_load_znode(c, &c->zroot, NULL, 0); c 1322 fs/ubifs/tnc.c znode = dirty_cow_znode(c, &c->zroot); c 1331 fs/ubifs/tnc.c exact = ubifs_search_zbranch(c, znode, key, n); c 1342 fs/ubifs/tnc.c znode = dirty_cow_znode(c, zbr); c 1349 fs/ubifs/tnc.c znode = ubifs_load_znode(c, zbr, znode, *n); c 1352 fs/ubifs/tnc.c znode = dirty_cow_znode(c, zbr); c 1358 fs/ubifs/tnc.c if (exact || !is_hash_key(c, key) || *n != -1) { c 1367 fs/ubifs/tnc.c err = tnc_prev(c, &znode, n); c 1375 fs/ubifs/tnc.c if (keys_cmp(c, key, &znode->zbranch[*n].key)) { c 1382 fs/ubifs/tnc.c znode = dirty_cow_bottom_up(c, znode); c 1402 fs/ubifs/tnc.c static int maybe_leb_gced(struct ubifs_info *c, int lnum, int gc_seq1) c 1406 fs/ubifs/tnc.c gced_lnum = c->gced_lnum; c 1408 fs/ubifs/tnc.c gc_seq2 = c->gc_seq; c 1420 fs/ubifs/tnc.c if (gced_lnum != c->gced_lnum) c 1441 fs/ubifs/tnc.c int ubifs_tnc_locate(struct ubifs_info *c, const union ubifs_key *key, c 1449 fs/ubifs/tnc.c mutex_lock(&c->tnc_mutex); c 1450 fs/ubifs/tnc.c found = ubifs_lookup_level0(c, key, &znode, &n); c 1463 fs/ubifs/tnc.c if (is_hash_key(c, key)) { c 1468 fs/ubifs/tnc.c err = tnc_read_hashed_node(c, zt, node); c 1472 fs/ubifs/tnc.c err = ubifs_tnc_read_node(c, zt, node); c 1477 fs/ubifs/tnc.c gc_seq1 = c->gc_seq; c 1478 fs/ubifs/tnc.c mutex_unlock(&c->tnc_mutex); c 1480 fs/ubifs/tnc.c if (ubifs_get_wbuf(c, zbr.lnum)) { c 1482 fs/ubifs/tnc.c err = ubifs_tnc_read_node(c, &zbr, node); c 1486 fs/ubifs/tnc.c err = fallible_read_node(c, key, &zbr, node); c 1487 fs/ubifs/tnc.c if (err <= 0 || maybe_leb_gced(c, zbr.lnum, gc_seq1)) { c 1498 fs/ubifs/tnc.c mutex_unlock(&c->tnc_mutex); c 1515 fs/ubifs/tnc.c int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu) c 1519 fs/ubifs/tnc.c unsigned int block = key_block(c, &bu->key); c 1526 fs/ubifs/tnc.c mutex_lock(&c->tnc_mutex); c 1528 fs/ubifs/tnc.c err = 
ubifs_lookup_level0(c, &bu->key, &znode, &n); c 1551 fs/ubifs/tnc.c err = tnc_next(c, &znode, &n); c 1557 fs/ubifs/tnc.c if (key_inum(c, key) != key_inum(c, &bu->key) || c 1558 fs/ubifs/tnc.c key_type(c, key) != UBIFS_DATA_KEY) { c 1585 fs/ubifs/tnc.c next_block = key_block(c, key); c 1604 fs/ubifs/tnc.c bu->gc_seq = c->gc_seq; c 1605 fs/ubifs/tnc.c mutex_unlock(&c->tnc_mutex); c 1627 fs/ubifs/tnc.c block = key_block(c, &bu->key) + bu->blk_cnt; c 1630 fs/ubifs/tnc.c if (key_block(c, &bu->zbranch[bu->cnt - 1].key) < block) c 1650 fs/ubifs/tnc.c const struct ubifs_info *c = wbuf->c; c 1654 fs/ubifs/tnc.c ubifs_assert(c, wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0); c 1655 fs/ubifs/tnc.c ubifs_assert(c, !(offs & 7) && offs < c->leb_size); c 1656 fs/ubifs/tnc.c ubifs_assert(c, offs + len <= c->leb_size); c 1663 fs/ubifs/tnc.c return ubifs_leb_read(c, lnum, buf, offs, len, 0); c 1677 fs/ubifs/tnc.c return ubifs_leb_read(c, lnum, buf, offs, rlen, 0); c 1690 fs/ubifs/tnc.c static int validate_data_node(struct ubifs_info *c, void *buf, c 1698 fs/ubifs/tnc.c ubifs_err(c, "bad node type (%d but expected %d)", c 1703 fs/ubifs/tnc.c err = ubifs_check_node(c, buf, zbr->lnum, zbr->offs, 0, 0); c 1705 fs/ubifs/tnc.c ubifs_err(c, "expected node type %d", UBIFS_DATA_NODE); c 1709 fs/ubifs/tnc.c err = ubifs_node_check_hash(c, buf, zbr->hash); c 1711 fs/ubifs/tnc.c ubifs_bad_hash(c, buf, zbr->hash, zbr->lnum, zbr->offs); c 1717 fs/ubifs/tnc.c ubifs_err(c, "bad node length %d, expected %d", len, zbr->len); c 1722 fs/ubifs/tnc.c key_read(c, buf + UBIFS_KEY_OFFSET, &key1); c 1723 fs/ubifs/tnc.c if (!keys_eq(c, &zbr->key, &key1)) { c 1724 fs/ubifs/tnc.c ubifs_err(c, "bad key in node at LEB %d:%d", c 1736 fs/ubifs/tnc.c ubifs_err(c, "bad node at LEB %d:%d", zbr->lnum, zbr->offs); c 1737 fs/ubifs/tnc.c ubifs_dump_node(c, buf); c 1752 fs/ubifs/tnc.c int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu) c 1761 fs/ubifs/tnc.c ubifs_err(c, "buffer too small %d vs %d", bu->buf_len, len); c 1766 fs/ubifs/tnc.c wbuf = ubifs_get_wbuf(c, lnum); c 1770 fs/ubifs/tnc.c err = ubifs_leb_read(c, lnum, bu->buf, offs, len, 0); c 1773 fs/ubifs/tnc.c if (maybe_leb_gced(c, lnum, bu->gc_seq)) c 1777 fs/ubifs/tnc.c ubifs_err(c, "failed to read from LEB %d:%d, error %d", c 1787 fs/ubifs/tnc.c err = validate_data_node(c, buf, &bu->zbranch[i]); c 1809 fs/ubifs/tnc.c static int do_lookup_nm(struct ubifs_info *c, const union ubifs_key *key, c 1816 fs/ubifs/tnc.c mutex_lock(&c->tnc_mutex); c 1817 fs/ubifs/tnc.c found = ubifs_lookup_level0(c, key, &znode, &n); c 1826 fs/ubifs/tnc.c ubifs_assert(c, n >= 0); c 1828 fs/ubifs/tnc.c err = resolve_collision(c, key, &znode, &n, nm); c 1837 fs/ubifs/tnc.c err = tnc_read_hashed_node(c, &znode->zbranch[n], node); c 1840 fs/ubifs/tnc.c mutex_unlock(&c->tnc_mutex); c 1857 fs/ubifs/tnc.c int ubifs_tnc_lookup_nm(struct ubifs_info *c, const union ubifs_key *key, c 1867 fs/ubifs/tnc.c err = ubifs_tnc_lookup(c, key, node); c 1880 fs/ubifs/tnc.c return do_lookup_nm(c, key, node, nm); c 1883 fs/ubifs/tnc.c static int search_dh_cookie(struct ubifs_info *c, const union ubifs_key *key, c 1893 fs/ubifs/tnc.c err = tnc_next(c, &znode, n); c 1902 fs/ubifs/tnc.c if (key_inum(c, dkey) != key_inum(c, key) || c 1903 fs/ubifs/tnc.c key_type(c, dkey) != key_type(c, key)) { c 1907 fs/ubifs/tnc.c err = tnc_read_hashed_node(c, zbr, dent); c 1911 fs/ubifs/tnc.c if (key_hash(c, key) == key_hash(c, dkey) && c 1917 fs/ubifs/tnc.c err = tnc_next(c, &znode, n); c 1923 fs/ubifs/tnc.c static int 
do_lookup_dh(struct ubifs_info *c, const union ubifs_key *key, c 1930 fs/ubifs/tnc.c ubifs_assert(c, is_hash_key(c, key)); c 1932 fs/ubifs/tnc.c lowest_dent_key(c, &start_key, key_inum(c, key)); c 1934 fs/ubifs/tnc.c mutex_lock(&c->tnc_mutex); c 1935 fs/ubifs/tnc.c err = ubifs_lookup_level0(c, &start_key, &znode, &n); c 1939 fs/ubifs/tnc.c err = search_dh_cookie(c, key, dent, cookie, &znode, &n, err); c 1942 fs/ubifs/tnc.c mutex_unlock(&c->tnc_mutex); c 1960 fs/ubifs/tnc.c int ubifs_tnc_lookup_dh(struct ubifs_info *c, const union ubifs_key *key, c 1966 fs/ubifs/tnc.c if (!c->double_hash) c 1973 fs/ubifs/tnc.c err = ubifs_tnc_lookup(c, key, node); c 1984 fs/ubifs/tnc.c return do_lookup_dh(c, key, node, cookie); c 1996 fs/ubifs/tnc.c static void correct_parent_keys(const struct ubifs_info *c, c 2001 fs/ubifs/tnc.c ubifs_assert(c, znode->parent); c 2002 fs/ubifs/tnc.c ubifs_assert(c, znode->iip == 0); c 2007 fs/ubifs/tnc.c while (keys_cmp(c, key, key1) < 0) { c 2008 fs/ubifs/tnc.c key_copy(c, key, key1); c 2029 fs/ubifs/tnc.c static void insert_zbranch(struct ubifs_info *c, struct ubifs_znode *znode, c 2034 fs/ubifs/tnc.c ubifs_assert(c, ubifs_zn_dirty(znode)); c 2081 fs/ubifs/tnc.c static int tnc_insert(struct ubifs_info *c, struct ubifs_znode *znode, c 2088 fs/ubifs/tnc.c ubifs_assert(c, n >= 0 && n <= c->fanout); c 2093 fs/ubifs/tnc.c if (znode->child_cnt < c->fanout) { c 2094 fs/ubifs/tnc.c ubifs_assert(c, n != c->fanout); c 2097 fs/ubifs/tnc.c insert_zbranch(c, znode, zbr, n); c 2101 fs/ubifs/tnc.c correct_parent_keys(c, znode); c 2117 fs/ubifs/tnc.c ins_clr_old_idx_znode(c, znode); c 2119 fs/ubifs/tnc.c zn = kzalloc(c->max_znode_sz, GFP_NOFS); c 2126 fs/ubifs/tnc.c if (znode->level == 0 && key_type(c, key) == UBIFS_DATA_KEY) { c 2128 fs/ubifs/tnc.c if (n == c->fanout) { c 2130 fs/ubifs/tnc.c if (key_inum(c, key1) == key_inum(c, key) && c 2131 fs/ubifs/tnc.c key_type(c, key1) == UBIFS_DATA_KEY) c 2135 fs/ubifs/tnc.c } else if (appending && n != c->fanout) { c 2139 fs/ubifs/tnc.c if (n >= (c->fanout + 1) / 2) { c 2141 fs/ubifs/tnc.c if (key_inum(c, key1) == key_inum(c, key) && c 2142 fs/ubifs/tnc.c key_type(c, key1) == UBIFS_DATA_KEY) { c 2144 fs/ubifs/tnc.c if (key_inum(c, key1) != key_inum(c, key) || c 2145 fs/ubifs/tnc.c key_type(c, key1) != UBIFS_DATA_KEY) { c 2147 fs/ubifs/tnc.c move = c->fanout - keep; c 2156 fs/ubifs/tnc.c keep = c->fanout; c 2159 fs/ubifs/tnc.c keep = (c->fanout + 1) / 2; c 2160 fs/ubifs/tnc.c move = c->fanout - keep; c 2185 fs/ubifs/tnc.c atomic_long_inc(&c->dirty_zn_cnt); c 2206 fs/ubifs/tnc.c insert_zbranch(c, zi, zbr, n); c 2211 fs/ubifs/tnc.c correct_parent_keys(c, znode); c 2230 fs/ubifs/tnc.c zi = kzalloc(c->max_znode_sz, GFP_NOFS); c 2238 fs/ubifs/tnc.c atomic_long_inc(&c->dirty_zn_cnt); c 2242 fs/ubifs/tnc.c zi->zbranch[0].lnum = c->zroot.lnum; c 2243 fs/ubifs/tnc.c zi->zbranch[0].offs = c->zroot.offs; c 2244 fs/ubifs/tnc.c zi->zbranch[0].len = c->zroot.len; c 2248 fs/ubifs/tnc.c c->zroot.lnum = 0; c 2249 fs/ubifs/tnc.c c->zroot.offs = 0; c 2250 fs/ubifs/tnc.c c->zroot.len = 0; c 2251 fs/ubifs/tnc.c c->zroot.znode = zi; c 2274 fs/ubifs/tnc.c int ubifs_tnc_add(struct ubifs_info *c, const union ubifs_key *key, int lnum, c 2280 fs/ubifs/tnc.c mutex_lock(&c->tnc_mutex); c 2282 fs/ubifs/tnc.c found = lookup_level0_dirty(c, key, &znode, &n); c 2290 fs/ubifs/tnc.c ubifs_copy_hash(c, hash, zbr.hash); c 2291 fs/ubifs/tnc.c key_copy(c, key, &zbr.key); c 2292 fs/ubifs/tnc.c err = tnc_insert(c, znode, &zbr, n + 1); c 2297 fs/ubifs/tnc.c err = ubifs_add_dirt(c, 
zbr->lnum, zbr->len); c 2301 fs/ubifs/tnc.c ubifs_copy_hash(c, hash, zbr->hash); c 2305 fs/ubifs/tnc.c err = dbg_check_tnc(c, 0); c 2306 fs/ubifs/tnc.c mutex_unlock(&c->tnc_mutex); c 2325 fs/ubifs/tnc.c int ubifs_tnc_replace(struct ubifs_info *c, const union ubifs_key *key, c 2331 fs/ubifs/tnc.c mutex_lock(&c->tnc_mutex); c 2334 fs/ubifs/tnc.c found = lookup_level0_dirty(c, key, &znode, &n); c 2346 fs/ubifs/tnc.c err = ubifs_add_dirt(c, zbr->lnum, zbr->len); c 2353 fs/ubifs/tnc.c } else if (is_hash_key(c, key)) { c 2354 fs/ubifs/tnc.c found = resolve_collision_directly(c, key, &znode, &n, c 2366 fs/ubifs/tnc.c znode = dirty_cow_bottom_up(c, znode); c 2374 fs/ubifs/tnc.c err = ubifs_add_dirt(c, zbr->lnum, c 2386 fs/ubifs/tnc.c err = ubifs_add_dirt(c, lnum, len); c 2389 fs/ubifs/tnc.c err = dbg_check_tnc(c, 0); c 2392 fs/ubifs/tnc.c mutex_unlock(&c->tnc_mutex); c 2409 fs/ubifs/tnc.c int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key, c 2416 fs/ubifs/tnc.c mutex_lock(&c->tnc_mutex); c 2418 fs/ubifs/tnc.c found = lookup_level0_dirty(c, key, &znode, &n); c 2425 fs/ubifs/tnc.c if (c->replaying) c 2426 fs/ubifs/tnc.c found = fallible_resolve_collision(c, key, &znode, &n, c 2429 fs/ubifs/tnc.c found = resolve_collision(c, key, &znode, &n, nm); c 2438 fs/ubifs/tnc.c znode = dirty_cow_bottom_up(c, znode); c 2449 fs/ubifs/tnc.c err = ubifs_add_dirt(c, zbr->lnum, zbr->len); c 2453 fs/ubifs/tnc.c ubifs_copy_hash(c, hash, zbr->hash); c 2465 fs/ubifs/tnc.c ubifs_copy_hash(c, hash, zbr.hash); c 2466 fs/ubifs/tnc.c key_copy(c, key, &zbr.key); c 2467 fs/ubifs/tnc.c err = tnc_insert(c, znode, &zbr, n + 1); c 2470 fs/ubifs/tnc.c if (c->replaying) { c 2479 fs/ubifs/tnc.c err = dbg_check_tnc(c, 0); c 2480 fs/ubifs/tnc.c mutex_unlock(&c->tnc_mutex); c 2483 fs/ubifs/tnc.c return ubifs_tnc_remove_nm(c, key, &noname); c 2489 fs/ubifs/tnc.c err = dbg_check_tnc(c, 0); c 2490 fs/ubifs/tnc.c mutex_unlock(&c->tnc_mutex); c 2503 fs/ubifs/tnc.c static int tnc_delete(struct ubifs_info *c, struct ubifs_znode *znode, int n) c 2510 fs/ubifs/tnc.c ubifs_assert(c, znode->level == 0); c 2511 fs/ubifs/tnc.c ubifs_assert(c, n >= 0 && n < c->fanout); c 2517 fs/ubifs/tnc.c err = ubifs_add_dirt(c, zbr->lnum, zbr->len); c 2519 fs/ubifs/tnc.c ubifs_dump_znode(c, znode); c 2537 fs/ubifs/tnc.c ubifs_assert(c, !ubifs_zn_obsolete(znode)); c 2538 fs/ubifs/tnc.c ubifs_assert(c, ubifs_zn_dirty(znode)); c 2543 fs/ubifs/tnc.c atomic_long_dec(&c->dirty_zn_cnt); c 2545 fs/ubifs/tnc.c err = insert_old_idx_znode(c, znode); c 2551 fs/ubifs/tnc.c atomic_long_inc(&c->clean_zn_cnt); c 2560 fs/ubifs/tnc.c ubifs_assert(c, znode->level != 0); c 2575 fs/ubifs/tnc.c znode = get_znode(c, znode, 0); c 2578 fs/ubifs/tnc.c znode = dirty_cow_znode(c, zbr); c 2583 fs/ubifs/tnc.c if (c->zroot.len) { c 2584 fs/ubifs/tnc.c err = insert_old_idx(c, c->zroot.lnum, c 2585 fs/ubifs/tnc.c c->zroot.offs); c 2589 fs/ubifs/tnc.c c->zroot.lnum = zbr->lnum; c 2590 fs/ubifs/tnc.c c->zroot.offs = zbr->offs; c 2591 fs/ubifs/tnc.c c->zroot.len = zbr->len; c 2592 fs/ubifs/tnc.c c->zroot.znode = znode; c 2593 fs/ubifs/tnc.c ubifs_assert(c, !ubifs_zn_obsolete(zp)); c 2594 fs/ubifs/tnc.c ubifs_assert(c, ubifs_zn_dirty(zp)); c 2595 fs/ubifs/tnc.c atomic_long_dec(&c->dirty_zn_cnt); c 2599 fs/ubifs/tnc.c atomic_long_inc(&c->clean_zn_cnt); c 2616 fs/ubifs/tnc.c int ubifs_tnc_remove(struct ubifs_info *c, const union ubifs_key *key) c 2621 fs/ubifs/tnc.c mutex_lock(&c->tnc_mutex); c 2623 fs/ubifs/tnc.c found = lookup_level0_dirty(c, key, &znode, &n); c 2629 fs/ubifs/tnc.c 
err = tnc_delete(c, znode, n); c 2631 fs/ubifs/tnc.c err = dbg_check_tnc(c, 0); c 2634 fs/ubifs/tnc.c mutex_unlock(&c->tnc_mutex); c 2646 fs/ubifs/tnc.c int ubifs_tnc_remove_nm(struct ubifs_info *c, const union ubifs_key *key, c 2652 fs/ubifs/tnc.c mutex_lock(&c->tnc_mutex); c 2654 fs/ubifs/tnc.c err = lookup_level0_dirty(c, key, &znode, &n); c 2659 fs/ubifs/tnc.c if (c->replaying) c 2660 fs/ubifs/tnc.c err = fallible_resolve_collision(c, key, &znode, &n, c 2663 fs/ubifs/tnc.c err = resolve_collision(c, key, &znode, &n, nm); c 2670 fs/ubifs/tnc.c znode = dirty_cow_bottom_up(c, znode); c 2676 fs/ubifs/tnc.c err = tnc_delete(c, znode, n); c 2682 fs/ubifs/tnc.c err = dbg_check_tnc(c, 0); c 2683 fs/ubifs/tnc.c mutex_unlock(&c->tnc_mutex); c 2695 fs/ubifs/tnc.c int ubifs_tnc_remove_dh(struct ubifs_info *c, const union ubifs_key *key, c 2703 fs/ubifs/tnc.c if (!c->double_hash) c 2706 fs/ubifs/tnc.c mutex_lock(&c->tnc_mutex); c 2707 fs/ubifs/tnc.c err = lookup_level0_dirty(c, key, &znode, &n); c 2718 fs/ubifs/tnc.c err = tnc_read_hashed_node(c, zbr, dent); c 2726 fs/ubifs/tnc.c lowest_dent_key(c, &start_key, key_inum(c, key)); c 2728 fs/ubifs/tnc.c err = ubifs_lookup_level0(c, &start_key, &znode, &n); c 2732 fs/ubifs/tnc.c err = search_dh_cookie(c, key, dent, cookie, &znode, &n, err); c 2738 fs/ubifs/tnc.c znode = dirty_cow_bottom_up(c, znode); c 2744 fs/ubifs/tnc.c err = tnc_delete(c, znode, n); c 2750 fs/ubifs/tnc.c err = dbg_check_tnc(c, 0); c 2751 fs/ubifs/tnc.c mutex_unlock(&c->tnc_mutex); c 2764 fs/ubifs/tnc.c static int key_in_range(struct ubifs_info *c, union ubifs_key *key, c 2767 fs/ubifs/tnc.c if (keys_cmp(c, key, from_key) < 0) c 2769 fs/ubifs/tnc.c if (keys_cmp(c, key, to_key) > 0) c 2784 fs/ubifs/tnc.c int ubifs_tnc_remove_range(struct ubifs_info *c, union ubifs_key *from_key, c 2791 fs/ubifs/tnc.c mutex_lock(&c->tnc_mutex); c 2794 fs/ubifs/tnc.c err = ubifs_lookup_level0(c, from_key, &znode, &n); c 2801 fs/ubifs/tnc.c err = tnc_next(c, &znode, &n); c 2809 fs/ubifs/tnc.c if (!key_in_range(c, key, from_key, to_key)) { c 2817 fs/ubifs/tnc.c znode = dirty_cow_bottom_up(c, znode); c 2827 fs/ubifs/tnc.c if (!key_in_range(c, key, from_key, to_key)) c 2830 fs/ubifs/tnc.c err = ubifs_add_dirt(c, znode->zbranch[i].lnum, c 2833 fs/ubifs/tnc.c ubifs_dump_znode(c, znode); c 2845 fs/ubifs/tnc.c err = tnc_delete(c, znode, n); c 2852 fs/ubifs/tnc.c err = dbg_check_tnc(c, 0); c 2853 fs/ubifs/tnc.c mutex_unlock(&c->tnc_mutex); c 2866 fs/ubifs/tnc.c int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum) c 2878 fs/ubifs/tnc.c lowest_xent_key(c, &key1, inum); c 2883 fs/ubifs/tnc.c xent = ubifs_tnc_next_ent(c, &key1, &nm); c 2895 fs/ubifs/tnc.c ubifs_evict_xattr_inode(c, xattr_inum); c 2899 fs/ubifs/tnc.c err = ubifs_tnc_remove_nm(c, &key1, &nm); c 2905 fs/ubifs/tnc.c lowest_ino_key(c, &key1, xattr_inum); c 2906 fs/ubifs/tnc.c highest_ino_key(c, &key2, xattr_inum); c 2907 fs/ubifs/tnc.c err = ubifs_tnc_remove_range(c, &key1, &key2); c 2915 fs/ubifs/tnc.c key_read(c, &xent->key, &key1); c 2919 fs/ubifs/tnc.c lowest_ino_key(c, &key1, inum); c 2920 fs/ubifs/tnc.c highest_ino_key(c, &key2, inum); c 2922 fs/ubifs/tnc.c return ubifs_tnc_remove_range(c, &key1, &key2); c 2948 fs/ubifs/tnc.c struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c, c 2952 fs/ubifs/tnc.c int n, err, type = key_type(c, key); c 2959 fs/ubifs/tnc.c ubifs_assert(c, is_hash_key(c, key)); c 2961 fs/ubifs/tnc.c mutex_lock(&c->tnc_mutex); c 2962 fs/ubifs/tnc.c err = ubifs_lookup_level0(c, key, &znode, &n); c 2969 
fs/ubifs/tnc.c if (c->replaying) c 2970 fs/ubifs/tnc.c err = fallible_resolve_collision(c, key, &znode, &n, c 2973 fs/ubifs/tnc.c err = resolve_collision(c, key, &znode, &n, nm); c 2981 fs/ubifs/tnc.c err = tnc_next(c, &znode, &n); c 2996 fs/ubifs/tnc.c err = tnc_next(c, &znode, &n); c 3014 fs/ubifs/tnc.c if (key_inum(c, dkey) != key_inum(c, key) || c 3015 fs/ubifs/tnc.c key_type(c, dkey) != type) { c 3020 fs/ubifs/tnc.c err = tnc_read_hashed_node(c, zbr, dent); c 3024 fs/ubifs/tnc.c mutex_unlock(&c->tnc_mutex); c 3030 fs/ubifs/tnc.c mutex_unlock(&c->tnc_mutex); c 3040 fs/ubifs/tnc.c static void tnc_destroy_cnext(struct ubifs_info *c) c 3044 fs/ubifs/tnc.c if (!c->cnext) c 3046 fs/ubifs/tnc.c ubifs_assert(c, c->cmt_state == COMMIT_BROKEN); c 3047 fs/ubifs/tnc.c cnext = c->cnext; c 3054 fs/ubifs/tnc.c } while (cnext && cnext != c->cnext); c 3061 fs/ubifs/tnc.c void ubifs_tnc_close(struct ubifs_info *c) c 3063 fs/ubifs/tnc.c tnc_destroy_cnext(c); c 3064 fs/ubifs/tnc.c if (c->zroot.znode) { c 3067 fs/ubifs/tnc.c n = atomic_long_read(&c->clean_zn_cnt); c 3068 fs/ubifs/tnc.c freed = ubifs_destroy_tnc_subtree(c, c->zroot.znode); c 3069 fs/ubifs/tnc.c ubifs_assert(c, freed == n); c 3072 fs/ubifs/tnc.c kfree(c->gap_lebs); c 3073 fs/ubifs/tnc.c kfree(c->ilebs); c 3074 fs/ubifs/tnc.c destroy_old_idx(c); c 3085 fs/ubifs/tnc.c static struct ubifs_znode *left_znode(struct ubifs_info *c, c 3099 fs/ubifs/tnc.c znode = get_znode(c, znode, n); c 3104 fs/ubifs/tnc.c znode = get_znode(c, znode, n); c 3122 fs/ubifs/tnc.c static struct ubifs_znode *right_znode(struct ubifs_info *c, c 3136 fs/ubifs/tnc.c znode = get_znode(c, znode, n); c 3140 fs/ubifs/tnc.c znode = get_znode(c, znode, 0); c 3175 fs/ubifs/tnc.c static struct ubifs_znode *lookup_znode(struct ubifs_info *c, c 3182 fs/ubifs/tnc.c ubifs_assert(c, key_type(c, key) < UBIFS_INVALID_KEY); c 3192 fs/ubifs/tnc.c znode = c->zroot.znode; c 3194 fs/ubifs/tnc.c znode = ubifs_load_znode(c, &c->zroot, NULL, 0); c 3199 fs/ubifs/tnc.c if (c->zroot.lnum == lnum && c->zroot.offs == offs) c 3205 fs/ubifs/tnc.c ubifs_search_zbranch(c, znode, key, &n); c 3215 fs/ubifs/tnc.c znode = left_znode(c, znode); c 3220 fs/ubifs/tnc.c ubifs_search_zbranch(c, znode, key, &n); c 3221 fs/ubifs/tnc.c ubifs_assert(c, n >= 0); c 3225 fs/ubifs/tnc.c znode = get_znode(c, znode, n); c 3231 fs/ubifs/tnc.c return get_znode(c, znode, n); c 3233 fs/ubifs/tnc.c if (!is_hash_key(c, key)) c 3247 fs/ubifs/tnc.c znode = left_znode(c, znode); c 3257 fs/ubifs/tnc.c return get_znode(c, znode, n); c 3259 fs/ubifs/tnc.c if (keys_cmp(c, &znode->zbranch[n].key, key) < 0) c 3269 fs/ubifs/tnc.c znode = right_znode(c, znode); c 3279 fs/ubifs/tnc.c return get_znode(c, znode, n); c 3281 fs/ubifs/tnc.c if (keys_cmp(c, &znode->zbranch[n].key, key) > 0) c 3304 fs/ubifs/tnc.c int is_idx_node_in_tnc(struct ubifs_info *c, union ubifs_key *key, int level, c 3309 fs/ubifs/tnc.c znode = lookup_znode(c, key, level, lnum, offs); c 3331 fs/ubifs/tnc.c static int is_leaf_node_in_tnc(struct ubifs_info *c, union ubifs_key *key, c 3337 fs/ubifs/tnc.c const int unique = !is_hash_key(c, key); c 3339 fs/ubifs/tnc.c found = ubifs_lookup_level0(c, key, &znode, &n); c 3357 fs/ubifs/tnc.c err = tnc_prev(c, &znode, &n); c 3362 fs/ubifs/tnc.c if (keys_cmp(c, key, &znode->zbranch[n].key)) c 3372 fs/ubifs/tnc.c err = tnc_next(c, &znode, &n); c 3378 fs/ubifs/tnc.c if (keys_cmp(c, key, &znode->zbranch[n].key)) c 3401 fs/ubifs/tnc.c int ubifs_tnc_has_node(struct ubifs_info *c, union ubifs_key *key, int level, c 3406 fs/ubifs/tnc.c 
mutex_lock(&c->tnc_mutex); c 3408 fs/ubifs/tnc.c err = is_idx_node_in_tnc(c, key, level, lnum, offs); c 3420 fs/ubifs/tnc.c err = is_leaf_node_in_tnc(c, key, lnum, offs); c 3423 fs/ubifs/tnc.c mutex_unlock(&c->tnc_mutex); c 3441 fs/ubifs/tnc.c int ubifs_dirty_idx_node(struct ubifs_info *c, union ubifs_key *key, int level, c 3447 fs/ubifs/tnc.c mutex_lock(&c->tnc_mutex); c 3448 fs/ubifs/tnc.c znode = lookup_znode(c, key, level, lnum, offs); c 3455 fs/ubifs/tnc.c znode = dirty_cow_bottom_up(c, znode); c 3462 fs/ubifs/tnc.c mutex_unlock(&c->tnc_mutex); c 3477 fs/ubifs/tnc.c int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode, c 3487 fs/ubifs/tnc.c if (!dbg_is_chk_gen(c)) c 3491 fs/ubifs/tnc.c data_key_init(c, &from_key, inode->i_ino, block); c 3492 fs/ubifs/tnc.c highest_data_key(c, &to_key, inode->i_ino); c 3494 fs/ubifs/tnc.c mutex_lock(&c->tnc_mutex); c 3495 fs/ubifs/tnc.c err = ubifs_lookup_level0(c, &from_key, &znode, &n); c 3504 fs/ubifs/tnc.c err = tnc_next(c, &znode, &n); c 3512 fs/ubifs/tnc.c ubifs_assert(c, err == 0); c 3514 fs/ubifs/tnc.c if (!key_in_range(c, key, &from_key, &to_key)) c 3518 fs/ubifs/tnc.c block = key_block(c, key); c 3519 fs/ubifs/tnc.c ubifs_err(c, "inode %lu has size %lld, but there are data at offset %lld", c 3522 fs/ubifs/tnc.c mutex_unlock(&c->tnc_mutex); c 3523 fs/ubifs/tnc.c ubifs_dump_inode(c, inode); c 3528 fs/ubifs/tnc.c mutex_unlock(&c->tnc_mutex); c 25 fs/ubifs/tnc_commit.c static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx, c 37 fs/ubifs/tnc_commit.c struct ubifs_branch *br = ubifs_idx_branch(c, idx, i); c 40 fs/ubifs/tnc_commit.c key_write_idx(c, &zbr->key, &br->key); c 44 fs/ubifs/tnc_commit.c ubifs_copy_hash(c, zbr->hash, ubifs_branch_hash(c, br)); c 46 fs/ubifs/tnc_commit.c ubifs_err(c, "bad ref in znode"); c 47 fs/ubifs/tnc_commit.c ubifs_dump_znode(c, znode); c 49 fs/ubifs/tnc_commit.c ubifs_dump_znode(c, zbr->znode); c 54 fs/ubifs/tnc_commit.c ubifs_prepare_node(c, idx, len, 0); c 55 fs/ubifs/tnc_commit.c ubifs_node_calc_hash(c, idx, hash); c 61 fs/ubifs/tnc_commit.c err = insert_old_idx_znode(c, znode); c 72 fs/ubifs/tnc_commit.c ubifs_copy_hash(c, hash, zbr->hash); c 74 fs/ubifs/tnc_commit.c c->zroot.lnum = lnum; c 75 fs/ubifs/tnc_commit.c c->zroot.offs = offs; c 76 fs/ubifs/tnc_commit.c c->zroot.len = len; c 77 fs/ubifs/tnc_commit.c ubifs_copy_hash(c, hash, c->zroot.hash); c 79 fs/ubifs/tnc_commit.c c->calc_idx_sz += ALIGN(len, 8); c 81 fs/ubifs/tnc_commit.c atomic_long_dec(&c->dirty_zn_cnt); c 83 fs/ubifs/tnc_commit.c ubifs_assert(c, ubifs_zn_dirty(znode)); c 84 fs/ubifs/tnc_commit.c ubifs_assert(c, ubifs_zn_cow(znode)); c 106 fs/ubifs/tnc_commit.c static int fill_gap(struct ubifs_info *c, int lnum, int gap_start, int gap_end, c 111 fs/ubifs/tnc_commit.c ubifs_assert(c, (gap_start & 7) == 0); c 112 fs/ubifs/tnc_commit.c ubifs_assert(c, (gap_end & 7) == 0); c 113 fs/ubifs/tnc_commit.c ubifs_assert(c, gap_end >= gap_start); c 120 fs/ubifs/tnc_commit.c while (c->enext) { c 121 fs/ubifs/tnc_commit.c len = ubifs_idx_node_sz(c, c->enext->child_cnt); c 123 fs/ubifs/tnc_commit.c struct ubifs_znode *znode = c->enext; c 127 fs/ubifs/tnc_commit.c ubifs_assert(c, alen <= gap_remains); c 128 fs/ubifs/tnc_commit.c err = make_idx_node(c, c->ileb_buf + gap_pos, znode, c 134 fs/ubifs/tnc_commit.c c->enext = znode->cnext; c 135 fs/ubifs/tnc_commit.c if (c->enext == c->cnext) c 136 fs/ubifs/tnc_commit.c c->enext = NULL; c 141 fs/ubifs/tnc_commit.c if (gap_end == c->leb_size) { c 142 fs/ubifs/tnc_commit.c c->ileb_len 
= ALIGN(gap_pos, c->min_io_size); c 144 fs/ubifs/tnc_commit.c pad_len = c->ileb_len - gap_pos; c 150 fs/ubifs/tnc_commit.c ubifs_pad(c, c->ileb_buf + gap_pos, pad_len); c 163 fs/ubifs/tnc_commit.c static int find_old_idx(struct ubifs_info *c, int lnum, int offs) c 168 fs/ubifs/tnc_commit.c p = c->old_idx.rb_node; c 198 fs/ubifs/tnc_commit.c static int is_idx_node_in_use(struct ubifs_info *c, union ubifs_key *key, c 203 fs/ubifs/tnc_commit.c ret = is_idx_node_in_tnc(c, key, level, lnum, offs); c 207 fs/ubifs/tnc_commit.c if (find_old_idx(c, lnum, offs)) c 224 fs/ubifs/tnc_commit.c static int layout_leb_in_gaps(struct ubifs_info *c, int p) c 232 fs/ubifs/tnc_commit.c lnum = ubifs_find_dirty_idx_leb(c); c 239 fs/ubifs/tnc_commit.c c->gap_lebs[p] = lnum; c 246 fs/ubifs/tnc_commit.c sleb = ubifs_scan(c, lnum, 0, c->ileb_buf, 0); c 247 fs/ubifs/tnc_commit.c c->ileb_len = 0; c 255 fs/ubifs/tnc_commit.c ubifs_assert(c, snod->type == UBIFS_IDX_NODE); c 257 fs/ubifs/tnc_commit.c key_read(c, ubifs_idx_key(c, idx), &snod->key); c 260 fs/ubifs/tnc_commit.c in_use = is_idx_node_in_use(c, &snod->key, level, lnum, c 277 fs/ubifs/tnc_commit.c written = fill_gap(c, lnum, gap_start, gap_end, &dirt); c 287 fs/ubifs/tnc_commit.c c->ileb_len = c->leb_size; c 288 fs/ubifs/tnc_commit.c gap_end = c->leb_size; c 290 fs/ubifs/tnc_commit.c written = fill_gap(c, lnum, gap_start, gap_end, &dirt); c 298 fs/ubifs/tnc_commit.c err = ubifs_read_one_lp(c, lnum, &lp); c 301 fs/ubifs/tnc_commit.c if (lp.free == c->leb_size) { c 306 fs/ubifs/tnc_commit.c err = ubifs_change_one_lp(c, lnum, c 307 fs/ubifs/tnc_commit.c c->leb_size - c->ileb_len, c 314 fs/ubifs/tnc_commit.c err = ubifs_change_one_lp(c, lnum, c->leb_size - c->ileb_len, dirt, c 318 fs/ubifs/tnc_commit.c err = ubifs_leb_change(c, lnum, c->ileb_buf, c->ileb_len); c 334 fs/ubifs/tnc_commit.c static int get_leb_cnt(struct ubifs_info *c, int cnt) c 339 fs/ubifs/tnc_commit.c cnt -= (c->leb_size - c->ihead_offs) / c->max_idx_node_sz; c 342 fs/ubifs/tnc_commit.c d = c->leb_size / c->max_idx_node_sz; c 356 fs/ubifs/tnc_commit.c static int layout_in_gaps(struct ubifs_info *c, int cnt) c 362 fs/ubifs/tnc_commit.c c->gap_lebs = kmalloc_array(c->lst.idx_lebs + 1, sizeof(int), c 364 fs/ubifs/tnc_commit.c if (!c->gap_lebs) c 367 fs/ubifs/tnc_commit.c old_idx_lebs = c->lst.idx_lebs; c 369 fs/ubifs/tnc_commit.c ubifs_assert(c, p < c->lst.idx_lebs); c 370 fs/ubifs/tnc_commit.c written = layout_leb_in_gaps(c, p); c 374 fs/ubifs/tnc_commit.c kfree(c->gap_lebs); c 375 fs/ubifs/tnc_commit.c c->gap_lebs = NULL; c 378 fs/ubifs/tnc_commit.c if (!dbg_is_chk_index(c)) { c 383 fs/ubifs/tnc_commit.c ubifs_warn(c, "out of space"); c 384 fs/ubifs/tnc_commit.c ubifs_dump_budg(c, &c->bi); c 385 fs/ubifs/tnc_commit.c ubifs_dump_lprops(c); c 392 fs/ubifs/tnc_commit.c leb_needed_cnt = get_leb_cnt(c, cnt); c 394 fs/ubifs/tnc_commit.c leb_needed_cnt, c->ileb_cnt); c 403 fs/ubifs/tnc_commit.c if (leb_needed_cnt > c->ileb_cnt && p >= old_idx_lebs && c 404 fs/ubifs/tnc_commit.c old_idx_lebs < c->lst.idx_lebs) { c 405 fs/ubifs/tnc_commit.c old_idx_lebs = c->lst.idx_lebs; c 406 fs/ubifs/tnc_commit.c gap_lebs = krealloc(c->gap_lebs, sizeof(int) * c 409 fs/ubifs/tnc_commit.c kfree(c->gap_lebs); c 410 fs/ubifs/tnc_commit.c c->gap_lebs = NULL; c 413 fs/ubifs/tnc_commit.c c->gap_lebs = gap_lebs; c 415 fs/ubifs/tnc_commit.c } while (leb_needed_cnt > c->ileb_cnt); c 417 fs/ubifs/tnc_commit.c c->gap_lebs[p] = -1; c 429 fs/ubifs/tnc_commit.c static int layout_in_empty_space(struct ubifs_info *c) c 435 
fs/ubifs/tnc_commit.c cnext = c->enext; c 439 fs/ubifs/tnc_commit.c lnum = c->ihead_lnum; c 440 fs/ubifs/tnc_commit.c buf_offs = c->ihead_offs; c 442 fs/ubifs/tnc_commit.c buf_len = ubifs_idx_node_sz(c, c->fanout); c 443 fs/ubifs/tnc_commit.c buf_len = ALIGN(buf_len, c->min_io_size); c 448 fs/ubifs/tnc_commit.c next_len = ubifs_idx_node_sz(c, cnext->child_cnt); c 449 fs/ubifs/tnc_commit.c if (buf_offs + next_len > c->leb_size) c 455 fs/ubifs/tnc_commit.c len = ubifs_idx_node_sz(c, znode->child_cnt); c 459 fs/ubifs/tnc_commit.c if (c->ileb_nxt >= c->ileb_cnt) { c 460 fs/ubifs/tnc_commit.c ubifs_err(c, "out of space"); c 463 fs/ubifs/tnc_commit.c lnum = c->ilebs[c->ileb_nxt++]; c 487 fs/ubifs/tnc_commit.c c->zroot.lnum = lnum; c 488 fs/ubifs/tnc_commit.c c->zroot.offs = offs; c 489 fs/ubifs/tnc_commit.c c->zroot.len = len; c 491 fs/ubifs/tnc_commit.c c->calc_idx_sz += ALIGN(len, 8); c 497 fs/ubifs/tnc_commit.c atomic_long_dec(&c->dirty_zn_cnt); c 504 fs/ubifs/tnc_commit.c if (cnext == c->cnext) c 507 fs/ubifs/tnc_commit.c next_len = ubifs_idx_node_sz(c, cnext->child_cnt); c 515 fs/ubifs/tnc_commit.c buf_offs + used + next_len <= c->leb_size && c 520 fs/ubifs/tnc_commit.c buf_offs + used + next_len <= c->leb_size) c 523 fs/ubifs/tnc_commit.c blen = ALIGN(wlen, c->min_io_size); c 528 fs/ubifs/tnc_commit.c if (buf_offs + next_len > c->leb_size) { c 529 fs/ubifs/tnc_commit.c err = ubifs_update_one_lp(c, lnum, c 530 fs/ubifs/tnc_commit.c c->leb_size - buf_offs, blen - used, c 542 fs/ubifs/tnc_commit.c err = ubifs_update_one_lp(c, lnum, c->leb_size - buf_offs, c 549 fs/ubifs/tnc_commit.c c->dbg->new_ihead_lnum = lnum; c 550 fs/ubifs/tnc_commit.c c->dbg->new_ihead_offs = buf_offs; c 568 fs/ubifs/tnc_commit.c static int layout_commit(struct ubifs_info *c, int no_space, int cnt) c 573 fs/ubifs/tnc_commit.c err = layout_in_gaps(c, cnt); c 577 fs/ubifs/tnc_commit.c err = layout_in_empty_space(c); c 642 fs/ubifs/tnc_commit.c static int get_znodes_to_commit(struct ubifs_info *c) c 647 fs/ubifs/tnc_commit.c c->cnext = find_first_dirty(c->zroot.znode); c 648 fs/ubifs/tnc_commit.c znode = c->enext = c->cnext; c 655 fs/ubifs/tnc_commit.c ubifs_assert(c, !ubifs_zn_cow(znode)); c 660 fs/ubifs/tnc_commit.c znode->cnext = c->cnext; c 670 fs/ubifs/tnc_commit.c ubifs_assert(c, cnt == atomic_long_read(&c->dirty_zn_cnt)); c 683 fs/ubifs/tnc_commit.c static int alloc_idx_lebs(struct ubifs_info *c, int cnt) c 687 fs/ubifs/tnc_commit.c c->ileb_cnt = 0; c 688 fs/ubifs/tnc_commit.c c->ileb_nxt = 0; c 689 fs/ubifs/tnc_commit.c leb_cnt = get_leb_cnt(c, cnt); c 693 fs/ubifs/tnc_commit.c c->ilebs = kmalloc_array(leb_cnt, sizeof(int), GFP_NOFS); c 694 fs/ubifs/tnc_commit.c if (!c->ilebs) c 697 fs/ubifs/tnc_commit.c lnum = ubifs_find_free_leb_for_idx(c); c 700 fs/ubifs/tnc_commit.c c->ilebs[c->ileb_cnt++] = lnum; c 703 fs/ubifs/tnc_commit.c if (dbg_is_chk_index(c) && !(prandom_u32() & 7)) c 717 fs/ubifs/tnc_commit.c static int free_unused_idx_lebs(struct ubifs_info *c) c 721 fs/ubifs/tnc_commit.c for (i = c->ileb_nxt; i < c->ileb_cnt; i++) { c 722 fs/ubifs/tnc_commit.c lnum = c->ilebs[i]; c 724 fs/ubifs/tnc_commit.c er = ubifs_change_one_lp(c, lnum, LPROPS_NC, LPROPS_NC, 0, c 738 fs/ubifs/tnc_commit.c static int free_idx_lebs(struct ubifs_info *c) c 742 fs/ubifs/tnc_commit.c err = free_unused_idx_lebs(c); c 743 fs/ubifs/tnc_commit.c kfree(c->ilebs); c 744 fs/ubifs/tnc_commit.c c->ilebs = NULL; c 758 fs/ubifs/tnc_commit.c int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot) c 762 fs/ubifs/tnc_commit.c 
mutex_lock(&c->tnc_mutex); c 763 fs/ubifs/tnc_commit.c err = dbg_check_tnc(c, 1); c 766 fs/ubifs/tnc_commit.c cnt = get_znodes_to_commit(c); c 770 fs/ubifs/tnc_commit.c err = alloc_idx_lebs(c, cnt); c 775 fs/ubifs/tnc_commit.c err = layout_commit(c, no_space, cnt); c 778 fs/ubifs/tnc_commit.c ubifs_assert(c, atomic_long_read(&c->dirty_zn_cnt) == 0); c 779 fs/ubifs/tnc_commit.c err = free_unused_idx_lebs(c); c 783 fs/ubifs/tnc_commit.c destroy_old_idx(c); c 784 fs/ubifs/tnc_commit.c memcpy(zroot, &c->zroot, sizeof(struct ubifs_zbranch)); c 786 fs/ubifs/tnc_commit.c err = ubifs_save_dirty_idx_lnums(c); c 790 fs/ubifs/tnc_commit.c spin_lock(&c->space_lock); c 799 fs/ubifs/tnc_commit.c ubifs_assert(c, c->bi.min_idx_lebs == ubifs_calc_min_idx_lebs(c)); c 800 fs/ubifs/tnc_commit.c c->bi.old_idx_sz = c->calc_idx_sz; c 801 fs/ubifs/tnc_commit.c c->bi.uncommitted_idx = 0; c 802 fs/ubifs/tnc_commit.c c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c); c 803 fs/ubifs/tnc_commit.c spin_unlock(&c->space_lock); c 804 fs/ubifs/tnc_commit.c mutex_unlock(&c->tnc_mutex); c 806 fs/ubifs/tnc_commit.c dbg_cmt("number of index LEBs %d", c->lst.idx_lebs); c 807 fs/ubifs/tnc_commit.c dbg_cmt("size of index %llu", c->calc_idx_sz); c 811 fs/ubifs/tnc_commit.c free_idx_lebs(c); c 813 fs/ubifs/tnc_commit.c mutex_unlock(&c->tnc_mutex); c 824 fs/ubifs/tnc_commit.c static int write_index(struct ubifs_info *c) c 831 fs/ubifs/tnc_commit.c cnext = c->enext; c 839 fs/ubifs/tnc_commit.c lnum = c->ihead_lnum; c 840 fs/ubifs/tnc_commit.c buf_offs = c->ihead_offs; c 843 fs/ubifs/tnc_commit.c buf_len = ALIGN(c->max_idx_node_sz, c->min_io_size); c 848 fs/ubifs/tnc_commit.c next_len = ubifs_idx_node_sz(c, cnext->child_cnt); c 849 fs/ubifs/tnc_commit.c if (buf_offs + next_len > c->leb_size) { c 850 fs/ubifs/tnc_commit.c err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0, 0, c 863 fs/ubifs/tnc_commit.c idx = c->cbuf + used; c 870 fs/ubifs/tnc_commit.c struct ubifs_branch *br = ubifs_idx_branch(c, idx, i); c 873 fs/ubifs/tnc_commit.c key_write_idx(c, &zbr->key, &br->key); c 877 fs/ubifs/tnc_commit.c ubifs_copy_hash(c, zbr->hash, ubifs_branch_hash(c, br)); c 879 fs/ubifs/tnc_commit.c ubifs_err(c, "bad ref in znode"); c 880 fs/ubifs/tnc_commit.c ubifs_dump_znode(c, znode); c 882 fs/ubifs/tnc_commit.c ubifs_dump_znode(c, zbr->znode); c 887 fs/ubifs/tnc_commit.c len = ubifs_idx_node_sz(c, znode->child_cnt); c 888 fs/ubifs/tnc_commit.c ubifs_prepare_node(c, idx, len, 0); c 889 fs/ubifs/tnc_commit.c ubifs_node_calc_hash(c, idx, hash); c 891 fs/ubifs/tnc_commit.c mutex_lock(&c->tnc_mutex); c 894 fs/ubifs/tnc_commit.c ubifs_copy_hash(c, hash, c 899 fs/ubifs/tnc_commit.c ubifs_copy_hash(c, hash, c 902 fs/ubifs/tnc_commit.c ubifs_copy_hash(c, hash, c->zroot.hash); c 905 fs/ubifs/tnc_commit.c mutex_unlock(&c->tnc_mutex); c 909 fs/ubifs/tnc_commit.c lnum = c->ilebs[lnum_pos++]; c 918 fs/ubifs/tnc_commit.c ubifs_err(c, "inconsistent znode posn"); c 925 fs/ubifs/tnc_commit.c ubifs_assert(c, ubifs_zn_dirty(znode)); c 926 fs/ubifs/tnc_commit.c ubifs_assert(c, ubifs_zn_cow(znode)); c 974 fs/ubifs/tnc_commit.c if (cnext == c->cnext) c 977 fs/ubifs/tnc_commit.c next_len = ubifs_idx_node_sz(c, cnext->child_cnt); c 980 fs/ubifs/tnc_commit.c if (next_len && nxt_offs <= c->leb_size) { c 987 fs/ubifs/tnc_commit.c blen = ALIGN(wlen, c->min_io_size); c 988 fs/ubifs/tnc_commit.c ubifs_pad(c, c->cbuf + wlen, blen - wlen); c 992 fs/ubifs/tnc_commit.c err = ubifs_leb_write(c, lnum, c->cbuf, buf_offs, blen); c 997 fs/ubifs/tnc_commit.c if (nxt_offs > c->leb_size) { c 
998 fs/ubifs/tnc_commit.c err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0, c 1008 fs/ubifs/tnc_commit.c memmove(c->cbuf, c->cbuf + blen, used); c 1014 fs/ubifs/tnc_commit.c if (lnum != c->dbg->new_ihead_lnum || c 1015 fs/ubifs/tnc_commit.c buf_offs != c->dbg->new_ihead_offs) { c 1016 fs/ubifs/tnc_commit.c ubifs_err(c, "inconsistent ihead"); c 1020 fs/ubifs/tnc_commit.c c->ihead_lnum = lnum; c 1021 fs/ubifs/tnc_commit.c c->ihead_offs = buf_offs; c 1032 fs/ubifs/tnc_commit.c static void free_obsolete_znodes(struct ubifs_info *c) c 1036 fs/ubifs/tnc_commit.c cnext = c->cnext; c 1044 fs/ubifs/tnc_commit.c atomic_long_inc(&c->clean_zn_cnt); c 1047 fs/ubifs/tnc_commit.c } while (cnext != c->cnext); c 1057 fs/ubifs/tnc_commit.c static int return_gap_lebs(struct ubifs_info *c) c 1061 fs/ubifs/tnc_commit.c if (!c->gap_lebs) c 1065 fs/ubifs/tnc_commit.c for (p = c->gap_lebs; *p != -1; p++) { c 1066 fs/ubifs/tnc_commit.c err = ubifs_change_one_lp(c, *p, LPROPS_NC, LPROPS_NC, 0, c 1072 fs/ubifs/tnc_commit.c kfree(c->gap_lebs); c 1073 fs/ubifs/tnc_commit.c c->gap_lebs = NULL; c 1083 fs/ubifs/tnc_commit.c int ubifs_tnc_end_commit(struct ubifs_info *c) c 1087 fs/ubifs/tnc_commit.c if (!c->cnext) c 1090 fs/ubifs/tnc_commit.c err = return_gap_lebs(c); c 1094 fs/ubifs/tnc_commit.c err = write_index(c); c 1098 fs/ubifs/tnc_commit.c mutex_lock(&c->tnc_mutex); c 1100 fs/ubifs/tnc_commit.c dbg_cmt("TNC height is %d", c->zroot.znode->level + 1); c 1102 fs/ubifs/tnc_commit.c free_obsolete_znodes(c); c 1104 fs/ubifs/tnc_commit.c c->cnext = NULL; c 1105 fs/ubifs/tnc_commit.c kfree(c->ilebs); c 1106 fs/ubifs/tnc_commit.c c->ilebs = NULL; c 1108 fs/ubifs/tnc_commit.c mutex_unlock(&c->tnc_mutex); c 29 fs/ubifs/tnc_misc.c struct ubifs_znode *ubifs_tnc_levelorder_next(const struct ubifs_info *c, c 36 fs/ubifs/tnc_misc.c ubifs_assert(c, zr); c 51 fs/ubifs/tnc_misc.c ubifs_assert(c, znode->level <= zr->level); c 78 fs/ubifs/tnc_misc.c ubifs_assert(c, znode); c 104 fs/ubifs/tnc_misc.c ubifs_assert(c, zn->level >= 0); c 125 fs/ubifs/tnc_misc.c int ubifs_search_zbranch(const struct ubifs_info *c, c 133 fs/ubifs/tnc_misc.c ubifs_assert(c, end > beg); c 137 fs/ubifs/tnc_misc.c cmp = keys_cmp(c, key, &zbr[mid].key); c 151 fs/ubifs/tnc_misc.c ubifs_assert(c, *n >= -1 && *n < znode->child_cnt); c 153 fs/ubifs/tnc_misc.c ubifs_assert(c, keys_cmp(c, key, &zbr[0].key) < 0); c 155 fs/ubifs/tnc_misc.c ubifs_assert(c, keys_cmp(c, key, &zbr[*n].key) > 0); c 157 fs/ubifs/tnc_misc.c ubifs_assert(c, keys_cmp(c, key, &zbr[*n + 1].key) < 0); c 194 fs/ubifs/tnc_misc.c struct ubifs_znode *ubifs_tnc_postorder_next(const struct ubifs_info *c, c 199 fs/ubifs/tnc_misc.c ubifs_assert(c, znode); c 221 fs/ubifs/tnc_misc.c long ubifs_destroy_tnc_subtree(const struct ubifs_info *c, c 228 fs/ubifs/tnc_misc.c ubifs_assert(c, zn); c 249 fs/ubifs/tnc_misc.c zn = ubifs_tnc_postorder_next(c, zn); c 265 fs/ubifs/tnc_misc.c static int read_znode(struct ubifs_info *c, struct ubifs_zbranch *zzbr, c 274 fs/ubifs/tnc_misc.c idx = kmalloc(c->max_idx_node_sz, GFP_NOFS); c 278 fs/ubifs/tnc_misc.c err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs); c 284 fs/ubifs/tnc_misc.c err = ubifs_node_check_hash(c, idx, zzbr->hash); c 286 fs/ubifs/tnc_misc.c ubifs_bad_hash(c, idx, zzbr->hash, lnum, offs); c 297 fs/ubifs/tnc_misc.c if (znode->child_cnt > c->fanout || znode->level > UBIFS_MAX_LEVELS) { c 298 fs/ubifs/tnc_misc.c ubifs_err(c, "current fanout %d, branch count %d", c 299 fs/ubifs/tnc_misc.c c->fanout, znode->child_cnt); c 300 fs/ubifs/tnc_misc.c 
ubifs_err(c, "max levels %d, znode level %d", c 307 fs/ubifs/tnc_misc.c struct ubifs_branch *br = ubifs_idx_branch(c, idx, i); c 310 fs/ubifs/tnc_misc.c key_read(c, &br->key, &zbr->key); c 314 fs/ubifs/tnc_misc.c ubifs_copy_hash(c, ubifs_branch_hash(c, br), zbr->hash); c 319 fs/ubifs/tnc_misc.c if (zbr->lnum < c->main_first || c 320 fs/ubifs/tnc_misc.c zbr->lnum >= c->leb_cnt || zbr->offs < 0 || c 321 fs/ubifs/tnc_misc.c zbr->offs + zbr->len > c->leb_size || zbr->offs & 7) { c 322 fs/ubifs/tnc_misc.c ubifs_err(c, "bad branch %d", i); c 327 fs/ubifs/tnc_misc.c switch (key_type(c, &zbr->key)) { c 334 fs/ubifs/tnc_misc.c ubifs_err(c, "bad key type at slot %d: %d", c 335 fs/ubifs/tnc_misc.c i, key_type(c, &zbr->key)); c 343 fs/ubifs/tnc_misc.c type = key_type(c, &zbr->key); c 344 fs/ubifs/tnc_misc.c if (c->ranges[type].max_len == 0) { c 345 fs/ubifs/tnc_misc.c if (zbr->len != c->ranges[type].len) { c 346 fs/ubifs/tnc_misc.c ubifs_err(c, "bad target node (type %d) length (%d)", c 348 fs/ubifs/tnc_misc.c ubifs_err(c, "have to be %d", c->ranges[type].len); c 352 fs/ubifs/tnc_misc.c } else if (zbr->len < c->ranges[type].min_len || c 353 fs/ubifs/tnc_misc.c zbr->len > c->ranges[type].max_len) { c 354 fs/ubifs/tnc_misc.c ubifs_err(c, "bad target node (type %d) length (%d)", c 356 fs/ubifs/tnc_misc.c ubifs_err(c, "have to be in range of %d-%d", c 357 fs/ubifs/tnc_misc.c c->ranges[type].min_len, c 358 fs/ubifs/tnc_misc.c c->ranges[type].max_len); c 374 fs/ubifs/tnc_misc.c cmp = keys_cmp(c, key1, key2); c 376 fs/ubifs/tnc_misc.c ubifs_err(c, "bad key order (keys %d and %d)", i, i + 1); c 379 fs/ubifs/tnc_misc.c } else if (cmp == 0 && !is_hash_key(c, key1)) { c 381 fs/ubifs/tnc_misc.c ubifs_err(c, "keys %d and %d are not hashed but equivalent", c 392 fs/ubifs/tnc_misc.c ubifs_err(c, "bad indexing node at LEB %d:%d, error %d", lnum, offs, err); c 393 fs/ubifs/tnc_misc.c ubifs_dump_node(c, idx); c 409 fs/ubifs/tnc_misc.c struct ubifs_znode *ubifs_load_znode(struct ubifs_info *c, c 416 fs/ubifs/tnc_misc.c ubifs_assert(c, !zbr->znode); c 421 fs/ubifs/tnc_misc.c znode = kzalloc(c->max_znode_sz, GFP_NOFS); c 425 fs/ubifs/tnc_misc.c err = read_znode(c, zbr, znode); c 429 fs/ubifs/tnc_misc.c atomic_long_inc(&c->clean_zn_cnt); c 461 fs/ubifs/tnc_misc.c int ubifs_tnc_read_node(struct ubifs_info *c, struct ubifs_zbranch *zbr, c 465 fs/ubifs/tnc_misc.c int err, type = key_type(c, key); c 472 fs/ubifs/tnc_misc.c wbuf = ubifs_get_wbuf(c, zbr->lnum); c 477 fs/ubifs/tnc_misc.c err = ubifs_read_node(c, node, type, zbr->len, zbr->lnum, c 486 fs/ubifs/tnc_misc.c key_read(c, node + UBIFS_KEY_OFFSET, &key1); c 487 fs/ubifs/tnc_misc.c if (!keys_eq(c, key, &key1)) { c 488 fs/ubifs/tnc_misc.c ubifs_err(c, "bad key in node at LEB %d:%d", c 492 fs/ubifs/tnc_misc.c ubifs_dump_node(c, node); c 496 fs/ubifs/tnc_misc.c err = ubifs_node_check_hash(c, node, zbr->hash); c 498 fs/ubifs/tnc_misc.c ubifs_bad_hash(c, node, zbr->hash, zbr->lnum, zbr->offs); c 646 fs/ubifs/ubifs.h typedef int (*ubifs_lpt_scan_callback)(struct ubifs_info *c, c 682 fs/ubifs/ubifs.h struct ubifs_info *c; c 690 fs/ubifs/ubifs.h int (*sync_callback)(struct ubifs_info *c, int lnum, int free, int pad); c 1509 fs/ubifs/ubifs.h static inline int ubifs_authenticated(const struct ubifs_info *c) c 1511 fs/ubifs/ubifs.h return (IS_ENABLED(CONFIG_UBIFS_FS_AUTHENTICATION)) && c->authenticated; c 1514 fs/ubifs/ubifs.h struct shash_desc *__ubifs_hash_get_desc(const struct ubifs_info *c); c 1515 fs/ubifs/ubifs.h static inline struct shash_desc *ubifs_hash_get_desc(const 
struct ubifs_info *c) c 1517 fs/ubifs/ubifs.h return ubifs_authenticated(c) ? __ubifs_hash_get_desc(c) : NULL; c 1520 fs/ubifs/ubifs.h static inline int ubifs_shash_init(const struct ubifs_info *c, c 1523 fs/ubifs/ubifs.h if (ubifs_authenticated(c)) c 1529 fs/ubifs/ubifs.h static inline int ubifs_shash_update(const struct ubifs_info *c, c 1535 fs/ubifs/ubifs.h if (ubifs_authenticated(c)) { c 1544 fs/ubifs/ubifs.h static inline int ubifs_shash_final(const struct ubifs_info *c, c 1547 fs/ubifs/ubifs.h return ubifs_authenticated(c) ? crypto_shash_final(desc, out) : 0; c 1550 fs/ubifs/ubifs.h int __ubifs_node_calc_hash(const struct ubifs_info *c, const void *buf, c 1552 fs/ubifs/ubifs.h static inline int ubifs_node_calc_hash(const struct ubifs_info *c, c 1555 fs/ubifs/ubifs.h if (ubifs_authenticated(c)) c 1556 fs/ubifs/ubifs.h return __ubifs_node_calc_hash(c, buf, hash); c 1561 fs/ubifs/ubifs.h int ubifs_prepare_auth_node(struct ubifs_info *c, void *node, c 1573 fs/ubifs/ubifs.h static inline int ubifs_check_hash(const struct ubifs_info *c, c 1576 fs/ubifs/ubifs.h return crypto_memneq(expected, got, c->hash_len); c 1588 fs/ubifs/ubifs.h static inline int ubifs_check_hmac(const struct ubifs_info *c, c 1591 fs/ubifs/ubifs.h return crypto_memneq(expected, got, c->hmac_desc_len); c 1594 fs/ubifs/ubifs.h void ubifs_bad_hash(const struct ubifs_info *c, const void *node, c 1597 fs/ubifs/ubifs.h int __ubifs_node_check_hash(const struct ubifs_info *c, const void *buf, c 1599 fs/ubifs/ubifs.h static inline int ubifs_node_check_hash(const struct ubifs_info *c, c 1602 fs/ubifs/ubifs.h if (ubifs_authenticated(c)) c 1603 fs/ubifs/ubifs.h return __ubifs_node_check_hash(c, buf, expected); c 1608 fs/ubifs/ubifs.h int ubifs_init_authentication(struct ubifs_info *c); c 1609 fs/ubifs/ubifs.h void __ubifs_exit_authentication(struct ubifs_info *c); c 1610 fs/ubifs/ubifs.h static inline void ubifs_exit_authentication(struct ubifs_info *c) c 1612 fs/ubifs/ubifs.h if (ubifs_authenticated(c)) c 1613 fs/ubifs/ubifs.h __ubifs_exit_authentication(c); c 1624 fs/ubifs/ubifs.h static inline u8 *ubifs_branch_hash(struct ubifs_info *c, c 1627 fs/ubifs/ubifs.h return (void *)br + sizeof(*br) + c->key_len; c 1638 fs/ubifs/ubifs.h static inline void ubifs_copy_hash(const struct ubifs_info *c, const u8 *from, c 1641 fs/ubifs/ubifs.h if (ubifs_authenticated(c)) c 1642 fs/ubifs/ubifs.h memcpy(to, from, c->hash_len); c 1645 fs/ubifs/ubifs.h int __ubifs_node_insert_hmac(const struct ubifs_info *c, void *buf, c 1647 fs/ubifs/ubifs.h static inline int ubifs_node_insert_hmac(const struct ubifs_info *c, void *buf, c 1650 fs/ubifs/ubifs.h if (ubifs_authenticated(c)) c 1651 fs/ubifs/ubifs.h return __ubifs_node_insert_hmac(c, buf, len, ofs_hmac); c 1656 fs/ubifs/ubifs.h int __ubifs_node_verify_hmac(const struct ubifs_info *c, const void *buf, c 1658 fs/ubifs/ubifs.h static inline int ubifs_node_verify_hmac(const struct ubifs_info *c, c 1661 fs/ubifs/ubifs.h if (ubifs_authenticated(c)) c 1662 fs/ubifs/ubifs.h return __ubifs_node_verify_hmac(c, buf, len, ofs_hmac); c 1675 fs/ubifs/ubifs.h static inline int ubifs_auth_node_sz(const struct ubifs_info *c) c 1677 fs/ubifs/ubifs.h if (ubifs_authenticated(c)) c 1678 fs/ubifs/ubifs.h return sizeof(struct ubifs_auth_node) + c->hmac_desc_len; c 1682 fs/ubifs/ubifs.h int ubifs_sb_verify_signature(struct ubifs_info *c, c 1684 fs/ubifs/ubifs.h bool ubifs_hmac_zero(struct ubifs_info *c, const u8 *hmac); c 1686 fs/ubifs/ubifs.h int ubifs_hmac_wkm(struct ubifs_info *c, u8 *hmac); c 1688 fs/ubifs/ubifs.h int 
__ubifs_shash_copy_state(const struct ubifs_info *c, struct shash_desc *src, c 1690 fs/ubifs/ubifs.h static inline int ubifs_shash_copy_state(const struct ubifs_info *c, c 1694 fs/ubifs/ubifs.h if (ubifs_authenticated(c)) c 1695 fs/ubifs/ubifs.h return __ubifs_shash_copy_state(c, src, target); c 1701 fs/ubifs/ubifs.h void ubifs_ro_mode(struct ubifs_info *c, int err); c 1702 fs/ubifs/ubifs.h int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs, c 1704 fs/ubifs/ubifs.h int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs, c 1706 fs/ubifs/ubifs.h int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len); c 1707 fs/ubifs/ubifs.h int ubifs_leb_unmap(struct ubifs_info *c, int lnum); c 1708 fs/ubifs/ubifs.h int ubifs_leb_map(struct ubifs_info *c, int lnum); c 1709 fs/ubifs/ubifs.h int ubifs_is_mapped(const struct ubifs_info *c, int lnum); c 1712 fs/ubifs/ubifs.h int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf); c 1713 fs/ubifs/ubifs.h int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len, c 1717 fs/ubifs/ubifs.h int ubifs_write_node(struct ubifs_info *c, void *node, int len, int lnum, c 1719 fs/ubifs/ubifs.h int ubifs_write_node_hmac(struct ubifs_info *c, void *buf, int len, int lnum, c 1721 fs/ubifs/ubifs.h int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, c 1723 fs/ubifs/ubifs.h void ubifs_init_node(struct ubifs_info *c, void *buf, int len, int pad); c 1724 fs/ubifs/ubifs.h void ubifs_crc_node(struct ubifs_info *c, void *buf, int len); c 1725 fs/ubifs/ubifs.h void ubifs_prepare_node(struct ubifs_info *c, void *buf, int len, int pad); c 1726 fs/ubifs/ubifs.h int ubifs_prepare_node_hmac(struct ubifs_info *c, void *node, int len, c 1728 fs/ubifs/ubifs.h void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last); c 1729 fs/ubifs/ubifs.h int ubifs_io_init(struct ubifs_info *c); c 1730 fs/ubifs/ubifs.h void ubifs_pad(const struct ubifs_info *c, void *buf, int pad); c 1732 fs/ubifs/ubifs.h int ubifs_bg_wbufs_sync(struct ubifs_info *c); c 1734 fs/ubifs/ubifs.h int ubifs_sync_wbufs_by_inode(struct ubifs_info *c, struct inode *inode); c 1737 fs/ubifs/ubifs.h struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum, c 1740 fs/ubifs/ubifs.h int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum, c 1742 fs/ubifs/ubifs.h struct ubifs_scan_leb *ubifs_start_scan(const struct ubifs_info *c, int lnum, c 1744 fs/ubifs/ubifs.h void ubifs_end_scan(const struct ubifs_info *c, struct ubifs_scan_leb *sleb, c 1746 fs/ubifs/ubifs.h int ubifs_add_snod(const struct ubifs_info *c, struct ubifs_scan_leb *sleb, c 1748 fs/ubifs/ubifs.h void ubifs_scanned_corruption(const struct ubifs_info *c, int lnum, int offs, c 1752 fs/ubifs/ubifs.h void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud); c 1753 fs/ubifs/ubifs.h void ubifs_create_buds_lists(struct ubifs_info *c); c 1754 fs/ubifs/ubifs.h int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs); c 1755 fs/ubifs/ubifs.h struct ubifs_bud *ubifs_search_bud(struct ubifs_info *c, int lnum); c 1756 fs/ubifs/ubifs.h struct ubifs_wbuf *ubifs_get_wbuf(struct ubifs_info *c, int lnum); c 1757 fs/ubifs/ubifs.h int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum); c 1758 fs/ubifs/ubifs.h int ubifs_log_end_commit(struct ubifs_info *c, int new_ltail_lnum); c 1759 fs/ubifs/ubifs.h int ubifs_log_post_commit(struct ubifs_info *c, int old_ltail_lnum); c 1760 
fs/ubifs/ubifs.h int ubifs_consolidate_log(struct ubifs_info *c); c 1763 fs/ubifs/ubifs.h int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir, c 1766 fs/ubifs/ubifs.h int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode, c 1768 fs/ubifs/ubifs.h int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode); c 1769 fs/ubifs/ubifs.h int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode); c 1770 fs/ubifs/ubifs.h int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir, c 1776 fs/ubifs/ubifs.h int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir, c 1783 fs/ubifs/ubifs.h int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode, c 1785 fs/ubifs/ubifs.h int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host, c 1787 fs/ubifs/ubifs.h int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode1, c 1791 fs/ubifs/ubifs.h int ubifs_budget_space(struct ubifs_info *c, struct ubifs_budget_req *req); c 1792 fs/ubifs/ubifs.h void ubifs_release_budget(struct ubifs_info *c, struct ubifs_budget_req *req); c 1793 fs/ubifs/ubifs.h void ubifs_release_dirty_inode_budget(struct ubifs_info *c, c 1795 fs/ubifs/ubifs.h int ubifs_budget_inode_op(struct ubifs_info *c, struct inode *inode, c 1797 fs/ubifs/ubifs.h void ubifs_release_ino_dirty(struct ubifs_info *c, struct inode *inode, c 1799 fs/ubifs/ubifs.h void ubifs_cancel_ino_op(struct ubifs_info *c, struct inode *inode, c 1801 fs/ubifs/ubifs.h long long ubifs_get_free_space(struct ubifs_info *c); c 1802 fs/ubifs/ubifs.h long long ubifs_get_free_space_nolock(struct ubifs_info *c); c 1803 fs/ubifs/ubifs.h int ubifs_calc_min_idx_lebs(struct ubifs_info *c); c 1804 fs/ubifs/ubifs.h void ubifs_convert_page_budget(struct ubifs_info *c); c 1805 fs/ubifs/ubifs.h long long ubifs_reported_space(const struct ubifs_info *c, long long free); c 1806 fs/ubifs/ubifs.h long long ubifs_calc_available(const struct ubifs_info *c, int min_idx_lebs); c 1809 fs/ubifs/ubifs.h int ubifs_find_free_space(struct ubifs_info *c, int min_space, int *offs, c 1811 fs/ubifs/ubifs.h int ubifs_find_free_leb_for_idx(struct ubifs_info *c); c 1812 fs/ubifs/ubifs.h int ubifs_find_dirty_leb(struct ubifs_info *c, struct ubifs_lprops *ret_lp, c 1814 fs/ubifs/ubifs.h int ubifs_find_dirty_idx_leb(struct ubifs_info *c); c 1815 fs/ubifs/ubifs.h int ubifs_save_dirty_idx_lnums(struct ubifs_info *c); c 1818 fs/ubifs/ubifs.h int ubifs_lookup_level0(struct ubifs_info *c, const union ubifs_key *key, c 1820 fs/ubifs/ubifs.h int ubifs_tnc_lookup_nm(struct ubifs_info *c, const union ubifs_key *key, c 1822 fs/ubifs/ubifs.h int ubifs_tnc_lookup_dh(struct ubifs_info *c, const union ubifs_key *key, c 1824 fs/ubifs/ubifs.h int ubifs_tnc_locate(struct ubifs_info *c, const union ubifs_key *key, c 1826 fs/ubifs/ubifs.h int ubifs_tnc_add(struct ubifs_info *c, const union ubifs_key *key, int lnum, c 1828 fs/ubifs/ubifs.h int ubifs_tnc_replace(struct ubifs_info *c, const union ubifs_key *key, c 1830 fs/ubifs/ubifs.h int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key, c 1833 fs/ubifs/ubifs.h int ubifs_tnc_remove(struct ubifs_info *c, const union ubifs_key *key); c 1834 fs/ubifs/ubifs.h int ubifs_tnc_remove_nm(struct ubifs_info *c, const union ubifs_key *key, c 1836 fs/ubifs/ubifs.h int ubifs_tnc_remove_dh(struct ubifs_info *c, const union ubifs_key *key, c 1838 fs/ubifs/ubifs.h int ubifs_tnc_remove_range(struct ubifs_info *c, union ubifs_key *from_key, c 1840 
fs/ubifs/ubifs.h int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum); c 1841 fs/ubifs/ubifs.h struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c, c 1844 fs/ubifs/ubifs.h void ubifs_tnc_close(struct ubifs_info *c); c 1845 fs/ubifs/ubifs.h int ubifs_tnc_has_node(struct ubifs_info *c, union ubifs_key *key, int level, c 1847 fs/ubifs/ubifs.h int ubifs_dirty_idx_node(struct ubifs_info *c, union ubifs_key *key, int level, c 1850 fs/ubifs/ubifs.h void destroy_old_idx(struct ubifs_info *c); c 1851 fs/ubifs/ubifs.h int is_idx_node_in_tnc(struct ubifs_info *c, union ubifs_key *key, int level, c 1853 fs/ubifs/ubifs.h int insert_old_idx_znode(struct ubifs_info *c, struct ubifs_znode *znode); c 1854 fs/ubifs/ubifs.h int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu); c 1855 fs/ubifs/ubifs.h int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu); c 1858 fs/ubifs/ubifs.h struct ubifs_znode *ubifs_tnc_levelorder_next(const struct ubifs_info *c, c 1861 fs/ubifs/ubifs.h int ubifs_search_zbranch(const struct ubifs_info *c, c 1865 fs/ubifs/ubifs.h struct ubifs_znode *ubifs_tnc_postorder_next(const struct ubifs_info *c, c 1867 fs/ubifs/ubifs.h long ubifs_destroy_tnc_subtree(const struct ubifs_info *c, c 1869 fs/ubifs/ubifs.h struct ubifs_znode *ubifs_load_znode(struct ubifs_info *c, c 1872 fs/ubifs/ubifs.h int ubifs_tnc_read_node(struct ubifs_info *c, struct ubifs_zbranch *zbr, c 1876 fs/ubifs/ubifs.h int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot); c 1877 fs/ubifs/ubifs.h int ubifs_tnc_end_commit(struct ubifs_info *c); c 1887 fs/ubifs/ubifs.h void ubifs_commit_required(struct ubifs_info *c); c 1888 fs/ubifs/ubifs.h void ubifs_request_bg_commit(struct ubifs_info *c); c 1889 fs/ubifs/ubifs.h int ubifs_run_commit(struct ubifs_info *c); c 1890 fs/ubifs/ubifs.h void ubifs_recovery_commit(struct ubifs_info *c); c 1891 fs/ubifs/ubifs.h int ubifs_gc_should_commit(struct ubifs_info *c); c 1892 fs/ubifs/ubifs.h void ubifs_wait_for_commit(struct ubifs_info *c); c 1895 fs/ubifs/ubifs.h int ubifs_compare_master_node(struct ubifs_info *c, void *m1, void *m2); c 1896 fs/ubifs/ubifs.h int ubifs_read_master(struct ubifs_info *c); c 1897 fs/ubifs/ubifs.h int ubifs_write_master(struct ubifs_info *c); c 1900 fs/ubifs/ubifs.h int ubifs_read_superblock(struct ubifs_info *c); c 1901 fs/ubifs/ubifs.h int ubifs_write_sb_node(struct ubifs_info *c, struct ubifs_sb_node *sup); c 1902 fs/ubifs/ubifs.h int ubifs_fixup_free_space(struct ubifs_info *c); c 1903 fs/ubifs/ubifs.h int ubifs_enable_encryption(struct ubifs_info *c); c 1906 fs/ubifs/ubifs.h int ubifs_validate_entry(struct ubifs_info *c, c 1908 fs/ubifs/ubifs.h int ubifs_replay_journal(struct ubifs_info *c); c 1911 fs/ubifs/ubifs.h int ubifs_garbage_collect(struct ubifs_info *c, int anyway); c 1912 fs/ubifs/ubifs.h int ubifs_gc_start_commit(struct ubifs_info *c); c 1913 fs/ubifs/ubifs.h int ubifs_gc_end_commit(struct ubifs_info *c); c 1914 fs/ubifs/ubifs.h void ubifs_destroy_idx_gc(struct ubifs_info *c); c 1915 fs/ubifs/ubifs.h int ubifs_get_idx_gc_leb(struct ubifs_info *c); c 1916 fs/ubifs/ubifs.h int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp); c 1919 fs/ubifs/ubifs.h int ubifs_add_orphan(struct ubifs_info *c, ino_t inum); c 1920 fs/ubifs/ubifs.h void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum); c 1921 fs/ubifs/ubifs.h int ubifs_orphan_start_commit(struct ubifs_info *c); c 1922 fs/ubifs/ubifs.h int ubifs_orphan_end_commit(struct ubifs_info *c); c 1923 
fs/ubifs/ubifs.h int ubifs_mount_orphans(struct ubifs_info *c, int unclean, int read_only); c 1924 fs/ubifs/ubifs.h int ubifs_clear_orphans(struct ubifs_info *c); c 1927 fs/ubifs/ubifs.h int ubifs_calc_lpt_geom(struct ubifs_info *c); c 1928 fs/ubifs/ubifs.h int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first, c 1930 fs/ubifs/ubifs.h int ubifs_lpt_init(struct ubifs_info *c, int rd, int wr); c 1931 fs/ubifs/ubifs.h struct ubifs_lprops *ubifs_lpt_lookup(struct ubifs_info *c, int lnum); c 1932 fs/ubifs/ubifs.h struct ubifs_lprops *ubifs_lpt_lookup_dirty(struct ubifs_info *c, int lnum); c 1933 fs/ubifs/ubifs.h int ubifs_lpt_scan_nolock(struct ubifs_info *c, int start_lnum, int end_lnum, c 1937 fs/ubifs/ubifs.h void ubifs_pack_lsave(struct ubifs_info *c, void *buf, int *lsave); c 1938 fs/ubifs/ubifs.h void ubifs_pack_ltab(struct ubifs_info *c, void *buf, c 1940 fs/ubifs/ubifs.h void ubifs_pack_pnode(struct ubifs_info *c, void *buf, c 1942 fs/ubifs/ubifs.h void ubifs_pack_nnode(struct ubifs_info *c, void *buf, c 1944 fs/ubifs/ubifs.h struct ubifs_pnode *ubifs_get_pnode(struct ubifs_info *c, c 1946 fs/ubifs/ubifs.h struct ubifs_nnode *ubifs_get_nnode(struct ubifs_info *c, c 1948 fs/ubifs/ubifs.h struct ubifs_pnode *ubifs_pnode_lookup(struct ubifs_info *c, int i); c 1949 fs/ubifs/ubifs.h int ubifs_read_nnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip); c 1950 fs/ubifs/ubifs.h void ubifs_add_lpt_dirt(struct ubifs_info *c, int lnum, int dirty); c 1951 fs/ubifs/ubifs.h void ubifs_add_nnode_dirt(struct ubifs_info *c, struct ubifs_nnode *nnode); c 1952 fs/ubifs/ubifs.h uint32_t ubifs_unpack_bits(const struct ubifs_info *c, uint8_t **addr, int *pos, int nrbits); c 1953 fs/ubifs/ubifs.h struct ubifs_nnode *ubifs_first_nnode(struct ubifs_info *c, int *hght); c 1955 fs/ubifs/ubifs.h int ubifs_unpack_nnode(const struct ubifs_info *c, void *buf, c 1957 fs/ubifs/ubifs.h int ubifs_lpt_calc_hash(struct ubifs_info *c, u8 *hash); c 1960 fs/ubifs/ubifs.h int ubifs_lpt_start_commit(struct ubifs_info *c); c 1961 fs/ubifs/ubifs.h int ubifs_lpt_end_commit(struct ubifs_info *c); c 1962 fs/ubifs/ubifs.h int ubifs_lpt_post_commit(struct ubifs_info *c); c 1963 fs/ubifs/ubifs.h void ubifs_lpt_free(struct ubifs_info *c, int wr_only); c 1966 fs/ubifs/ubifs.h const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c, c 1970 fs/ubifs/ubifs.h void ubifs_get_lp_stats(struct ubifs_info *c, struct ubifs_lp_stats *lst); c 1971 fs/ubifs/ubifs.h void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops, c 1973 fs/ubifs/ubifs.h void ubifs_replace_cat(struct ubifs_info *c, struct ubifs_lprops *old_lprops, c 1975 fs/ubifs/ubifs.h void ubifs_ensure_cat(struct ubifs_info *c, struct ubifs_lprops *lprops); c 1976 fs/ubifs/ubifs.h int ubifs_categorize_lprops(const struct ubifs_info *c, c 1978 fs/ubifs/ubifs.h int ubifs_change_one_lp(struct ubifs_info *c, int lnum, int free, int dirty, c 1980 fs/ubifs/ubifs.h int ubifs_update_one_lp(struct ubifs_info *c, int lnum, int free, int dirty, c 1982 fs/ubifs/ubifs.h int ubifs_read_one_lp(struct ubifs_info *c, int lnum, struct ubifs_lprops *lp); c 1983 fs/ubifs/ubifs.h const struct ubifs_lprops *ubifs_fast_find_free(struct ubifs_info *c); c 1984 fs/ubifs/ubifs.h const struct ubifs_lprops *ubifs_fast_find_empty(struct ubifs_info *c); c 1985 fs/ubifs/ubifs.h const struct ubifs_lprops *ubifs_fast_find_freeable(struct ubifs_info *c); c 1986 fs/ubifs/ubifs.h const struct ubifs_lprops *ubifs_fast_find_frdi_idx(struct ubifs_info *c); c 1987 
fs/ubifs/ubifs.h int ubifs_calc_dark(const struct ubifs_info *c, int spc); c 1995 fs/ubifs/ubifs.h struct inode *ubifs_new_inode(struct ubifs_info *c, struct inode *dir, c 2010 fs/ubifs/ubifs.h void ubifs_evict_xattr_inode(struct ubifs_info *c, ino_t xattr_inum); c 2013 fs/ubifs/ubifs.h static inline void ubifs_evict_xattr_inode(struct ubifs_info *c, c 2037 fs/ubifs/ubifs.h int ubifs_recover_master_node(struct ubifs_info *c); c 2038 fs/ubifs/ubifs.h int ubifs_write_rcvrd_mst_node(struct ubifs_info *c); c 2039 fs/ubifs/ubifs.h struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, c 2041 fs/ubifs/ubifs.h struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum, c 2043 fs/ubifs/ubifs.h int ubifs_recover_inl_heads(struct ubifs_info *c, void *sbuf); c 2044 fs/ubifs/ubifs.h int ubifs_clean_lebs(struct ubifs_info *c, void *sbuf); c 2045 fs/ubifs/ubifs.h int ubifs_rcvry_gc_commit(struct ubifs_info *c); c 2046 fs/ubifs/ubifs.h int ubifs_recover_size_accum(struct ubifs_info *c, union ubifs_key *key, c 2048 fs/ubifs/ubifs.h int ubifs_recover_size(struct ubifs_info *c, bool in_place); c 2049 fs/ubifs/ubifs.h void ubifs_destroy_size_tree(struct ubifs_info *c); c 2061 fs/ubifs/ubifs.h void ubifs_compress(const struct ubifs_info *c, const void *in_buf, int in_len, c 2063 fs/ubifs/ubifs.h int ubifs_decompress(const struct ubifs_info *c, const void *buf, int len, c 2076 fs/ubifs/ubifs.h struct ubifs_info *c = inode->i_sb->s_fs_info; c 2077 fs/ubifs/ubifs.h ubifs_assert(c, 0); c 2084 fs/ubifs/ubifs.h struct ubifs_info *c = inode->i_sb->s_fs_info; c 2085 fs/ubifs/ubifs.h ubifs_assert(c, 0); c 2107 fs/ubifs/ubifs.h void ubifs_msg(const struct ubifs_info *c, const char *fmt, ...); c 2109 fs/ubifs/ubifs.h void ubifs_err(const struct ubifs_info *c, const char *fmt, ...); c 2111 fs/ubifs/ubifs.h void ubifs_warn(const struct ubifs_info *c, const char *fmt, ...); c 2116 fs/ubifs/ubifs.h #define ubifs_errc(c, fmt, ...) \ c 2118 fs/ubifs/ubifs.h if (!(c)->probing) \ c 2119 fs/ubifs/ubifs.h ubifs_err(c, fmt, ##__VA_ARGS__); \ c 81 fs/ubifs/xattr.c static int create_xattr(struct ubifs_info *c, struct inode *host, c 91 fs/ubifs/xattr.c if (host_ui->xattr_cnt >= ubifs_xattr_max_cnt(c)) { c 92 fs/ubifs/xattr.c ubifs_err(c, "inode %lu already has too many xattrs (%d), cannot create more", c 104 fs/ubifs/xattr.c ubifs_err(c, "cannot add one more xattr name to inode %lu, total names length would become %d, max. 
is %d", c 109 fs/ubifs/xattr.c err = ubifs_budget_space(c, &req); c 113 fs/ubifs/xattr.c inode = ubifs_new_inode(c, host, S_IFREG | S_IRWXUGO); c 152 fs/ubifs/xattr.c err = ubifs_jnl_update(c, host, nm, inode, 0, 1); c 158 fs/ubifs/xattr.c ubifs_release_budget(c, &req); c 174 fs/ubifs/xattr.c ubifs_release_budget(c, &req); c 190 fs/ubifs/xattr.c static int change_xattr(struct ubifs_info *c, struct inode *host, c 201 fs/ubifs/xattr.c ubifs_assert(c, ui->data_len == inode->i_size); c 202 fs/ubifs/xattr.c err = ubifs_budget_space(c, &req); c 230 fs/ubifs/xattr.c err = ubifs_jnl_change_xattr(c, inode, host); c 235 fs/ubifs/xattr.c ubifs_release_budget(c, &req); c 244 fs/ubifs/xattr.c ubifs_release_budget(c, &req); c 248 fs/ubifs/xattr.c static struct inode *iget_xattr(struct ubifs_info *c, ino_t inum) c 252 fs/ubifs/xattr.c inode = ubifs_iget(c->vfs_sb, inum); c 254 fs/ubifs/xattr.c ubifs_err(c, "dead extended attribute entry, error %d", c 260 fs/ubifs/xattr.c ubifs_err(c, "corrupt extended attribute entry"); c 269 fs/ubifs/xattr.c struct ubifs_info *c = host->i_sb->s_fs_info; c 276 fs/ubifs/xattr.c ubifs_assert(c, inode_is_locked(host)); c 292 fs/ubifs/xattr.c xent_key_init(c, &key, host->i_ino, &nm); c 293 fs/ubifs/xattr.c err = ubifs_tnc_lookup_nm(c, &key, xent, &nm); c 302 fs/ubifs/xattr.c err = create_xattr(c, host, &nm, value, size); c 312 fs/ubifs/xattr.c inode = iget_xattr(c, le64_to_cpu(xent->inum)); c 318 fs/ubifs/xattr.c err = change_xattr(c, host, inode, value, size); c 330 fs/ubifs/xattr.c struct ubifs_info *c = host->i_sb->s_fs_info; c 344 fs/ubifs/xattr.c xent_key_init(c, &key, host->i_ino, &nm); c 345 fs/ubifs/xattr.c err = ubifs_tnc_lookup_nm(c, &key, xent, &nm); c 352 fs/ubifs/xattr.c inode = iget_xattr(c, le64_to_cpu(xent->inum)); c 359 fs/ubifs/xattr.c ubifs_assert(c, inode->i_size == ui->data_len); c 360 fs/ubifs/xattr.c ubifs_assert(c, ubifs_inode(host)->xattr_size > ui->data_len); c 400 fs/ubifs/xattr.c struct ubifs_info *c = host->i_sb->s_fs_info; c 420 fs/ubifs/xattr.c lowest_xent_key(c, &key, host->i_ino); c 422 fs/ubifs/xattr.c xent = ubifs_tnc_next_ent(c, &key, &nm); c 438 fs/ubifs/xattr.c key_read(c, &xent->key, &key); c 443 fs/ubifs/xattr.c ubifs_err(c, "cannot find next direntry, error %d", err); c 447 fs/ubifs/xattr.c ubifs_assert(c, written <= size); c 451 fs/ubifs/xattr.c static int remove_xattr(struct ubifs_info *c, struct inode *host, c 460 fs/ubifs/xattr.c ubifs_assert(c, ui->data_len == inode->i_size); c 462 fs/ubifs/xattr.c err = ubifs_budget_space(c, &req); c 473 fs/ubifs/xattr.c err = ubifs_jnl_delete_xattr(c, host, inode, nm); c 478 fs/ubifs/xattr.c ubifs_release_budget(c, &req); c 487 fs/ubifs/xattr.c ubifs_release_budget(c, &req); c 495 fs/ubifs/xattr.c struct ubifs_info *c = host->i_sb->s_fs_info; c 501 fs/ubifs/xattr.c if (ubifs_inode(host)->xattr_cnt < ubifs_xattr_max_cnt(c)) c 504 fs/ubifs/xattr.c ubifs_warn(c, "inode %lu has too many xattrs, doing a non-atomic deletion", c 507 fs/ubifs/xattr.c lowest_xent_key(c, &key, host->i_ino); c 509 fs/ubifs/xattr.c xent = ubifs_tnc_next_ent(c, &key, &nm); c 518 fs/ubifs/xattr.c xino = ubifs_iget(c->vfs_sb, le64_to_cpu(xent->inum)); c 521 fs/ubifs/xattr.c ubifs_err(c, "dead directory entry '%s', error %d", c 523 fs/ubifs/xattr.c ubifs_ro_mode(c, err); c 528 fs/ubifs/xattr.c ubifs_assert(c, ubifs_inode(xino)->xattr); c 531 fs/ubifs/xattr.c err = remove_xattr(c, host, xino, &nm); c 535 fs/ubifs/xattr.c ubifs_err(c, "cannot remove xattr, error %d", err); c 543 fs/ubifs/xattr.c key_read(c, &xent->key, &key); c 
548 fs/ubifs/xattr.c ubifs_err(c, "cannot find next direntry, error %d", err); c 566 fs/ubifs/xattr.c void ubifs_evict_xattr_inode(struct ubifs_info *c, ino_t xattr_inum) c 570 fs/ubifs/xattr.c inode = ilookup(c->vfs_sb, xattr_inum); c 580 fs/ubifs/xattr.c struct ubifs_info *c = host->i_sb->s_fs_info; c 586 fs/ubifs/xattr.c ubifs_assert(c, inode_is_locked(host)); c 595 fs/ubifs/xattr.c xent_key_init(c, &key, host->i_ino, &nm); c 596 fs/ubifs/xattr.c err = ubifs_tnc_lookup_nm(c, &key, xent, &nm); c 603 fs/ubifs/xattr.c inode = iget_xattr(c, le64_to_cpu(xent->inum)); c 609 fs/ubifs/xattr.c ubifs_assert(c, inode->i_nlink == 1); c 611 fs/ubifs/xattr.c err = remove_xattr(c, host, inode, &nm); c 662 fs/ubifs/xattr.c struct ubifs_info *c = dentry->i_sb->s_fs_info; c 663 fs/ubifs/xattr.c ubifs_err(c, "cannot initialize security for inode %lu, error %d", c 696 fs/udf/inode.c int c = 1; c 741 fs/udf/inode.c c = !c; c 743 fs/udf/inode.c laarr[c].extLength = (etype << 30) | elen; c 744 fs/udf/inode.c laarr[c].extLocation = eloc; c 783 fs/udf/inode.c if (c) c 803 fs/udf/inode.c c = 0; c 812 fs/udf/inode.c c = !c; c 813 fs/udf/inode.c laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | c 815 fs/udf/inode.c memset(&laarr[c].extLocation, 0x00, c 819 fs/udf/inode.c endnum = c + 1; c 827 fs/udf/inode.c if (!c && count != 1) { c 831 fs/udf/inode.c c = 1; c 838 fs/udf/inode.c laarr[c + 1].extLength = (etype << 30) | elen; c 839 fs/udf/inode.c laarr[c + 1].extLocation = eloc; c 849 fs/udf/inode.c if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) c 850 fs/udf/inode.c newblocknum = laarr[c].extLocation.logicalBlockNum + offset; c 876 fs/udf/inode.c udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum); c 883 fs/udf/inode.c udf_prealloc_extents(inode, c, lastblock, laarr, &endnum); c 915 fs/udf/inode.c static void udf_split_extents(struct inode *inode, int *c, int offset, c 922 fs/udf/inode.c if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) || c 923 fs/udf/inode.c (laarr[*c].extLength >> 30) == c 925 fs/udf/inode.c int curr = *c; c 955 fs/udf/inode.c (*c)++; c 979 fs/udf/inode.c static void udf_prealloc_extents(struct inode *inode, int c, int lastblock, c 985 fs/udf/inode.c if (*endnum >= (c + 1)) { c 989 fs/udf/inode.c start = c; c 991 fs/udf/inode.c if ((laarr[c + 1].extLength >> 30) == c 993 fs/udf/inode.c start = c + 1; c 995 fs/udf/inode.c (((laarr[c + 1].extLength & c 1000 fs/udf/inode.c start = c; c 1028 fs/udf/inode.c if (start == (c + 1)) c 1033 fs/udf/inode.c memmove(&laarr[c + 2], &laarr[c + 1], c 1034 fs/udf/inode.c sizeof(struct long_ad) * (*endnum - (c + 1))); c 1036 fs/udf/inode.c laarr[c + 1].extLocation.logicalBlockNum = next; c 1037 fs/udf/inode.c laarr[c + 1].extLocation.partitionReferenceNum = c 1038 fs/udf/inode.c laarr[c].extLocation. 
c 1040 fs/udf/inode.c laarr[c + 1].extLength = c 1044 fs/udf/inode.c start = c + 1; c 49 fs/udf/unicode.c unicode_t c; c 53 fs/udf/unicode.c c = str_i[str_i_idx++]; c 55 fs/udf/unicode.c c = (c << 8) | str_i[str_i_idx++]; c 56 fs/udf/unicode.c if ((c & SURROGATE_MASK) == SURROGATE_PAIR) { c 61 fs/udf/unicode.c c = UNICODE_MAX + 1; c 66 fs/udf/unicode.c if (c & SURROGATE_LOW) { c 67 fs/udf/unicode.c c = UNICODE_MAX + 1; c 76 fs/udf/unicode.c c = UNICODE_MAX + 1; c 80 fs/udf/unicode.c c = PLANE_SIZE + c 81 fs/udf/unicode.c ((c & SURROGATE_CHAR_MASK) << SURROGATE_CHAR_BITS) + c 85 fs/udf/unicode.c *ret = c; c 98 fs/udf/unicode.c unicode_t c; c 109 fs/udf/unicode.c &c); c 111 fs/udf/unicode.c if (c == 0 || c > UNICODE_MAX || (conv_f && c > MAX_WCHAR_T) || c 112 fs/udf/unicode.c (translate && c == '/')) { c 124 fs/udf/unicode.c c = ILLEGAL_CHAR_MARK; c 129 fs/udf/unicode.c len = conv_f(c, &str_o[*str_o_idx], c 132 fs/udf/unicode.c len = utf32_to_utf8(c, &str_o[*str_o_idx], c 156 fs/udf/unicode.c uint32_t c; c 206 fs/udf/unicode.c c = ocu[idx]; c 208 fs/udf/unicode.c c = (c << 8) | ocu[idx + 1]; c 210 fs/udf/unicode.c if (c == EXT_MARK) { c 321 fs/udf/unicode.c unicode_t c; c 337 fs/udf/unicode.c c = SURROGATE_PAIR | c 340 fs/udf/unicode.c ocu[u_len++] = (uint8_t)(c >> 8); c 341 fs/udf/unicode.c ocu[u_len++] = (uint8_t)(c & 0xff); c 215 fs/ufs/ufs_fs.h #define ufs_cgbase(c) (uspi->s_fpg * (c)) c 216 fs/ufs/ufs_fs.h #define ufs_cgstart(c) ((uspi)->fs_magic == UFS2_MAGIC ? ufs_cgbase(c) : \ c 217 fs/ufs/ufs_fs.h (ufs_cgbase(c) + uspi->s_cgoffset * ((c) & ~uspi->s_cgmask))) c 218 fs/ufs/ufs_fs.h #define ufs_cgsblock(c) (ufs_cgstart(c) + uspi->s_sblkno) /* super blk */ c 219 fs/ufs/ufs_fs.h #define ufs_cgcmin(c) (ufs_cgstart(c) + uspi->s_cblkno) /* cg block */ c 220 fs/ufs/ufs_fs.h #define ufs_cgimin(c) (ufs_cgstart(c) + uspi->s_iblkno) /* inode blk */ c 221 fs/ufs/ufs_fs.h #define ufs_cgdmin(c) (ufs_cgstart(c) + uspi->s_dblkno) /* 1st data */ c 2794 fs/unicode/mkutf8data.c unsigned char c = *s; c 2795 fs/unicode/mkutf8data.c return 1 + (c >= 0xC0) + (c >= 0xE0) + (c >= 0xF0); c 3178 fs/unicode/mkutf8data.c int c; c 3186 fs/unicode/mkutf8data.c while ((c = utf8byte(&u8c)) > 0) c 3187 fs/unicode/mkutf8data.c if (c != (unsigned char)*t++) c 3189 fs/unicode/mkutf8data.c if (c < 0) c 3201 fs/unicode/mkutf8data.c while ((c = utf8byte(&u8c)) > 0) c 3202 fs/unicode/mkutf8data.c if (c != (unsigned char)*t++) c 3204 fs/unicode/mkutf8data.c if (c < 0) c 115 fs/unicode/utf8-core.c int c = utf8byte(&cur); c 117 fs/unicode/utf8-core.c dest[nlen] = c; c 118 fs/unicode/utf8-core.c if (!c) c 120 fs/unicode/utf8-core.c if (c == -1) c 139 fs/unicode/utf8-core.c int c = utf8byte(&cur); c 141 fs/unicode/utf8-core.c dest[nlen] = c; c 142 fs/unicode/utf8-core.c if (!c) c 144 fs/unicode/utf8-core.c if (c == -1) c 93 fs/unicode/utf8-norm.c unsigned char c = *s; c 95 fs/unicode/utf8-norm.c return 1 + (c >= 0xC0) + (c >= 0xE0) + (c >= 0xF0); c 180 fs/unicode/utf8-selftest.c unsigned char c; c 188 fs/unicode/utf8-selftest.c while ((c = utf8byte(&u8c)) > 0) { c 189 fs/unicode/utf8-selftest.c test_f((c == nfdi_test_data[i].dec[j]), c 191 fs/unicode/utf8-selftest.c c, nfdi_test_data[i].dec[j]); c 216 fs/unicode/utf8-selftest.c unsigned char c; c 224 fs/unicode/utf8-selftest.c while ((c = utf8byte(&u8c)) > 0) { c 225 fs/unicode/utf8-selftest.c test_f((c == nfdicf_test_data[i].ncf[j]), c 227 fs/unicode/utf8-selftest.c c, nfdicf_test_data[i].ncf[j]); c 2115 fs/xfs/libxfs/xfs_alloc.c uint32_t c = be32_to_cpu(agf->agf_flcount); c 
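The fs/udf/unicode.c fragments above assemble a code point from a UTF-16 surrogate pair through their SURROGATE_* and PLANE_SIZE macros. A standalone sketch of that operation follows; the constants here are the standard UTF-16 values and are assumed, not copied from the kernel header.

/*
 * Sketch of UTF-16 surrogate-pair decoding; constants are the standard
 * UTF-16 ones, assumed to match what the fs/udf macros encode.
 */
#include <stdint.h>
#include <stdio.h>

#define HIGH_SURROGATE_BASE 0xD800u     /* 0xD800..0xDBFF: leading half  */
#define LOW_SURROGATE_BASE  0xDC00u     /* 0xDC00..0xDFFF: trailing half */
#define SURROGATE_CHAR_MASK 0x3FFu      /* 10 payload bits per half      */
#define PLANE_SIZE          0x10000u    /* first code point above the BMP */

/* Returns the combined code point, or 0 if the two units are not a valid pair. */
static uint32_t combine_surrogates(uint16_t hi, uint16_t lo)
{
        if ((hi & ~SURROGATE_CHAR_MASK) != HIGH_SURROGATE_BASE ||
            (lo & ~SURROGATE_CHAR_MASK) != LOW_SURROGATE_BASE)
                return 0;
        return PLANE_SIZE +
               ((uint32_t)(hi & SURROGATE_CHAR_MASK) << 10) +
               (lo & SURROGATE_CHAR_MASK);
}

int main(void)
{
        /* U+1F600 is encoded as the pair 0xD83D 0xDE00. */
        printf("U+%05X\n", (unsigned)combine_surrogates(0xD83D, 0xDE00));
        return 0;
}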
2130 fs/xfs/libxfs/xfs_alloc.c if (c > agfl_size) c 2137 fs/xfs/libxfs/xfs_alloc.c if (c && l >= f) c 2139 fs/xfs/libxfs/xfs_alloc.c else if (c) c 2144 fs/xfs/libxfs/xfs_alloc.c return active != c; c 2095 fs/xfs/libxfs/xfs_da_btree.c int c; c 2104 fs/xfs/libxfs/xfs_da_btree.c c = (int)(*bno + count - b); c 2105 fs/xfs/libxfs/xfs_da_btree.c error = xfs_bmapi_write(tp, dp, b, c, c 108 fs/xfs/xfs_log.c #define xlog_verify_iclog(a,b,c) c 109 fs/xfs/xfs_log.c #define xlog_verify_tail_lsn(a,b,c) c 89 fs/xfs/xfs_stats.c int c; c 93 fs/xfs/xfs_stats.c for_each_possible_cpu(c) { c 96 fs/xfs/xfs_stats.c vn_active = per_cpu_ptr(stats, c)->s.vn_active; c 97 fs/xfs/xfs_stats.c memset(per_cpu_ptr(stats, c), 0, sizeof(*stats)); c 98 fs/xfs/xfs_stats.c per_cpu_ptr(stats, c)->s.vn_active = vn_active; c 424 include/acpi/acoutput.h #define ACPI_DUMP_OPERANDS(a, b ,c) acpi_ex_dump_operands(a, b, c) c 426 include/acpi/acoutput.h #define ACPI_DUMP_PATHNAME(a, b, c, d) acpi_ns_dump_pathname(a, b, c, d) c 429 include/acpi/acoutput.h #define ACPI_TRACE_POINT(a, b, c, d) acpi_trace_point (a, b, c, d) c 447 include/acpi/acoutput.h #define ACPI_DUMP_OPERANDS(a, b, c) c 449 include/acpi/acoutput.h #define ACPI_DUMP_PATHNAME(a, b, c, d) c 452 include/acpi/acoutput.h #define ACPI_TRACE_POINT(a, b, c, d) c 280 include/acpi/actypes.h #define ACPI_PRINTF_LIKE(c) c 39 include/acpi/platform/acgcc.h #define ACPI_PRINTF_LIKE(c) __attribute__ ((__format__ (__printf__, c, c+1))) c 39 include/asm-generic/atomic.h int c, old; \ c 41 include/asm-generic/atomic.h c = v->counter; \ c 42 include/asm-generic/atomic.h while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ c 43 include/asm-generic/atomic.h c = old; \ c 49 include/asm-generic/atomic.h int c, old; \ c 51 include/asm-generic/atomic.h c = v->counter; \ c 52 include/asm-generic/atomic.h while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ c 53 include/asm-generic/atomic.h c = old; \ c 55 include/asm-generic/atomic.h return c c_op i; \ c 61 include/asm-generic/atomic.h int c, old; \ c 63 include/asm-generic/atomic.h c = v->counter; \ c 64 include/asm-generic/atomic.h while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ c 65 include/asm-generic/atomic.h c = old; \ c 67 include/asm-generic/atomic.h return c; \ c 17 include/asm-generic/word-at-a-time.h static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c) c 19 include/asm-generic/word-at-a-time.h unsigned long mask = (val & c->low_bits) + c->low_bits; c 41 include/asm-generic/word-at-a-time.h static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c) c 43 include/asm-generic/word-at-a-time.h unsigned long rhs = val | c->low_bits; c 45 include/asm-generic/word-at-a-time.h return (val + c->high_bits) & ~rhs; c 93 include/asm-generic/word-at-a-time.h static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) c 95 include/asm-generic/word-at-a-time.h unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; c 100 include/asm-generic/word-at-a-time.h static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c) c 105 include/drm/drm_edid.h u8 c; /* need to divide by 2 */ c 134 include/drm/drm_modes.h #define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \ c 135 include/drm/drm_modes.h .name = nm, .status = 0, .type = (t), .clock = (c), \ c 1086 include/linux/atomic-fallback.h int c = atomic_read(v); c 1089 
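The include/asm-generic/word-at-a-time.h fragments above implement the classic zero-byte test ((a - ONES) & ~a) & HIGHS. A standalone sketch follows; the ONES/HIGHS constants are the usual 0x01…/0x80… byte patterns and are assumed to be what struct word_at_a_time carries.

/*
 * Zero-byte detection over a 64-bit word: (a - ONES) sets a byte's top bit
 * when that byte underflows (it was 0x00); & ~a discards bytes whose top bit
 * was already set; & HIGHS keeps only the top bit of each byte.  The result
 * is non-zero iff some byte of the word is zero.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ONES  0x0101010101010101ULL     /* 0x01 repeated in every byte */
#define HIGHS 0x8080808080808080ULL     /* 0x80 repeated in every byte */

static uint64_t has_zero64(uint64_t a)
{
        return (a - ONES) & ~a & HIGHS;
}

int main(void)
{
        uint64_t w1, w2;

        memcpy(&w1, "abcdefgh", 8);     /* no NUL byte        */
        memcpy(&w2, "abc\0defg", 8);    /* NUL in the 4th byte */
        printf("%d %d\n", has_zero64(w1) != 0, has_zero64(w2) != 0); /* 0 1 */
        return 0;
}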
include/linux/atomic-fallback.h if (unlikely(c == u)) c 1091 include/linux/atomic-fallback.h } while (!atomic_try_cmpxchg(v, &c, c + a)); c 1093 include/linux/atomic-fallback.h return c; c 1136 include/linux/atomic-fallback.h int c = atomic_read(v); c 1139 include/linux/atomic-fallback.h if (unlikely(c < 0)) c 1141 include/linux/atomic-fallback.h } while (!atomic_try_cmpxchg(v, &c, c + 1)); c 1152 include/linux/atomic-fallback.h int c = atomic_read(v); c 1155 include/linux/atomic-fallback.h if (unlikely(c > 0)) c 1157 include/linux/atomic-fallback.h } while (!atomic_try_cmpxchg(v, &c, c - 1)); c 1168 include/linux/atomic-fallback.h int dec, c = atomic_read(v); c 1171 include/linux/atomic-fallback.h dec = c - 1; c 1174 include/linux/atomic-fallback.h } while (!atomic_try_cmpxchg(v, &c, dec)); c 1181 include/linux/atomic-fallback.h #define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) c 1182 include/linux/atomic-fallback.h #define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) c 2196 include/linux/atomic-fallback.h s64 c = atomic64_read(v); c 2199 include/linux/atomic-fallback.h if (unlikely(c == u)) c 2201 include/linux/atomic-fallback.h } while (!atomic64_try_cmpxchg(v, &c, c + a)); c 2203 include/linux/atomic-fallback.h return c; c 2246 include/linux/atomic-fallback.h s64 c = atomic64_read(v); c 2249 include/linux/atomic-fallback.h if (unlikely(c < 0)) c 2251 include/linux/atomic-fallback.h } while (!atomic64_try_cmpxchg(v, &c, c + 1)); c 2262 include/linux/atomic-fallback.h s64 c = atomic64_read(v); c 2265 include/linux/atomic-fallback.h if (unlikely(c > 0)) c 2267 include/linux/atomic-fallback.h } while (!atomic64_try_cmpxchg(v, &c, c - 1)); c 2278 include/linux/atomic-fallback.h s64 dec, c = atomic64_read(v); c 2281 include/linux/atomic-fallback.h dec = c - 1; c 2284 include/linux/atomic-fallback.h } while (!atomic64_try_cmpxchg(v, &c, dec)); c 2291 include/linux/atomic-fallback.h #define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) c 2292 include/linux/atomic-fallback.h #define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) c 46 include/linux/can/dev/peak_canfd.h static inline u16 pucan_cmd_get_opcode(struct pucan_command *c) c 48 include/linux/can/dev/peak_canfd.h return le16_to_cpu(c->opcode_channel) & 0x3ff; c 258 include/linux/can/dev/peak_canfd.h #define PUCAN_MSG_CHANNEL_DLC(c, d) (((c) & 0xf) | ((d) << 4)) c 96 include/linux/capability.h # define cap_clear(c) do { (c) = __cap_empty_set; } while (0) c 98 include/linux/capability.h #define cap_raise(c, flag) ((c).cap[CAP_TO_INDEX(flag)] |= CAP_TO_MASK(flag)) c 99 include/linux/capability.h #define cap_lower(c, flag) ((c).cap[CAP_TO_INDEX(flag)] &= ~CAP_TO_MASK(flag)) c 100 include/linux/capability.h #define cap_raised(c, flag) ((c).cap[CAP_TO_INDEX(flag)] & CAP_TO_MASK(flag)) c 102 include/linux/capability.h #define CAP_BOP_ALL(c, a, b, OP) \ c 106 include/linux/capability.h c.cap[__capi] = a.cap[__capi] OP b.cap[__capi]; \ c 110 include/linux/capability.h #define CAP_UOP_ALL(c, a, OP) \ c 114 include/linux/capability.h c.cap[__capi] = OP a.cap[__capi]; \ c 142 include/linux/capability.h static inline kernel_cap_t cap_invert(const kernel_cap_t c) c 145 include/linux/capability.h CAP_UOP_ALL(dest, c, ~); c 109 include/linux/cb710.h #define cb710_dump_regs(c, d) do {} while (0) c 285 include/linux/ceph/libceph.h int (*parse_extra_token)(char *c, void *private), c 329 include/linux/ceph/messenger.h extern int 
ceph_parse_ips(const char *c, const char *end, c 37 include/linux/ceph/pagelist.h struct ceph_pagelist_cursor *c); c 40 include/linux/ceph/pagelist.h struct ceph_pagelist_cursor *c); c 71 include/linux/clk/analogbits-wrpll-cln28hpc.h int wrpll_configure_for_rate(struct wrpll_cfg *c, u32 target_rate, c 74 include/linux/clk/analogbits-wrpll-cln28hpc.h unsigned int wrpll_calc_max_lock_us(const struct wrpll_cfg *c); c 76 include/linux/clk/analogbits-wrpll-cln28hpc.h unsigned long wrpll_calc_output_rate(const struct wrpll_cfg *c, c 26 include/linux/clkdev.h #define CLKDEV_INIT(d, n, c) \ c 30 include/linux/clkdev.h .clk = c, \ c 19 include/linux/compiler_types.h # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) c 45 include/linux/compiler_types.h # define __cond_lock(x,c) (c) c 55 include/linux/console.h void (*con_putc)(struct vc_data *vc, int c, int ypos, int xpos); c 23 include/linux/consolemap.h extern u32 conv_8bit_to_uni(unsigned char c); c 30 include/linux/consolemap.h #define conv_8bit_to_uni(c) ((uint32_t)(c)) c 31 include/linux/consolemap.h #define conv_uni_to_8bit(c) ((int) ((c) & 0xff)) c 32 include/linux/consolemap.h #define console_map_init(c) do { ; } while (0) c 13 include/linux/crc-ccitt.h static inline u16 crc_ccitt_byte(u16 crc, const u8 c) c 15 include/linux/crc-ccitt.h return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff]; c 18 include/linux/crc-ccitt.h static inline u16 crc_ccitt_false_byte(u16 crc, const u8 c) c 20 include/linux/crc-ccitt.h return (crc << 8) ^ crc_ccitt_false_table[(crc >> 8) ^ c]; c 7 include/linux/crc4.h extern uint8_t crc4(uint8_t c, uint64_t x, int bits); c 19 include/linux/crush/hash.h extern __u32 crush_hash32_3(int type, __u32 a, __u32 b, __u32 c); c 20 include/linux/crush/hash.h extern __u32 crush_hash32_4(int type, __u32 a, __u32 b, __u32 c, __u32 d); c 21 include/linux/crush/hash.h extern __u32 crush_hash32_5(int type, __u32 a, __u32 b, __u32 c, __u32 d, c 23 include/linux/ctype.h #define isalnum(c) ((__ismask(c)&(_U|_L|_D)) != 0) c 24 include/linux/ctype.h #define isalpha(c) ((__ismask(c)&(_U|_L)) != 0) c 25 include/linux/ctype.h #define iscntrl(c) ((__ismask(c)&(_C)) != 0) c 26 include/linux/ctype.h static inline int isdigit(int c) c 28 include/linux/ctype.h return '0' <= c && c <= '9'; c 30 include/linux/ctype.h #define isgraph(c) ((__ismask(c)&(_P|_U|_L|_D)) != 0) c 31 include/linux/ctype.h #define islower(c) ((__ismask(c)&(_L)) != 0) c 32 include/linux/ctype.h #define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0) c 33 include/linux/ctype.h #define ispunct(c) ((__ismask(c)&(_P)) != 0) c 35 include/linux/ctype.h #define isspace(c) ((__ismask(c)&(_S)) != 0) c 36 include/linux/ctype.h #define isupper(c) ((__ismask(c)&(_U)) != 0) c 37 include/linux/ctype.h #define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0) c 39 include/linux/ctype.h #define isascii(c) (((unsigned char)(c))<=0x7f) c 40 include/linux/ctype.h #define toascii(c) (((unsigned char)(c))&0x7f) c 42 include/linux/ctype.h static inline unsigned char __tolower(unsigned char c) c 44 include/linux/ctype.h if (isupper(c)) c 45 include/linux/ctype.h c -= 'A'-'a'; c 46 include/linux/ctype.h return c; c 49 include/linux/ctype.h static inline unsigned char __toupper(unsigned char c) c 51 include/linux/ctype.h if (islower(c)) c 52 include/linux/ctype.h c -= 'a'-'A'; c 53 include/linux/ctype.h return c; c 56 include/linux/ctype.h #define tolower(c) __tolower(c) c 57 include/linux/ctype.h #define toupper(c) __toupper(c) c 63 include/linux/ctype.h static inline char _tolower(const char 
c) c 65 include/linux/ctype.h return c | 0x20; c 69 include/linux/ctype.h static inline int isodigit(const char c) c 71 include/linux/ctype.h return c >= '0' && c <= '7'; c 25 include/linux/debug_locks.h #define DEBUG_LOCKS_WARN_ON(c) \ c 29 include/linux/debug_locks.h if (!oops_in_progress && unlikely(c)) { \ c 31 include/linux/debug_locks.h WARN(1, "DEBUG_LOCKS_WARN_ON(%s)", #c); \ c 38 include/linux/debug_locks.h # define SMP_DEBUG_LOCKS_WARN_ON(c) DEBUG_LOCKS_WARN_ON(c) c 40 include/linux/debug_locks.h # define SMP_DEBUG_LOCKS_WARN_ON(c) do { } while (0) c 1037 include/linux/device.h struct device_connection *c; c 1039 include/linux/device.h for (c = cons; c->endpoint[0]; c++) c 1040 include/linux/device.h device_connection_add(c); c 1049 include/linux/device.h struct device_connection *c; c 1051 include/linux/device.h for (c = cons; c->endpoint[0]; c++) c 1052 include/linux/device.h device_connection_remove(c); c 32 include/linux/dm-bufio.h void dm_bufio_client_destroy(struct dm_bufio_client *c); c 39 include/linux/dm-bufio.h void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start); c 55 include/linux/dm-bufio.h void *dm_bufio_read(struct dm_bufio_client *c, sector_t block, c 62 include/linux/dm-bufio.h void *dm_bufio_get(struct dm_bufio_client *c, sector_t block, c 69 include/linux/dm-bufio.h void *dm_bufio_new(struct dm_bufio_client *c, sector_t block, c 77 include/linux/dm-bufio.h void dm_bufio_prefetch(struct dm_bufio_client *c, c 108 include/linux/dm-bufio.h void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c); c 114 include/linux/dm-bufio.h int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c); c 119 include/linux/dm-bufio.h int dm_bufio_issue_flush(struct dm_bufio_client *c); c 132 include/linux/dm-bufio.h void dm_bufio_forget(struct dm_bufio_client *c, sector_t block); c 137 include/linux/dm-bufio.h void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n); c 139 include/linux/dm-bufio.h unsigned dm_bufio_get_block_size(struct dm_bufio_client *c); c 140 include/linux/dm-bufio.h sector_t dm_bufio_get_device_size(struct dm_bufio_client *c); c 622 include/linux/dma-mapping.h #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0) c 172 include/linux/dma/ipu-dma.h #define to_idmac_chan(c) container_of(c, struct idmac_channel, dma_chan) c 66 include/linux/efi.h #define EFI_GUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \ c 67 include/linux/efi.h GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) c 48 include/linux/greybus.h #define GREYBUS_DEVICE_CLASS(c) \ c 50 include/linux/greybus.h .class = (c), c 40 include/linux/hid-debug.h #define hid_dump_input(a,b,c) do { } while (0) c 41 include/linux/hid-debug.h #define hid_dump_report(a,b,c,d) do { } while (0) c 43 include/linux/hid-debug.h #define hid_dump_field(a,b,c) do { } while (0) c 965 include/linux/hid.h __u8 type, __u16 c) c 970 include/linux/hid.h usage->code = c; c 1000 include/linux/hid.h __u8 type, __u16 c) c 1002 include/linux/hid.h hid_map_usage(hidinput, usage, bit, max, type, c); c 1003 include/linux/hid.h clear_bit(c, *bit); c 937 include/linux/hyperv.h static inline bool is_hvsock_channel(const struct vmbus_channel *c) c 939 include/linux/hyperv.h return !!(c->offermsg.offer.chn_flags & c 943 include/linux/hyperv.h static inline bool is_sub_channel(const struct vmbus_channel *c) c 945 include/linux/hyperv.h return c->offermsg.offer.sub_channel_index != 0; c 948 include/linux/hyperv.h static inline void set_channel_affinity_state(struct vmbus_channel *c, c 951 
include/linux/hyperv.h c->affinity_policy = policy; c 954 include/linux/hyperv.h static inline void set_channel_read_mode(struct vmbus_channel *c, c 957 include/linux/hyperv.h c->callback_mode = mode; c 960 include/linux/hyperv.h static inline void set_per_channel_state(struct vmbus_channel *c, void *s) c 962 include/linux/hyperv.h c->per_channel_state = s; c 965 include/linux/hyperv.h static inline void *get_per_channel_state(struct vmbus_channel *c) c 967 include/linux/hyperv.h return c->per_channel_state; c 970 include/linux/hyperv.h static inline void set_channel_pending_send_size(struct vmbus_channel *c, c 976 include/linux/hyperv.h spin_lock_irqsave(&c->outbound.ring_lock, flags); c 977 include/linux/hyperv.h ++c->out_full_total; c 979 include/linux/hyperv.h if (!c->out_full_flag) { c 980 include/linux/hyperv.h ++c->out_full_first; c 981 include/linux/hyperv.h c->out_full_flag = true; c 983 include/linux/hyperv.h spin_unlock_irqrestore(&c->outbound.ring_lock, flags); c 985 include/linux/hyperv.h c->out_full_flag = false; c 988 include/linux/hyperv.h c->outbound.ring_buffer->pending_send_sz = size; c 991 include/linux/hyperv.h static inline void set_low_latency_mode(struct vmbus_channel *c) c 993 include/linux/hyperv.h c->low_latency = true; c 996 include/linux/hyperv.h static inline void clear_low_latency_mode(struct vmbus_channel *c) c 998 include/linux/hyperv.h c->low_latency = false; c 373 include/linux/ide.h u8 c[12]; c 25 include/linux/iio/sysfs.h struct iio_chan_spec const *c; c 132 include/linux/intel-iommu.h #define cap_5lp_support(c) (((c) >> 60) & 1) c 133 include/linux/intel-iommu.h #define cap_pi_support(c) (((c) >> 59) & 1) c 134 include/linux/intel-iommu.h #define cap_fl1gp_support(c) (((c) >> 56) & 1) c 135 include/linux/intel-iommu.h #define cap_read_drain(c) (((c) >> 55) & 1) c 136 include/linux/intel-iommu.h #define cap_write_drain(c) (((c) >> 54) & 1) c 137 include/linux/intel-iommu.h #define cap_max_amask_val(c) (((c) >> 48) & 0x3f) c 138 include/linux/intel-iommu.h #define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1) c 139 include/linux/intel-iommu.h #define cap_pgsel_inv(c) (((c) >> 39) & 1) c 141 include/linux/intel-iommu.h #define cap_super_page_val(c) (((c) >> 34) & 0xf) c 142 include/linux/intel-iommu.h #define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \ c 145 include/linux/intel-iommu.h #define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16) c 146 include/linux/intel-iommu.h #define cap_max_fault_reg_offset(c) \ c 147 include/linux/intel-iommu.h (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16) c 149 include/linux/intel-iommu.h #define cap_zlr(c) (((c) >> 22) & 1) c 150 include/linux/intel-iommu.h #define cap_isoch(c) (((c) >> 23) & 1) c 151 include/linux/intel-iommu.h #define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1) c 152 include/linux/intel-iommu.h #define cap_sagaw(c) (((c) >> 8) & 0x1f) c 153 include/linux/intel-iommu.h #define cap_caching_mode(c) (((c) >> 7) & 1) c 154 include/linux/intel-iommu.h #define cap_phmr(c) (((c) >> 6) & 1) c 155 include/linux/intel-iommu.h #define cap_plmr(c) (((c) >> 5) & 1) c 156 include/linux/intel-iommu.h #define cap_rwbf(c) (((c) >> 4) & 1) c 157 include/linux/intel-iommu.h #define cap_afl(c) (((c) >> 3) & 1) c 158 include/linux/intel-iommu.h #define cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7))) c 275 include/linux/intel-iommu.h #define dma_frcd_fault_reason(c) (c & 0xff) c 276 include/linux/intel-iommu.h #define dma_frcd_source_id(c) (c & 0xffff) c 277 
include/linux/intel-iommu.h #define dma_frcd_pasid_value(c) (((c) >> 8) & 0xfffff) c 278 include/linux/intel-iommu.h #define dma_frcd_pasid_present(c) (((c) >> 31) & 1) c 17 include/linux/isapnp.h #define ISAPNP_VENDOR(a,b,c) (((((a)-'A'+1)&0x3f)<<2)|\ c 19 include/linux/isapnp.h ((((c)-'A'+1)&0x1f)<<8)) c 35 include/linux/jhash.h #define __jhash_mix(a, b, c) \ c 37 include/linux/jhash.h a -= c; a ^= rol32(c, 4); c += b; \ c 38 include/linux/jhash.h b -= a; b ^= rol32(a, 6); a += c; \ c 39 include/linux/jhash.h c -= b; c ^= rol32(b, 8); b += a; \ c 40 include/linux/jhash.h a -= c; a ^= rol32(c, 16); c += b; \ c 41 include/linux/jhash.h b -= a; b ^= rol32(a, 19); a += c; \ c 42 include/linux/jhash.h c -= b; c ^= rol32(b, 4); b += a; \ c 46 include/linux/jhash.h #define __jhash_final(a, b, c) \ c 48 include/linux/jhash.h c ^= b; c -= rol32(b, 14); \ c 49 include/linux/jhash.h a ^= c; a -= rol32(c, 11); \ c 51 include/linux/jhash.h c ^= b; c -= rol32(b, 16); \ c 52 include/linux/jhash.h a ^= c; a -= rol32(c, 4); \ c 54 include/linux/jhash.h c ^= b; c -= rol32(b, 24); \ c 72 include/linux/jhash.h u32 a, b, c; c 76 include/linux/jhash.h a = b = c = JHASH_INITVAL + length + initval; c 82 include/linux/jhash.h c += __get_unaligned_cpu32(k + 8); c 83 include/linux/jhash.h __jhash_mix(a, b, c); c 89 include/linux/jhash.h case 12: c += (u32)k[11]<<24; /* fall through */ c 90 include/linux/jhash.h case 11: c += (u32)k[10]<<16; /* fall through */ c 91 include/linux/jhash.h case 10: c += (u32)k[9]<<8; /* fall through */ c 92 include/linux/jhash.h case 9: c += k[8]; /* fall through */ c 101 include/linux/jhash.h __jhash_final(a, b, c); c 106 include/linux/jhash.h return c; c 118 include/linux/jhash.h u32 a, b, c; c 121 include/linux/jhash.h a = b = c = JHASH_INITVAL + (length<<2) + initval; c 127 include/linux/jhash.h c += k[2]; c 128 include/linux/jhash.h __jhash_mix(a, b, c); c 135 include/linux/jhash.h case 3: c += k[2]; /* fall through */ c 138 include/linux/jhash.h __jhash_final(a, b, c); c 143 include/linux/jhash.h return c; c 148 include/linux/jhash.h static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval) c 152 include/linux/jhash.h c += initval; c 154 include/linux/jhash.h __jhash_final(a, b, c); c 156 include/linux/jhash.h return c; c 159 include/linux/jhash.h static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval) c 161 include/linux/jhash.h return __jhash_nwords(a, b, c, initval + JHASH_INITVAL + (3 << 2)); c 120 include/linux/jiffies.h #define time_in_range(a,b,c) \ c 122 include/linux/jiffies.h time_before_eq(a,c)) c 127 include/linux/jiffies.h #define time_in_range_open(a,b,c) \ c 129 include/linux/jiffies.h time_before(a,c)) c 146 include/linux/jiffies.h #define time_in_range64(a, b, c) \ c 148 include/linux/jiffies.h time_before_eq64(a, c)) c 492 include/linux/key.h #define key_fsuid_changed(c) do { } while(0) c 493 include/linux/key.h #define key_fsgid_changed(c) do { } while(0) c 251 include/linux/kprobes.h extern kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c); c 252 include/linux/kprobes.h extern void __free_insn_slot(struct kprobe_insn_cache *c, c 255 include/linux/kprobes.h extern bool __is_insn_slot_addr(struct kprobe_insn_cache *c, c 74 include/linux/lightnvm.h } c; c 525 include/linux/lightnvm.h ppa64.c.line = ppa32 & ((~0U) >> 1); c 526 include/linux/lightnvm.h ppa64.c.is_cached = 1; c 569 include/linux/lightnvm.h } else if (ppa64.c.is_cached) { c 570 include/linux/lightnvm.h ppa32 |= ppa64.c.line; c 409 include/linux/lockdep.h #define 
lockdep_repin_lock(l,c) lock_repin_lock(&(l)->dep_map, (c)) c 410 include/linux/lockdep.h #define lockdep_unpin_lock(l,c) lock_unpin_lock(&(l)->dep_map, (c)) c 430 include/linux/lockdep.h # define lock_acquire(l, s, t, r, c, n, i) do { } while (0) c 490 include/linux/lockdep.h #define lockdep_repin_lock(l, c) do { (void)(l); (void)(c); } while (0) c 491 include/linux/lockdep.h #define lockdep_unpin_lock(l, c) do { (void)(l); (void)(c); } while (0) c 1626 include/linux/lsm_hooks.h void (*cred_getsecid)(const struct cred *c, u32 *secid); c 222 include/linux/math64.h u64 c; c 237 include/linux/math64.h rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low; c 238 include/linux/math64.h rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low; c 239 include/linux/math64.h rh.l.high = (c >> 32) + rh.l.high; c 10 include/linux/memfd.h static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned long a) c 51 include/linux/mfd/ingenic-tcu.h #define TCU_REG_TDFRc(c) (TCU_REG_TDFR0 + ((c) * TCU_CHANNEL_STRIDE)) c 52 include/linux/mfd/ingenic-tcu.h #define TCU_REG_TDHRc(c) (TCU_REG_TDHR0 + ((c) * TCU_CHANNEL_STRIDE)) c 53 include/linux/mfd/ingenic-tcu.h #define TCU_REG_TCNTc(c) (TCU_REG_TCNT0 + ((c) * TCU_CHANNEL_STRIDE)) c 54 include/linux/mfd/ingenic-tcu.h #define TCU_REG_TCSRc(c) (TCU_REG_TCSR0 + ((c) * TCU_CHANNEL_STRIDE)) c 605 include/linux/mfd/twl.h #define PERSISTENT_KEY(r, c) KEY((r), (c), KEY_RESERVED) c 322 include/linux/mmc/card.h #define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC) c 323 include/linux/mmc/card.h #define mmc_card_sd(c) ((c)->type == MMC_TYPE_SD) c 324 include/linux/mmc/card.h #define mmc_card_sdio(c) ((c)->type == MMC_TYPE_SDIO) c 158 include/linux/mroute_base.h static inline void mr_cache_put(struct mr_mfc *c) c 160 include/linux/mroute_base.h if (refcount_dec_and_test(&c->mfc_un.res.refcount)) c 161 include/linux/mroute_base.h call_rcu(&c->rcu, c->free); c 164 include/linux/mroute_base.h static inline void mr_cache_hold(struct mr_mfc *c) c 166 include/linux/mroute_base.h refcount_inc(&c->mfc_un.res.refcount); c 286 include/linux/mroute_base.h struct mr_mfc *c, struct rtmsg *rtm); c 290 include/linux/mroute_base.h u32 portid, u32 seq, struct mr_mfc *c, c 298 include/linux/mroute_base.h u32 portid, u32 seq, struct mr_mfc *c, c 337 include/linux/mroute_base.h struct mr_mfc *c, struct rtmsg *rtm) c 348 include/linux/mroute_base.h u32 portid, u32 seq, struct mr_mfc *c, c 20 include/linux/netfilter/ipset/ip_set.h #define _IP_SET_MODULE_DESC(a, b, c) \ c 21 include/linux/netfilter/ipset/ip_set.h MODULE_DESCRIPTION(a " type of IP sets, revisions " b "-" c) c 22 include/linux/netfilter/ipset/ip_set.h #define IP_SET_MODULE_DESC(a, b, c) \ c 23 include/linux/netfilter/ipset/ip_set.h _IP_SET_MODULE_DESC(a, __stringify(b), __stringify(c)) c 105 include/linux/netfilter/ipset/ip_set.h struct ip_set_comment_rcu __rcu *c; c 537 include/linux/netfilter/ipset/ip_set.h struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1); c 540 include/linux/netfilter/ipset/ip_set.h if (unlikely(c)) { c 541 include/linux/netfilter/ipset/ip_set.h set->ext_size -= sizeof(*c) + strlen(c->str) + 1; c 542 include/linux/netfilter/ipset/ip_set.h kfree_rcu(c, rcu); c 543 include/linux/netfilter/ipset/ip_set.h rcu_assign_pointer(comment->c, NULL); c 549 include/linux/netfilter/ipset/ip_set.h c = kmalloc(sizeof(*c) + len + 1, GFP_ATOMIC); c 550 include/linux/netfilter/ipset/ip_set.h if (unlikely(!c)) c 552 include/linux/netfilter/ipset/ip_set.h strlcpy(c->str, ext->comment, len + 1); c 
553 include/linux/netfilter/ipset/ip_set.h set->ext_size += sizeof(*c) + strlen(c->str) + 1; c 554 include/linux/netfilter/ipset/ip_set.h rcu_assign_pointer(comment->c, c); c 561 include/linux/netfilter/ipset/ip_set.h struct ip_set_comment_rcu *c = rcu_dereference(comment->c); c 563 include/linux/netfilter/ipset/ip_set.h if (!c) c 565 include/linux/netfilter/ipset/ip_set.h return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str); c 576 include/linux/netfilter/ipset/ip_set.h struct ip_set_comment_rcu *c; c 578 include/linux/netfilter/ipset/ip_set.h c = rcu_dereference_protected(comment->c, 1); c 579 include/linux/netfilter/ipset/ip_set.h if (unlikely(!c)) c 581 include/linux/netfilter/ipset/ip_set.h set->ext_size -= sizeof(*c) + strlen(c->str) + 1; c 582 include/linux/netfilter/ipset/ip_set.h kfree_rcu(c, rcu); c 583 include/linux/netfilter/ipset/ip_set.h rcu_assign_pointer(comment->c, NULL); c 62 include/linux/nls.h static inline unsigned char nls_tolower(struct nls_table *t, unsigned char c) c 64 include/linux/nls.h unsigned char nc = t->charset2lower[c]; c 66 include/linux/nls.h return nc ? nc : c; c 69 include/linux/nls.h static inline unsigned char nls_toupper(struct nls_table *t, unsigned char c) c 71 include/linux/nls.h unsigned char nc = t->charset2upper[c]; c 73 include/linux/nls.h return nc ? nc : c; c 112 include/linux/nvmem-provider.h static inline struct nvmem_device *nvmem_register(const struct nvmem_config *c) c 120 include/linux/nvmem-provider.h devm_nvmem_register(struct device *dev, const struct nvmem_config *c) c 122 include/linux/nvmem-provider.h return nvmem_register(c); c 275 include/linux/overflow.h static inline __must_check size_t array3_size(size_t a, size_t b, size_t c) c 281 include/linux/overflow.h if (check_mul_overflow(bytes, c, &bytes)) c 291 include/linux/overflow.h static inline __must_check size_t __ab_c_size(size_t a, size_t b, size_t c) c 297 include/linux/overflow.h if (check_add_overflow(bytes, c, &bytes)) c 582 include/linux/pagemap.h volatile char c; c 592 include/linux/pagemap.h if (unlikely(__get_user(c, uaddr) != 0)) c 600 include/linux/pagemap.h return __get_user(c, end); c 603 include/linux/pagemap.h (void)c; c 167 include/linux/pinctrl/pinconf-generic.h #define PCONFDUMP(a, b, c, d) { \ c 168 include/linux/pinctrl/pinconf-generic.h .param = a, .display = b, .format = c, .has_arg = d \ c 11 include/linux/platform_data/mcs.h #define MCS_KEY_MAP(v, c) ((((v) & 0xff) << 16) | ((c) & 0xffff)) c 114 include/linux/pm_qos.h int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, c 131 include/linux/pm_qos.h s32 pm_qos_read_value(struct pm_qos_constraints *c); c 150 include/linux/pm_runtime.h static inline void __pm_runtime_disable(struct device *dev, bool c) {} c 13 include/linux/ppp_defs.h #define PPP_FCS(fcs, c) crc_ccitt_byte(fcs, c) c 124 include/linux/qed/qed_chain.h } c; c 360 include/linux/qed/qed_chain.h p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx; c 369 include/linux/qed/qed_chain.h p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx; c 433 include/linux/qed/qed_chain.h p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx; c 442 include/linux/qed/qed_chain.h p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx; c 484 include/linux/qed/qed_chain.h p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val; c 485 include/linux/qed/qed_chain.h p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val; c 487 include/linux/qed/qed_chain.h p_chain->pbl.c.u32.prod_page_idx = reset_val; c 488 include/linux/qed/qed_chain.h 
p_chain->pbl.c.u32.cons_page_idx = reset_val; c 694 include/linux/qed/qed_chain.h p_chain->pbl.c.u16.prod_page_idx = c 695 include/linux/qed/qed_chain.h (p_chain->pbl.c.u16.prod_page_idx - c 698 include/linux/qed/qed_chain.h p_chain->pbl.c.u32.prod_page_idx = c 699 include/linux/qed/qed_chain.h (p_chain->pbl.c.u32.prod_page_idx - c 111 include/linux/rbtree_latch.h int c; c 115 include/linux/rbtree_latch.h c = comp(key, ltn); c 117 include/linux/rbtree_latch.h if (c < 0) c 119 include/linux/rbtree_latch.h else if (c > 0) c 260 include/linux/rcupdate.h #define RCU_LOCKDEP_WARN(c, s) \ c 263 include/linux/rcupdate.h if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \ c 290 include/linux/rcupdate.h #define RCU_LOCKDEP_WARN(c, s) do { } while (0) c 316 include/linux/rcupdate.h #define __rcu_dereference_check(p, c, space) \ c 320 include/linux/rcupdate.h RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \ c 324 include/linux/rcupdate.h #define __rcu_dereference_protected(p, c, space) \ c 326 include/linux/rcupdate.h RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \ c 395 include/linux/rcupdate.h #define rcu_swap_protected(rcu_ptr, ptr, c) do { \ c 396 include/linux/rcupdate.h typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c)); \ c 455 include/linux/rcupdate.h #define rcu_dereference_check(p, c) \ c 456 include/linux/rcupdate.h __rcu_dereference_check((p), (c) || rcu_read_lock_held(), __rcu) c 465 include/linux/rcupdate.h #define rcu_dereference_bh_check(p, c) \ c 466 include/linux/rcupdate.h __rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu) c 475 include/linux/rcupdate.h #define rcu_dereference_sched_check(p, c) \ c 476 include/linux/rcupdate.h __rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \ c 504 include/linux/rcupdate.h #define rcu_dereference_protected(p, c) \ c 505 include/linux/rcupdate.h __rcu_dereference_protected((p), (c), __rcu) c 148 include/linux/sctp.h #define sctp_test_T_bit(c) ((c)->chunk_hdr->flags & SCTP_CHUNK_FLAG_T) c 381 include/linux/security.h void security_cred_getsecid(const struct cred *c, u32 *secid); c 123 include/linux/seq_buf.h extern int seq_buf_putc(struct seq_buf *s, unsigned char c); c 106 include/linux/seq_file.h void seq_pad(struct seq_file *m, char c); c 119 include/linux/seq_file.h void seq_putc(struct seq_file *m, char c); c 395 include/linux/serial_core.h struct console *c); c 113 include/linux/shdma-base.h #define shdma_for_each_chan(c, d, i) for (i = 0, c = (d)->schan[0]; \ c 114 include/linux/shdma-base.h i < (d)->dma_dev.chancnt; c = (d)->schan[++i]) c 36 include/linux/siphash.h u64 siphash_3u64(const u64 a, const u64 b, const u64 c, c 38 include/linux/siphash.h u64 siphash_4u64(const u64 a, const u64 b, const u64 c, const u64 d, c 41 include/linux/siphash.h u64 siphash_3u32(const u32 a, const u32 b, const u32 c, c 49 include/linux/siphash.h static inline u64 siphash_4u32(const u32 a, const u32 b, const u32 c, c 52 include/linux/siphash.h return siphash_2u64((u64)b << 32 | a, (u64)d << 32 | c, key); c 106 include/linux/siphash.h u32 hsiphash_3u32(const u32 a, const u32 b, const u32 c, c 108 include/linux/siphash.h u32 hsiphash_4u32(const u32 a, const u32 b, const u32 c, const u32 d, c 206 include/linux/slimbus.h struct slim_stream_config *c); c 54 include/linux/slub_def.h #define slub_percpu_partial(c) ((c)->partial) c 56 include/linux/slub_def.h #define slub_set_percpu_partial(c, p) \ c 58 include/linux/slub_def.h slub_percpu_partial(c) = (p)->next; \ c 61 
include/linux/slub_def.h #define slub_percpu_partial_read_once(c) READ_ONCE(slub_percpu_partial(c)) c 63 include/linux/slub_def.h #define slub_percpu_partial(c) NULL c 65 include/linux/slub_def.h #define slub_set_percpu_partial(c, p) c 67 include/linux/slub_def.h #define slub_percpu_partial_read_once(c) NULL c 110 include/linux/srcu.h #define srcu_dereference_check(p, ssp, c) \ c 111 include/linux/srcu.h __rcu_dereference_check((p), (c) || srcu_read_lock_held(ssp), __rcu) c 177 include/linux/string.h void *memchr_inv(const void *s, int c, size_t n); c 356 include/linux/string.h __FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size) c 363 include/linux/string.h return __builtin_memset(p, c, size); c 397 include/linux/string.h __FORTIFY_INLINE void *memscan(void *p, int c, __kernel_size_t size) c 404 include/linux/string.h return __real_memscan(p, c, size); c 422 include/linux/string.h __FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size) c 429 include/linux/string.h return __builtin_memchr(p, c, size); c 432 include/linux/string.h void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv); c 433 include/linux/string.h __FORTIFY_INLINE void *memchr_inv(const void *p, int c, size_t size) c 440 include/linux/string.h return __real_memchr_inv(p, c, size); c 43 include/linux/stringhash.h partial_name_hash(unsigned long c, unsigned long prevhash) c 45 include/linux/stringhash.h return (prevhash + (c << 4) + (c >> 4)) * 11; c 68 include/linux/tee_drv.h u64 c; c 86 include/linux/trace_seq.h extern void trace_seq_putc(struct trace_seq *s, unsigned char c); c 122 include/linux/trace_seq.h static inline void trace_seq_putc(struct trace_seq *s, unsigned char c) c 490 include/linux/tty.h extern int tty_put_char(struct tty_struct *tty, unsigned char c); c 516 include/linux/usb/composite.h extern int usb_string_id(struct usb_composite_dev *c); c 517 include/linux/usb/composite.h extern int usb_string_ids_tab(struct usb_composite_dev *c, c 522 include/linux/usb/composite.h extern int usb_string_ids_n(struct usb_composite_dev *c, unsigned n); c 606 include/linux/usb/composite.h void usb_remove_function(struct usb_configuration *c, struct usb_function *f); c 80 include/linux/usb/ehci_def.h #define CMD_PARK_CNT(c) (((c)>>8)&3) /* how many transfers to park for */ c 20 include/linux/uuid.h #define UUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ c 24 include/linux/uuid.h ((c) >> 8) & 0xff, (c) & 0xff, \ c 179 include/linux/virtio_config.h vq_callback_t *c, const char *n) c 181 include/linux/virtio_config.h vq_callback_t *callbacks[] = { c }; c 29 include/linux/vt_buffer.h static inline void scr_memsetw(u16 *s, u16 c, unsigned int count) c 34 include/linux/vt_buffer.h scr_writew(c, s++); c 36 include/linux/vt_buffer.h memset16(s, c, count / 2); c 76 include/linux/vt_kern.h #define vc_translate(vc, c) ((vc)->vc_translate[(c) | \ c 126 include/linux/vt_kern.h #define vc_translate(vc, c) (c) c 155 include/linux/vt_kern.h unsigned int c; /* Printed char */ c 188 include/linux/vt_kern.h void vc_scrolldelta_helper(struct vc_data *c, int lines, c 67 include/media/cec.h struct completion c; c 13 include/media/i2c/ir-kbd-i2c.h struct i2c_client *c; c 116 include/media/tveeprom.h int tveeprom_read(struct i2c_client *c, unsigned char *eedata, int len); c 1278 include/media/v4l2-ctrls.h struct media_device *mdev, struct v4l2_ext_controls *c); c 1294 include/media/v4l2-ctrls.h struct v4l2_ext_controls *c); c 1311 include/media/v4l2-ctrls.h struct v4l2_ext_controls *c); 
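
The v4l2-flash-led-class.h helper listed in the next entry (v4l2_ctrl_to_v4l2_flash), like other accessors appearing in this index such as to_hci_conn in hci_core.h, maps a pointer to an embedded member back to its enclosing structure with container_of(). Below is a minimal, self-contained sketch of that idiom only: the struct names are hypothetical, and the local container_of definition stands in for the kernel's own macro from <linux/kernel.h> so the sketch builds in userspace.

#include <stddef.h>
#include <stdio.h>

/* Stand-in for the kernel's container_of(), provided locally so this compiles alone. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical wrapper type; 'hdl' plays the role of the embedded member. */
struct ctrl_handler { int id; };

struct flash_dev {
	const char *name;
	struct ctrl_handler hdl;	/* embedded member */
};

/* Same shape as v4l2_ctrl_to_v4l2_flash(): member pointer -> enclosing object. */
static inline struct flash_dev *handler_to_flash(struct ctrl_handler *h)
{
	return container_of(h, struct flash_dev, hdl);
}

int main(void)
{
	struct flash_dev led = { .name = "led0" };
	struct ctrl_handler *h = &led.hdl;

	printf("%s\n", handler_to_flash(h)->name);	/* prints "led0" */
	return 0;
}

Subtracting offsetof(type, member) from the member's address is what recovers the enclosing object, so helpers written this way cost nothing at run time beyond pointer arithmetic.
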
c 109 include/media/v4l2-flash-led-class.h static inline struct v4l2_flash *v4l2_ctrl_to_v4l2_flash(struct v4l2_ctrl *c) c 111 include/media/v4l2-flash-led-class.h return container_of(c->handler, struct v4l2_flash, hdl); c 235 include/net/9p/client.h void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status); c 647 include/net/bluetooth/hci_core.h struct discovery_state *c = &hdev->discovery; c 648 include/net/bluetooth/hci_core.h return jiffies - c->timestamp; c 710 include/net/bluetooth/hci_core.h static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c) c 713 include/net/bluetooth/hci_core.h list_add_rcu(&c->list, &h->list); c 714 include/net/bluetooth/hci_core.h switch (c->type) { c 723 include/net/bluetooth/hci_core.h if (c->role == HCI_ROLE_SLAVE) c 733 include/net/bluetooth/hci_core.h static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c) c 737 include/net/bluetooth/hci_core.h list_del_rcu(&c->list); c 740 include/net/bluetooth/hci_core.h switch (c->type) { c 749 include/net/bluetooth/hci_core.h if (c->role == HCI_ROLE_SLAVE) c 779 include/net/bluetooth/hci_core.h struct hci_conn_hash *c = &hdev->conn_hash; c 781 include/net/bluetooth/hci_core.h return c->acl_num + c->amp_num + c->sco_num + c->le_num; c 787 include/net/bluetooth/hci_core.h struct hci_conn *c; c 792 include/net/bluetooth/hci_core.h list_for_each_entry_rcu(c, &h->list, list) { c 793 include/net/bluetooth/hci_core.h if (c->handle == handle) { c 794 include/net/bluetooth/hci_core.h type = c->type; c 808 include/net/bluetooth/hci_core.h struct hci_conn *c; c 812 include/net/bluetooth/hci_core.h list_for_each_entry_rcu(c, &h->list, list) { c 813 include/net/bluetooth/hci_core.h if (c->handle == handle) { c 815 include/net/bluetooth/hci_core.h return c; c 827 include/net/bluetooth/hci_core.h struct hci_conn *c; c 831 include/net/bluetooth/hci_core.h list_for_each_entry_rcu(c, &h->list, list) { c 832 include/net/bluetooth/hci_core.h if (c->type == type && !bacmp(&c->dst, ba)) { c 834 include/net/bluetooth/hci_core.h return c; c 848 include/net/bluetooth/hci_core.h struct hci_conn *c; c 852 include/net/bluetooth/hci_core.h list_for_each_entry_rcu(c, &h->list, list) { c 853 include/net/bluetooth/hci_core.h if (c->type != LE_LINK) c 856 include/net/bluetooth/hci_core.h if (ba_type == c->dst_type && !bacmp(&c->dst, ba)) { c 858 include/net/bluetooth/hci_core.h return c; c 871 include/net/bluetooth/hci_core.h struct hci_conn *c; c 875 include/net/bluetooth/hci_core.h list_for_each_entry_rcu(c, &h->list, list) { c 876 include/net/bluetooth/hci_core.h if (c->type == type && c->state == state) { c 878 include/net/bluetooth/hci_core.h return c; c 890 include/net/bluetooth/hci_core.h struct hci_conn *c; c 894 include/net/bluetooth/hci_core.h list_for_each_entry_rcu(c, &h->list, list) { c 895 include/net/bluetooth/hci_core.h if (c->type == LE_LINK && c->state == BT_CONNECT && c 896 include/net/bluetooth/hci_core.h !test_bit(HCI_CONN_SCANNING, &c->flags)) { c 898 include/net/bluetooth/hci_core.h return c; c 1039 include/net/bluetooth/hci_core.h #define to_hci_conn(c) container_of(c, struct hci_conn, dev) c 1499 include/net/bluetooth/hci_core.h int hci_mgmt_chan_register(struct hci_mgmt_chan *c); c 1500 include/net/bluetooth/hci_core.h void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c); c 802 include/net/bluetooth/l2cap.h void l2cap_chan_hold(struct l2cap_chan *c); c 803 include/net/bluetooth/l2cap.h void l2cap_chan_put(struct l2cap_chan *c); c 843 
include/net/bluetooth/l2cap.h #define __set_chan_timer(c, t) l2cap_set_timer(c, &c->chan_timer, (t)) c 844 include/net/bluetooth/l2cap.h #define __clear_chan_timer(c) l2cap_clear_timer(c, &c->chan_timer) c 845 include/net/bluetooth/l2cap.h #define __clear_retrans_timer(c) l2cap_clear_timer(c, &c->retrans_timer) c 846 include/net/bluetooth/l2cap.h #define __clear_monitor_timer(c) l2cap_clear_timer(c, &c->monitor_timer) c 847 include/net/bluetooth/l2cap.h #define __set_ack_timer(c) l2cap_set_timer(c, &chan->ack_timer, \ c 849 include/net/bluetooth/l2cap.h #define __clear_ack_timer(c) l2cap_clear_timer(c, &c->ack_timer) c 7269 include/net/cfg80211.h void (*iter)(const struct ieee80211_iface_combination *c, c 318 include/net/ip.h int i, c; \ c 319 include/net/ip.h for_each_possible_cpu(c) { \ c 323 include/net/ip.h c, stats_list[i].entry, \ c 330 include/net/ip.h int i, c; \ c 331 include/net/ip.h for_each_possible_cpu(c) { \ c 335 include/net/ip.h c, stats_list[i].entry); \ c 166 include/net/llc_c_ac.h u8 llc_circular_between(u8 a, u8 b, u8 c); c 2599 include/net/mac80211.h const struct ieee80211_tx_info *c) c 2601 include/net/mac80211.h if (WARN_ON_ONCE(c->control.rates[0].idx < 0)) c 2603 include/net/mac80211.h return &hw->wiphy->bands[c->band]->bitrates[c->control.rates[0].idx]; c 2608 include/net/mac80211.h const struct ieee80211_tx_info *c) c 2610 include/net/mac80211.h if (c->control.rts_cts_rate_idx < 0) c 2612 include/net/mac80211.h return &hw->wiphy->bands[c->band]->bitrates[c->control.rts_cts_rate_idx]; c 2617 include/net/mac80211.h const struct ieee80211_tx_info *c, int idx) c 2619 include/net/mac80211.h if (c->control.rates[idx + 1].idx < 0) c 2621 include/net/mac80211.h return &hw->wiphy->bands[c->band]->bitrates[c->control.rates[idx + 1].idx]; c 59 include/net/netfilter/nf_queue.h u32 a, b, c; c 72 include/net/netfilter/nf_queue.h c = (__force u32) ip6h->saddr.s6_addr32[1]; c 74 include/net/netfilter/nf_queue.h c = (__force u32) ip6h->daddr.s6_addr32[1]; c 76 include/net/netfilter/nf_queue.h return jhash_3words(a, b, c, initval); c 380 include/net/sctp/sm.h if (ntohl(chunk->sctp_hdr->vtag) == asoc->c.my_vtag) c 415 include/net/sctp/sm.h (ntohl(chunk->sctp_hdr->vtag) == asoc->c.my_vtag)) || c 416 include/net/sctp/sm.h (sctp_test_T_bit(chunk) && asoc->c.peer_vtag && c 417 include/net/sctp/sm.h (ntohl(chunk->sctp_hdr->vtag) == asoc->c.peer_vtag))) { c 335 include/net/sctp/structs.h struct sctp_cookie c; c 1572 include/net/sctp/structs.h struct sctp_cookie c; c 339 include/net/xfrm.h const struct km_event *c); c 340 include/net/xfrm.h void km_state_notify(struct xfrm_state *x, const struct km_event *c); c 571 include/net/xfrm.h int (*notify)(struct xfrm_state *x, const struct km_event *c); c 575 include/net/xfrm.h int (*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c); c 583 include/net/xfrm.h bool (*is_alive)(const struct km_event *c); c 43 include/sound/control.h snd_kcontrol_tlv_rw_t *c; c 62 include/sound/control.h snd_kcontrol_tlv_rw_t *c; c 75 include/sound/hda_chmap.h int snd_hdac_chmap_to_spk_mask(unsigned char c); c 291 include/sound/hda_codec.h #define list_for_each_codec(c, bus) \ c 292 include/sound/hda_codec.h list_for_each_entry(c, &(bus)->core.codec_list, core.list) c 293 include/sound/hda_codec.h #define list_for_each_codec_safe(c, n, bus) \ c 294 include/sound/hda_codec.h list_for_each_entry_safe(c, n, &(bus)->core.codec_list, core.list) c 68 include/sound/info.h } c; c 144 include/sound/info.h entry->c.text.read = read; c 138 
include/sound/pcm_params.h int i, c = 0; c 144 include/sound/pcm_params.h if (c) c 146 include/sound/pcm_params.h c++; c 355 include/sound/sb.h static inline int snd_sbmixer_add_ctl_elem(struct snd_sb *chip, const struct sbmix_elem *c) c 357 include/sound/sb.h return snd_sbmixer_add_ctl(chip, c->name, 0, c->type, c->private_value); c 33 include/sound/seq_midi_event.h bool snd_midi_event_encode_byte(struct snd_midi_event *dev, unsigned char c, c 310 include/sound/soc-component.h static inline void snd_soc_component_set_drvdata(struct snd_soc_component *c, c 313 include/sound/soc-component.h dev_set_drvdata(c->dev, data); c 316 include/sound/soc-component.h static inline void *snd_soc_component_get_drvdata(struct snd_soc_component *c) c 318 include/sound/soc-component.h return dev_get_drvdata(c->dev); c 324 include/sound/soc.h .tlv.c = (snd_soc_bytes_tlv_callback), \ c 25 include/trace/bpf_probe.h #define __perf_count(c) (c) c 72 include/trace/events/bcache.h __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0); c 152 include/trace/events/bcache.h TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio, c 154 include/trace/events/bcache.h TP_ARGS(c, inode, bio, writeback, bypass), c 167 include/trace/events/bcache.h memcpy(__entry->uuid, c->sb.set_uuid, 16); c 195 include/trace/events/bcache.h TP_PROTO(struct cache_set *c), c 196 include/trace/events/bcache.h TP_ARGS(c), c 203 include/trace/events/bcache.h memcpy(__entry->uuid, c->sb.set_uuid, 16); c 215 include/trace/events/bcache.h TP_PROTO(struct cache_set *c), c 216 include/trace/events/bcache.h TP_ARGS(c) c 220 include/trace/events/bcache.h TP_PROTO(struct cache_set *c), c 221 include/trace/events/bcache.h TP_ARGS(c) c 253 include/trace/events/bcache.h TP_PROTO(struct cache_set *c), c 254 include/trace/events/bcache.h TP_ARGS(c) c 273 include/trace/events/bcache.h __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0); c 287 include/trace/events/bcache.h TP_PROTO(struct cache_set *c), c 288 include/trace/events/bcache.h TP_ARGS(c) c 312 include/trace/events/bcache.h TP_PROTO(struct cache_set *c), c 313 include/trace/events/bcache.h TP_ARGS(c) c 317 include/trace/events/bcache.h TP_PROTO(struct cache_set *c), c 318 include/trace/events/bcache.h TP_ARGS(c) c 347 include/trace/events/bcache.h __entry->btree_node = PTR_BUCKET_NR(b->c, &b->key, 0); c 374 include/trace/events/bcache.h __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0); c 149 include/trace/events/libata.h #define __parse_subcmd(c,f,h) libata_trace_parse_subcmd(p, c, f, h) c 683 include/trace/events/rcu.h unsigned long secs, unsigned long c_old, unsigned long c), c 685 include/trace/events/rcu.h TP_ARGS(rcutorturename, rhp, secs, c_old, c), c 692 include/trace/events/rcu.h __field(unsigned long, c) c 702 include/trace/events/rcu.h __entry->c = c; c 707 include/trace/events/rcu.h __entry->secs, __entry->c_old, __entry->c) c 25 include/trace/perf.h #define __perf_count(c) (__count = (c)) c 690 include/trace/trace_events.h #define __perf_count(c) (c) c 69 include/uapi/drm/drm_fourcc.h #define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \ c 70 include/uapi/drm/drm_fourcc.h ((__u32)(c) << 16) | ((__u32)(d) << 24)) c 60 include/uapi/linux/atm.h #define __SO_LEVEL_MATCH(c,m) (((c) >> 22) == ((m) & 0x1FF)) c 61 include/uapi/linux/atm.h #define __SO_NUMBER(c) (((c) >> 16) & 0x3f) c 62 include/uapi/linux/atm.h #define __SO_SIZE(c) ((c) & 0x3fff) c 79 include/uapi/linux/map_to_7segment.h static __inline__ int map_to_seg7(struct seg7_conversion_map *map, int c) c 81 
include/uapi/linux/map_to_7segment.h return c >= 0 && c < sizeof(map->table) ? map->table[c] : -EINVAL; c 98 include/uapi/linux/map_to_7segment.h #define _SEG7(l,a,b,c,d,e,f,g) \ c 99 include/uapi/linux/map_to_7segment.h ( a<<BIT_SEG7_A | b<<BIT_SEG7_B | c<<BIT_SEG7_C | d<<BIT_SEG7_D | \ c 107 include/uapi/linux/netfilter/x_tables.h #define SET_COUNTER(c,b,p) do { (c).bcnt = (b); (c).pcnt = (p); } while(0) c 108 include/uapi/linux/netfilter/x_tables.h #define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0) c 138 include/uapi/linux/ppp_defs.h struct compstat c; /* packet compression statistics */ c 19 include/uapi/linux/romfs_fs.h #define __mk4(a,b,c,d) cpu_to_be32(__mkl(__mkw(a,b),__mkw(c,d))) c 198 include/uapi/linux/tee.h __u64 c; c 27 include/uapi/linux/uuid.h #define GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ c 31 include/uapi/linux/uuid.h (c) & 0xff, ((c) >> 8) & 0xff, \ c 36 include/uapi/linux/uuid.h #define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ c 37 include/uapi/linux/uuid.h GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) c 81 include/uapi/linux/videodev2.h #define v4l2_fourcc(a, b, c, d)\ c 82 include/uapi/linux/videodev2.h ((__u32)(a) | ((__u32)(b) << 8) | ((__u32)(c) << 16) | ((__u32)(d) << 24)) c 83 include/uapi/linux/videodev2.h #define v4l2_fourcc_be(a, b, c, d) (v4l2_fourcc(a, b, c, d) | (1U << 31)) c 1129 include/uapi/linux/videodev2.h struct v4l2_rect c; c 1180 include/uapi/linux/videodev2.h struct v4l2_rect c; c 52 include/uapi/mtd/inftl-user.h struct inftl_unittail c; c 58 include/uapi/mtd/nftl-user.h struct nftl_uci2 c; c 504 include/uapi/sound/asound.h } c; c 916 include/video/pm3fb.h #define PM3Window_FrameCount(c) (((c) & 0xff) << 9) c 988 include/video/pm3fb.h #define PM3RLCount_Count(c) ((c) & 0x0fff) c 124 init/do_mounts.c char c = 0; c 127 init/do_mounts.c "PARTNROFF=%d%c", &offset, &c) != 1) { c 509 init/do_mounts.c char c; c 526 init/do_mounts.c ksys_read(fd, &c, 1); c 424 init/initramfs.c char c = buf[written]; c 425 init/initramfs.c if (c == '0') { c 429 init/initramfs.c } else if (c == 0) { c 294 kernel/audit.h #define auditsc_get_stamp(c, t, s) 0 c 325 kernel/audit.h #define audit_filter_inodes(t, c) AUDIT_DISABLED c 130 kernel/auditsc.c struct audit_chunk *c[31]; c 216 kernel/auditsc.c p->c[--left] = chunk; c 224 kernel/auditsc.c p->c[30] = chunk; c 264 kernel/auditsc.c audit_put_chunk(q->c[n]); c 265 kernel/auditsc.c q->c[n] = NULL; c 269 kernel/auditsc.c audit_put_chunk(q->c[n]); c 270 kernel/auditsc.c q->c[n] = NULL; c 294 kernel/auditsc.c if (audit_tree_match(p->c[n], tree)) c 300 kernel/auditsc.c if (audit_tree_match(p->c[n], tree)) c 529 kernel/bpf/btf.c static bool __btf_name_char_ok(char c, bool first, bool dot_ok) c 531 kernel/bpf/btf.c if ((first ? !isalpha(c) : c 532 kernel/bpf/btf.c !isalnum(c)) && c 533 kernel/bpf/btf.c c != '_' && c 534 kernel/bpf/btf.c ((c == '.' 
&& !dot_ok) || c 535 kernel/bpf/btf.c c != '.')) c 970 kernel/cgroup/cgroup-v1.c char c = param->string[i]; c 971 kernel/cgroup/cgroup-v1.c if (isalnum(c)) c 973 kernel/cgroup/cgroup-v1.c if ((c == '.') || (c == '-') || (c == '_')) c 1232 kernel/cgroup/cgroup.c struct cgroup *c = link->cgrp; c 1234 kernel/cgroup/cgroup.c if (c->root == cgrp->root) c 1235 kernel/cgroup/cgroup.c c = cgrp; c 1236 kernel/cgroup/cgroup.c link_css_set(&tmp_links, cset, c); c 1381 kernel/cgroup/cgroup.c struct cgroup *c = link->cgrp; c 1383 kernel/cgroup/cgroup.c if (c->root == root) { c 1384 kernel/cgroup/cgroup.c res = c; c 1412 kernel/cgroup/cgroup.c struct cgroup *c = link->cgrp; c 1414 kernel/cgroup/cgroup.c if (c->root == root) { c 1415 kernel/cgroup/cgroup.c res = c; c 566 kernel/cgroup/cpuset.c struct cpuset *c, *par; c 573 kernel/cgroup/cpuset.c cpuset_for_each_child(c, css, cur) c 574 kernel/cgroup/cpuset.c if (!is_cpuset_subset(c, trial)) c 594 kernel/cgroup/cpuset.c cpuset_for_each_child(c, css, par) { c 595 kernel/cgroup/cpuset.c if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) && c 596 kernel/cgroup/cpuset.c c != cur && c 597 kernel/cgroup/cpuset.c cpumask_intersects(trial->cpus_allowed, c->cpus_allowed)) c 599 kernel/cgroup/cpuset.c if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) && c 600 kernel/cgroup/cpuset.c c != cur && c 601 kernel/cgroup/cpuset.c nodes_intersects(trial->mems_allowed, c->mems_allowed)) c 646 kernel/cgroup/cpuset.c update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c) c 648 kernel/cgroup/cpuset.c if (dattr->relax_domain_level < c->relax_domain_level) c 649 kernel/cgroup/cpuset.c dattr->relax_domain_level = c->relax_domain_level; c 827 kernel/cgroup/cpuset.c struct cpuset *c = csa[k]; c 829 kernel/cgroup/cpuset.c if (c->pn == bpn) c 830 kernel/cgroup/cpuset.c c->pn = apn; c 101 kernel/cgroup/debug.c struct cgroup *c = link->cgrp; c 103 kernel/cgroup/debug.c cgroup_name(c, name_buf, NAME_MAX + 1); c 105 kernel/cgroup/debug.c c->root->hierarchy_id, name_buf); c 356 kernel/cgroup/rdma.c static int parse_resource(char *c, int *intval) c 359 kernel/cgroup/rdma.c char *name, *value = c; c 392 kernel/cgroup/rdma.c char *c; c 396 kernel/cgroup/rdma.c while ((c = strsep(&options, " ")) != NULL) { c 399 kernel/cgroup/rdma.c index = parse_resource(c, &intval); c 329 kernel/debug/gdbstub.c char *c = buf; c 332 kernel/debug/gdbstub.c c[size] = *buf++; c 333 kernel/debug/gdbstub.c if (c[size] == 0x7d) c 334 kernel/debug/gdbstub.c c[size] = *buf++ ^ 0x20; c 338 kernel/debug/gdbstub.c return probe_kernel_write(mem, c, size); c 566 kernel/debug/kdb/kdb_io.c struct console *c = console_drivers; c 711 kernel/debug/kdb/kdb_io.c while (c) { c 712 kernel/debug/kdb/kdb_io.c c->write(c, cp, retlen - (cp - kdb_buffer)); c 714 kernel/debug/kdb/kdb_io.c c = c->next; c 765 kernel/debug/kdb/kdb_io.c c = console_drivers; c 775 kernel/debug/kdb/kdb_io.c while (c) { c 776 kernel/debug/kdb/kdb_io.c c->write(c, moreprompt, strlen(moreprompt)); c 778 kernel/debug/kdb/kdb_io.c c = c->next; c 1466 kernel/debug/kdb/kdb_main.c unsigned char c; c 1468 kernel/debug/kdb/kdb_main.c if (kdb_getarea(c, addr)) c 1470 kernel/debug/kdb/kdb_main.c kdb_printf("%02x", c); c 1494 kernel/debug/kdb/kdb_main.c char *c = cbuf; c 1531 kernel/debug/kdb/kdb_main.c unsigned char c[8]; c 1535 kernel/debug/kdb/kdb_main.c cp = wc.c + 8 - bytesperword; c 1537 kernel/debug/kdb/kdb_main.c cp = wc.c; c 1540 kernel/debug/kdb/kdb_main.c #define printable_char(c) \ c 1541 kernel/debug/kdb/kdb_main.c ({unsigned char __c = c; 
isascii(__c) && isprint(__c) ? __c : '.'; }) c 1543 kernel/debug/kdb/kdb_main.c *c++ = printable_char(*cp++); c 349 kernel/exit.c struct task_struct *c, *g, *p = current; c 372 kernel/exit.c list_for_each_entry(c, &p->children, sibling) { c 373 kernel/exit.c if (c->mm == mm) c 380 kernel/exit.c list_for_each_entry(c, &p->real_parent->children, sibling) { c 381 kernel/exit.c if (c->mm == mm) c 391 kernel/exit.c for_each_thread(g, c) { c 392 kernel/exit.c if (c->mm == mm) c 394 kernel/exit.c if (c->mm) c 408 kernel/exit.c BUG_ON(c == p); c 409 kernel/exit.c get_task_struct(c); c 414 kernel/exit.c task_lock(c); c 420 kernel/exit.c if (c->mm != mm) { c 421 kernel/exit.c task_unlock(c); c 422 kernel/exit.c put_task_struct(c); c 425 kernel/exit.c WRITE_ONCE(mm->owner, c); c 426 kernel/exit.c task_unlock(c); c 427 kernel/exit.c put_task_struct(c); c 155 kernel/irq/irqdesc.c unsigned int c = kstat_irqs_cpu(irq, cpu); c 157 kernel/irq/irqdesc.c ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c); c 1177 kernel/irq/manage.c struct irq_chip *c = d->chip; c 1179 kernel/irq/manage.c return c->irq_request_resources ? c->irq_request_resources(d) : 0; c 1185 kernel/irq/manage.c struct irq_chip *c = d->chip; c 1187 kernel/irq/manage.c if (c->irq_release_resources) c 1188 kernel/irq/manage.c c->irq_release_resources(d); c 1210 kernel/irq/manage.c struct irq_chip *c = d->chip; c 1212 kernel/irq/manage.c return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL; c 1218 kernel/irq/manage.c struct irq_chip *c = d->chip; c 1220 kernel/irq/manage.c if (c->irq_nmi_teardown) c 1221 kernel/irq/manage.c c->irq_nmi_teardown(d); c 96 kernel/kprobes.c static int slots_per_page(struct kprobe_insn_cache *c) c 98 kernel/kprobes.c return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t)); c 125 kernel/kprobes.c static int collect_garbage_slots(struct kprobe_insn_cache *c); c 131 kernel/kprobes.c kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c) c 137 kernel/kprobes.c mutex_lock(&c->mutex); c 140 kernel/kprobes.c list_for_each_entry_rcu(kip, &c->pages, list) { c 141 kernel/kprobes.c if (kip->nused < slots_per_page(c)) { c 143 kernel/kprobes.c for (i = 0; i < slots_per_page(c); i++) { c 147 kernel/kprobes.c slot = kip->insns + (i * c->insn_size); c 153 kernel/kprobes.c kip->nused = slots_per_page(c); c 160 kernel/kprobes.c if (c->nr_garbage && collect_garbage_slots(c) == 0) c 164 kernel/kprobes.c kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL); c 173 kernel/kprobes.c kip->insns = c->alloc(); c 179 kernel/kprobes.c memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c)); c 183 kernel/kprobes.c kip->cache = c; c 184 kernel/kprobes.c list_add_rcu(&kip->list, &c->pages); c 187 kernel/kprobes.c mutex_unlock(&c->mutex); c 214 kernel/kprobes.c static int collect_garbage_slots(struct kprobe_insn_cache *c) c 221 kernel/kprobes.c list_for_each_entry_safe(kip, next, &c->pages, list) { c 226 kernel/kprobes.c for (i = 0; i < slots_per_page(c); i++) { c 231 kernel/kprobes.c c->nr_garbage = 0; c 235 kernel/kprobes.c void __free_insn_slot(struct kprobe_insn_cache *c, c 241 kernel/kprobes.c mutex_lock(&c->mutex); c 243 kernel/kprobes.c list_for_each_entry_rcu(kip, &c->pages, list) { c 245 kernel/kprobes.c (c->insn_size * sizeof(kprobe_opcode_t)); c 246 kernel/kprobes.c if (idx >= 0 && idx < slots_per_page(c)) c 261 kernel/kprobes.c if (++c->nr_garbage > slots_per_page(c)) c 262 kernel/kprobes.c collect_garbage_slots(c); c 267 kernel/kprobes.c mutex_unlock(&c->mutex); c 275 kernel/kprobes.c bool 
__is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr) c 281 kernel/kprobes.c list_for_each_entry_rcu(kip, &c->pages, list) { c 44 kernel/locking/lock_events.h #define lockevent_cond_inc(ev, c) __lockevent_inc(LOCKEVENT_ ##ev, c) c 51 kernel/locking/lock_events.h #define lockevent_add(ev, c) __lockevent_add(LOCKEVENT_ ##ev, c) c 56 kernel/locking/lock_events.h #define lockevent_add(ev, c) c 57 kernel/locking/lock_events.h #define lockevent_cond_inc(ev, c) c 523 kernel/locking/lockdep.c u64 c = 0; c 528 kernel/locking/lockdep.c c++; c 532 kernel/locking/lockdep.c return c; c 538 kernel/locking/lockdep.c u64 c = 0; c 543 kernel/locking/lockdep.c c++; c 545 kernel/locking/lockdep.c return c; c 597 kernel/locking/lockdep.c char c = '.'; c 608 kernel/locking/lockdep.c c = '+'; c 610 kernel/locking/lockdep.c c = '?'; c 612 kernel/locking/lockdep.c c = '-'; c 614 kernel/locking/lockdep.c return c; c 920 kernel/locking/lockdep.c static bool class_lock_list_valid(struct lock_class *c, struct list_head *h) c 925 kernel/locking/lockdep.c if (e->links_to != c) { c 927 kernel/locking/lockdep.c c->name ? : "(?)", c 368 kernel/locking/lockdep_proc.c static void seq_line(struct seq_file *m, char c, int offset, int length) c 375 kernel/locking/lockdep_proc.c seq_printf(m, "%c", c); c 623 kernel/locking/lockdep_proc.c char c; c 626 kernel/locking/lockdep_proc.c if (get_user(c, buf)) c 629 kernel/locking/lockdep_proc.c if (c != '0') c 133 kernel/locking/qspinlock_stat.h #define pv_kick(c) __pv_kick(c) c 144 kernel/locking/rtmutex.c # define rt_mutex_cmpxchg_relaxed(l,c,n) (cmpxchg_relaxed(&l->owner, c, n) == c) c 145 kernel/locking/rtmutex.c # define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c) c 146 kernel/locking/rtmutex.c # define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c) c 205 kernel/locking/rtmutex.c # define rt_mutex_cmpxchg_relaxed(l,c,n) (0) c 206 kernel/locking/rtmutex.c # define rt_mutex_cmpxchg_acquire(l,c,n) (0) c 207 kernel/locking/rtmutex.c # define rt_mutex_cmpxchg_release(l,c,n) (0) c 106 kernel/locking/rwsem.c # define DEBUG_RWSEMS_WARN_ON(c, sem) do { \ c 108 kernel/locking/rwsem.c WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, magic = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\ c 109 kernel/locking/rwsem.c #c, atomic_long_read(&(sem)->count), \ c 116 kernel/locking/rwsem.c # define DEBUG_RWSEMS_WARN_ON(c, sem) c 1812 kernel/module.c DECLARE_COMPLETION_ONSTACK(c); c 1813 kernel/module.c mod->mkobj.kobj_completion = &c; c 1815 kernel/module.c wait_for_completion(&c); c 77 kernel/params.c static char dash2underscore(char c) c 79 kernel/params.c if (c == '-') c 81 kernel/params.c return c; c 102 kernel/power/qos.c static inline int pm_qos_get_value(struct pm_qos_constraints *c) c 107 kernel/power/qos.c if (plist_head_empty(&c->list)) c 108 kernel/power/qos.c return c->no_constraint_value; c 110 kernel/power/qos.c switch (c->type) { c 112 kernel/power/qos.c return plist_first(&c->list)->prio; c 115 kernel/power/qos.c return plist_last(&c->list)->prio; c 118 kernel/power/qos.c plist_for_each(node, &c->list) c 130 kernel/power/qos.c s32 pm_qos_read_value(struct pm_qos_constraints *c) c 132 kernel/power/qos.c return c->target_value; c 135 kernel/power/qos.c static inline void pm_qos_set_value(struct pm_qos_constraints *c, s32 value) c 137 kernel/power/qos.c c->target_value = value; c 143 kernel/power/qos.c struct pm_qos_constraints *c; c 154 kernel/power/qos.c c = qos->constraints; c 155 kernel/power/qos.c if 
(IS_ERR_OR_NULL(c)) { c 162 kernel/power/qos.c if (plist_head_empty(&c->list)) { c 167 kernel/power/qos.c switch (c->type) { c 181 kernel/power/qos.c plist_for_each_entry(req, &c->list, node) { c 184 kernel/power/qos.c if ((req->node).prio != c->default_value) { c 194 kernel/power/qos.c type, pm_qos_get_value(c), active_reqs, tot_reqs); c 214 kernel/power/qos.c int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, c 222 kernel/power/qos.c prev_value = pm_qos_get_value(c); c 224 kernel/power/qos.c new_value = c->default_value; c 230 kernel/power/qos.c plist_del(node, &c->list); c 238 kernel/power/qos.c plist_del(node, &c->list); c 242 kernel/power/qos.c plist_add(node, &c->list); c 249 kernel/power/qos.c curr_value = pm_qos_get_value(c); c 250 kernel/power/qos.c pm_qos_set_value(c, curr_value); c 257 kernel/power/qos.c if (c->notifiers) c 258 kernel/power/qos.c blocking_notifier_call_chain(c->notifiers, c 662 kernel/power/qos.c struct pm_qos_constraints *c; c 664 kernel/power/qos.c c = &qos->min_freq; c 665 kernel/power/qos.c plist_head_init(&c->list); c 666 kernel/power/qos.c c->target_value = FREQ_QOS_MIN_DEFAULT_VALUE; c 667 kernel/power/qos.c c->default_value = FREQ_QOS_MIN_DEFAULT_VALUE; c 668 kernel/power/qos.c c->no_constraint_value = FREQ_QOS_MIN_DEFAULT_VALUE; c 669 kernel/power/qos.c c->type = PM_QOS_MAX; c 670 kernel/power/qos.c c->notifiers = &qos->min_freq_notifiers; c 671 kernel/power/qos.c BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers); c 673 kernel/power/qos.c c = &qos->max_freq; c 674 kernel/power/qos.c plist_head_init(&c->list); c 675 kernel/power/qos.c c->target_value = FREQ_QOS_MAX_DEFAULT_VALUE; c 676 kernel/power/qos.c c->default_value = FREQ_QOS_MAX_DEFAULT_VALUE; c 677 kernel/power/qos.c c->no_constraint_value = FREQ_QOS_MAX_DEFAULT_VALUE; c 678 kernel/power/qos.c c->type = PM_QOS_MIN; c 679 kernel/power/qos.c c->notifiers = &qos->max_freq_notifiers; c 680 kernel/power/qos.c BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers); c 38 kernel/printk/braille.c _braille_register_console(struct console *console, struct console_cmdline *c) c 42 kernel/printk/braille.c if (c->brl_options) { c 44 kernel/printk/braille.c rtn = braille_register_console(console, c->index, c->options, c 45 kernel/printk/braille.c c->brl_options); c 8 kernel/printk/braille.h braille_set_options(struct console_cmdline *c, char *brl_options) c 10 kernel/printk/braille.h c->brl_options = brl_options; c 24 kernel/printk/braille.h _braille_register_console(struct console *console, struct console_cmdline *c); c 32 kernel/printk/braille.h braille_set_options(struct console_cmdline *c, char *brl_options) c 43 kernel/printk/braille.h _braille_register_console(struct console *console, struct console_cmdline *c) c 720 kernel/printk/printk.c static void append_char(char **pp, char *e, char c) c 723 kernel/printk/printk.c *(*pp)++ = c; c 756 kernel/printk/printk.c unsigned char c = text[i]; c 758 kernel/printk/printk.c if (c < ' ' || c >= 127 || c == '\\') c 759 kernel/printk/printk.c p += scnprintf(p, e - p, "\\x%02x", c); c 761 kernel/printk/printk.c append_char(&p, e, c); c 769 kernel/printk/printk.c unsigned char c = dict[i]; c 776 kernel/printk/printk.c if (c == '\0') { c 782 kernel/printk/printk.c if (c < ' ' || c >= 127 || c == '\\') { c 783 kernel/printk/printk.c p += scnprintf(p, e - p, "\\x%02x", c); c 787 kernel/printk/printk.c append_char(&p, e, c); c 2148 kernel/printk/printk.c struct console_cmdline *c; c 2155 kernel/printk/printk.c for (i = 0, c = console_cmdline; c 2156 
kernel/printk/printk.c i < MAX_CMDLINECONSOLES && c->name[0]; c 2157 kernel/printk/printk.c i++, c++) { c 2158 kernel/printk/printk.c if (strcmp(c->name, name) == 0 && c->index == idx) { c 2168 kernel/printk/printk.c strlcpy(c->name, name, sizeof(c->name)); c 2169 kernel/printk/printk.c c->options = options; c 2170 kernel/printk/printk.c braille_set_options(c, brl_options); c 2172 kernel/printk/printk.c c->index = idx; c 2555 kernel/printk/printk.c struct console *c; c 2569 kernel/printk/printk.c for_each_console(c) c 2570 kernel/printk/printk.c if ((c->flags & CON_ENABLED) && c->unblank) c 2571 kernel/printk/printk.c c->unblank(); c 2609 kernel/printk/printk.c struct console *c; c 2613 kernel/printk/printk.c for_each_console(c) { c 2614 kernel/printk/printk.c if (!c->device) c 2616 kernel/printk/printk.c driver = c->device(c, index); c 2681 kernel/printk/printk.c struct console_cmdline *c; c 2734 kernel/printk/printk.c for (i = 0, c = console_cmdline; c 2735 kernel/printk/printk.c i < MAX_CMDLINECONSOLES && c->name[0]; c 2736 kernel/printk/printk.c i++, c++) { c 2738 kernel/printk/printk.c newcon->match(newcon, c->name, c->index, c->options) != 0) { c 2740 kernel/printk/printk.c BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name)); c 2741 kernel/printk/printk.c if (strcmp(c->name, newcon->name) != 0) c 2744 kernel/printk/printk.c newcon->index != c->index) c 2747 kernel/printk/printk.c newcon->index = c->index; c 2749 kernel/printk/printk.c if (_braille_register_console(newcon, c)) c 2753 kernel/printk/printk.c newcon->setup(newcon, c->options) != 0) c 123 kernel/printk/printk_safe.c const char *c, *end; c 126 kernel/printk/printk_safe.c c = start; c 131 kernel/printk/printk_safe.c while (c < end) { c 132 kernel/printk/printk_safe.c if (*c == '\n') { c 133 kernel/printk/printk_safe.c printk_safe_flush_line(start, c - start + 1); c 134 kernel/printk/printk_safe.c start = ++c; c 140 kernel/printk/printk_safe.c if ((c + 1 < end) && printk_get_level(c)) { c 142 kernel/printk/printk_safe.c c = printk_skip_level(c); c 146 kernel/printk/printk_safe.c printk_safe_flush_line(start, c - start); c 147 kernel/printk/printk_safe.c start = c++; c 153 kernel/printk/printk_safe.c c++; c 463 kernel/rcu/rcu.h unsigned long c); c 477 kernel/rcu/rcu.h unsigned long c); c 479 kernel/rcu/rcu.h #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \ c 1301 kernel/rcu/tree.c unsigned long c; c 1305 kernel/rcu/tree.c c = rcu_seq_snap(&rcu_state.gp_seq); c 1306 kernel/rcu/tree.c if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) { c 1308 kernel/rcu/tree.c (void)rcu_segcblist_accelerate(&rdp->cblist, c); c 1765 kernel/rcu/tree_plugin.h unsigned long c; c 1787 kernel/rcu/tree_plugin.h c = rdp->nocb_nobypass_count + 1; c 1790 kernel/rcu/tree_plugin.h c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy; c 1793 kernel/rcu/tree_plugin.h c = 0; c 1794 kernel/rcu/tree_plugin.h else if (c > nocb_nobypass_lim_per_jiffy) c 1795 kernel/rcu/tree_plugin.h c = nocb_nobypass_lim_per_jiffy; c 1797 kernel/rcu/tree_plugin.h WRITE_ONCE(rdp->nocb_nobypass_count, c); c 441 kernel/rcu/update.c unsigned long c_old, unsigned long c) c 443 kernel/rcu/update.c trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c); c 447 kernel/rcu/update.c #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \ c 1524 kernel/sched/topology.c int a, b, c, n; c 1540 kernel/sched/topology.c for_each_online_node(c) { c 1541 kernel/sched/topology.c if (node_distance(a, c) < n && c 1542 kernel/sched/topology.c 
node_distance(b, c) < n) { c 84 kernel/stacktrace.c struct stacktrace_cookie *c = cookie; c 86 kernel/stacktrace.c if (c->len >= c->size) c 89 kernel/stacktrace.c if (c->skip > 0) { c 90 kernel/stacktrace.c c->skip--; c 93 kernel/stacktrace.c c->store[c->len++] = addr; c 94 kernel/stacktrace.c return c->len < c->size; c 117 kernel/stacktrace.c struct stacktrace_cookie c = { c 123 kernel/stacktrace.c arch_stack_walk(consume_entry, &c, current, NULL); c 124 kernel/stacktrace.c return c.len; c 141 kernel/stacktrace.c struct stacktrace_cookie c = { c 151 kernel/stacktrace.c arch_stack_walk(consume_entry, &c, tsk, NULL); c 153 kernel/stacktrace.c return c.len; c 169 kernel/stacktrace.c struct stacktrace_cookie c = { c 175 kernel/stacktrace.c arch_stack_walk(consume_entry, &c, current, regs); c 176 kernel/stacktrace.c return c.len; c 196 kernel/stacktrace.c struct stacktrace_cookie c = { c 209 kernel/stacktrace.c ret = arch_stack_walk_reliable(consume_entry, &c, tsk); c 211 kernel/stacktrace.c return ret ? ret : c.len; c 226 kernel/stacktrace.c struct stacktrace_cookie c = { c 238 kernel/stacktrace.c arch_stack_walk_user(consume_entry, &c, task_pt_regs(current)); c 241 kernel/stacktrace.c return c.len; c 2006 kernel/sysctl.c char c; c 2031 kernel/sysctl.c if (get_user(c, p++)) c 2033 kernel/sysctl.c if (c == 0 || c == '\n') c 2035 kernel/sysctl.c data[len++] = c; c 2277 kernel/sysctl.c static int proc_put_char(void __user **buf, size_t *size, char c) c 2281 kernel/sysctl.c if (put_user(c, *buffer)) c 3175 kernel/sysctl.c char tr_a[] = { '-', ',', '\n' }, tr_b[] = { ',', '\n', 0 }, c; c 3210 kernel/sysctl.c sizeof(tr_a), &c); c 3234 kernel/sysctl.c if (c == '-') { c 3237 kernel/sysctl.c &c); c 2107 kernel/trace/trace_events_filter.c .rec = { .a = va, .b = vb, .c = vc, .d = vd, \ c 12 kernel/trace/trace_events_filter_test.h TP_PROTO(int a, int b, int c, int d, int e, int f, int g, int h), c 14 kernel/trace/trace_events_filter_test.h TP_ARGS(a, b, c, d, e, f, g, h), c 19 kernel/trace/trace_events_filter_test.h __field(int, c) c 30 kernel/trace/trace_events_filter_test.h __entry->c = c; c 39 kernel/trace/trace_events_filter_test.h __entry->a, __entry->b, __entry->c, __entry->d, c 1042 kernel/trace/trace_kprobe.c u8 c; c 1045 kernel/trace/trace_kprobe.c ret = probe_kernel_read(&c, (u8 *)addr + len, 1); c 1047 kernel/trace/trace_kprobe.c } while (c && ret == 0 && len < MAX_STRING_SIZE); c 810 kernel/trace/trace_probe.c char c; c 820 kernel/trace/trace_probe.c c = *tmp; c 829 kernel/trace/trace_probe.c *tmp = c; c 236 kernel/trace/trace_seq.c void trace_seq_putc(struct trace_seq *s, unsigned char c) c 248 kernel/trace/trace_seq.c seq_buf_putc(&s->seq, c); c 728 kernel/trace/trace_uprobe.c char c = is_ret_probe(tu) ? 
'r' : 'p'; c 731 kernel/trace/trace_uprobe.c seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp), c 177 kernel/ucount.c int c, old; c 178 kernel/ucount.c c = atomic_read(v); c 180 kernel/ucount.c if (unlikely(c >= u)) c 182 kernel/ucount.c old = atomic_cmpxchg(v, c, c+1); c 183 kernel/ucount.c if (likely(old == c)) c 185 kernel/ucount.c c = old; c 222 lib/842/842_compress.c static int add_template(struct sw842_param *p, u8 c) c 225 lib/842/842_compress.c u8 *t = comp_ops[c]; c 228 lib/842/842_compress.c if (c >= OPS_MAX) c 286 lib/842/842_compress.c c, i, t[0], t[1], t[2], t[3]); c 295 lib/842/842_compress.c c, b, t[0], t[1], t[2], t[3]); c 380 lib/842/842_compress.c static bool check_template(struct sw842_param *p, u8 c) c 382 lib/842/842_compress.c u8 *t = comp_ops[c]; c 385 lib/842/842_compress.c if (c >= OPS_MAX) c 146 lib/atomic64_test.c #define INIT(c) do { atomic64_set(&v, c); r = c; } while (0) c 105 lib/bch.c unsigned int c[0]; /* polynomial terms */ c 114 lib/bch.c unsigned int c[2]; c 401 lib/bch.c pelp->c[0] = 1; c 403 lib/bch.c elp->c[0] = 1; c 413 lib/bch.c if (pelp->c[j]) { c 414 lib/bch.c l = a_log(bch, pelp->c[j]); c 415 lib/bch.c elp->c[j+k] ^= a_pow(bch, tmp+l); c 431 lib/bch.c d ^= gf_mul(bch, elp->c[j], syn[2*i+2-j]); c 447 lib/bch.c int rem, c, r, p, k, param[BCH_MAX_M]; c 453 lib/bch.c for (c = 0; c < m; c++) { c 455 lib/bch.c p = c-k; c 477 lib/bch.c param[k++] = c; c 500 lib/bch.c for (c = 0; c < k; c++) c 501 lib/bch.c rows[param[c]] = (rows[param[c]] & ~1)|((p >> c) & 1); c 519 lib/bch.c unsigned int b, unsigned int c, c 528 lib/bch.c rows[0] = c; c 560 lib/bch.c if (poly->c[0]) c 562 lib/bch.c roots[n++] = mod_s(bch, GF_N(bch)-bch->a_log_tab[poly->c[0]]+ c 563 lib/bch.c bch->a_log_tab[poly->c[1]]); c 576 lib/bch.c if (poly->c[0] && poly->c[1]) { c 578 lib/bch.c l0 = bch->a_log_tab[poly->c[0]]; c 579 lib/bch.c l1 = bch->a_log_tab[poly->c[1]]; c 580 lib/bch.c l2 = bch->a_log_tab[poly->c[2]]; c 616 lib/bch.c unsigned int a, b, c, a2, b2, c2, e3, tmp[4]; c 618 lib/bch.c if (poly->c[0]) { c 620 lib/bch.c e3 = poly->c[3]; c 621 lib/bch.c c2 = gf_div(bch, poly->c[0], e3); c 622 lib/bch.c b2 = gf_div(bch, poly->c[1], e3); c 623 lib/bch.c a2 = gf_div(bch, poly->c[2], e3); c 626 lib/bch.c c = gf_mul(bch, a2, c2); /* c = a2c2 */ c 631 lib/bch.c if (find_affine4_roots(bch, a, b, c, tmp) == 4) { c 649 lib/bch.c unsigned int a, b, c, d, e = 0, f, a2, b2, c2, e4; c 651 lib/bch.c if (poly->c[0] == 0) c 655 lib/bch.c e4 = poly->c[4]; c 656 lib/bch.c d = gf_div(bch, poly->c[0], e4); c 657 lib/bch.c c = gf_div(bch, poly->c[1], e4); c 658 lib/bch.c b = gf_div(bch, poly->c[2], e4); c 659 lib/bch.c a = gf_div(bch, poly->c[3], e4); c 664 lib/bch.c if (c) { c 666 lib/bch.c f = gf_div(bch, c, a); c 691 lib/bch.c b2 = c; c 712 lib/bch.c int i, d = a->deg, l = GF_N(bch)-a_log(bch, a->c[a->deg]); c 716 lib/bch.c rep[i] = a->c[i] ? 
mod_s(bch, a_log(bch, a->c[i])+l) : -1; c 726 lib/bch.c unsigned int i, j, *c = a->c; c 739 lib/bch.c if (c[j]) { c 740 lib/bch.c la = a_log(bch, c[j]); c 745 lib/bch.c c[p] ^= bch->a_pow_tab[mod_s(bch, c 751 lib/bch.c while (!c[a->deg] && a->deg) c 766 lib/bch.c memcpy(q->c, &a->c[b->deg], (1+q->deg)*sizeof(unsigned int)); c 769 lib/bch.c q->c[0] = 0; c 814 lib/bch.c z->c[0] = 0; c 815 lib/bch.c z->c[1] = bch->a_pow_tab[k]; c 826 lib/bch.c out->c[j] ^= z->c[j]; c 827 lib/bch.c z->c[2*j] = gf_sqr(bch, z->c[j]); c 828 lib/bch.c z->c[2*j+1] = 0; c 839 lib/bch.c while (!out->c[out->deg] && out->deg) c 934 lib/bch.c syn0 = gf_div(bch, p->c[0], p->c[p->deg]); c 1203 lib/bch.c g->c[0] = 1; c 1208 lib/bch.c g->c[g->deg+1] = 1; c 1210 lib/bch.c g->c[j] = gf_mul(bch, g->c[j], r)^g->c[j-1]; c 1212 lib/bch.c g->c[0] = gf_mul(bch, g->c[0], r); c 1223 lib/bch.c if (g->c[n-1-j]) c 380 lib/bitmap.c int c, old_c, totaldigits, ndigits, nchunks, nbits; c 386 lib/bitmap.c nchunks = nbits = totaldigits = c = 0; c 393 lib/bitmap.c old_c = c; c 395 lib/bitmap.c if (__get_user(c, ubuf++)) c 399 lib/bitmap.c c = *buf++; c 401 lib/bitmap.c if (isspace(c)) c 409 lib/bitmap.c if (totaldigits && c && isspace(old_c)) c 413 lib/bitmap.c if (c == '\0' || c == ',') c 416 lib/bitmap.c if (!isxdigit(c)) c 427 lib/bitmap.c chunk = (chunk << 4) | hex_to_bin(c); c 441 lib/bitmap.c } while (buflen && c == ','); c 551 lib/bitmap.c static inline bool end_of_str(char c) c 553 lib/bitmap.c return c == '\0' || c == '\n'; c 556 lib/bitmap.c static inline bool __end_of_region(char c) c 558 lib/bitmap.c return isspace(c) || c == ','; c 561 lib/bitmap.c static inline bool end_of_region(char c) c 563 lib/bitmap.c return __end_of_region(c) || end_of_str(c); c 126 lib/btree.c static unsigned long *longset(unsigned long *s, unsigned long c, size_t n) c 131 lib/btree.c s[i] = c; c 25 lib/crc4.c uint8_t crc4(uint8_t c, uint64_t x, int bits) c 37 lib/crc4.c c = crc4_tab[c ^ ((x >> i) & 0xf)]; c 39 lib/crc4.c return c; c 609 lib/crypto/des.c #define DES_PC2(a, b, c, d) (T4(d) | T3(c) | T2(b) | T1(a)) c 626 lib/crypto/des.c unsigned long a, b, c, d, w; c 630 lib/crypto/des.c c = k[5]; c &= 0x0e; c <<= 4; c |= k[1] & 0x1e; c = pc1[c]; c 634 lib/crypto/des.c pe[15 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c 635 lib/crypto/des.c pe[14 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; c 636 lib/crypto/des.c pe[13 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; c 637 lib/crypto/des.c pe[12 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; c 638 lib/crypto/des.c pe[11 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; c 639 lib/crypto/des.c pe[10 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; c 640 lib/crypto/des.c pe[ 9 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; c 641 lib/crypto/des.c pe[ 8 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; c 642 lib/crypto/des.c pe[ 7 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; c 643 lib/crypto/des.c pe[ 6 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; c 644 lib/crypto/des.c pe[ 5 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; c 645 lib/crypto/des.c pe[ 4 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; c 646 lib/crypto/des.c pe[ 3 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; c 647 lib/crypto/des.c pe[ 2 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; c 648 lib/crypto/des.c pe[ 1 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; c 649 lib/crypto/des.c pe[ 0 * 2 + 0] = DES_PC2(b, c, d, a); c 652 lib/crypto/des.c w = (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b 
^ rs[d]); c 658 lib/crypto/des.c c = k[1]; c &= 0xe0; c >>= 4; c |= k[5] & 0xf0; c = pc1[c + 1]; c 663 lib/crypto/des.c w |= (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]); c 665 lib/crypto/des.c pe[15 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c 666 lib/crypto/des.c pe[14 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; c 667 lib/crypto/des.c pe[13 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; c 668 lib/crypto/des.c pe[12 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; c 669 lib/crypto/des.c pe[11 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; c 670 lib/crypto/des.c pe[10 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; c 671 lib/crypto/des.c pe[ 9 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; c 672 lib/crypto/des.c pe[ 8 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; c 673 lib/crypto/des.c pe[ 7 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; c 674 lib/crypto/des.c pe[ 6 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; c 675 lib/crypto/des.c pe[ 5 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; c 676 lib/crypto/des.c pe[ 4 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; c 677 lib/crypto/des.c pe[ 3 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; c 678 lib/crypto/des.c pe[ 2 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; c 679 lib/crypto/des.c pe[ 1 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; c 680 lib/crypto/des.c pe[ 0 * 2 + 1] = DES_PC2(b, c, d, a); c 686 lib/crypto/des.c c = a ^ b; c 687 lib/crypto/des.c c &= 0xffff0000; c 688 lib/crypto/des.c a ^= c; c 689 lib/crypto/des.c b ^= c; c 717 lib/crypto/des.c unsigned long a, b, c, d; c 721 lib/crypto/des.c c = k[5]; c &= 0x0e; c <<= 4; c |= k[1] & 0x1e; c = pc1[c]; c 725 lib/crypto/des.c pe[ 0 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c 726 lib/crypto/des.c pe[ 1 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; c 727 lib/crypto/des.c pe[ 2 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; c 728 lib/crypto/des.c pe[ 3 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; c 729 lib/crypto/des.c pe[ 4 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; c 730 lib/crypto/des.c pe[ 5 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; c 731 lib/crypto/des.c pe[ 6 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; c 732 lib/crypto/des.c pe[ 7 * 2] = DES_PC2(d, a, b, c); c = rs[c]; c 733 lib/crypto/des.c pe[ 8 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; c 734 lib/crypto/des.c pe[ 9 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; c 735 lib/crypto/des.c pe[10 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; c 736 lib/crypto/des.c pe[11 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; c 737 lib/crypto/des.c pe[12 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; c 738 lib/crypto/des.c pe[13 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; c 739 lib/crypto/des.c pe[14 * 2] = DES_PC2(c, d, a, b); b = rs[b]; c 740 lib/crypto/des.c pe[15 * 2] = DES_PC2(b, c, d, a); c 746 lib/crypto/des.c c = k[1]; c &= 0xe0; c >>= 4; c |= k[5] & 0xf0; c = pc1[c + 1]; c 750 lib/crypto/des.c pe[ 0 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c 751 lib/crypto/des.c pe[ 1 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; c 752 lib/crypto/des.c pe[ 2 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; c 753 lib/crypto/des.c pe[ 3 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; c 754 lib/crypto/des.c pe[ 4 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; c 755 lib/crypto/des.c pe[ 5 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; c 756 lib/crypto/des.c pe[ 6 * 2 + 1] = DES_PC2(b, c, d, 
a); a = rs[a]; d = rs[d]; c 757 lib/crypto/des.c pe[ 7 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; c 758 lib/crypto/des.c pe[ 8 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; c 759 lib/crypto/des.c pe[ 9 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; c 760 lib/crypto/des.c pe[10 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; c 761 lib/crypto/des.c pe[11 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; c 762 lib/crypto/des.c pe[12 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; c 763 lib/crypto/des.c pe[13 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; c 764 lib/crypto/des.c pe[14 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; c 765 lib/crypto/des.c pe[15 * 2 + 1] = DES_PC2(b, c, d, a); c 771 lib/crypto/des.c c = a ^ b; c 772 lib/crypto/des.c c &= 0xffff0000; c 773 lib/crypto/des.c a ^= c; c 774 lib/crypto/des.c b ^= c; c 48 lib/crypto/sha256.c u32 a, b, c, d, e, f, g, h, t1, t2; c 61 lib/crypto/sha256.c a = state[0]; b = state[1]; c = state[2]; d = state[3]; c 66 lib/crypto/sha256.c t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; c 68 lib/crypto/sha256.c t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; c 69 lib/crypto/sha256.c t1 = f + e1(c) + Ch(c, d, e) + 0xb5c0fbcf + W[2]; c 71 lib/crypto/sha256.c t1 = e + e1(b) + Ch(b, c, d) + 0xe9b5dba5 + W[3]; c 73 lib/crypto/sha256.c t1 = d + e1(a) + Ch(a, b, c) + 0x3956c25b + W[4]; c 75 lib/crypto/sha256.c t1 = c + e1(h) + Ch(h, a, b) + 0x59f111f1 + W[5]; c 76 lib/crypto/sha256.c t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; c 78 lib/crypto/sha256.c t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; c 80 lib/crypto/sha256.c t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; c 83 lib/crypto/sha256.c t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; c 85 lib/crypto/sha256.c t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; c 86 lib/crypto/sha256.c t1 = f + e1(c) + Ch(c, d, e) + 0x243185be + W[10]; c 88 lib/crypto/sha256.c t1 = e + e1(b) + Ch(b, c, d) + 0x550c7dc3 + W[11]; c 90 lib/crypto/sha256.c t1 = d + e1(a) + Ch(a, b, c) + 0x72be5d74 + W[12]; c 92 lib/crypto/sha256.c t1 = c + e1(h) + Ch(h, a, b) + 0x80deb1fe + W[13]; c 93 lib/crypto/sha256.c t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; c 95 lib/crypto/sha256.c t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; c 97 lib/crypto/sha256.c t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; c 100 lib/crypto/sha256.c t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; c 102 lib/crypto/sha256.c t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; c 103 lib/crypto/sha256.c t1 = f + e1(c) + Ch(c, d, e) + 0x0fc19dc6 + W[18]; c 105 lib/crypto/sha256.c t1 = e + e1(b) + Ch(b, c, d) + 0x240ca1cc + W[19]; c 107 lib/crypto/sha256.c t1 = d + e1(a) + Ch(a, b, c) + 0x2de92c6f + W[20]; c 109 lib/crypto/sha256.c t1 = c + e1(h) + Ch(h, a, b) + 0x4a7484aa + W[21]; c 110 lib/crypto/sha256.c t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; c 112 lib/crypto/sha256.c t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; c 114 lib/crypto/sha256.c t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; c 117 lib/crypto/sha256.c t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; c 119 lib/crypto/sha256.c t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; c 120 lib/crypto/sha256.c t1 = f + e1(c) + Ch(c, d, e) + 0xb00327c8 + W[26]; c 122 lib/crypto/sha256.c t1 = e + e1(b) + Ch(b, c, d) + 0xbf597fc7 + W[27]; c 124 lib/crypto/sha256.c t1 = d + e1(a) + Ch(a, b, c) + 0xc6e00bf3 + W[28]; c 126 lib/crypto/sha256.c t1 = c + e1(h) + Ch(h, a, b) + 0xd5a79147 + W[29]; c 127 lib/crypto/sha256.c t2 = e0(d) + Maj(d, e, f); g += t1; 
c = t1 + t2; c 129 lib/crypto/sha256.c t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; c 131 lib/crypto/sha256.c t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; c 134 lib/crypto/sha256.c t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; c 136 lib/crypto/sha256.c t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; c 137 lib/crypto/sha256.c t1 = f + e1(c) + Ch(c, d, e) + 0x4d2c6dfc + W[34]; c 139 lib/crypto/sha256.c t1 = e + e1(b) + Ch(b, c, d) + 0x53380d13 + W[35]; c 141 lib/crypto/sha256.c t1 = d + e1(a) + Ch(a, b, c) + 0x650a7354 + W[36]; c 143 lib/crypto/sha256.c t1 = c + e1(h) + Ch(h, a, b) + 0x766a0abb + W[37]; c 144 lib/crypto/sha256.c t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; c 146 lib/crypto/sha256.c t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; c 148 lib/crypto/sha256.c t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; c 151 lib/crypto/sha256.c t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; c 153 lib/crypto/sha256.c t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; c 154 lib/crypto/sha256.c t1 = f + e1(c) + Ch(c, d, e) + 0xc24b8b70 + W[42]; c 156 lib/crypto/sha256.c t1 = e + e1(b) + Ch(b, c, d) + 0xc76c51a3 + W[43]; c 158 lib/crypto/sha256.c t1 = d + e1(a) + Ch(a, b, c) + 0xd192e819 + W[44]; c 160 lib/crypto/sha256.c t1 = c + e1(h) + Ch(h, a, b) + 0xd6990624 + W[45]; c 161 lib/crypto/sha256.c t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; c 163 lib/crypto/sha256.c t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; c 165 lib/crypto/sha256.c t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; c 168 lib/crypto/sha256.c t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; c 170 lib/crypto/sha256.c t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; c 171 lib/crypto/sha256.c t1 = f + e1(c) + Ch(c, d, e) + 0x2748774c + W[50]; c 173 lib/crypto/sha256.c t1 = e + e1(b) + Ch(b, c, d) + 0x34b0bcb5 + W[51]; c 175 lib/crypto/sha256.c t1 = d + e1(a) + Ch(a, b, c) + 0x391c0cb3 + W[52]; c 177 lib/crypto/sha256.c t1 = c + e1(h) + Ch(h, a, b) + 0x4ed8aa4a + W[53]; c 178 lib/crypto/sha256.c t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; c 180 lib/crypto/sha256.c t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; c 182 lib/crypto/sha256.c t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; c 185 lib/crypto/sha256.c t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; c 187 lib/crypto/sha256.c t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; c 188 lib/crypto/sha256.c t1 = f + e1(c) + Ch(c, d, e) + 0x84c87814 + W[58]; c 190 lib/crypto/sha256.c t1 = e + e1(b) + Ch(b, c, d) + 0x8cc70208 + W[59]; c 192 lib/crypto/sha256.c t1 = d + e1(a) + Ch(a, b, c) + 0x90befffa + W[60]; c 194 lib/crypto/sha256.c t1 = c + e1(h) + Ch(h, a, b) + 0xa4506ceb + W[61]; c 195 lib/crypto/sha256.c t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; c 197 lib/crypto/sha256.c t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; c 199 lib/crypto/sha256.c t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; c 201 lib/crypto/sha256.c state[0] += a; state[1] += b; state[2] += c; state[3] += d; c 205 lib/crypto/sha256.c a = b = c = d = e = f = g = h = t1 = t2 = 0; c 633 lib/decompress_bunzip2.c unsigned int i, j, c; c 656 lib/decompress_bunzip2.c c = i << 24; c 658 lib/decompress_bunzip2.c c = c&0x80000000 ? 
(c << 1)^(CRC32_POLY_BE) : (c << 1); c 659 lib/decompress_bunzip2.c bd->crc32Table[i] = c; c 66 lib/earlycpio.c unsigned char c, x; c 86 lib/earlycpio.c c = *p++; c 88 lib/earlycpio.c x = c - '0'; c 94 lib/earlycpio.c x = (c | 0x20) - 'a'; c 109 lib/fonts/fonts.c int i, c, cc, res; c 116 lib/fonts/fonts.c c = f->pref; c 120 lib/fonts/fonts.c c = 100; c 124 lib/fonts/fonts.c c = 100; c 128 lib/fonts/fonts.c c += 1000; c 133 lib/fonts/fonts.c c += 20 - res; c 137 lib/fonts/fonts.c c += 1000; c 139 lib/fonts/fonts.c if (c > cc) { c 140 lib/fonts/fonts.c cc = c; c 25 lib/gen_crc64table.c uint64_t i, j, c, crc; c 29 lib/gen_crc64table.c c = i << 56; c 32 lib/gen_crc64table.c if ((crc ^ c) & 0x8000000000000000ULL) c 36 lib/gen_crc64table.c c <<= 1; c 56 lib/glob.c unsigned char c = *str++; c 61 lib/glob.c if (c == '\0') c 95 lib/glob.c match |= (a <= c && c <= b); c 108 lib/glob.c if (c == d) { c 114 lib/glob.c if (c == '\0' || !back_pat) c 353 lib/inflate.c unsigned c[BMAX+1]; /* bit length count table */ c 358 lib/inflate.c unsigned *c, *v, *x; c 368 lib/inflate.c c = stk->c; c 374 lib/inflate.c memzero(stk->c, sizeof(stk->c)); c 379 lib/inflate.c c[*p]++; /* assume all entries <= BMAX */ c 382 lib/inflate.c if (c[0] == n) /* null input--all zero length codes */ c 395 lib/inflate.c if (c[j]) c 401 lib/inflate.c if (c[i]) c 412 lib/inflate.c if ((y -= c[j]) < 0) { c 416 lib/inflate.c if ((y -= c[i]) < 0) { c 420 lib/inflate.c c[i] += y; c 426 lib/inflate.c p = c + 1; xp = x + 2; c 457 lib/inflate.c a = c[k]; c 475 lib/inflate.c xp = c + k; c 1151 lib/inflate.c unsigned long c; /* crc shift register */ c 1168 lib/inflate.c c = 0; c 1171 lib/inflate.c c = c & 1 ? (c >> 1) ^ e : c >> 1; c 1173 lib/inflate.c c ^= e; c 1175 lib/inflate.c crc_32_tab[i] = c; c 56 lib/kstrtox.c unsigned int c = *s; c 57 lib/kstrtox.c unsigned int lc = c | 0x20; /* don't tolower() this line */ c 60 lib/kstrtox.c if ('0' <= c && c <= '9') c 61 lib/kstrtox.c val = c - '0'; c 249 lib/locking-selftest.c #define WWL(x, c) ww_mutex_lock(x, c) c 232 lib/lz4/lz4defs.h #define LZ4_STATIC_ASSERT(c) BUILD_BUG_ON(!(c)) c 440 lib/mpi/longlong.h #define rshift_rhlc(r, h, l, c) \ c 443 lib/mpi/longlong.h "=r" (r) : "r" (h), "r" (l), "rn" (c)) c 499 lib/mpi/longlong.h #define rshift_rhlc(r, h, l, c) \ c 506 lib/mpi/longlong.h : "=d" (r) : "dI" (__nn.__ll), "dI" (c)); \ c 151 lib/mpi/mpi-pow.c int c; c 163 lib/mpi/mpi-pow.c c = count_leading_zeros(e); c 164 lib/mpi/mpi-pow.c e = (e << c) << 1; /* shift the exp bits to the left, lose msb */ c 165 lib/mpi/mpi-pow.c c = BITS_PER_MPI_LIMB - 1 - c; c 178 lib/mpi/mpi-pow.c while (c) { c 244 lib/mpi/mpi-pow.c c--; c 252 lib/mpi/mpi-pow.c c = BITS_PER_MPI_LIMB; c 62 lib/random32.c #define TAUSWORTHE(s, a, b, c, d) ((s & c) << d) ^ (((s << a) ^ s) >> b) c 77 lib/reed_solomon/test_rslib.c uint16_t *c; /* sent codeword */ c 104 lib/reed_solomon/test_rslib.c kfree(ws->c); c 118 lib/reed_solomon/test_rslib.c ws->c = kmalloc_array(2 * (nn + nroots), c 120 lib/reed_solomon/test_rslib.c if (!ws->c) c 123 lib/reed_solomon/test_rslib.c ws->r = ws->c + nn; c 159 lib/reed_solomon/test_rslib.c uint16_t *c = ws->c; c 167 lib/reed_solomon/test_rslib.c c[i] = prandom_u32() & nn; c 169 lib/reed_solomon/test_rslib.c memset(c + dlen, 0, nroots * sizeof(*c)); c 170 lib/reed_solomon/test_rslib.c encode_rs16(rs, c, dlen, c + dlen, 0); c 173 lib/reed_solomon/test_rslib.c memcpy(r, c, len * sizeof(*r)); c 268 lib/reed_solomon/test_rslib.c uint16_t *c = ws->c; c 307 lib/reed_solomon/test_rslib.c if (memcmp(r, c, len * 
sizeof(*r))) c 169 lib/seq_buf.c int seq_buf_putc(struct seq_buf *s, unsigned char c) c 174 lib/seq_buf.c s->buffer[s->len++] = c; c 233 lib/sort.c size_t b, c, d; c 254 lib/sort.c for (b = a; c = 2*b + size, (d = c + size) < n;) c 255 lib/sort.c b = do_cmp(base + c, base + d, cmp_func, priv) >= 0 ? c : d; c 257 lib/sort.c b = c; c 262 lib/sort.c c = b; /* Where "a" belongs */ c 265 lib/sort.c do_swap(base + b, base + c, size, swap_func); c 206 lib/string.c unsigned long c, data; c 208 lib/string.c c = read_word_at_a_time(src+res); c 209 lib/string.c if (has_zero(c, &data, &constants)) { c 210 lib/string.c data = prep_zero_mask(c, data, &constants); c 212 lib/string.c *(unsigned long *)(dest+res) = c & zero_bytemask(data); c 215 lib/string.c *(unsigned long *)(dest+res) = c; c 222 lib/string.c char c; c 224 lib/string.c c = src[res]; c 225 lib/string.c dest[res] = c; c 226 lib/string.c if (!c) c 409 lib/string.c char *strchr(const char *s, int c) c 411 lib/string.c for (; *s != (char)c; ++s) c 428 lib/string.c char *strchrnul(const char *s, int c) c 430 lib/string.c while (*s && *s != (char)c) c 443 lib/string.c char *strrchr(const char *s, int c) c 447 lib/string.c if (*s == (char)c) c 465 lib/string.c char *strnchr(const char *s, size_t count, int c) c 468 lib/string.c if (*s == (char)c) c 740 lib/string.c void *memset(void *s, int c, size_t count) c 745 lib/string.c *xs++ = c; c 922 lib/string.c void *memscan(void *addr, int c, size_t size) c 927 lib/string.c if (*p == c) c 997 lib/string.c void *memchr(const void *s, int c, size_t n) c 1001 lib/string.c if ((unsigned char)c == *p++) { c 1030 lib/string.c void *memchr_inv(const void *start, int c, size_t bytes) c 1032 lib/string.c u8 value = c; c 303 lib/string_helpers.c static bool escape_passthrough(unsigned char c, char **dst, char *end) c 308 lib/string_helpers.c *out = c; c 313 lib/string_helpers.c static bool escape_space(unsigned char c, char **dst, char *end) c 318 lib/string_helpers.c switch (c) { c 349 lib/string_helpers.c static bool escape_special(unsigned char c, char **dst, char *end) c 354 lib/string_helpers.c switch (c) { c 379 lib/string_helpers.c static bool escape_null(unsigned char c, char **dst, char *end) c 383 lib/string_helpers.c if (c) c 397 lib/string_helpers.c static bool escape_octal(unsigned char c, char **dst, char *end) c 405 lib/string_helpers.c *out = ((c >> 6) & 0x07) + '0'; c 408 lib/string_helpers.c *out = ((c >> 3) & 0x07) + '0'; c 411 lib/string_helpers.c *out = ((c >> 0) & 0x07) + '0'; c 418 lib/string_helpers.c static bool escape_hex(unsigned char c, char **dst, char *end) c 429 lib/string_helpers.c *out = hex_asc_hi(c); c 432 lib/string_helpers.c *out = hex_asc_lo(c); c 505 lib/string_helpers.c unsigned char c = *src++; c 518 lib/string_helpers.c if ((flags & ESCAPE_NP && isprint(c)) || c 519 lib/string_helpers.c (is_dict && !strchr(only, c))) { c 522 lib/string_helpers.c if (flags & ESCAPE_SPACE && escape_space(c, &p, end)) c 525 lib/string_helpers.c if (flags & ESCAPE_SPECIAL && escape_special(c, &p, end)) c 528 lib/string_helpers.c if (flags & ESCAPE_NULL && escape_null(c, &p, end)) c 532 lib/string_helpers.c if (flags & ESCAPE_OCTAL && escape_octal(c, &p, end)) c 535 lib/string_helpers.c if (flags & ESCAPE_HEX && escape_hex(c, &p, end)) c 539 lib/string_helpers.c escape_passthrough(c, &p, end); c 553 lib/string_helpers.c unsigned char c = *src++; c 555 lib/string_helpers.c if (!isprint(c) || !isascii(c) || c == '"' || c == '\\') c 556 lib/string_helpers.c escape_hex(c, &p, end); c 558 
lib/string_helpers.c escape_passthrough(c, &p, end); c 37 lib/strncpy_from_user.c unsigned long c, data; c 40 lib/strncpy_from_user.c unsafe_get_user(c, (unsigned long __user *)(src+res), byte_at_a_time); c 42 lib/strncpy_from_user.c *(unsigned long *)(dst+res) = c; c 43 lib/strncpy_from_user.c if (has_zero(c, &data, &constants)) { c 44 lib/strncpy_from_user.c data = prep_zero_mask(c, data, &constants); c 54 lib/strncpy_from_user.c char c; c 56 lib/strncpy_from_user.c unsafe_get_user(c,src+res, efault); c 57 lib/strncpy_from_user.c dst[res] = c; c 58 lib/strncpy_from_user.c if (!c) c 27 lib/strnlen_user.c unsigned long c; c 37 lib/strnlen_user.c unsafe_get_user(c, (unsigned long __user *)src, efault); c 38 lib/strnlen_user.c c |= aligned_byte_mask(align); c 42 lib/strnlen_user.c if (has_zero(c, &data, &constants)) { c 43 lib/strnlen_user.c data = prep_zero_mask(c, data, &constants); c 52 lib/strnlen_user.c unsafe_get_user(c, (unsigned long __user *)(src+res), efault); c 196 lib/test_meminit.c struct kmem_cache *c; c 202 lib/test_meminit.c c = kmem_cache_create("test_cache", size, 1, c 206 lib/test_meminit.c buf = kmem_cache_alloc(c, alloc_mask); c 212 lib/test_meminit.c kmem_cache_free(c, buf); c 229 lib/test_meminit.c kmem_cache_free(c, buf); c 243 lib/test_meminit.c kmem_cache_destroy(c); c 255 lib/test_meminit.c struct kmem_cache *c; c 261 lib/test_meminit.c c = kmem_cache_create("test_cache", size, size, SLAB_TYPESAFE_BY_RCU, c 263 lib/test_meminit.c buf = kmem_cache_alloc(c, GFP_KERNEL); c 275 lib/test_meminit.c kmem_cache_free(c, buf); c 281 lib/test_meminit.c buf = kmem_cache_alloc(c, GFP_KERNEL); c 286 lib/test_meminit.c kmem_cache_free(c, used_objects[i]); c 292 lib/test_meminit.c kmem_cache_destroy(c); c 302 lib/test_meminit.c struct kmem_cache *c; c 308 lib/test_meminit.c c = kmem_cache_create("test_cache", size, size, 0, NULL); c 310 lib/test_meminit.c num = kmem_cache_alloc_bulk(c, GFP_KERNEL, ARRAY_SIZE(objects), c 320 lib/test_meminit.c kmem_cache_free_bulk(c, num, objects); c 65 lib/test_ubsan.c char c = 4; c 68 lib/test_ubsan.c src = &c; c 61 lib/ucs2_string.c u16 c = src[i]; c 63 lib/ucs2_string.c if (c >= 0x800) c 65 lib/ucs2_string.c else if (c >= 0x80) c 90 lib/ucs2_string.c u16 c = src[i]; c 92 lib/ucs2_string.c if (c >= 0x800) { c 96 lib/ucs2_string.c dest[j++] = 0xe0 | (c & 0xf000) >> 12; c 97 lib/ucs2_string.c dest[j++] = 0x80 | (c & 0x0fc0) >> 6; c 98 lib/ucs2_string.c dest[j++] = 0x80 | (c & 0x003f); c 99 lib/ucs2_string.c } else if (c >= 0x80) { c 103 lib/ucs2_string.c dest[j++] = 0xc0 | (c & 0x7c0) >> 6; c 104 lib/ucs2_string.c dest[j++] = 0x80 | (c & 0x03f); c 107 lib/ucs2_string.c dest[j++] = c & 0x7f; c 506 lib/vsprintf.c char c = ' ' + (spec.flags & ZEROPAD); c 510 lib/vsprintf.c *buf = c; c 605 lib/vsprintf.c char c = *s++; c 606 lib/vsprintf.c if (!c) c 609 lib/vsprintf.c *buf = c; c 865 lib/vsprintf.c char c = *s++; c 866 lib/vsprintf.c if (!c) { c 869 lib/vsprintf.c c = '/'; c 873 lib/vsprintf.c *buf = c; c 2494 lib/vsprintf.c char c; c 2504 lib/vsprintf.c c = (unsigned char) va_arg(args, int); c 2506 lib/vsprintf.c *str = c; c 2970 lib/vsprintf.c char c; c 2979 lib/vsprintf.c c = (unsigned char) get_arg(char); c 2981 lib/vsprintf.c *str = c; c 142 lib/zlib_deflate/deflate.c #define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask) c 156 lib/zlib_deflate/deftree.c # define send_code(s, c, tree) send_bits(s, tree[c].Code, tree[c].Len) c 160 lib/zlib_deflate/deftree.c # define send_code(s, c, tree) \ c 161 lib/zlib_deflate/deftree.c { 
if (z_verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \ c 162 lib/zlib_deflate/deftree.c send_bits(s, tree[c].Code, tree[c].Len); } c 262 lib/zlib_deflate/defutil.h #define put_byte(s, c) {s->pending_buf[s->pending++] = (c);} c 58 lib/zstd/fse_compress.c #define FSE_STATIC_ASSERT(c) \ c 60 lib/zstd/fse_compress.c enum { FSE_static_assert = 1 / (int)(!!(c)) }; \ c 385 lib/zstd/fse_compress.c U32 c = cached; c 388 lib/zstd/fse_compress.c Counting1[(BYTE)c]++; c 389 lib/zstd/fse_compress.c Counting2[(BYTE)(c >> 8)]++; c 390 lib/zstd/fse_compress.c Counting3[(BYTE)(c >> 16)]++; c 391 lib/zstd/fse_compress.c Counting4[c >> 24]++; c 392 lib/zstd/fse_compress.c c = cached; c 395 lib/zstd/fse_compress.c Counting1[(BYTE)c]++; c 396 lib/zstd/fse_compress.c Counting2[(BYTE)(c >> 8)]++; c 397 lib/zstd/fse_compress.c Counting3[(BYTE)(c >> 16)]++; c 398 lib/zstd/fse_compress.c Counting4[c >> 24]++; c 399 lib/zstd/fse_compress.c c = cached; c 402 lib/zstd/fse_compress.c Counting1[(BYTE)c]++; c 403 lib/zstd/fse_compress.c Counting2[(BYTE)(c >> 8)]++; c 404 lib/zstd/fse_compress.c Counting3[(BYTE)(c >> 16)]++; c 405 lib/zstd/fse_compress.c Counting4[c >> 24]++; c 406 lib/zstd/fse_compress.c c = cached; c 409 lib/zstd/fse_compress.c Counting1[(BYTE)c]++; c 410 lib/zstd/fse_compress.c Counting2[(BYTE)(c >> 8)]++; c 411 lib/zstd/fse_compress.c Counting3[(BYTE)(c >> 16)]++; c 412 lib/zstd/fse_compress.c Counting4[c >> 24]++; c 58 lib/zstd/fse_decompress.c #define FSE_STATIC_ASSERT(c) \ c 60 lib/zstd/fse_decompress.c enum { FSE_static_assert = 1 / (int)(!!(c)) }; \ c 52 lib/zstd/huf_compress.c #define HUF_STATIC_ASSERT(c) \ c 54 lib/zstd/huf_compress.c enum { HUF_static_assert = 1 / (int)(!!(c)) }; \ c 404 lib/zstd/huf_compress.c U32 const c = count[n]; c 405 lib/zstd/huf_compress.c U32 const r = BIT_highbit32(c + 1) + 1; c 407 lib/zstd/huf_compress.c while ((pos > rank[r].base) && (c > huffNode[pos - 1].count)) c 409 lib/zstd/huf_compress.c huffNode[pos].count = c; c 58 lib/zstd/huf_decompress.c #define HUF_STATIC_ASSERT(c) \ c 60 lib/zstd/huf_decompress.c enum { HUF_static_assert = 1 / (int)(!!(c)) }; \ c 161 lib/zstd/huf_decompress.c BYTE const c = dt[val].byte; c 163 lib/zstd/huf_decompress.c return c; c 53 lib/zstd/zstd_internal.h #define ZSTD_STATIC_ASSERT(c) \ c 55 lib/zstd/zstd_internal.h enum { ZSTD_static_assert = 1 / (int)(!!(c)) }; \ c 103 mm/kasan/common.c void *memset(void *addr, int c, size_t len) c 107 mm/kasan/common.c return __memset(addr, c, len); c 16 mm/page_counter.c static void propagate_protected_usage(struct page_counter *c, c 22 mm/page_counter.c if (!c->parent) c 25 mm/page_counter.c if (c->min || atomic_long_read(&c->min_usage)) { c 26 mm/page_counter.c if (usage <= c->min) c 31 mm/page_counter.c old_protected = atomic_long_xchg(&c->min_usage, protected); c 34 mm/page_counter.c atomic_long_add(delta, &c->parent->children_min_usage); c 37 mm/page_counter.c if (c->low || atomic_long_read(&c->low_usage)) { c 38 mm/page_counter.c if (usage <= c->low) c 43 mm/page_counter.c old_protected = atomic_long_xchg(&c->low_usage, protected); c 46 mm/page_counter.c atomic_long_add(delta, &c->parent->children_low_usage); c 74 mm/page_counter.c struct page_counter *c; c 76 mm/page_counter.c for (c = counter; c; c = c->parent) { c 79 mm/page_counter.c new = atomic_long_add_return(nr_pages, &c->usage); c 85 mm/page_counter.c if (new > c->watermark) c 86 mm/page_counter.c c->watermark = new; c 103 mm/page_counter.c struct page_counter *c; c 105 mm/page_counter.c for (c = counter; c; c = c->parent) { c 121 
mm/page_counter.c new = atomic_long_add_return(nr_pages, &c->usage); c 122 mm/page_counter.c if (new > c->max) { c 123 mm/page_counter.c atomic_long_sub(nr_pages, &c->usage); c 129 mm/page_counter.c c->failcnt++; c 130 mm/page_counter.c *fail = c; c 138 mm/page_counter.c if (new > c->watermark) c 139 mm/page_counter.c c->watermark = new; c 144 mm/page_counter.c for (c = counter; c != *fail; c = c->parent) c 145 mm/page_counter.c page_counter_cancel(c, nr_pages); c 157 mm/page_counter.c struct page_counter *c; c 159 mm/page_counter.c for (c = counter; c; c = c->parent) c 160 mm/page_counter.c page_counter_cancel(c, nr_pages); c 214 mm/page_counter.c struct page_counter *c; c 218 mm/page_counter.c for (c = counter; c; c = c->parent) c 219 mm/page_counter.c propagate_protected_usage(c, atomic_long_read(&c->usage)); c 231 mm/page_counter.c struct page_counter *c; c 235 mm/page_counter.c for (c = counter; c; c = c->parent) c 236 mm/page_counter.c propagate_protected_usage(c, atomic_long_read(&c->usage)); c 3739 mm/slab.c struct kmem_cache *c; c 3748 mm/slab.c c = virt_to_cache(objp); c 3749 mm/slab.c if (!c) { c 3753 mm/slab.c debug_check_no_locks_freed(objp, c->object_size); c 3755 mm/slab.c debug_check_no_obj_freed(objp, c->object_size); c 3756 mm/slab.c __cache_free(c, (void *)objp, _RET_IP_); c 3848 mm/slab.c struct kmem_cache *c; c 3859 mm/slab.c for_each_memcg_cache(c, cachep) { c 3861 mm/slab.c __do_tune_cpucache(c, limit, batchcount, shared, gfp); c 4218 mm/slab.c struct kmem_cache *c; c 4225 mm/slab.c c = virt_to_cache(objp); c 4226 mm/slab.c size = c ? c->object_size : 0; c 674 mm/slab.h static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c) c 677 mm/slab.h if (c->ctor) c 679 mm/slab.h if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) c 686 mm/slab.h static inline bool slab_want_init_on_free(struct kmem_cache *c) c 689 mm/slab.h return !(c->ctor || c 690 mm/slab.h (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))); c 797 mm/slab_common.c struct kmem_cache *s, *c; c 809 mm/slab_common.c c = arr->entries[idx]; c 810 mm/slab_common.c if (!c) c 813 mm/slab_common.c kmemcg_cache_deactivate(c); c 837 mm/slab_common.c struct kmem_cache *c, *c2; c 850 mm/slab_common.c c = arr->entries[i]; c 851 mm/slab_common.c if (!c) c 853 mm/slab_common.c if (shutdown_cache(c)) c 859 mm/slab_common.c list_move(&c->memcg_params.children_node, &busy); c 874 mm/slab_common.c list_for_each_entry_safe(c, c2, &s->memcg_params.children, c 876 mm/slab_common.c shutdown_cache(c); c 1004 mm/slab_common.c struct kmem_cache *c; c 1021 mm/slab_common.c for_each_memcg_cache(c, s) { c 1027 mm/slab_common.c kasan_cache_shrink(c); c 1028 mm/slab_common.c __kmem_cache_shrink(c); c 1434 mm/slab_common.c struct kmem_cache *c; c 1440 mm/slab_common.c for_each_memcg_cache(c, s) { c 1442 mm/slab_common.c get_slabinfo(c, &sinfo); c 1598 mm/slab_common.c struct kmem_cache *s, *c; c 1617 mm/slab_common.c for_each_memcg_cache(c, s) { c 1621 mm/slab_common.c css = &c->memcg_params.memcg->css; c 1624 mm/slab_common.c else if (c->flags & SLAB_DEACTIVATED) c 1628 mm/slab_common.c get_slabinfo(c, &sinfo); c 1630 mm/slab_common.c cache_name(c), css->id, status, c 582 mm/slob.c int __kmem_cache_create(struct kmem_cache *c, slab_flags_t flags) c 586 mm/slob.c c->size += sizeof(struct slob_rcu); c 588 mm/slob.c c->flags = flags; c 592 mm/slob.c static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node) c 601 mm/slob.c if (c->size < PAGE_SIZE) { c 602 mm/slob.c b = slob_alloc(c->size, flags, c->align, 
node, 0); c 603 mm/slob.c trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size, c 604 mm/slob.c SLOB_UNITS(c->size) * SLOB_UNIT, c 607 mm/slob.c b = slob_new_pages(flags, get_order(c->size), node); c 608 mm/slob.c trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size, c 609 mm/slob.c PAGE_SIZE << get_order(c->size), c 613 mm/slob.c if (b && c->ctor) { c 615 mm/slob.c c->ctor(b); c 618 mm/slob.c kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags); c 658 mm/slob.c void kmem_cache_free(struct kmem_cache *c, void *b) c 660 mm/slob.c kmemleak_free_recursive(b, c->flags); c 661 mm/slob.c if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) { c 663 mm/slob.c slob_rcu = b + (c->size - sizeof(struct slob_rcu)); c 664 mm/slob.c slob_rcu->size = c->size; c 667 mm/slob.c __kmem_cache_free(b, c->size); c 687 mm/slob.c int __kmem_cache_shutdown(struct kmem_cache *c) c 693 mm/slob.c void __kmem_cache_release(struct kmem_cache *c) c 1836 mm/slub.c struct kmem_cache_cpu *c, gfp_t flags) c 1865 mm/slub.c c->page = page; c 1885 mm/slub.c struct kmem_cache_cpu *c) c 1927 mm/slub.c object = get_partial_node(s, n, c, flags); c 1949 mm/slub.c struct kmem_cache_cpu *c) c 1957 mm/slub.c object = get_partial_node(s, get_node(s, searchnode), c, flags); c 1961 mm/slub.c return get_any_partial(s, flags, c); c 2037 mm/slub.c void *freelist, struct kmem_cache_cpu *c) c 2170 mm/slub.c c->page = NULL; c 2171 mm/slub.c c->freelist = NULL; c 2182 mm/slub.c struct kmem_cache_cpu *c) c 2188 mm/slub.c while ((page = c->partial)) { c 2192 mm/slub.c c->partial = page->next; c 2301 mm/slub.c static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) c 2304 mm/slub.c deactivate_slab(s, c->page, c->freelist, c); c 2306 mm/slub.c c->tid = next_tid(c->tid); c 2316 mm/slub.c struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); c 2318 mm/slub.c if (c->page) c 2319 mm/slub.c flush_slab(s, c); c 2321 mm/slub.c unfreeze_partials(s, c); c 2334 mm/slub.c struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); c 2336 mm/slub.c return c->page || slub_percpu_partial(c); c 2445 mm/slub.c struct kmem_cache_cpu *c = *pc; c 2450 mm/slub.c freelist = get_partial(s, flags, node, c); c 2457 mm/slub.c c = raw_cpu_ptr(s->cpu_slab); c 2458 mm/slub.c if (c->page) c 2459 mm/slub.c flush_slab(s, c); c 2469 mm/slub.c c->page = page; c 2470 mm/slub.c *pc = c; c 2538 mm/slub.c unsigned long addr, struct kmem_cache_cpu *c) c 2543 mm/slub.c page = c->page; c 2566 mm/slub.c deactivate_slab(s, page, c->freelist, c); c 2577 mm/slub.c deactivate_slab(s, page, c->freelist, c); c 2582 mm/slub.c freelist = c->freelist; c 2589 mm/slub.c c->page = NULL; c 2602 mm/slub.c VM_BUG_ON(!c->page->frozen); c 2603 mm/slub.c c->freelist = get_freepointer(s, freelist); c 2604 mm/slub.c c->tid = next_tid(c->tid); c 2609 mm/slub.c if (slub_percpu_partial(c)) { c 2610 mm/slub.c page = c->page = slub_percpu_partial(c); c 2611 mm/slub.c slub_set_percpu_partial(c, page); c 2616 mm/slub.c freelist = new_slab_objects(s, gfpflags, node, &c); c 2623 mm/slub.c page = c->page; c 2632 mm/slub.c deactivate_slab(s, page, get_freepointer(s, freelist), c); c 2641 mm/slub.c unsigned long addr, struct kmem_cache_cpu *c) c 2653 mm/slub.c c = this_cpu_ptr(s->cpu_slab); c 2656 mm/slub.c p = ___slab_alloc(s, gfpflags, node, addr, c); c 2686 mm/slub.c struct kmem_cache_cpu *c; c 2706 mm/slub.c c = raw_cpu_ptr(s->cpu_slab); c 2708 mm/slub.c unlikely(tid != READ_ONCE(c->tid))); c 2727 mm/slub.c object = c->freelist; c 2728 mm/slub.c page = c->page; c 2730 mm/slub.c object = 
__slab_alloc(s, gfpflags, node, addr, c); c 2969 mm/slub.c struct kmem_cache_cpu *c; c 2980 mm/slub.c c = raw_cpu_ptr(s->cpu_slab); c 2982 mm/slub.c unlikely(tid != READ_ONCE(c->tid))); c 2987 mm/slub.c if (likely(page == c->page)) { c 2988 mm/slub.c void **freelist = READ_ONCE(c->freelist); c 3148 mm/slub.c struct kmem_cache_cpu *c; c 3161 mm/slub.c c = this_cpu_ptr(s->cpu_slab); c 3164 mm/slub.c void *object = c->freelist; c 3174 mm/slub.c c->tid = next_tid(c->tid); c 3181 mm/slub.c _RET_IP_, c); c 3185 mm/slub.c c = this_cpu_ptr(s->cpu_slab); c 3190 mm/slub.c c->freelist = get_freepointer(s, object); c 3194 mm/slub.c c->tid = next_tid(c->tid); c 4293 mm/slub.c struct kmem_cache *s, *c; c 4306 mm/slub.c for_each_memcg_cache(c, s) { c 4307 mm/slub.c c->object_size = s->object_size; c 4308 mm/slub.c c->inuse = max(c->inuse, ALIGN(size, sizeof(void *))); c 4822 mm/slub.c struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, c 4827 mm/slub.c page = READ_ONCE(c->page); c 4842 mm/slub.c page = slub_percpu_partial_read_once(c); c 5554 mm/slub.c struct kmem_cache *c; c 5577 mm/slub.c for_each_memcg_cache(c, s) c 5578 mm/slub.c attribute->store(c, buf, len); c 280 mm/swapfile.c unsigned int c) c 282 mm/swapfile.c info->data = c; c 286 mm/swapfile.c unsigned int c, unsigned int f) c 289 mm/swapfile.c info->data = c; c 225 net/9p/client.c static int p9_fcall_init(struct p9_client *c, struct p9_fcall *fc, c 228 net/9p/client.c if (likely(c->fcall_cache) && alloc_msize == c->msize) { c 229 net/9p/client.c fc->sdata = kmem_cache_alloc(c->fcall_cache, GFP_NOFS); c 230 net/9p/client.c fc->cache = c->fcall_cache; c 268 net/9p/client.c p9_tag_alloc(struct p9_client *c, int8_t type, unsigned int max_size) c 271 net/9p/client.c int alloc_msize = min(c->msize, max_size); c 277 net/9p/client.c if (p9_fcall_init(c, &req->tc, alloc_msize)) c 279 net/9p/client.c if (p9_fcall_init(c, &req->rc, alloc_msize)) c 290 net/9p/client.c spin_lock_irq(&c->lock); c 292 net/9p/client.c tag = idr_alloc(&c->reqs, req, P9_NOTAG, P9_NOTAG + 1, c 295 net/9p/client.c tag = idr_alloc(&c->reqs, req, 0, P9_NOTAG, GFP_NOWAIT); c 297 net/9p/client.c spin_unlock_irq(&c->lock); c 332 net/9p/client.c struct p9_req_t *p9_tag_lookup(struct p9_client *c, u16 tag) c 338 net/9p/client.c req = idr_find(&c->reqs, tag); c 365 net/9p/client.c static int p9_tag_remove(struct p9_client *c, struct p9_req_t *r) c 370 net/9p/client.c p9_debug(P9_DEBUG_MUX, "clnt %p req %p tag: %d\n", c, r, tag); c 371 net/9p/client.c spin_lock_irqsave(&c->lock, flags); c 372 net/9p/client.c idr_remove(&c->reqs, tag); c 373 net/9p/client.c spin_unlock_irqrestore(&c->lock, flags); c 398 net/9p/client.c static void p9_tag_cleanup(struct p9_client *c) c 404 net/9p/client.c idr_for_each_entry(&c->reqs, req, id) { c 406 net/9p/client.c if (p9_tag_remove(c, req) == 0) c 419 net/9p/client.c void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status) c 497 net/9p/client.c static int p9_check_errors(struct p9_client *c, struct p9_req_t *req) c 504 net/9p/client.c if (req->rc.size >= c->msize) { c 514 net/9p/client.c trace_9p_protocol_dump(c, &req->rc); c 522 net/9p/client.c if (!p9_is_proto_dotl(c)) { c 524 net/9p/client.c err = p9pdu_readf(&req->rc, c->proto_version, "s?d", c 529 net/9p/client.c if (p9_is_proto_dotu(c) && ecode < 512) c 540 net/9p/client.c err = p9pdu_readf(&req->rc, c->proto_version, "d", &ecode); c 566 net/9p/client.c static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req, c 579 net/9p/client.c trace_9p_protocol_dump(c, &req->rc); 
c 588 net/9p/client.c if (!p9_is_proto_dotl(c)) { c 610 net/9p/client.c err = p9pdu_readf(&req->rc, c->proto_version, "s?d", c 615 net/9p/client.c if (p9_is_proto_dotu(c) && ecode < 512) c 626 net/9p/client.c err = p9pdu_readf(&req->rc, c->proto_version, "d", &ecode); c 639 net/9p/client.c p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...); c 653 net/9p/client.c static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq) c 665 net/9p/client.c req = p9_client_rpc(c, P9_TFLUSH, "w", oldtag); c 674 net/9p/client.c if (c->trans_mod->cancelled) c 675 net/9p/client.c c->trans_mod->cancelled(c, oldreq); c 678 net/9p/client.c p9_tag_remove(c, req); c 682 net/9p/client.c static struct p9_req_t *p9_client_prepare_req(struct p9_client *c, c 689 net/9p/client.c p9_debug(P9_DEBUG_MUX, "client %p op %d\n", c, type); c 692 net/9p/client.c if (c->status == Disconnected) c 696 net/9p/client.c if ((c->status == BeginDisconnect) && (type != P9_TCLUNK)) c 699 net/9p/client.c req = p9_tag_alloc(c, type, req_size); c 705 net/9p/client.c err = p9pdu_vwritef(&req->tc, c->proto_version, fmt, ap); c 708 net/9p/client.c p9pdu_finalize(c, &req->tc); c 709 net/9p/client.c trace_9p_client_req(c, type, req->tc.tag); c 712 net/9p/client.c p9_tag_remove(c, req); c 728 net/9p/client.c p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...) c 736 net/9p/client.c req = p9_client_prepare_req(c, type, c->msize, fmt, ap); c 747 net/9p/client.c err = c->trans_mod->request(c, req); c 752 net/9p/client.c c->status = Disconnected; c 765 net/9p/client.c if ((err == -ERESTARTSYS) && (c->status == Connected) c 776 net/9p/client.c if ((err == -ERESTARTSYS) && (c->status == Connected)) { c 781 net/9p/client.c if (c->trans_mod->cancel(c, req)) c 782 net/9p/client.c p9_client_flush(c, req); c 797 net/9p/client.c err = p9_check_errors(c, req); c 798 net/9p/client.c trace_9p_client_res(c, type, req->rc.tag, err); c 802 net/9p/client.c p9_tag_remove(c, req); c 819 net/9p/client.c static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type, c 835 net/9p/client.c req = p9_client_prepare_req(c, type, P9_ZC_HDR_SZ, fmt, ap); c 846 net/9p/client.c err = c->trans_mod->zc_request(c, req, uidata, uodata, c 850 net/9p/client.c c->status = Disconnected; c 858 net/9p/client.c if ((err == -ERESTARTSYS) && (c->status == Connected)) { c 863 net/9p/client.c if (c->trans_mod->cancel(c, req)) c 864 net/9p/client.c p9_client_flush(c, req); c 879 net/9p/client.c err = p9_check_zc_errors(c, req, uidata, in_hdrlen); c 880 net/9p/client.c trace_9p_client_res(c, type, req->rc.tag, err); c 884 net/9p/client.c p9_tag_remove(c, req); c 933 net/9p/client.c static int p9_client_version(struct p9_client *c) c 941 net/9p/client.c c->msize, c->proto_version); c 943 net/9p/client.c switch (c->proto_version) { c 945 net/9p/client.c req = p9_client_rpc(c, P9_TVERSION, "ds", c 946 net/9p/client.c c->msize, "9P2000.L"); c 949 net/9p/client.c req = p9_client_rpc(c, P9_TVERSION, "ds", c 950 net/9p/client.c c->msize, "9P2000.u"); c 953 net/9p/client.c req = p9_client_rpc(c, P9_TVERSION, "ds", c 954 net/9p/client.c c->msize, "9P2000"); c 963 net/9p/client.c err = p9pdu_readf(&req->rc, c->proto_version, "ds", &msize, &version); c 966 net/9p/client.c trace_9p_protocol_dump(c, &req->rc); c 972 net/9p/client.c c->proto_version = p9_proto_2000L; c 974 net/9p/client.c c->proto_version = p9_proto_2000u; c 976 net/9p/client.c c->proto_version = p9_proto_legacy; c 990 net/9p/client.c if (msize < c->msize) c 991 net/9p/client.c 
c->msize = msize; c 995 net/9p/client.c p9_tag_remove(c, req); c 180 net/9p/error.c struct errormap *c; c 188 net/9p/error.c for (c = errmap; c->name != NULL; c++) { c 189 net/9p/error.c c->namelen = strlen(c->name); c 190 net/9p/error.c bucket = jhash(c->name, c->namelen, 0) % ERRHASHSZ; c 191 net/9p/error.c INIT_HLIST_NODE(&c->list); c 192 net/9p/error.c hlist_add_head(&c->list, &hash_errmap[bucket]); c 209 net/9p/error.c struct errormap *c; c 213 net/9p/error.c c = NULL; c 215 net/9p/error.c hlist_for_each_entry(c, &hash_errmap[bucket], list) { c 216 net/9p/error.c if (c->namelen == len && !memcmp(c->name, errstr, len)) { c 217 net/9p/error.c errno = c->val; c 241 net/9p/trans_rdma.c struct p9_client *c = id->context; c 242 net/9p/trans_rdma.c struct p9_trans_rdma *rdma = c->trans; c 262 net/9p/trans_rdma.c c->status = Disconnected; c 279 net/9p/trans_rdma.c c->status = Disconnected; c 294 net/9p/trans_rdma.c struct p9_rdma_context *c = c 301 net/9p/trans_rdma.c ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize, c 307 net/9p/trans_rdma.c c->rc.size = wc->byte_len; c 308 net/9p/trans_rdma.c err = p9_parse_header(&c->rc, NULL, NULL, &tag, 1); c 323 net/9p/trans_rdma.c req->rc.size = c->rc.size; c 324 net/9p/trans_rdma.c req->rc.sdata = c->rc.sdata; c 329 net/9p/trans_rdma.c kfree(c); c 345 net/9p/trans_rdma.c struct p9_rdma_context *c = c 349 net/9p/trans_rdma.c c->busa, c->req->tc.size, c 352 net/9p/trans_rdma.c p9_req_put(c->req); c 353 net/9p/trans_rdma.c kfree(c); c 383 net/9p/trans_rdma.c post_recv(struct p9_client *client, struct p9_rdma_context *c) c 389 net/9p/trans_rdma.c c->busa = ib_dma_map_single(rdma->cm_id->device, c 390 net/9p/trans_rdma.c c->rc.sdata, client->msize, c 392 net/9p/trans_rdma.c if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) c 395 net/9p/trans_rdma.c c->cqe.done = recv_done; c 397 net/9p/trans_rdma.c sge.addr = c->busa; c 402 net/9p/trans_rdma.c wr.wr_cqe = &c->cqe; c 419 net/9p/trans_rdma.c struct p9_rdma_context *c = NULL; c 472 net/9p/trans_rdma.c c = kmalloc(sizeof *c, GFP_NOFS); c 473 net/9p/trans_rdma.c if (!c) { c 477 net/9p/trans_rdma.c c->req = req; c 479 net/9p/trans_rdma.c c->busa = ib_dma_map_single(rdma->cm_id->device, c 480 net/9p/trans_rdma.c c->req->tc.sdata, c->req->tc.size, c 482 net/9p/trans_rdma.c if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) { c 487 net/9p/trans_rdma.c c->cqe.done = send_done; c 489 net/9p/trans_rdma.c sge.addr = c->busa; c 490 net/9p/trans_rdma.c sge.length = c->req->tc.size; c 494 net/9p/trans_rdma.c wr.wr_cqe = &c->cqe; c 520 net/9p/trans_rdma.c kfree(c); c 341 net/atm/common.c static int c; c 356 net/atm/common.c c = *vci; c 357 net/atm/common.c else if (c < ATM_NOT_RSV_VCI || c >= 1 << vcc->dev->ci_range.vci_bits) c 358 net/atm/common.c c = ATM_NOT_RSV_VCI; c 360 net/atm/common.c old_c = c; c 362 net/atm/common.c if (!check_ci(vcc, p, c)) { c 364 net/atm/common.c *vci = c; c 368 net/atm/common.c c++; c 369 net/atm/common.c if (c >= 1 << vcc->dev->ci_range.vci_bits) c 370 net/atm/common.c c = ATM_NOT_RSV_VCI; c 372 net/atm/common.c if ((c == ATM_NOT_RSV_VCI || *vci != ATM_VCI_ANY) && c 378 net/atm/common.c } while (old_p != p || old_c != c); c 48 net/ax25/ax25_addr.c char c, *s; c 52 net/ax25/ax25_addr.c c = (a->ax25_call[n] >> 1) & 0x7F; c 54 net/ax25/ax25_addr.c if (c != ' ') *s++ = c; c 47 net/batman-adv/log.c char c) c 52 net/batman-adv/log.c *char_addr = c; c 116 net/batman-adv/log.c char c; c 142 net/batman-adv/log.c c = *char_addr; c 148 net/batman-adv/log.c error = __put_user(c, 
buf); c 142 net/bluetooth/hci_conn.c struct hci_conn *c = NULL; c 150 net/bluetooth/hci_conn.c list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) { c 151 net/bluetooth/hci_conn.c if (c == conn) c 156 net/bluetooth/hci_conn.c if (c == conn) { c 1491 net/bluetooth/hci_conn.c struct hci_conn *c, *n; c 1495 net/bluetooth/hci_conn.c list_for_each_entry_safe(c, n, &h->list, list) { c 1496 net/bluetooth/hci_conn.c c->state = BT_CLOSED; c 1498 net/bluetooth/hci_conn.c hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM); c 1499 net/bluetooth/hci_conn.c hci_conn_del(c); c 1543 net/bluetooth/hci_conn.c struct hci_conn *c; c 1570 net/bluetooth/hci_conn.c list_for_each_entry(c, &hdev->conn_hash.list, list) { c 1571 net/bluetooth/hci_conn.c bacpy(&(ci + n)->bdaddr, &c->dst); c 1572 net/bluetooth/hci_conn.c (ci + n)->handle = c->handle; c 1573 net/bluetooth/hci_conn.c (ci + n)->type = c->type; c 1574 net/bluetooth/hci_conn.c (ci + n)->out = c->out; c 1575 net/bluetooth/hci_conn.c (ci + n)->state = c->state; c 1576 net/bluetooth/hci_conn.c (ci + n)->link_mode = get_link_mode(c); c 3821 net/bluetooth/hci_core.c struct hci_conn *conn = NULL, *c; c 3829 net/bluetooth/hci_core.c list_for_each_entry_rcu(c, &h->list, list) { c 3830 net/bluetooth/hci_core.c if (c->type != type || skb_queue_empty(&c->data_q)) c 3833 net/bluetooth/hci_core.c if (c->state != BT_CONNECTED && c->state != BT_CONFIG) c 3838 net/bluetooth/hci_core.c if (c->sent < min) { c 3839 net/bluetooth/hci_core.c min = c->sent; c 3840 net/bluetooth/hci_core.c conn = c; c 3880 net/bluetooth/hci_core.c struct hci_conn *c; c 3887 net/bluetooth/hci_core.c list_for_each_entry_rcu(c, &h->list, list) { c 3888 net/bluetooth/hci_core.c if (c->type == type && c->sent) { c 3890 net/bluetooth/hci_core.c &c->dst); c 3891 net/bluetooth/hci_core.c hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM); c 775 net/bluetooth/hci_sock.c struct hci_mgmt_chan *c; c 777 net/bluetooth/hci_sock.c list_for_each_entry(c, &mgmt_chan_list, list) { c 778 net/bluetooth/hci_sock.c if (c->channel == channel) c 779 net/bluetooth/hci_sock.c return c; c 787 net/bluetooth/hci_sock.c struct hci_mgmt_chan *c; c 790 net/bluetooth/hci_sock.c c = __hci_mgmt_chan_find(channel); c 793 net/bluetooth/hci_sock.c return c; c 796 net/bluetooth/hci_sock.c int hci_mgmt_chan_register(struct hci_mgmt_chan *c) c 798 net/bluetooth/hci_sock.c if (c->channel < HCI_CHANNEL_CONTROL) c 802 net/bluetooth/hci_sock.c if (__hci_mgmt_chan_find(c->channel)) { c 807 net/bluetooth/hci_sock.c list_add_tail(&c->list, &mgmt_chan_list); c 815 net/bluetooth/hci_sock.c void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c) c 818 net/bluetooth/hci_sock.c list_del(&c->list); c 91 net/bluetooth/l2cap_core.c struct l2cap_chan *c; c 93 net/bluetooth/l2cap_core.c list_for_each_entry(c, &conn->chan_l, list) { c 94 net/bluetooth/l2cap_core.c if (c->dcid == cid) c 95 net/bluetooth/l2cap_core.c return c; c 103 net/bluetooth/l2cap_core.c struct l2cap_chan *c; c 105 net/bluetooth/l2cap_core.c list_for_each_entry(c, &conn->chan_l, list) { c 106 net/bluetooth/l2cap_core.c if (c->scid == cid) c 107 net/bluetooth/l2cap_core.c return c; c 117 net/bluetooth/l2cap_core.c struct l2cap_chan *c; c 120 net/bluetooth/l2cap_core.c c = __l2cap_get_chan_by_scid(conn, cid); c 121 net/bluetooth/l2cap_core.c if (c) c 122 net/bluetooth/l2cap_core.c l2cap_chan_lock(c); c 125 net/bluetooth/l2cap_core.c return c; c 134 net/bluetooth/l2cap_core.c struct l2cap_chan *c; c 137 net/bluetooth/l2cap_core.c c = __l2cap_get_chan_by_dcid(conn, cid); c 138 
net/bluetooth/l2cap_core.c if (c) c 139 net/bluetooth/l2cap_core.c l2cap_chan_lock(c); c 142 net/bluetooth/l2cap_core.c return c; c 148 net/bluetooth/l2cap_core.c struct l2cap_chan *c; c 150 net/bluetooth/l2cap_core.c list_for_each_entry(c, &conn->chan_l, list) { c 151 net/bluetooth/l2cap_core.c if (c->ident == ident) c 152 net/bluetooth/l2cap_core.c return c; c 160 net/bluetooth/l2cap_core.c struct l2cap_chan *c; c 163 net/bluetooth/l2cap_core.c c = __l2cap_get_chan_by_ident(conn, ident); c 164 net/bluetooth/l2cap_core.c if (c) c 165 net/bluetooth/l2cap_core.c l2cap_chan_lock(c); c 168 net/bluetooth/l2cap_core.c return c; c 174 net/bluetooth/l2cap_core.c struct l2cap_chan *c; c 176 net/bluetooth/l2cap_core.c list_for_each_entry(c, &chan_list, global_l) { c 177 net/bluetooth/l2cap_core.c if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR) c 180 net/bluetooth/l2cap_core.c if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR) c 183 net/bluetooth/l2cap_core.c if (c->sport == psm && !bacmp(&c->src, src)) c 184 net/bluetooth/l2cap_core.c return c; c 487 net/bluetooth/l2cap_core.c void l2cap_chan_hold(struct l2cap_chan *c) c 489 net/bluetooth/l2cap_core.c BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref)); c 491 net/bluetooth/l2cap_core.c kref_get(&c->kref); c 494 net/bluetooth/l2cap_core.c void l2cap_chan_put(struct l2cap_chan *c) c 496 net/bluetooth/l2cap_core.c BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref)); c 498 net/bluetooth/l2cap_core.c kref_put(&c->kref, l2cap_chan_destroy); c 1786 net/bluetooth/l2cap_core.c struct l2cap_chan *c, *c1 = NULL; c 1790 net/bluetooth/l2cap_core.c list_for_each_entry(c, &chan_list, global_l) { c 1791 net/bluetooth/l2cap_core.c if (state && c->state != state) c 1794 net/bluetooth/l2cap_core.c if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR) c 1797 net/bluetooth/l2cap_core.c if (link_type == LE_LINK && c->src_type == BDADDR_BREDR) c 1800 net/bluetooth/l2cap_core.c if (c->psm == psm) { c 1805 net/bluetooth/l2cap_core.c src_match = !bacmp(&c->src, src); c 1806 net/bluetooth/l2cap_core.c dst_match = !bacmp(&c->dst, dst); c 1808 net/bluetooth/l2cap_core.c l2cap_chan_hold(c); c 1810 net/bluetooth/l2cap_core.c return c; c 1814 net/bluetooth/l2cap_core.c src_any = !bacmp(&c->src, BDADDR_ANY); c 1815 net/bluetooth/l2cap_core.c dst_any = !bacmp(&c->dst, BDADDR_ANY); c 1818 net/bluetooth/l2cap_core.c c1 = c; c 7327 net/bluetooth/l2cap_core.c struct l2cap_chan *c; c 7333 net/bluetooth/l2cap_core.c list_for_each_entry(c, &chan_list, global_l) { c 7334 net/bluetooth/l2cap_core.c if (c->state != BT_LISTEN) c 7337 net/bluetooth/l2cap_core.c if (!bacmp(&c->src, &hdev->bdaddr)) { c 7339 net/bluetooth/l2cap_core.c if (test_bit(FLAG_ROLE_SWITCH, &c->flags)) c 7342 net/bluetooth/l2cap_core.c } else if (!bacmp(&c->src, BDADDR_ANY)) { c 7344 net/bluetooth/l2cap_core.c if (test_bit(FLAG_ROLE_SWITCH, &c->flags)) c 7357 net/bluetooth/l2cap_core.c static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c, c 7364 net/bluetooth/l2cap_core.c if (c) c 7365 net/bluetooth/l2cap_core.c c = list_next_entry(c, global_l); c 7367 net/bluetooth/l2cap_core.c c = list_entry(chan_list.next, typeof(*c), global_l); c 7369 net/bluetooth/l2cap_core.c list_for_each_entry_from(c, &chan_list, global_l) { c 7370 net/bluetooth/l2cap_core.c if (c->chan_type != L2CAP_CHAN_FIXED) c 7372 net/bluetooth/l2cap_core.c if (c->state != BT_LISTEN) c 7374 net/bluetooth/l2cap_core.c if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY)) c 7376 
net/bluetooth/l2cap_core.c if (src_type != c->src_type) c 7379 net/bluetooth/l2cap_core.c l2cap_chan_hold(c); c 7381 net/bluetooth/l2cap_core.c return c; c 7692 net/bluetooth/l2cap_core.c struct l2cap_chan *c; c 7696 net/bluetooth/l2cap_core.c list_for_each_entry(c, &chan_list, global_l) { c 7698 net/bluetooth/l2cap_core.c &c->src, c->src_type, &c->dst, c->dst_type, c 7699 net/bluetooth/l2cap_core.c c->state, __le16_to_cpu(c->psm), c 7700 net/bluetooth/l2cap_core.c c->scid, c->dcid, c->imtu, c->omtu, c 7701 net/bluetooth/l2cap_core.c c->sec_level, c->mode); c 2590 net/bluetooth/mgmt.c struct hci_conn *c; c 2605 net/bluetooth/mgmt.c list_for_each_entry(c, &hdev->conn_hash.list, list) { c 2606 net/bluetooth/mgmt.c if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags)) c 2617 net/bluetooth/mgmt.c list_for_each_entry(c, &hdev->conn_hash.list, list) { c 2618 net/bluetooth/mgmt.c if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags)) c 2620 net/bluetooth/mgmt.c bacpy(&rp->addr[i].bdaddr, &c->dst); c 2621 net/bluetooth/mgmt.c rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type); c 2622 net/bluetooth/mgmt.c if (c->type == SCO_LINK || c->type == ESCO_LINK) c 54 net/bluetooth/sco.c #define sco_conn_lock(c) spin_lock(&c->lock); c 55 net/bluetooth/sco.c #define sco_conn_unlock(c) spin_unlock(&c->lock); c 47 net/bridge/netfilter/ebt_stp.c const struct ebt_stp_config_info *c; c 51 net/bridge/netfilter/ebt_stp.c c = &info->config; c 53 net/bridge/netfilter/ebt_stp.c NF_INVF(info, EBT_STP_FLAGS, c->flags != stpc->flags)) c 58 net/bridge/netfilter/ebt_stp.c v16 < c->root_priol || v16 > c->root_priou)) c 64 net/bridge/netfilter/ebt_stp.c c->root_addr, c 65 net/bridge/netfilter/ebt_stp.c c->root_addrmsk))) c 71 net/bridge/netfilter/ebt_stp.c v32 < c->root_costl || v32 > c->root_costu)) c 77 net/bridge/netfilter/ebt_stp.c v16 < c->sender_priol || v16 > c->sender_priou)) c 83 net/bridge/netfilter/ebt_stp.c c->sender_addr, c 84 net/bridge/netfilter/ebt_stp.c c->sender_addrmsk))) c 90 net/bridge/netfilter/ebt_stp.c v16 < c->portl || v16 > c->portu)) c 96 net/bridge/netfilter/ebt_stp.c v16 < c->msg_agel || v16 > c->msg_ageu)) c 102 net/bridge/netfilter/ebt_stp.c v16 < c->max_agel || v16 > c->max_ageu)) c 108 net/bridge/netfilter/ebt_stp.c v16 < c->hello_timel || v16 > c->hello_timeu)) c 114 net/bridge/netfilter/ebt_stp.c v16 < c->forward_delayl || v16 > c->forward_delayu)) c 39 net/bridge/netfilter/ebtables.c #define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \ c 905 net/can/gw.c struct cgw_csum_crc8 *c = nla_data(tb[CGW_CS_CRC8]); c 907 net/can/gw.c err = cgw_chk_csum_parms(c->from_idx, c->to_idx, c 908 net/can/gw.c c->result_idx, r); c 918 net/can/gw.c if (c->from_idx < 0 || c->to_idx < 0 || c 919 net/can/gw.c c->result_idx < 0) c 921 net/can/gw.c else if (c->from_idx <= c->to_idx) c 928 net/can/gw.c struct cgw_csum_xor *c = nla_data(tb[CGW_CS_XOR]); c 930 net/can/gw.c err = cgw_chk_csum_parms(c->from_idx, c->to_idx, c 931 net/can/gw.c c->result_idx, r); c 941 net/can/gw.c if (c->from_idx < 0 || c->to_idx < 0 || c 942 net/can/gw.c c->result_idx < 0) c 944 net/can/gw.c else if (c->from_idx <= c->to_idx) c 663 net/can/j1939/socket.c int c; c 677 net/can/j1939/socket.c for (f = filters, c = count; c; f++, c--) { c 15 net/ceph/armor.c static int encode_bits(int c) c 17 net/ceph/armor.c return pem_key[c]; c 20 net/ceph/armor.c static int decode_bits(char c) c 22 net/ceph/armor.c if (c >= 'A' && c <= 'Z') c 23 net/ceph/armor.c return c - 'A'; c 24 net/ceph/armor.c if (c >= 'a' && c <= 'z') c 25 
net/ceph/armor.c return c - 'a' + 26; c 26 net/ceph/armor.c if (c >= '0' && c <= '9') c 27 net/ceph/armor.c return c - '0' + 52; c 28 net/ceph/armor.c if (c == '+') c 30 net/ceph/armor.c if (c == '/') c 32 net/ceph/armor.c if (c == '=') c 43 net/ceph/armor.c unsigned char a, b, c; c 51 net/ceph/armor.c c = *src++; c 53 net/ceph/armor.c (c >> 6)); c 54 net/ceph/armor.c *dst++ = encode_bits(c & 63); c 80 net/ceph/armor.c int a, b, c, d; c 90 net/ceph/armor.c c = decode_bits(src[2]); c 92 net/ceph/armor.c if (a < 0 || b < 0 || c < 0 || d < 0) c 98 net/ceph/armor.c *dst++ = ((b & 15) << 4) | (c >> 2); c 101 net/ceph/armor.c *dst++ = ((c & 3) << 6) | d; c 844 net/ceph/auth_x.c __le64 a, b, c, d; c 864 net/ceph/auth_x.c *psig = penc->a ^ penc->b ^ penc->c ^ penc->d; c 367 net/ceph/ceph_common.c int (*parse_extra_token)(char *c, void *private), c 371 net/ceph/ceph_common.c const char *c; c 401 net/ceph/ceph_common.c while ((c = strsep(&options, ",")) != NULL) { c 403 net/ceph/ceph_common.c if (!*c) c 406 net/ceph/ceph_common.c token = match_token((char *)c, opt_tokens, argstr); c 409 net/ceph/ceph_common.c err = parse_extra_token((char *)c, private); c 411 net/ceph/ceph_common.c pr_err("bad option at '%s'\n", c); c 419 net/ceph/ceph_common.c pr_err("bad option arg (not int) at '%s'\n", c); c 10 net/ceph/ceph_hash.c #define mix(a, b, c) \ c 12 net/ceph/ceph_hash.c a = a - b; a = a - c; a = a ^ (c >> 13); \ c 13 net/ceph/ceph_hash.c b = b - c; b = b - a; b = b ^ (a << 8); \ c 14 net/ceph/ceph_hash.c c = c - a; c = c - b; c = c ^ (b >> 13); \ c 15 net/ceph/ceph_hash.c a = a - b; a = a - c; a = a ^ (c >> 12); \ c 16 net/ceph/ceph_hash.c b = b - c; b = b - a; b = b ^ (a << 16); \ c 17 net/ceph/ceph_hash.c c = c - a; c = c - b; c = c ^ (b >> 5); \ c 18 net/ceph/ceph_hash.c a = a - b; a = a - c; a = a ^ (c >> 3); \ c 19 net/ceph/ceph_hash.c b = b - c; b = b - a; b = b ^ (a << 10); \ c 20 net/ceph/ceph_hash.c c = c - a; c = c - b; c = c ^ (b >> 15); \ c 26 net/ceph/ceph_hash.c __u32 a, b, c; /* the internal state */ c 33 net/ceph/ceph_hash.c c = 0; /* variable initialization of internal state */ c 41 net/ceph/ceph_hash.c c = c + (k[8] + ((__u32)k[9] << 8) + ((__u32)k[10] << 16) + c 43 net/ceph/ceph_hash.c mix(a, b, c); c 49 net/ceph/ceph_hash.c c = c + length; c 52 net/ceph/ceph_hash.c c = c + ((__u32)k[10] << 24); c 55 net/ceph/ceph_hash.c c = c + ((__u32)k[9] << 16); c 58 net/ceph/ceph_hash.c c = c + ((__u32)k[8] << 8); c 86 net/ceph/ceph_hash.c mix(a, b, c); c 88 net/ceph/ceph_hash.c return c; c 97 net/ceph/ceph_hash.c unsigned char c; c 100 net/ceph/ceph_hash.c c = *str++; c 101 net/ceph/ceph_hash.c hash = (hash + (c << 4) + (c >> 4)) * 11; c 5 net/ceph/crush/crush.c void clear_choose_args(struct crush_map *c); c 13 net/ceph/crush/hash.c #define crush_hashmix(a, b, c) do { \ c 14 net/ceph/crush/hash.c a = a-b; a = a-c; a = a^(c>>13); \ c 15 net/ceph/crush/hash.c b = b-c; b = b-a; b = b^(a<<8); \ c 16 net/ceph/crush/hash.c c = c-a; c = c-b; c = c^(b>>13); \ c 17 net/ceph/crush/hash.c a = a-b; a = a-c; a = a^(c>>12); \ c 18 net/ceph/crush/hash.c b = b-c; b = b-a; b = b^(a<<16); \ c 19 net/ceph/crush/hash.c c = c-a; c = c-b; c = c^(b>>5); \ c 20 net/ceph/crush/hash.c a = a-b; a = a-c; a = a^(c>>3); \ c 21 net/ceph/crush/hash.c b = b-c; b = b-a; b = b^(a<<10); \ c 22 net/ceph/crush/hash.c c = c-a; c = c-b; c = c^(b>>15); \ c 49 net/ceph/crush/hash.c static __u32 crush_hash32_rjenkins1_3(__u32 a, __u32 b, __u32 c) c 51 net/ceph/crush/hash.c __u32 hash = crush_hash_seed ^ a ^ b ^ c; c 55 
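Aside: the net/ceph/armor.c lines indexed above are a hand-rolled base64 codec. A minimal userspace sketch of the same character mapping and 4-to-3 group decode follows; decode_bits() mirrors the ranges quoted in the index, while decode_group(), its padding handling and the main() driver are illustrative additions, not the kernel code.

#include <stdio.h>

/* Map one base64 character to its 6-bit value; -1 on anything else.
 * Mirrors the per-range tests seen in the armor.c entries above. */
static int decode_bits(char c)
{
        if (c >= 'A' && c <= 'Z') return c - 'A';
        if (c >= 'a' && c <= 'z') return c - 'a' + 26;
        if (c >= '0' && c <= '9') return c - '0' + 52;
        if (c == '+') return 62;
        if (c == '/') return 63;
        return -1;              /* '=' padding is handled by the caller */
}

/* Decode one 4-character group into up to 3 output bytes;
 * returns the number of bytes produced or -1 on bad input. */
static int decode_group(const char *src, unsigned char *dst)
{
        int a = decode_bits(src[0]), b = decode_bits(src[1]);
        int c = decode_bits(src[2]), d = decode_bits(src[3]);

        if (a < 0 || b < 0)
                return -1;
        dst[0] = (a << 2) | (b >> 4);
        if (src[2] == '=')
                return 1;
        if (c < 0)
                return -1;
        dst[1] = ((b & 15) << 4) | (c >> 2);
        if (src[3] == '=')
                return 2;
        if (d < 0)
                return -1;
        dst[2] = ((c & 3) << 6) | d;
        return 3;
}

int main(void)
{
        unsigned char out[4] = { 0 };
        int n = decode_group("Zm9v", out);      /* decodes to "foo" */

        printf("%d bytes: %.*s\n", n, n, (const char *)out);
        return 0;
}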
net/ceph/crush/hash.c crush_hashmix(c, x, hash); c 58 net/ceph/crush/hash.c crush_hashmix(y, c, hash); c 62 net/ceph/crush/hash.c static __u32 crush_hash32_rjenkins1_4(__u32 a, __u32 b, __u32 c, __u32 d) c 64 net/ceph/crush/hash.c __u32 hash = crush_hash_seed ^ a ^ b ^ c ^ d; c 68 net/ceph/crush/hash.c crush_hashmix(c, d, hash); c 71 net/ceph/crush/hash.c crush_hashmix(c, x, hash); c 76 net/ceph/crush/hash.c static __u32 crush_hash32_rjenkins1_5(__u32 a, __u32 b, __u32 c, __u32 d, c 79 net/ceph/crush/hash.c __u32 hash = crush_hash_seed ^ a ^ b ^ c ^ d ^ e; c 83 net/ceph/crush/hash.c crush_hashmix(c, d, hash); c 87 net/ceph/crush/hash.c crush_hashmix(y, c, hash); c 114 net/ceph/crush/hash.c __u32 crush_hash32_3(int type, __u32 a, __u32 b, __u32 c) c 118 net/ceph/crush/hash.c return crush_hash32_rjenkins1_3(a, b, c); c 124 net/ceph/crush/hash.c __u32 crush_hash32_4(int type, __u32 a, __u32 b, __u32 c, __u32 d) c 128 net/ceph/crush/hash.c return crush_hash32_rjenkins1_4(a, b, c, d); c 134 net/ceph/crush/hash.c __u32 crush_hash32_5(int type, __u32 a, __u32 b, __u32 c, __u32 d, __u32 e) c 138 net/ceph/crush/hash.c return crush_hash32_rjenkins1_5(a, b, c, d, e); c 903 net/ceph/crush/mapper.c int *c = b + result_max; c 1046 net/ceph/crush/mapper.c c+osize, c 1064 net/ceph/crush/mapper.c c+osize, c 1073 net/ceph/crush/mapper.c memcpy(o, c, osize*sizeof(*o)); c 1938 net/ceph/messenger.c int ceph_parse_ips(const char *c, const char *end, c 1943 net/ceph/messenger.c const char *p = c; c 1945 net/ceph/messenger.c dout("parse_ips on '%.*s'\n", (int)(end-c), c); c 2007 net/ceph/messenger.c pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c); c 176 net/ceph/osdmap.c void clear_choose_args(struct crush_map *c) c 178 net/ceph/osdmap.c while (!RB_EMPTY_ROOT(&c->choose_args)) { c 180 net/ceph/osdmap.c rb_entry(rb_first(&c->choose_args), c 183 net/ceph/osdmap.c erase_choose_arg_map(&c->choose_args, arg_map); c 261 net/ceph/osdmap.c static int decode_choose_args(void **p, void *end, struct crush_map *c) c 277 net/ceph/osdmap.c arg_map->size = c->max_buckets; c 300 net/ceph/osdmap.c arg->ids_size != c->buckets[bucket_index]->size) c 304 net/ceph/osdmap.c insert_choose_arg_map(&c->choose_args, arg_map); c 316 net/ceph/osdmap.c static void crush_finalize(struct crush_map *c) c 321 net/ceph/osdmap.c c->working_size = sizeof(struct crush_work) + c 322 net/ceph/osdmap.c c->max_buckets * sizeof(struct crush_work_bucket *); c 324 net/ceph/osdmap.c for (b = 0; b < c->max_buckets; b++) { c 325 net/ceph/osdmap.c if (!c->buckets[b]) c 328 net/ceph/osdmap.c switch (c->buckets[b]->alg) { c 334 net/ceph/osdmap.c c->working_size += sizeof(struct crush_work_bucket); c 338 net/ceph/osdmap.c c->working_size += c->buckets[b]->size * sizeof(__u32); c 344 net/ceph/osdmap.c struct crush_map *c; c 353 net/ceph/osdmap.c c = kzalloc(sizeof(*c), GFP_NOFS); c 354 net/ceph/osdmap.c if (c == NULL) c 357 net/ceph/osdmap.c c->choose_args = RB_ROOT; c 360 net/ceph/osdmap.c c->choose_local_tries = 2; c 361 net/ceph/osdmap.c c->choose_local_fallback_tries = 5; c 362 net/ceph/osdmap.c c->choose_total_tries = 19; c 363 net/ceph/osdmap.c c->chooseleaf_descend_once = 0; c 372 net/ceph/osdmap.c c->max_buckets = ceph_decode_32(p); c 373 net/ceph/osdmap.c c->max_rules = ceph_decode_32(p); c 374 net/ceph/osdmap.c c->max_devices = ceph_decode_32(p); c 376 net/ceph/osdmap.c c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS); c 377 net/ceph/osdmap.c if (c->buckets == NULL) c 379 net/ceph/osdmap.c c->rules = kcalloc(c->max_rules, 
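Aside: the mix()/crush_hashmix() macro quoted in the net/ceph/ceph_hash.c and net/ceph/crush/hash.c entries above is Bob Jenkins' 96-bit integer mix. A self-contained sketch is below; the macro body matches the quoted one, but hash3(), its seeding and the single mix round are illustrative simplifications, not the exact crush_hash32_rjenkins1_*() call sequence.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* The 96-bit mix step quoted in the ceph/crush entries above. */
#define mix(a, b, c) do {                               \
        a -= b; a -= c; a ^= (c >> 13);                 \
        b -= c; b -= a; b ^= (a << 8);                  \
        c -= a; c -= b; c ^= (b >> 13);                 \
        a -= b; a -= c; a ^= (c >> 12);                 \
        b -= c; b -= a; b ^= (a << 16);                 \
        c -= a; c -= b; c ^= (b >> 5);                  \
        a -= b; a -= c; a ^= (c >> 3);                  \
        b -= c; b -= a; b ^= (a << 10);                 \
        c -= a; c -= b; c ^= (b >> 15);                 \
} while (0)

/* Hash three 32-bit inputs into one; seeding and round count here
 * are illustrative, not the crush/ceph ordering. */
static uint32_t hash3(uint32_t seed, uint32_t x, uint32_t y, uint32_t z)
{
        uint32_t a = x, b = y, c = seed ^ z;

        mix(a, b, c);
        return c;
}

int main(void)
{
        printf("0x%08" PRIx32 "\n", hash3(1315423911u, 1, 2, 3));
        return 0;
}

The design point visible in the index is that the last mixed word (c) carries the avalanche of all three inputs, which is why both ceph_str_hash_rjenkins() and the crush variants return c (or a value derived from it) rather than a or b.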
sizeof(*c->rules), GFP_NOFS); c 380 net/ceph/osdmap.c if (c->rules == NULL) c 384 net/ceph/osdmap.c for (i = 0; i < c->max_buckets; i++) { c 391 net/ceph/osdmap.c c->buckets[i] = NULL; c 417 net/ceph/osdmap.c b = c->buckets[i] = kzalloc(size, GFP_NOFS); c 475 net/ceph/osdmap.c dout("rule vec is %p\n", c->rules); c 476 net/ceph/osdmap.c for (i = 0; i < c->max_rules; i++) { c 484 net/ceph/osdmap.c c->rules[i] = NULL; c 499 net/ceph/osdmap.c c->rules[i] = r; c 519 net/ceph/osdmap.c c->choose_local_tries = ceph_decode_32(p); c 520 net/ceph/osdmap.c c->choose_local_fallback_tries = ceph_decode_32(p); c 521 net/ceph/osdmap.c c->choose_total_tries = ceph_decode_32(p); c 523 net/ceph/osdmap.c c->choose_local_tries); c 525 net/ceph/osdmap.c c->choose_local_fallback_tries); c 527 net/ceph/osdmap.c c->choose_total_tries); c 530 net/ceph/osdmap.c c->chooseleaf_descend_once = ceph_decode_32(p); c 532 net/ceph/osdmap.c c->chooseleaf_descend_once); c 535 net/ceph/osdmap.c c->chooseleaf_vary_r = ceph_decode_8(p); c 537 net/ceph/osdmap.c c->chooseleaf_vary_r); c 544 net/ceph/osdmap.c c->chooseleaf_stable = ceph_decode_8(p); c 546 net/ceph/osdmap.c c->chooseleaf_stable); c 558 net/ceph/osdmap.c err = decode_choose_args(p, end, c); c 564 net/ceph/osdmap.c crush_finalize(c); c 566 net/ceph/osdmap.c return c; c 572 net/ceph/osdmap.c crush_destroy(c); c 137 net/ceph/pagelist.c struct ceph_pagelist_cursor *c) c 139 net/ceph/pagelist.c c->pl = pl; c 140 net/ceph/pagelist.c c->page_lru = pl->head.prev; c 141 net/ceph/pagelist.c c->room = pl->room; c 151 net/ceph/pagelist.c struct ceph_pagelist_cursor *c) c 155 net/ceph/pagelist.c if (pl != c->pl) c 158 net/ceph/pagelist.c while (pl->head.prev != c->page_lru) { c 164 net/ceph/pagelist.c pl->room = c->room; c 718 net/core/pktgen.c char c; c 720 net/core/pktgen.c if (get_user(c, &user_buffer[i])) c 722 net/core/pktgen.c value = hex_to_bin(c); c 737 net/core/pktgen.c char c; c 738 net/core/pktgen.c if (get_user(c, &user_buffer[i])) c 740 net/core/pktgen.c switch (c) { c 763 net/core/pktgen.c char c; c 764 net/core/pktgen.c if (get_user(c, &user_buffer[i])) c 766 net/core/pktgen.c if ((c >= '0') && (c <= '9')) { c 768 net/core/pktgen.c *num += c - '0'; c 780 net/core/pktgen.c char c; c 781 net/core/pktgen.c if (get_user(c, &user_buffer[i])) c 783 net/core/pktgen.c switch (c) { c 801 net/core/pktgen.c char c; c 815 net/core/pktgen.c if (get_user(c, &buffer[i])) c 821 net/core/pktgen.c } while (c == ','); c 5212 net/core/rtnetlink.c struct netlink_dump_control c = { c 5217 net/core/rtnetlink.c err = netlink_dump_start(rtnl, skb, nlh, &c); c 86 net/core/utils.c static inline int xdigit2bin(char c, int delim) c 90 net/core/utils.c if (c == delim || c == '\0') c 92 net/core/utils.c if (c == ':') c 94 net/core/utils.c if (c == '.') c 97 net/core/utils.c val = hex_to_bin(c); c 135 net/core/utils.c int c; c 136 net/core/utils.c c = xdigit2bin(srclen > 0 ? *s : '\0', delim); c 137 net/core/utils.c if (!(c & (IN6PTON_DIGIT | IN6PTON_DOT | IN6PTON_DELIM | IN6PTON_COLON_MASK))) { c 140 net/core/utils.c if (c & (IN6PTON_DOT | IN6PTON_DELIM | IN6PTON_COLON_MASK)) { c 146 net/core/utils.c if (c & (IN6PTON_DELIM | IN6PTON_COLON_MASK)) { c 153 net/core/utils.c w = (w * 10) + c; c 204 net/core/utils.c int c; c 206 net/core/utils.c c = xdigit2bin(srclen > 0 ? 
*s : '\0', delim); c 207 net/core/utils.c if (!(c & state)) c 209 net/core/utils.c if (c & (IN6PTON_DELIM | IN6PTON_COLON_MASK)) { c 216 net/core/utils.c if (c & IN6PTON_DELIM) { c 248 net/core/utils.c if (c & IN6PTON_DOT) { c 257 net/core/utils.c w = (w << 4) | (0xff & c); c 1021 net/dccp/feat.c u8 c, s; c 1024 net/dccp/feat.c for (c = 0; c < clen; c++) c 1025 net/dccp/feat.c if (servlist[s] == clilist[c]) c 123 net/dsa/tag_dsa.c __wsum c = skb->csum; c 124 net/dsa/tag_dsa.c c = csum_add(c, csum_partial(new_header + 2, 2, 0)); c 125 net/dsa/tag_dsa.c c = csum_sub(c, csum_partial(dsa_header + 2, 2, 0)); c 126 net/dsa/tag_dsa.c skb->csum = c; c 138 net/dsa/tag_edsa.c __wsum c = skb->csum; c 139 net/dsa/tag_edsa.c c = csum_add(c, csum_partial(new_header + 2, 2, 0)); c 140 net/dsa/tag_edsa.c c = csum_sub(c, csum_partial(edsa_header + 2, 2, 0)); c 141 net/dsa/tag_edsa.c skb->csum = c; c 1310 net/ipv4/arp.c char c, *s; c 1314 net/ipv4/arp.c c = (a->ax25_call[n] >> 1) & 0x7F; c 1316 net/ipv4/arp.c if (c != ' ') c 1317 net/ipv4/arp.c *s++ = c; c 1139 net/ipv4/inet_diag.c struct netlink_dump_control c = { c 1142 net/ipv4/inet_diag.c return netlink_dump_start(net->diag_nlsk, skb, nlh, &c); c 1170 net/ipv4/inet_diag.c struct netlink_dump_control c = { c 1173 net/ipv4/inet_diag.c return netlink_dump_start(net->diag_nlsk, skb, h, &c); c 896 net/ipv4/ipconfig.c u8 *c; c 899 net/ipv4/ipconfig.c for (c=ext+2; c<ext+2+ext[1]; c++) c 900 net/ipv4/ipconfig.c pr_debug(" %02x", *c); c 360 net/ipv4/ipmr.c struct mfc_cache *c = (struct mfc_cache *)ptr; c 362 net/ipv4/ipmr.c return cmparg->mfc_mcastgrp != c->mfc_mcastgrp || c 363 net/ipv4/ipmr.c cmparg->mfc_origin != c->mfc_origin; c 728 net/ipv4/ipmr.c struct mr_mfc *c = container_of(head, struct mr_mfc, rcu); c 730 net/ipv4/ipmr.c kmem_cache_free(mrt_cachep, (struct mfc_cache *)c); c 733 net/ipv4/ipmr.c static void ipmr_cache_free(struct mfc_cache *c) c 735 net/ipv4/ipmr.c call_rcu(&c->_c.rcu, ipmr_cache_free_rcu); c 741 net/ipv4/ipmr.c static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c) c 749 net/ipv4/ipmr.c while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved))) { c 766 net/ipv4/ipmr.c ipmr_cache_free(c); c 773 net/ipv4/ipmr.c struct mr_mfc *c, *next; c 788 net/ipv4/ipmr.c list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) { c 789 net/ipv4/ipmr.c if (time_after(c->mfc_un.unres.expires, now)) { c 790 net/ipv4/ipmr.c unsigned long interval = c->mfc_un.unres.expires - now; c 796 net/ipv4/ipmr.c list_del(&c->list); c 797 net/ipv4/ipmr.c mroute_netlink_event(mrt, (struct mfc_cache *)c, RTM_DELROUTE); c 798 net/ipv4/ipmr.c ipmr_destroy_unres(mrt, (struct mfc_cache *)c); c 980 net/ipv4/ipmr.c struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); c 982 net/ipv4/ipmr.c if (c) { c 983 net/ipv4/ipmr.c c->_c.mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1; c 984 net/ipv4/ipmr.c c->_c.mfc_un.res.minvif = MAXVIFS; c 985 net/ipv4/ipmr.c c->_c.free = ipmr_cache_free_rcu; c 986 net/ipv4/ipmr.c refcount_set(&c->_c.mfc_un.res.refcount, 1); c 988 net/ipv4/ipmr.c return c; c 993 net/ipv4/ipmr.c struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); c 995 net/ipv4/ipmr.c if (c) { c 996 net/ipv4/ipmr.c skb_queue_head_init(&c->_c.mfc_un.unres.unresolved); c 997 net/ipv4/ipmr.c c->_c.mfc_un.unres.expires = jiffies + 10 * HZ; c 999 net/ipv4/ipmr.c return c; c 1004 net/ipv4/ipmr.c struct mfc_cache *uc, struct mfc_cache *c) c 1015 net/ipv4/ipmr.c if (mr_fill_mroute(mrt, skb, &c->_c, c 1030 net/ipv4/ipmr.c 
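Aside: the net/dsa/tag_dsa.c and tag_edsa.c entries above patch skb->csum incrementally with csum_add()/csum_sub() after rewriting two header bytes. The userspace sketch below shows the underlying one's-complement arithmetic; fold16(), csum_partial16(), csum_add16() and csum_sub16() are illustrative stand-ins for the kernel __wsum helpers, not their real implementation.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Fold a 32-bit accumulator into a 16-bit one's-complement sum. */
static uint16_t fold16(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

/* One's-complement sum of a byte buffer, taken as 16-bit big-endian words. */
static uint16_t csum_partial16(const uint8_t *p, size_t len)
{
        uint32_t sum = 0;
        size_t i;

        for (i = 0; i + 1 < len; i += 2)
                sum += (uint32_t)p[i] << 8 | p[i + 1];
        if (i < len)
                sum += (uint32_t)p[i] << 8;
        return fold16(sum);
}

static uint16_t csum_add16(uint16_t a, uint16_t b)
{
        return fold16((uint32_t)a + b);
}

static uint16_t csum_sub16(uint16_t a, uint16_t b)
{
        return csum_add16(a, (uint16_t)~b);     /* subtract = add complement */
}

int main(void)
{
        uint8_t pkt[6] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc };
        uint16_t before = csum_partial16(pkt, sizeof(pkt));
        uint8_t oldw[2] = { pkt[2], pkt[3] }, neww[2] = { 0xde, 0xad };
        uint16_t fixed;

        pkt[2] = neww[0];                       /* rewrite two covered bytes */
        pkt[3] = neww[1];
        fixed = csum_sub16(csum_add16(before, csum_partial16(neww, 2)),
                           csum_partial16(oldw, 2));
        printf("adjusted 0x%04x, recomputed 0x%04x\n",
               fixed, csum_partial16(pkt, sizeof(pkt)));
        return 0;
}

Both values come out equal, which is the whole point of the add-new/subtract-old update: the checksum never has to be recomputed over the full packet.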
ip_mr_forward(net, mrt, skb->dev, skb, c, 0); c 1122 net/ipv4/ipmr.c struct mfc_cache *c; c 1127 net/ipv4/ipmr.c list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) { c 1128 net/ipv4/ipmr.c if (c->mfc_mcastgrp == iph->daddr && c 1129 net/ipv4/ipmr.c c->mfc_origin == iph->saddr) { c 1137 net/ipv4/ipmr.c c = ipmr_cache_alloc_unres(); c 1138 net/ipv4/ipmr.c if (!c) { c 1146 net/ipv4/ipmr.c c->_c.mfc_parent = -1; c 1147 net/ipv4/ipmr.c c->mfc_origin = iph->saddr; c 1148 net/ipv4/ipmr.c c->mfc_mcastgrp = iph->daddr; c 1159 net/ipv4/ipmr.c ipmr_cache_free(c); c 1165 net/ipv4/ipmr.c list_add(&c->_c.list, &mrt->mfc_unres_queue); c 1166 net/ipv4/ipmr.c mroute_netlink_event(mrt, c, RTM_NEWROUTE); c 1170 net/ipv4/ipmr.c c->_c.mfc_un.unres.expires); c 1174 net/ipv4/ipmr.c if (c->_c.mfc_un.unres.unresolved.qlen > 3) { c 1182 net/ipv4/ipmr.c skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb); c 1195 net/ipv4/ipmr.c struct mfc_cache *c; c 1199 net/ipv4/ipmr.c c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr, c 1202 net/ipv4/ipmr.c if (!c) c 1204 net/ipv4/ipmr.c rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ipmr_rht_params); c 1205 net/ipv4/ipmr.c list_del_rcu(&c->_c.list); c 1206 net/ipv4/ipmr.c call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, c, mrt->id); c 1207 net/ipv4/ipmr.c mroute_netlink_event(mrt, c, RTM_DELROUTE); c 1208 net/ipv4/ipmr.c mr_cache_put(&c->_c); c 1216 net/ipv4/ipmr.c struct mfc_cache *uc, *c; c 1226 net/ipv4/ipmr.c c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr, c 1229 net/ipv4/ipmr.c if (c) { c 1231 net/ipv4/ipmr.c c->_c.mfc_parent = mfc->mfcc_parent; c 1232 net/ipv4/ipmr.c ipmr_update_thresholds(mrt, &c->_c, mfc->mfcc_ttls); c 1234 net/ipv4/ipmr.c c->_c.mfc_flags |= MFC_STATIC; c 1236 net/ipv4/ipmr.c call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, c, c 1238 net/ipv4/ipmr.c mroute_netlink_event(mrt, c, RTM_NEWROUTE); c 1246 net/ipv4/ipmr.c c = ipmr_cache_alloc(); c 1247 net/ipv4/ipmr.c if (!c) c 1250 net/ipv4/ipmr.c c->mfc_origin = mfc->mfcc_origin.s_addr; c 1251 net/ipv4/ipmr.c c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr; c 1252 net/ipv4/ipmr.c c->_c.mfc_parent = mfc->mfcc_parent; c 1253 net/ipv4/ipmr.c ipmr_update_thresholds(mrt, &c->_c, mfc->mfcc_ttls); c 1255 net/ipv4/ipmr.c c->_c.mfc_flags |= MFC_STATIC; c 1257 net/ipv4/ipmr.c ret = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->_c.mnode, c 1261 net/ipv4/ipmr.c ipmr_cache_free(c); c 1264 net/ipv4/ipmr.c list_add_tail_rcu(&c->_c.list, &mrt->mfc_cache_list); c 1272 net/ipv4/ipmr.c if (uc->mfc_origin == c->mfc_origin && c 1273 net/ipv4/ipmr.c uc->mfc_mcastgrp == c->mfc_mcastgrp) { c 1285 net/ipv4/ipmr.c ipmr_cache_resolve(net, mrt, uc, c); c 1288 net/ipv4/ipmr.c call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD, c, mrt->id); c 1289 net/ipv4/ipmr.c mroute_netlink_event(mrt, c, RTM_NEWROUTE); c 1297 net/ipv4/ipmr.c struct mr_mfc *c, *tmp; c 1316 net/ipv4/ipmr.c list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) { c 1317 net/ipv4/ipmr.c if (((c->mfc_flags & MFC_STATIC) && !(flags & MRT_FLUSH_MFC_STATIC)) || c 1318 net/ipv4/ipmr.c (!(c->mfc_flags & MFC_STATIC) && !(flags & MRT_FLUSH_MFC))) c 1320 net/ipv4/ipmr.c rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params); c 1321 net/ipv4/ipmr.c list_del_rcu(&c->list); c 1322 net/ipv4/ipmr.c cache = (struct mfc_cache *)c; c 1326 net/ipv4/ipmr.c mr_cache_put(c); c 1333 net/ipv4/ipmr.c list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) { c 1334 net/ipv4/ipmr.c list_del(&c->list); c 1335 net/ipv4/ipmr.c cache = (struct 
mfc_cache *)c; c 1615 net/ipv4/ipmr.c struct mfc_cache *c; c 1650 net/ipv4/ipmr.c c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr); c 1651 net/ipv4/ipmr.c if (c) { c 1652 net/ipv4/ipmr.c sr.pktcnt = c->_c.mfc_un.res.pkt; c 1653 net/ipv4/ipmr.c sr.bytecnt = c->_c.mfc_un.res.bytes; c 1654 net/ipv4/ipmr.c sr.wrong_if = c->_c.mfc_un.res.wrong_if; c 1690 net/ipv4/ipmr.c struct mfc_cache *c; c 1725 net/ipv4/ipmr.c c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr); c 1726 net/ipv4/ipmr.c if (c) { c 1727 net/ipv4/ipmr.c sr.pktcnt = c->_c.mfc_un.res.pkt; c 1728 net/ipv4/ipmr.c sr.bytecnt = c->_c.mfc_un.res.bytes; c 1729 net/ipv4/ipmr.c sr.wrong_if = c->_c.mfc_un.res.wrong_if; c 1952 net/ipv4/ipmr.c struct mfc_cache *c, int local) c 1958 net/ipv4/ipmr.c vif = c->_c.mfc_parent; c 1959 net/ipv4/ipmr.c c->_c.mfc_un.res.pkt++; c 1960 net/ipv4/ipmr.c c->_c.mfc_un.res.bytes += skb->len; c 1961 net/ipv4/ipmr.c c->_c.mfc_un.res.lastuse = jiffies; c 1963 net/ipv4/ipmr.c if (c->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) { c 1992 net/ipv4/ipmr.c c->_c.mfc_un.res.wrong_if++; c 2001 net/ipv4/ipmr.c c->_c.mfc_un.res.ttls[true_vifi] < 255) && c 2003 net/ipv4/ipmr.c c->_c.mfc_un.res.last_assert + c 2005 net/ipv4/ipmr.c c->_c.mfc_un.res.last_assert = jiffies; c 2019 net/ipv4/ipmr.c if (c->mfc_origin == htonl(INADDR_ANY) && c 2020 net/ipv4/ipmr.c c->mfc_mcastgrp == htonl(INADDR_ANY)) { c 2022 net/ipv4/ipmr.c true_vifi != c->_c.mfc_parent && c 2024 net/ipv4/ipmr.c c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) { c 2029 net/ipv4/ipmr.c psend = c->_c.mfc_parent; c 2034 net/ipv4/ipmr.c for (ct = c->_c.mfc_un.res.maxvif - 1; c 2035 net/ipv4/ipmr.c ct >= c->_c.mfc_un.res.minvif; ct--) { c 2037 net/ipv4/ipmr.c if ((c->mfc_origin != htonl(INADDR_ANY) || c 2039 net/ipv4/ipmr.c ip_hdr(skb)->ttl > c->_c.mfc_un.res.ttls[ct]) { c 2322 net/ipv4/ipmr.c u32 portid, u32 seq, struct mfc_cache *c, int cmd, c 2343 net/ipv4/ipmr.c if (c->_c.mfc_flags & MFC_STATIC) c 2349 net/ipv4/ipmr.c if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) || c 2350 net/ipv4/ipmr.c nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp)) c 2352 net/ipv4/ipmr.c err = mr_fill_mroute(mrt, skb, &c->_c, rtm); c 2366 net/ipv4/ipmr.c u32 portid, u32 seq, struct mr_mfc *c, int cmd, c 2369 net/ipv4/ipmr.c return ipmr_fill_mroute(mrt, skb, portid, seq, (struct mfc_cache *)c, c 67 net/ipv4/ipmr_base.c struct mr_mfc *c; c 70 net/ipv4/ipmr_base.c rhl_for_each_entry_rcu(c, tmp, list, mnode) c 71 net/ipv4/ipmr_base.c if (parent == -1 || parent == c->mfc_parent) c 72 net/ipv4/ipmr_base.c return c; c 81 net/ipv4/ipmr_base.c struct mr_mfc *c; c 85 net/ipv4/ipmr_base.c rhl_for_each_entry_rcu(c, tmp, list, mnode) c 86 net/ipv4/ipmr_base.c if (c->mfc_un.res.ttls[vifi] < 255) c 87 net/ipv4/ipmr_base.c return c; c 96 net/ipv4/ipmr_base.c struct mr_mfc *c, *proxy; c 99 net/ipv4/ipmr_base.c rhl_for_each_entry_rcu(c, tmp, list, mnode) { c 100 net/ipv4/ipmr_base.c if (c->mfc_un.res.ttls[vifi] < 255) c 101 net/ipv4/ipmr_base.c return c; c 104 net/ipv4/ipmr_base.c proxy = mr_mfc_find_any_parent(mrt, c->mfc_parent); c 106 net/ipv4/ipmr_base.c return c; c 178 net/ipv4/ipmr_base.c struct mr_mfc *c = v; c 185 net/ipv4/ipmr_base.c if (c->list.next != it->cache) c 186 net/ipv4/ipmr_base.c return list_entry(c->list.next, struct mr_mfc, list); c 209 net/ipv4/ipmr_base.c struct mr_mfc *c, struct rtmsg *rtm) c 218 net/ipv4/ipmr_base.c if (c->mfc_parent >= MAXVIFS) { c 223 net/ipv4/ipmr_base.c if (VIF_EXISTS(mrt, c->mfc_parent) && c 225 net/ipv4/ipmr_base.c 
mrt->vif_table[c->mfc_parent].dev->ifindex) < 0) c 228 net/ipv4/ipmr_base.c if (c->mfc_flags & MFC_OFFLOAD) c 235 net/ipv4/ipmr_base.c for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { c 236 net/ipv4/ipmr_base.c if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) { c 246 net/ipv4/ipmr_base.c nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; c 255 net/ipv4/ipmr_base.c lastuse = READ_ONCE(c->mfc_un.res.lastuse); c 258 net/ipv4/ipmr_base.c mfcs.mfcs_packets = c->mfc_un.res.pkt; c 259 net/ipv4/ipmr_base.c mfcs.mfcs_bytes = c->mfc_un.res.bytes; c 260 net/ipv4/ipmr_base.c mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if; c 272 net/ipv4/ipmr_base.c const struct mr_mfc *c, c 277 net/ipv4/ipmr_base.c for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { c 278 net/ipv4/ipmr_base.c if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) { c 292 net/ipv4/ipmr_base.c u32 portid, u32 seq, struct mr_mfc *c, c 349 net/ipv4/ipmr_base.c u32 portid, u32 seq, struct mr_mfc *c, c 83 net/ipv4/netfilter/ipt_CLUSTERIP.c clusterip_config_get(struct clusterip_config *c) c 85 net/ipv4/netfilter/ipt_CLUSTERIP.c refcount_inc(&c->refcount); c 103 net/ipv4/netfilter/ipt_CLUSTERIP.c clusterip_config_put(struct clusterip_config *c) c 105 net/ipv4/netfilter/ipt_CLUSTERIP.c if (refcount_dec_and_test(&c->refcount)) c 106 net/ipv4/netfilter/ipt_CLUSTERIP.c call_rcu(&c->rcu, clusterip_config_rcu_free); c 113 net/ipv4/netfilter/ipt_CLUSTERIP.c clusterip_config_entry_put(struct clusterip_config *c) c 115 net/ipv4/netfilter/ipt_CLUSTERIP.c struct clusterip_net *cn = clusterip_pernet(c->net); c 118 net/ipv4/netfilter/ipt_CLUSTERIP.c if (refcount_dec_and_lock(&c->entries, &cn->lock)) { c 119 net/ipv4/netfilter/ipt_CLUSTERIP.c list_del_rcu(&c->list); c 128 net/ipv4/netfilter/ipt_CLUSTERIP.c proc_remove(c->pde); c 139 net/ipv4/netfilter/ipt_CLUSTERIP.c struct clusterip_config *c; c 142 net/ipv4/netfilter/ipt_CLUSTERIP.c list_for_each_entry_rcu(c, &cn->configs, list) { c 143 net/ipv4/netfilter/ipt_CLUSTERIP.c if (c->clusterip == clusterip) c 144 net/ipv4/netfilter/ipt_CLUSTERIP.c return c; c 153 net/ipv4/netfilter/ipt_CLUSTERIP.c struct clusterip_config *c; c 156 net/ipv4/netfilter/ipt_CLUSTERIP.c c = __clusterip_config_find(net, clusterip); c 157 net/ipv4/netfilter/ipt_CLUSTERIP.c if (c) { c 159 net/ipv4/netfilter/ipt_CLUSTERIP.c if (!c->pde) c 160 net/ipv4/netfilter/ipt_CLUSTERIP.c c = NULL; c 163 net/ipv4/netfilter/ipt_CLUSTERIP.c if (unlikely(!refcount_inc_not_zero(&c->refcount))) c 164 net/ipv4/netfilter/ipt_CLUSTERIP.c c = NULL; c 166 net/ipv4/netfilter/ipt_CLUSTERIP.c if (unlikely(!refcount_inc_not_zero(&c->entries))) { c 167 net/ipv4/netfilter/ipt_CLUSTERIP.c clusterip_config_put(c); c 168 net/ipv4/netfilter/ipt_CLUSTERIP.c c = NULL; c 174 net/ipv4/netfilter/ipt_CLUSTERIP.c return c; c 178 net/ipv4/netfilter/ipt_CLUSTERIP.c clusterip_config_init_nodelist(struct clusterip_config *c, c 184 net/ipv4/netfilter/ipt_CLUSTERIP.c set_bit(i->local_nodes[n] - 1, &c->local_nodes); c 194 net/ipv4/netfilter/ipt_CLUSTERIP.c struct clusterip_config *c; c 197 net/ipv4/netfilter/ipt_CLUSTERIP.c list_for_each_entry_rcu(c, &cn->configs, list) { c 200 net/ipv4/netfilter/ipt_CLUSTERIP.c if (!strcmp(dev->name, c->ifname)) { c 201 net/ipv4/netfilter/ipt_CLUSTERIP.c c->ifindex = dev->ifindex; c 202 net/ipv4/netfilter/ipt_CLUSTERIP.c dev_mc_add(dev, c->clustermac); c 206 net/ipv4/netfilter/ipt_CLUSTERIP.c if (dev->ifindex == c->ifindex) { c 207 net/ipv4/netfilter/ipt_CLUSTERIP.c dev_mc_del(dev, c->clustermac); c 208 
net/ipv4/netfilter/ipt_CLUSTERIP.c c->ifindex = -1; c 212 net/ipv4/netfilter/ipt_CLUSTERIP.c if (!strcmp(dev->name, c->ifname)) { c 213 net/ipv4/netfilter/ipt_CLUSTERIP.c c->ifindex = dev->ifindex; c 214 net/ipv4/netfilter/ipt_CLUSTERIP.c dev_mc_add(dev, c->clustermac); c 215 net/ipv4/netfilter/ipt_CLUSTERIP.c } else if (dev->ifindex == c->ifindex) { c 216 net/ipv4/netfilter/ipt_CLUSTERIP.c dev_mc_del(dev, c->clustermac); c 217 net/ipv4/netfilter/ipt_CLUSTERIP.c c->ifindex = -1; c 232 net/ipv4/netfilter/ipt_CLUSTERIP.c struct clusterip_config *c; c 241 net/ipv4/netfilter/ipt_CLUSTERIP.c c = kzalloc(sizeof(*c), GFP_ATOMIC); c 242 net/ipv4/netfilter/ipt_CLUSTERIP.c if (!c) c 248 net/ipv4/netfilter/ipt_CLUSTERIP.c kfree(c); c 251 net/ipv4/netfilter/ipt_CLUSTERIP.c c->ifindex = dev->ifindex; c 252 net/ipv4/netfilter/ipt_CLUSTERIP.c strcpy(c->ifname, dev->name); c 253 net/ipv4/netfilter/ipt_CLUSTERIP.c memcpy(&c->clustermac, &i->clustermac, ETH_ALEN); c 254 net/ipv4/netfilter/ipt_CLUSTERIP.c dev_mc_add(dev, c->clustermac); c 257 net/ipv4/netfilter/ipt_CLUSTERIP.c c->clusterip = ip; c 258 net/ipv4/netfilter/ipt_CLUSTERIP.c c->num_total_nodes = i->num_total_nodes; c 259 net/ipv4/netfilter/ipt_CLUSTERIP.c clusterip_config_init_nodelist(c, i); c 260 net/ipv4/netfilter/ipt_CLUSTERIP.c c->hash_mode = i->hash_mode; c 261 net/ipv4/netfilter/ipt_CLUSTERIP.c c->hash_initval = i->hash_initval; c 262 net/ipv4/netfilter/ipt_CLUSTERIP.c c->net = net; c 263 net/ipv4/netfilter/ipt_CLUSTERIP.c refcount_set(&c->refcount, 1); c 271 net/ipv4/netfilter/ipt_CLUSTERIP.c list_add_rcu(&c->list, &cn->configs); c 281 net/ipv4/netfilter/ipt_CLUSTERIP.c c->pde = proc_create_data(buffer, 0600, c 283 net/ipv4/netfilter/ipt_CLUSTERIP.c &clusterip_proc_fops, c); c 285 net/ipv4/netfilter/ipt_CLUSTERIP.c if (!c->pde) { c 292 net/ipv4/netfilter/ipt_CLUSTERIP.c refcount_set(&c->entries, 1); c 293 net/ipv4/netfilter/ipt_CLUSTERIP.c return c; c 299 net/ipv4/netfilter/ipt_CLUSTERIP.c list_del_rcu(&c->list); c 302 net/ipv4/netfilter/ipt_CLUSTERIP.c clusterip_config_put(c); c 308 net/ipv4/netfilter/ipt_CLUSTERIP.c clusterip_add_node(struct clusterip_config *c, u_int16_t nodenum) c 312 net/ipv4/netfilter/ipt_CLUSTERIP.c nodenum > c->num_total_nodes) c 316 net/ipv4/netfilter/ipt_CLUSTERIP.c if (test_and_set_bit(nodenum - 1, &c->local_nodes)) c 323 net/ipv4/netfilter/ipt_CLUSTERIP.c clusterip_del_node(struct clusterip_config *c, u_int16_t nodenum) c 326 net/ipv4/netfilter/ipt_CLUSTERIP.c nodenum > c->num_total_nodes) c 329 net/ipv4/netfilter/ipt_CLUSTERIP.c if (test_and_clear_bit(nodenum - 1, &c->local_nodes)) c 611 net/ipv4/netfilter/ipt_CLUSTERIP.c struct clusterip_config *c; c 629 net/ipv4/netfilter/ipt_CLUSTERIP.c c = clusterip_config_find_get(net, payload->src_ip, 0); c 630 net/ipv4/netfilter/ipt_CLUSTERIP.c if (!c) c 637 net/ipv4/netfilter/ipt_CLUSTERIP.c if (c->ifindex != state->out->ifindex) { c 639 net/ipv4/netfilter/ipt_CLUSTERIP.c c->ifindex, state->out->ifindex); c 640 net/ipv4/netfilter/ipt_CLUSTERIP.c clusterip_config_put(c); c 645 net/ipv4/netfilter/ipt_CLUSTERIP.c memcpy(payload->src_hw, c->clustermac, arp->ar_hln); c 652 net/ipv4/netfilter/ipt_CLUSTERIP.c clusterip_config_put(c); c 679 net/ipv4/netfilter/ipt_CLUSTERIP.c struct clusterip_config *c = s->private; c 685 net/ipv4/netfilter/ipt_CLUSTERIP.c local_nodes = c->local_nodes; c 751 net/ipv4/netfilter/ipt_CLUSTERIP.c struct clusterip_config *c = PDE_DATA(inode); c 753 net/ipv4/netfilter/ipt_CLUSTERIP.c sf->private = c; c 755 net/ipv4/netfilter/ipt_CLUSTERIP.c 
clusterip_config_get(c); c 763 net/ipv4/netfilter/ipt_CLUSTERIP.c struct clusterip_config *c = PDE_DATA(inode); c 769 net/ipv4/netfilter/ipt_CLUSTERIP.c clusterip_config_put(c); c 777 net/ipv4/netfilter/ipt_CLUSTERIP.c struct clusterip_config *c = PDE_DATA(file_inode(file)); c 793 net/ipv4/netfilter/ipt_CLUSTERIP.c if (clusterip_add_node(c, nodenum)) c 799 net/ipv4/netfilter/ipt_CLUSTERIP.c if (clusterip_del_node(c, nodenum)) c 83 net/ipv4/raw.c char c[1]; c 457 net/ipv4/raw.c err = memcpy_from_msg(rfv->hdr.c, rfv->msg, rfv->hlen); c 476 net/ipv4/raw.c memcpy(to, rfv->hdr.c + offset, copy); c 480 net/ipv4/raw.c csum_partial_copy_nocheck(rfv->hdr.c + offset, c 49 net/ipv4/syncookies.c u32 count, int c) c 54 net/ipv4/syncookies.c count, &syncookie_secret[c]); c 1468 net/ipv4/tcp.c char c = tp->urg_data; c 1478 net/ipv4/tcp.c err = memcpy_to_msg(msg, &c, 1); c 5519 net/ipv6/addrconf.c int i, c; c 5528 net/ipv6/addrconf.c for_each_possible_cpu(c) { c 5530 net/ipv6/addrconf.c buff[i] += snmp_get_cpu_field64(mib, c, i, syncpoff); c 2061 net/ipv6/ip6_fib.c struct fib6_cleaner *c = container_of(w, struct fib6_cleaner, w); c 2063 net/ipv6/ip6_fib.c .nl_net = c->net, c 2064 net/ipv6/ip6_fib.c .skip_notify = c->skip_notify, c 2067 net/ipv6/ip6_fib.c if (c->sernum != FIB6_NO_SERNUM_CHANGE && c 2068 net/ipv6/ip6_fib.c w->node->fn_sernum != c->sernum) c 2069 net/ipv6/ip6_fib.c w->node->fn_sernum = c->sernum; c 2071 net/ipv6/ip6_fib.c if (!c->func) { c 2072 net/ipv6/ip6_fib.c WARN_ON_ONCE(c->sernum == FIB6_NO_SERNUM_CHANGE); c 2078 net/ipv6/ip6_fib.c res = c->func(rt, c->arg); c 2118 net/ipv6/ip6_fib.c struct fib6_cleaner c; c 2120 net/ipv6/ip6_fib.c c.w.root = root; c 2121 net/ipv6/ip6_fib.c c.w.func = fib6_clean_node; c 2122 net/ipv6/ip6_fib.c c.w.count = 0; c 2123 net/ipv6/ip6_fib.c c.w.skip = 0; c 2124 net/ipv6/ip6_fib.c c.w.skip_in_node = 0; c 2125 net/ipv6/ip6_fib.c c.func = func; c 2126 net/ipv6/ip6_fib.c c.sernum = sernum; c 2127 net/ipv6/ip6_fib.c c.arg = arg; c 2128 net/ipv6/ip6_fib.c c.net = net; c 2129 net/ipv6/ip6_fib.c c.skip_notify = skip_notify; c 2131 net/ipv6/ip6_fib.c fib6_walk(net, &c.w); c 342 net/ipv6/ip6mr.c struct mfc6_cache *c = (struct mfc6_cache *)ptr; c 344 net/ipv6/ip6mr.c return !ipv6_addr_equal(&c->mf6c_mcastgrp, &cmparg->mf6c_mcastgrp) || c 345 net/ipv6/ip6mr.c !ipv6_addr_equal(&c->mf6c_origin, &cmparg->mf6c_origin); c 752 net/ipv6/ip6mr.c struct mr_mfc *c = container_of(head, struct mr_mfc, rcu); c 754 net/ipv6/ip6mr.c kmem_cache_free(mrt_cachep, (struct mfc6_cache *)c); c 757 net/ipv6/ip6mr.c static inline void ip6mr_cache_free(struct mfc6_cache *c) c 759 net/ipv6/ip6mr.c call_rcu(&c->_c.rcu, ip6mr_cache_free_rcu); c 766 net/ipv6/ip6mr.c static void ip6mr_destroy_unres(struct mr_table *mrt, struct mfc6_cache *c) c 773 net/ipv6/ip6mr.c while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved)) != NULL) { c 786 net/ipv6/ip6mr.c ip6mr_cache_free(c); c 796 net/ipv6/ip6mr.c struct mr_mfc *c, *next; c 798 net/ipv6/ip6mr.c list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) { c 799 net/ipv6/ip6mr.c if (time_after(c->mfc_un.unres.expires, now)) { c 801 net/ipv6/ip6mr.c unsigned long interval = c->mfc_un.unres.expires - now; c 807 net/ipv6/ip6mr.c list_del(&c->list); c 808 net/ipv6/ip6mr.c mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE); c 809 net/ipv6/ip6mr.c ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c); c 976 net/ipv6/ip6mr.c struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); c 977 net/ipv6/ip6mr.c if (!c) c 979 net/ipv6/ip6mr.c 
c->_c.mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1; c 980 net/ipv6/ip6mr.c c->_c.mfc_un.res.minvif = MAXMIFS; c 981 net/ipv6/ip6mr.c c->_c.free = ip6mr_cache_free_rcu; c 982 net/ipv6/ip6mr.c refcount_set(&c->_c.mfc_un.res.refcount, 1); c 983 net/ipv6/ip6mr.c return c; c 988 net/ipv6/ip6mr.c struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); c 989 net/ipv6/ip6mr.c if (!c) c 991 net/ipv6/ip6mr.c skb_queue_head_init(&c->_c.mfc_un.unres.unresolved); c 992 net/ipv6/ip6mr.c c->_c.mfc_un.unres.expires = jiffies + 10 * HZ; c 993 net/ipv6/ip6mr.c return c; c 1001 net/ipv6/ip6mr.c struct mfc6_cache *uc, struct mfc6_cache *c) c 1014 net/ipv6/ip6mr.c if (mr_fill_mroute(mrt, skb, &c->_c, c 1025 net/ipv6/ip6mr.c ip6_mr_forward(net, mrt, skb->dev, skb, c); c 1133 net/ipv6/ip6mr.c struct mfc6_cache *c; c 1138 net/ipv6/ip6mr.c list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) { c 1139 net/ipv6/ip6mr.c if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) && c 1140 net/ipv6/ip6mr.c ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) { c 1151 net/ipv6/ip6mr.c c = ip6mr_cache_alloc_unres(); c 1152 net/ipv6/ip6mr.c if (!c) { c 1160 net/ipv6/ip6mr.c c->_c.mfc_parent = -1; c 1161 net/ipv6/ip6mr.c c->mf6c_origin = ipv6_hdr(skb)->saddr; c 1162 net/ipv6/ip6mr.c c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr; c 1174 net/ipv6/ip6mr.c ip6mr_cache_free(c); c 1180 net/ipv6/ip6mr.c list_add(&c->_c.list, &mrt->mfc_unres_queue); c 1181 net/ipv6/ip6mr.c mr6_netlink_event(mrt, c, RTM_NEWROUTE); c 1187 net/ipv6/ip6mr.c if (c->_c.mfc_un.unres.unresolved.qlen > 3) { c 1195 net/ipv6/ip6mr.c skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb); c 1210 net/ipv6/ip6mr.c struct mfc6_cache *c; c 1214 net/ipv6/ip6mr.c c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr, c 1217 net/ipv6/ip6mr.c if (!c) c 1219 net/ipv6/ip6mr.c rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ip6mr_rht_params); c 1220 net/ipv6/ip6mr.c list_del_rcu(&c->_c.list); c 1223 net/ipv6/ip6mr.c FIB_EVENT_ENTRY_DEL, c, mrt->id); c 1224 net/ipv6/ip6mr.c mr6_netlink_event(mrt, c, RTM_DELROUTE); c 1225 net/ipv6/ip6mr.c mr_cache_put(&c->_c); c 1405 net/ipv6/ip6mr.c struct mfc6_cache *uc, *c; c 1421 net/ipv6/ip6mr.c c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr, c 1424 net/ipv6/ip6mr.c if (c) { c 1426 net/ipv6/ip6mr.c c->_c.mfc_parent = mfc->mf6cc_parent; c 1427 net/ipv6/ip6mr.c ip6mr_update_thresholds(mrt, &c->_c, ttls); c 1429 net/ipv6/ip6mr.c c->_c.mfc_flags |= MFC_STATIC; c 1432 net/ipv6/ip6mr.c c, mrt->id); c 1433 net/ipv6/ip6mr.c mr6_netlink_event(mrt, c, RTM_NEWROUTE); c 1441 net/ipv6/ip6mr.c c = ip6mr_cache_alloc(); c 1442 net/ipv6/ip6mr.c if (!c) c 1445 net/ipv6/ip6mr.c c->mf6c_origin = mfc->mf6cc_origin.sin6_addr; c 1446 net/ipv6/ip6mr.c c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr; c 1447 net/ipv6/ip6mr.c c->_c.mfc_parent = mfc->mf6cc_parent; c 1448 net/ipv6/ip6mr.c ip6mr_update_thresholds(mrt, &c->_c, ttls); c 1450 net/ipv6/ip6mr.c c->_c.mfc_flags |= MFC_STATIC; c 1452 net/ipv6/ip6mr.c err = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->_c.mnode, c 1456 net/ipv6/ip6mr.c ip6mr_cache_free(c); c 1459 net/ipv6/ip6mr.c list_add_tail_rcu(&c->_c.list, &mrt->mfc_cache_list); c 1468 net/ipv6/ip6mr.c if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) && c 1469 net/ipv6/ip6mr.c ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) { c 1481 net/ipv6/ip6mr.c ip6mr_cache_resolve(net, mrt, uc, c); c 1485 net/ipv6/ip6mr.c c, mrt->id); c 1486 net/ipv6/ip6mr.c mr6_netlink_event(mrt, c, RTM_NEWROUTE); 
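Aside: the ipmr/ip6mr entries above walk a queue of unresolved multicast cache entries, destroy the ones whose deadline has passed and re-arm a timer for the soonest remaining deadline. A minimal sketch of that scan pattern follows; struct unres_entry, expire_scan() and the array-based "queue" are invented for illustration, and the wrap-safe comparison is modeled on the kernel's time_after() macro.

#include <stdio.h>

struct unres_entry {
        unsigned long expires;  /* absolute "jiffies" deadline */
        int state;              /* 0 = pending, -1 = expired and destroyed */
};

/* Wrap-safe "is a later than b", in the spirit of the kernel macro. */
#define time_after(a, b)  ((long)((b) - (a)) < 0)

/*
 * Scan the pending entries once: drop everything whose deadline passed,
 * and report how long until the next deadline so a one-shot timer can be
 * re-armed -- the same shape as the ipmr/ip6mr expiry loops listed above.
 */
static unsigned long expire_scan(struct unres_entry *q, int n, unsigned long now)
{
        unsigned long next = ~0UL;
        int i;

        for (i = 0; i < n; i++) {
                if (time_after(q[i].expires, now)) {
                        unsigned long interval = q[i].expires - now;

                        if (interval < next)
                                next = interval;
                        continue;
                }
                q[i].state = -1;        /* "destroy" the expired entry */
        }
        return next;
}

int main(void)
{
        struct unres_entry q[3] = { { 100, 0 }, { 250, 0 }, { 40, 0 } };
        unsigned long next = expire_scan(q, 3, 120);

        printf("re-arm in %lu ticks; states: %d %d %d\n",
               next, q[0].state, q[1].state, q[2].state);
        return 0;
}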
c 1496 net/ipv6/ip6mr.c struct mr_mfc *c, *tmp; c 1514 net/ipv6/ip6mr.c list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) { c 1515 net/ipv6/ip6mr.c if (((c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC_STATIC)) || c 1516 net/ipv6/ip6mr.c (!(c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC))) c 1518 net/ipv6/ip6mr.c rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params); c 1519 net/ipv6/ip6mr.c list_del_rcu(&c->list); c 1522 net/ipv6/ip6mr.c (struct mfc6_cache *)c, mrt->id); c 1523 net/ipv6/ip6mr.c mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE); c 1524 net/ipv6/ip6mr.c mr_cache_put(c); c 1531 net/ipv6/ip6mr.c list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) { c 1532 net/ipv6/ip6mr.c list_del(&c->list); c 1533 net/ipv6/ip6mr.c mr6_netlink_event(mrt, (struct mfc6_cache *)c, c 1535 net/ipv6/ip6mr.c ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c); c 1854 net/ipv6/ip6mr.c struct mfc6_cache *c; c 1889 net/ipv6/ip6mr.c c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr); c 1890 net/ipv6/ip6mr.c if (c) { c 1891 net/ipv6/ip6mr.c sr.pktcnt = c->_c.mfc_un.res.pkt; c 1892 net/ipv6/ip6mr.c sr.bytecnt = c->_c.mfc_un.res.bytes; c 1893 net/ipv6/ip6mr.c sr.wrong_if = c->_c.mfc_un.res.wrong_if; c 1929 net/ipv6/ip6mr.c struct mfc6_cache *c; c 1964 net/ipv6/ip6mr.c c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr); c 1965 net/ipv6/ip6mr.c if (c) { c 1966 net/ipv6/ip6mr.c sr.pktcnt = c->_c.mfc_un.res.pkt; c 1967 net/ipv6/ip6mr.c sr.bytecnt = c->_c.mfc_un.res.bytes; c 1968 net/ipv6/ip6mr.c sr.wrong_if = c->_c.mfc_un.res.wrong_if; c 2083 net/ipv6/ip6mr.c struct mfc6_cache *c) c 2089 net/ipv6/ip6mr.c vif = c->_c.mfc_parent; c 2090 net/ipv6/ip6mr.c c->_c.mfc_un.res.pkt++; c 2091 net/ipv6/ip6mr.c c->_c.mfc_un.res.bytes += skb->len; c 2092 net/ipv6/ip6mr.c c->_c.mfc_un.res.lastuse = jiffies; c 2094 net/ipv6/ip6mr.c if (ipv6_addr_any(&c->mf6c_origin) && true_vifi >= 0) { c 2114 net/ipv6/ip6mr.c c->_c.mfc_un.res.wrong_if++; c 2123 net/ipv6/ip6mr.c c->_c.mfc_un.res.ttls[true_vifi] < 255) && c 2125 net/ipv6/ip6mr.c c->_c.mfc_un.res.last_assert + c 2127 net/ipv6/ip6mr.c c->_c.mfc_un.res.last_assert = jiffies; c 2140 net/ipv6/ip6mr.c if (ipv6_addr_any(&c->mf6c_origin) && c 2141 net/ipv6/ip6mr.c ipv6_addr_any(&c->mf6c_mcastgrp)) { c 2143 net/ipv6/ip6mr.c true_vifi != c->_c.mfc_parent && c 2145 net/ipv6/ip6mr.c c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) { c 2150 net/ipv6/ip6mr.c psend = c->_c.mfc_parent; c 2155 net/ipv6/ip6mr.c for (ct = c->_c.mfc_un.res.maxvif - 1; c 2156 net/ipv6/ip6mr.c ct >= c->_c.mfc_un.res.minvif; ct--) { c 2158 net/ipv6/ip6mr.c if ((!ipv6_addr_any(&c->mf6c_origin) || ct != true_vifi) && c 2159 net/ipv6/ip6mr.c ipv6_hdr(skb)->hop_limit > c->_c.mfc_un.res.ttls[ct]) { c 2321 net/ipv6/ip6mr.c u32 portid, u32 seq, struct mfc6_cache *c, int cmd, c 2342 net/ipv6/ip6mr.c if (c->_c.mfc_flags & MFC_STATIC) c 2348 net/ipv6/ip6mr.c if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) || c 2349 net/ipv6/ip6mr.c nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp)) c 2351 net/ipv6/ip6mr.c err = mr_fill_mroute(mrt, skb, &c->_c, rtm); c 2365 net/ipv6/ip6mr.c u32 portid, u32 seq, struct mr_mfc *c, c 2368 net/ipv6/ip6mr.c return ip6mr_fill_mroute(mrt, skb, portid, seq, (struct mfc6_cache *)c, c 29 net/ipv6/proc.c #define MAX4(a, b, c, d) \ c 30 net/ipv6/proc.c max_t(u32, max_t(u32, a, b), max_t(u32, c, d)) c 711 net/ipv6/raw.c char c[4]; c 720 net/ipv6/raw.c err = memcpy_from_msg(rfv->c, rfv->msg, rfv->hlen); c 722 net/ipv6/raw.c 
fl6->fl6_icmp_type = rfv->c[0]; c 723 net/ipv6/raw.c fl6->fl6_icmp_code = rfv->c[1]; c 728 net/ipv6/raw.c err = memcpy_from_msg(rfv->c, rfv->msg, rfv->hlen); c 730 net/ipv6/raw.c fl6->fl6_mh_type = rfv->c[2]; c 744 net/ipv6/raw.c memcpy(to, rfv->c + offset, copy); c 748 net/ipv6/raw.c csum_partial_copy_nocheck(rfv->c + offset, c 299 net/ipv6/sit.c unsigned int cmax, c = 0, ca, len; c 332 net/ipv6/sit.c c = 0; c 334 net/ipv6/sit.c if (c >= cmax) c 338 net/ipv6/sit.c kp[c].addr = prl->addr; c 339 net/ipv6/sit.c kp[c].flags = prl->flags; c 340 net/ipv6/sit.c c++; c 347 net/ipv6/sit.c len = sizeof(*kp) * c; c 42 net/ipv6/syncookies.c __be16 sport, __be16 dport, u32 count, int c) c 60 net/ipv6/syncookies.c &syncookie6_secret[c]); c 1477 net/key/af_key.c static int key_notify_sa(struct xfrm_state *x, const struct km_event *c) c 1489 net/key/af_key.c hdr->sadb_msg_type = event2keytype(c->event); c 1493 net/key/af_key.c hdr->sadb_msg_seq = c->seq; c 1494 net/key/af_key.c hdr->sadb_msg_pid = c->portid; c 1506 net/key/af_key.c struct km_event c; c 1527 net/key/af_key.c c.event = XFRM_MSG_NEWSA; c 1529 net/key/af_key.c c.event = XFRM_MSG_UPDSA; c 1530 net/key/af_key.c c.seq = hdr->sadb_msg_seq; c 1531 net/key/af_key.c c.portid = hdr->sadb_msg_pid; c 1532 net/key/af_key.c km_state_notify(x, &c); c 1542 net/key/af_key.c struct km_event c; c 1567 net/key/af_key.c c.seq = hdr->sadb_msg_seq; c 1568 net/key/af_key.c c.portid = hdr->sadb_msg_pid; c 1569 net/key/af_key.c c.event = XFRM_MSG_DELSA; c 1570 net/key/af_key.c km_state_notify(x, &c); c 1736 net/key/af_key.c static int key_notify_sa_flush(const struct km_event *c) c 1745 net/key/af_key.c hdr->sadb_msg_satype = pfkey_proto2satype(c->data.proto); c 1747 net/key/af_key.c hdr->sadb_msg_seq = c->seq; c 1748 net/key/af_key.c hdr->sadb_msg_pid = c->portid; c 1754 net/key/af_key.c pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); c 1763 net/key/af_key.c struct km_event c; c 1778 net/key/af_key.c c.data.proto = proto; c 1779 net/key/af_key.c c.seq = hdr->sadb_msg_seq; c 1780 net/key/af_key.c c.portid = hdr->sadb_msg_pid; c 1781 net/key/af_key.c c.event = XFRM_MSG_FLUSHSA; c 1782 net/key/af_key.c c.net = net; c 1783 net/key/af_key.c km_state_notify(NULL, &c); c 2205 net/key/af_key.c static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c) c 2224 net/key/af_key.c if (c->data.byid && c->event == XFRM_MSG_DELPOLICY) c 2227 net/key/af_key.c out_hdr->sadb_msg_type = event2poltype(c->event); c 2229 net/key/af_key.c out_hdr->sadb_msg_seq = c->seq; c 2230 net/key/af_key.c out_hdr->sadb_msg_pid = c->portid; c 2244 net/key/af_key.c struct km_event c; c 2334 net/key/af_key.c c.event = XFRM_MSG_UPDPOLICY; c 2336 net/key/af_key.c c.event = XFRM_MSG_NEWPOLICY; c 2338 net/key/af_key.c c.seq = hdr->sadb_msg_seq; c 2339 net/key/af_key.c c.portid = hdr->sadb_msg_pid; c 2341 net/key/af_key.c km_policy_notify(xp, pol->sadb_x_policy_dir-1, &c); c 2359 net/key/af_key.c struct km_event c; c 2415 net/key/af_key.c c.seq = hdr->sadb_msg_seq; c 2416 net/key/af_key.c c.portid = hdr->sadb_msg_pid; c 2417 net/key/af_key.c c.data.byid = 0; c 2418 net/key/af_key.c c.event = XFRM_MSG_DELPOLICY; c 2419 net/key/af_key.c km_policy_notify(xp, pol->sadb_x_policy_dir-1, &c); c 2644 net/key/af_key.c struct km_event c; c 2664 net/key/af_key.c c.seq = hdr->sadb_msg_seq; c 2665 net/key/af_key.c c.portid = hdr->sadb_msg_pid; c 2666 net/key/af_key.c c.data.byid = 1; c 2667 net/key/af_key.c c.event = XFRM_MSG_DELPOLICY; c 2668 net/key/af_key.c 
km_policy_notify(xp, dir, &c); c 2747 net/key/af_key.c static int key_notify_policy_flush(const struct km_event *c) c 2757 net/key/af_key.c hdr->sadb_msg_seq = c->seq; c 2758 net/key/af_key.c hdr->sadb_msg_pid = c->portid; c 2764 net/key/af_key.c pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); c 2772 net/key/af_key.c struct km_event c; c 2783 net/key/af_key.c c.data.type = XFRM_POLICY_TYPE_MAIN; c 2784 net/key/af_key.c c.event = XFRM_MSG_FLUSHPOLICY; c 2785 net/key/af_key.c c.portid = hdr->sadb_msg_pid; c 2786 net/key/af_key.c c.seq = hdr->sadb_msg_seq; c 2787 net/key/af_key.c c.net = net; c 2788 net/key/af_key.c km_policy_notify(NULL, 0, &c); c 2954 net/key/af_key.c struct sadb_comb *c; c 2955 net/key/af_key.c c = skb_put_zero(skb, sizeof(struct sadb_comb)); c 2957 net/key/af_key.c c->sadb_comb_auth = aalg->desc.sadb_alg_id; c 2958 net/key/af_key.c c->sadb_comb_auth_minbits = aalg->desc.sadb_alg_minbits; c 2959 net/key/af_key.c c->sadb_comb_auth_maxbits = aalg->desc.sadb_alg_maxbits; c 2960 net/key/af_key.c c->sadb_comb_hard_addtime = 24*60*60; c 2961 net/key/af_key.c c->sadb_comb_soft_addtime = 20*60*60; c 2962 net/key/af_key.c c->sadb_comb_hard_usetime = 8*60*60; c 2963 net/key/af_key.c c->sadb_comb_soft_usetime = 7*60*60; c 2991 net/key/af_key.c struct sadb_comb *c; c 2999 net/key/af_key.c c = skb_put(skb, sizeof(struct sadb_comb)); c 3000 net/key/af_key.c memset(c, 0, sizeof(*c)); c 3002 net/key/af_key.c c->sadb_comb_auth = aalg->desc.sadb_alg_id; c 3003 net/key/af_key.c c->sadb_comb_auth_minbits = aalg->desc.sadb_alg_minbits; c 3004 net/key/af_key.c c->sadb_comb_auth_maxbits = aalg->desc.sadb_alg_maxbits; c 3005 net/key/af_key.c c->sadb_comb_encrypt = ealg->desc.sadb_alg_id; c 3006 net/key/af_key.c c->sadb_comb_encrypt_minbits = ealg->desc.sadb_alg_minbits; c 3007 net/key/af_key.c c->sadb_comb_encrypt_maxbits = ealg->desc.sadb_alg_maxbits; c 3008 net/key/af_key.c c->sadb_comb_hard_addtime = 24*60*60; c 3009 net/key/af_key.c c->sadb_comb_soft_addtime = 20*60*60; c 3010 net/key/af_key.c c->sadb_comb_hard_usetime = 8*60*60; c 3011 net/key/af_key.c c->sadb_comb_soft_usetime = 7*60*60; c 3016 net/key/af_key.c static int key_notify_policy_expire(struct xfrm_policy *xp, const struct km_event *c) c 3021 net/key/af_key.c static int key_notify_sa_expire(struct xfrm_state *x, const struct km_event *c) c 3028 net/key/af_key.c hard = c->data.hard; c 3052 net/key/af_key.c static int pfkey_send_notify(struct xfrm_state *x, const struct km_event *c) c 3054 net/key/af_key.c struct net *net = x ? 
xs_net(x) : c->net; c 3060 net/key/af_key.c switch (c->event) { c 3062 net/key/af_key.c return key_notify_sa_expire(x, c); c 3066 net/key/af_key.c return key_notify_sa(x, c); c 3068 net/key/af_key.c return key_notify_sa_flush(c); c 3072 net/key/af_key.c pr_err("pfkey: Unknown SA event %d\n", c->event); c 3079 net/key/af_key.c static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c) c 3084 net/key/af_key.c switch (c->event) { c 3086 net/key/af_key.c return key_notify_policy_expire(xp, c); c 3090 net/key/af_key.c return key_notify_policy(xp, dir, c); c 3092 net/key/af_key.c if (c->data.type != XFRM_POLICY_TYPE_MAIN) c 3094 net/key/af_key.c return key_notify_policy_flush(c); c 3096 net/key/af_key.c pr_err("pfkey: Unknown policy event %d\n", c->event); c 3114 net/key/af_key.c static bool pfkey_is_alive(const struct km_event *c) c 3116 net/key/af_key.c struct netns_pfkey *net_pfkey = net_generic(c->net, pfkey_net_id); c 1419 net/llc/llc_c_ac.c u8 llc_circular_between(u8 a, u8 b, u8 c) c 1422 net/llc/llc_c_ac.c c = c - a; c 1423 net/llc/llc_c_ac.c return b <= c; c 75 net/mac80211/debugfs_key.c u32 c = key->conf.cipher; c 78 net/mac80211/debugfs_key.c c >> 24, (c >> 16) & 0xff, (c >> 8) & 0xff, c & 0xff); c 1344 net/mac80211/ieee80211_i.h #define I802_DEBUG_INC(c) (c)++ c 1346 net/mac80211/ieee80211_i.h #define I802_DEBUG_INC(c) do { } while (0) c 2171 net/mac80211/ieee80211_i.h u32 ieee80211_chandef_downgrade(struct cfg80211_chan_def *c); c 331 net/mac80211/main.c int c = 0; c 357 net/mac80211/main.c if (c < IEEE80211_BSS_ARP_ADDR_LIST_LEN) c 358 net/mac80211/main.c bss_conf->arp_addr_list[c] = ifa->ifa_address; c 360 net/mac80211/main.c c++; c 363 net/mac80211/main.c bss_conf->arp_addr_cnt = c; c 1007 net/mac80211/main.c const struct ieee80211_iface_combination *c; c 1010 net/mac80211/main.c c = &hw->wiphy->iface_combinations[i]; c 1012 net/mac80211/main.c for (j = 0; j < c->n_limits; j++) c 1013 net/mac80211/main.c if ((c->limits[j].types & BIT(NL80211_IFTYPE_ADHOC)) && c 1014 net/mac80211/main.c c->limits[j].max > 1) c 43 net/mac80211/trace.h #define CHANDEF_ASSIGN(c) \ c 44 net/mac80211/trace.h __entry->control_freq = (c) ? ((c)->chan ? (c)->chan->center_freq : 0) : 0; \ c 45 net/mac80211/trace.h __entry->chan_width = (c) ? (c)->width : 0; \ c 46 net/mac80211/trace.h __entry->center_freq1 = (c) ? (c)->center_freq1 : 0; \ c 47 net/mac80211/trace.h __entry->center_freq2 = (c) ? (c)->center_freq2 : 0; c 58 net/mac80211/trace.h #define MIN_CHANDEF_ASSIGN(c) \ c 59 net/mac80211/trace.h __entry->min_control_freq = (c)->chan ? 
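Aside: llc_circular_between() in the net/llc/llc_c_ac.c entries above decides whether sequence number b lies in the wrap-around window [a, c]. The trick, rebasing both ends so that a becomes zero and letting one unsigned compare decide, is sketched below; circular_between() and the example window are illustrative.

#include <stdio.h>
#include <stdint.h>

/* Wrap-around "is b within [a, c]" on 8-bit sequence numbers:
 * shift the window so a maps to 0, then compare the offsets. */
static int circular_between(uint8_t a, uint8_t b, uint8_t c)
{
        b -= a;
        c -= a;
        return b <= c;
}

int main(void)
{
        /* window [250, 5] wraps past 255 and still contains 2 but not 10 */
        printf("%d %d\n", circular_between(250, 2, 5),
                          circular_between(250, 10, 5));
        return 0;
}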
(c)->chan->center_freq : 0; \ c 60 net/mac80211/trace.h __entry->min_chan_width = (c)->width; \ c 61 net/mac80211/trace.h __entry->min_center_freq1 = (c)->center_freq1; \ c 62 net/mac80211/trace.h __entry->min_center_freq2 = (c)->center_freq2; c 3383 net/mac80211/util.c u32 ieee80211_chandef_downgrade(struct cfg80211_chan_def *c) c 3388 net/mac80211/util.c switch (c->width) { c 3390 net/mac80211/util.c c->width = NL80211_CHAN_WIDTH_20_NOHT; c 3394 net/mac80211/util.c c->width = NL80211_CHAN_WIDTH_20; c 3395 net/mac80211/util.c c->center_freq1 = c->chan->center_freq; c 3400 net/mac80211/util.c tmp = (30 + c->chan->center_freq - c->center_freq1)/20; c 3404 net/mac80211/util.c c->center_freq1 = c->center_freq1 - 20 + 40 * tmp; c 3405 net/mac80211/util.c c->width = NL80211_CHAN_WIDTH_40; c 3409 net/mac80211/util.c c->center_freq2 = 0; c 3410 net/mac80211/util.c c->width = NL80211_CHAN_WIDTH_80; c 3416 net/mac80211/util.c tmp = (70 + c->chan->center_freq - c->center_freq1)/20; c 3419 net/mac80211/util.c c->center_freq1 = c->center_freq1 - 40 + 80 * tmp; c 3420 net/mac80211/util.c c->width = NL80211_CHAN_WIDTH_80; c 3427 net/mac80211/util.c c->width = NL80211_CHAN_WIDTH_20_NOHT; c 3438 net/mac80211/util.c WARN_ON_ONCE(!cfg80211_chandef_valid(c)); c 3927 net/mac80211/util.c ieee80211_iter_max_chans(const struct ieee80211_iface_combination *c, c 3933 net/mac80211/util.c c->num_different_channels); c 89 net/mac80211/vht.c u8 m, n, c; c 93 net/mac80211/vht.c c = (rxmcs_cap >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED; c 95 net/mac80211/vht.c if (m && ((c != IEEE80211_VHT_MCS_NOT_SUPPORTED && n < c) || c 103 net/mac80211/vht.c c = (txmcs_cap >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED; c 105 net/mac80211/vht.c if (m && ((c != IEEE80211_VHT_MCS_NOT_SUPPORTED && n < c) || c 182 net/ncsi/internal.h #define NCSI_PACKAGE_INDEX(c) (((c) >> NCSI_PACKAGE_SHIFT) & 0x7) c 184 net/ncsi/internal.h #define NCSI_CHANNEL_INDEX(c) ((c) & ((1 << NCSI_PACKAGE_SHIFT) - 1)) c 185 net/ncsi/internal.h #define NCSI_TO_CHANNEL(p, c) (((p) << NCSI_PACKAGE_SHIFT) | (c)) c 330 net/ncsi/ncsi-manage.c struct ncsi_channel *c; c 333 net/ncsi/ncsi-manage.c c = p ? 
ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL; c 338 net/ncsi/ncsi-manage.c *nc = c; c 1510 net/netfilter/ipset/ip_set_core.c struct netlink_dump_control c = { c 1515 net/netfilter/ipset/ip_set_core.c return netlink_dump_start(ctnl, skb, nlh, &c); c 186 net/netfilter/ipset/ip_set_hash_gen.h #define CIDR_POS(c) ((c) - 1) c 190 net/netfilter/ipset/ip_set_hash_gen.h #define CIDR_POS(c) ((c) - 2) c 142 net/netfilter/ipset/pfxlen.c #define E(a, b, c, d) \ c 145 net/netfilter/ipset/pfxlen.c htonl(c), htonl(d), \ c 157 net/netfilter/ipset/pfxlen.c #define E(a, b, c, d) \ c 159 net/netfilter/ipset/pfxlen.c (__force __be32)c, (__force __be32)d, \ c 804 net/netfilter/ipvs/ip_vs_ctl.c #define IP_VS_SHOW_STATS_COUNTER(c) dst->c = src->kstats.c - src->kstats0.c c 841 net/netfilter/ipvs/ip_vs_ctl.c #define IP_VS_ZERO_STATS_COUNTER(c) stats->kstats0.c = stats->kstats.c c 3421 net/netfilter/ipvs/ip_vs_ctl.c struct ipvs_sync_daemon_cfg *c) c 3430 net/netfilter/ipvs/ip_vs_ctl.c nla_put_string(skb, IPVS_DAEMON_ATTR_MCAST_IFN, c->mcast_ifn) || c 3431 net/netfilter/ipvs/ip_vs_ctl.c nla_put_u32(skb, IPVS_DAEMON_ATTR_SYNC_ID, c->syncid) || c 3432 net/netfilter/ipvs/ip_vs_ctl.c nla_put_u16(skb, IPVS_DAEMON_ATTR_SYNC_MAXLEN, c->sync_maxlen) || c 3433 net/netfilter/ipvs/ip_vs_ctl.c nla_put_u16(skb, IPVS_DAEMON_ATTR_MCAST_PORT, c->mcast_port) || c 3434 net/netfilter/ipvs/ip_vs_ctl.c nla_put_u8(skb, IPVS_DAEMON_ATTR_MCAST_TTL, c->mcast_ttl)) c 3437 net/netfilter/ipvs/ip_vs_ctl.c if (c->mcast_af == AF_INET6) { c 3439 net/netfilter/ipvs/ip_vs_ctl.c &c->mcast_group.in6)) c 3443 net/netfilter/ipvs/ip_vs_ctl.c if (c->mcast_af == AF_INET && c 3445 net/netfilter/ipvs/ip_vs_ctl.c c->mcast_group.ip)) c 3457 net/netfilter/ipvs/ip_vs_ctl.c struct ipvs_sync_daemon_cfg *c, c 3467 net/netfilter/ipvs/ip_vs_ctl.c if (ip_vs_genl_fill_daemon(skb, state, c)) c 3509 net/netfilter/ipvs/ip_vs_ctl.c struct ipvs_sync_daemon_cfg c; c 3513 net/netfilter/ipvs/ip_vs_ctl.c memset(&c, 0, sizeof(c)); c 3518 net/netfilter/ipvs/ip_vs_ctl.c strlcpy(c.mcast_ifn, nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]), c 3519 net/netfilter/ipvs/ip_vs_ctl.c sizeof(c.mcast_ifn)); c 3520 net/netfilter/ipvs/ip_vs_ctl.c c.syncid = nla_get_u32(attrs[IPVS_DAEMON_ATTR_SYNC_ID]); c 3524 net/netfilter/ipvs/ip_vs_ctl.c c.sync_maxlen = nla_get_u16(a); c 3528 net/netfilter/ipvs/ip_vs_ctl.c c.mcast_af = AF_INET; c 3529 net/netfilter/ipvs/ip_vs_ctl.c c.mcast_group.ip = nla_get_in_addr(a); c 3530 net/netfilter/ipvs/ip_vs_ctl.c if (!ipv4_is_multicast(c.mcast_group.ip)) c 3538 net/netfilter/ipvs/ip_vs_ctl.c c.mcast_af = AF_INET6; c 3539 net/netfilter/ipvs/ip_vs_ctl.c c.mcast_group.in6 = nla_get_in6_addr(a); c 3540 net/netfilter/ipvs/ip_vs_ctl.c addr_type = ipv6_addr_type(&c.mcast_group.in6); c 3551 net/netfilter/ipvs/ip_vs_ctl.c c.mcast_port = nla_get_u16(a); c 3555 net/netfilter/ipvs/ip_vs_ctl.c c.mcast_ttl = nla_get_u8(a); c 3563 net/netfilter/ipvs/ip_vs_ctl.c ret = start_sync_thread(ipvs, &c, c 102 net/netfilter/ipvs/ip_vs_ftp.c char *s, c; c 145 net/netfilter/ipvs/ip_vs_ftp.c c = *data; c 146 net/netfilter/ipvs/ip_vs_ftp.c if (isdigit(c)) { c 147 net/netfilter/ipvs/ip_vs_ftp.c p[i] = p[i]*10 + c - '0'; c 148 net/netfilter/ipvs/ip_vs_ftp.c } else if (c == ',' && i < 5) { c 162 net/netfilter/ipvs/ip_vs_mh.c int n, c, dt_count; c 198 net/netfilter/ipvs/ip_vs_mh.c c = ds->perm; c 199 net/netfilter/ipvs/ip_vs_mh.c while (test_bit(c, table)) { c 204 net/netfilter/ipvs/ip_vs_mh.c c = ds->perm; c 207 net/netfilter/ipvs/ip_vs_mh.c __set_bit(c, table); c 209 
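Aside: the net/netfilter/ipvs/ip_vs_ftp.c entries above accumulate digits and commas to pick apart the FTP "a,b,c,d,p1,p2" address encoding. A standalone sketch of that parse follows; parse_sextuple(), its bounds checks and the main() driver are illustrative, not the kernel helper.

#include <stdio.h>
#include <ctype.h>

/*
 * Parse the "a,b,c,d,p1,p2" sextuple used by FTP PORT/PASV, the format
 * the ip_vs_ftp entries above are picking apart.  Returns 0 on success.
 */
static int parse_sextuple(const char *s, unsigned int p[6])
{
        int i;

        for (i = 0; i < 6; i++)
                p[i] = 0;
        for (i = 0; *s; s++) {
                char c = *s;

                if (isdigit((unsigned char)c)) {
                        p[i] = p[i] * 10 + c - '0';
                        if (p[i] > 255)
                                return -1;
                } else if (c == ',' && i < 5) {
                        i++;
                } else {
                        return -1;
                }
        }
        return i == 5 ? 0 : -1;
}

int main(void)
{
        unsigned int p[6];

        if (!parse_sextuple("192,168,0,10,7,233", p))
                printf("%u.%u.%u.%u port %u\n",
                       p[0], p[1], p[2], p[3], p[4] * 256 + p[5]);
        return 0;
}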
net/netfilter/ipvs/ip_vs_mh.c dest = rcu_dereference_protected(s->lookup[c].dest, 1); c 215 net/netfilter/ipvs/ip_vs_mh.c RCU_INIT_POINTER(s->lookup[c].dest, new_dest); c 1451 net/netfilter/ipvs/ip_vs_sync.c struct ipvs_sync_daemon_cfg *c, int id) c 1453 net/netfilter/ipvs/ip_vs_sync.c if (AF_INET6 == c->mcast_af) { c 1456 net/netfilter/ipvs/ip_vs_sync.c .sin6_port = htons(c->mcast_port + id), c 1458 net/netfilter/ipvs/ip_vs_sync.c sa->in6.sin6_addr = c->mcast_group.in6; c 1463 net/netfilter/ipvs/ip_vs_sync.c .sin_port = htons(c->mcast_port + id), c 1465 net/netfilter/ipvs/ip_vs_sync.c sa->in.sin_addr = c->mcast_group.in; c 1749 net/netfilter/ipvs/ip_vs_sync.c int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, c 1787 net/netfilter/ipvs/ip_vs_sync.c if (c->mcast_af == AF_UNSPEC) { c 1788 net/netfilter/ipvs/ip_vs_sync.c c->mcast_af = AF_INET; c 1789 net/netfilter/ipvs/ip_vs_sync.c c->mcast_group.ip = cpu_to_be32(IP_VS_SYNC_GROUP); c 1791 net/netfilter/ipvs/ip_vs_sync.c if (!c->mcast_port) c 1792 net/netfilter/ipvs/ip_vs_sync.c c->mcast_port = IP_VS_SYNC_PORT; c 1793 net/netfilter/ipvs/ip_vs_sync.c if (!c->mcast_ttl) c 1794 net/netfilter/ipvs/ip_vs_sync.c c->mcast_ttl = 1; c 1796 net/netfilter/ipvs/ip_vs_sync.c dev = __dev_get_by_name(ipvs->net, c->mcast_ifn); c 1798 net/netfilter/ipvs/ip_vs_sync.c pr_err("Unknown mcast interface: %s\n", c->mcast_ifn); c 1802 net/netfilter/ipvs/ip_vs_sync.c hlen = (AF_INET6 == c->mcast_af) ? c 1809 net/netfilter/ipvs/ip_vs_sync.c if (c->sync_maxlen) c 1810 net/netfilter/ipvs/ip_vs_sync.c c->sync_maxlen = clamp_t(unsigned int, c 1811 net/netfilter/ipvs/ip_vs_sync.c c->sync_maxlen, min_mtu, c 1814 net/netfilter/ipvs/ip_vs_sync.c c->sync_maxlen = mtu - hlen; c 1821 net/netfilter/ipvs/ip_vs_sync.c ipvs->mcfg = *c; c 1829 net/netfilter/ipvs/ip_vs_sync.c ipvs->bcfg = *c; c 465 net/netfilter/nf_conntrack_core.c unsigned long a, b, c, d; c 471 net/netfilter/nf_conntrack_core.c c = (unsigned long)nf_ct_net(ct); c 476 net/netfilter/nf_conntrack_core.c return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed); c 478 net/netfilter/nf_conntrack_core.c return siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &ct_id_seed); c 1328 net/netfilter/nf_conntrack_netlink.c struct netlink_dump_control c = { c 1335 net/netfilter/nf_conntrack_netlink.c return netlink_dump_start(ctnl, skb, nlh, &c); c 1474 net/netfilter/nf_conntrack_netlink.c struct netlink_dump_control c = { c 1478 net/netfilter/nf_conntrack_netlink.c return netlink_dump_start(ctnl, skb, nlh, &c); c 1497 net/netfilter/nf_conntrack_netlink.c struct netlink_dump_control c = { c 1501 net/netfilter/nf_conntrack_netlink.c return netlink_dump_start(ctnl, skb, nlh, &c); c 2273 net/netfilter/nf_conntrack_netlink.c struct netlink_dump_control c = { c 2276 net/netfilter/nf_conntrack_netlink.c return netlink_dump_start(ctnl, skb, nlh, &c); c 2711 net/netfilter/nf_conntrack_netlink.c unsigned long a, b, c, d; c 2717 net/netfilter/nf_conntrack_netlink.c c = (unsigned long)exp->master; c 2721 net/netfilter/nf_conntrack_netlink.c return (__force __be32)siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &exp_id_seed); c 2723 net/netfilter/nf_conntrack_netlink.c return (__force __be32)siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &exp_id_seed); c 2998 net/netfilter/nf_conntrack_netlink.c struct netlink_dump_control c = { c 3023 net/netfilter/nf_conntrack_netlink.c c.data = ct; c 3025 net/netfilter/nf_conntrack_netlink.c err = netlink_dump_start(ctnl, skb, nlh, &c); c 3049 net/netfilter/nf_conntrack_netlink.c 
struct netlink_dump_control c = { c 3053 net/netfilter/nf_conntrack_netlink.c return netlink_dump_start(ctnl, skb, nlh, &c); c 3495 net/netfilter/nf_conntrack_netlink.c struct netlink_dump_control c = { c 3498 net/netfilter/nf_conntrack_netlink.c return netlink_dump_start(ctnl, skb, nlh, &c); c 89 net/netfilter/nf_conntrack_sip.c static int iswordc(const char c) c 91 net/netfilter/nf_conntrack_sip.c if (isalnum(c) || c == '!' || c == '"' || c == '%' || c 92 net/netfilter/nf_conntrack_sip.c (c >= '(' && c <= '+') || c == ':' || c == '<' || c == '>' || c 93 net/netfilter/nf_conntrack_sip.c c == '?' || (c >= '[' && c <= ']') || c == '_' || c == '`' || c 94 net/netfilter/nf_conntrack_sip.c c == '{' || c == '}' || c == '~' || (c >= '-' && c <= '/') || c 95 net/netfilter/nf_conntrack_sip.c c == '\'') c 511 net/netfilter/nf_conntrack_sip.c const char *c, *limit = dptr + datalen; c 521 net/netfilter/nf_conntrack_sip.c if (!sip_parse_addr(ct, dptr + *matchoff, &c, addr, limit, true)) c 523 net/netfilter/nf_conntrack_sip.c if (*c == ':') { c 524 net/netfilter/nf_conntrack_sip.c c++; c 525 net/netfilter/nf_conntrack_sip.c p = simple_strtoul(c, (char **)&c, 10); c 533 net/netfilter/nf_conntrack_sip.c *dataoff = c - dptr; c 696 net/netfilter/nf_tables_api.c struct netlink_dump_control *c) c 704 net/netfilter/nf_tables_api.c err = netlink_dump_start(nlsk, skb, nlh, c); c 725 net/netfilter/nf_tables_api.c struct netlink_dump_control c = { c 730 net/netfilter/nf_tables_api.c return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); c 1418 net/netfilter/nf_tables_api.c struct netlink_dump_control c = { c 1423 net/netfilter/nf_tables_api.c return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); c 2588 net/netfilter/nf_tables_api.c struct netlink_dump_control c = { c 2596 net/netfilter/nf_tables_api.c return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); c 3501 net/netfilter/nf_tables_api.c struct netlink_dump_control c = { c 3509 net/netfilter/nf_tables_api.c return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); c 4349 net/netfilter/nf_tables_api.c struct netlink_dump_control c = { c 4360 net/netfilter/nf_tables_api.c c.data = &dump_ctx; c 4361 net/netfilter/nf_tables_api.c return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); c 5462 net/netfilter/nf_tables_api.c struct netlink_dump_control c = { c 5470 net/netfilter/nf_tables_api.c return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); c 6143 net/netfilter/nf_tables_api.c struct netlink_dump_control c = { c 6151 net/netfilter/nf_tables_api.c return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); c 277 net/netfilter/nfnetlink_acct.c struct netlink_dump_control c = { c 284 net/netfilter/nfnetlink_acct.c return netlink_dump_start(nfnl, skb, nlh, &c); c 632 net/netfilter/nfnetlink_cthelper.c struct netlink_dump_control c = { c 635 net/netfilter/nfnetlink_cthelper.c return netlink_dump_start(nfnl, skb, nlh, &c); c 252 net/netfilter/nfnetlink_cttimeout.c struct netlink_dump_control c = { c 255 net/netfilter/nfnetlink_cttimeout.c return netlink_dump_start(ctnl, skb, nlh, &c); c 46 net/netfilter/nft_ct.c static u64 nft_ct_get_eval_counter(const struct nf_conn_counter *c, c 51 net/netfilter/nft_ct.c return k == NFT_CT_BYTES ? 
atomic64_read(&c[d].bytes) : c 52 net/netfilter/nft_ct.c atomic64_read(&c[d].packets); c 54 net/netfilter/nft_ct.c return nft_ct_get_eval_counter(c, k, IP_CT_DIR_ORIGINAL) + c 55 net/netfilter/nft_ct.c nft_ct_get_eval_counter(c, k, IP_CT_DIR_REPLY); c 557 net/netfilter/xt_recent.c const char *c = buf; c 572 net/netfilter/xt_recent.c switch (*c) { c 589 net/netfilter/xt_recent.c ++c; c 591 net/netfilter/xt_recent.c if (strnchr(c, size, ':') != NULL) { c 593 net/netfilter/xt_recent.c succ = in6_pton(c, size, (void *)&addr, '\n', NULL); c 596 net/netfilter/xt_recent.c succ = in4_pton(c, size, (void *)&addr, '\n', NULL); c 233 net/netlink/diag.c struct netlink_dump_control c = { c 237 net/netlink/diag.c return netlink_dump_start(net->diag_nlsk, skb, h, &c); c 562 net/netlink/genetlink.c struct netlink_dump_control c = { c 572 net/netlink/genetlink.c rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c); c 576 net/netlink/genetlink.c struct netlink_dump_control c = { c 583 net/netlink/genetlink.c rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c); c 239 net/packet/diag.c struct netlink_dump_control c = { c 242 net/packet/diag.c return netlink_dump_start(net->diag_nlsk, skb, h, &c); c 97 net/rfkill/input.c bool c; c 125 net/rfkill/input.c c = __test_and_clear_bit(i, rfkill_sw_state); c 128 net/rfkill/input.c __rfkill_handle_normal_op(i, c); c 820 net/sched/act_api.c struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL); c 821 net/sched/act_api.c if (!c) c 824 net/sched/act_api.c c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL); c 825 net/sched/act_api.c if (!c->data) { c 826 net/sched/act_api.c kfree(c); c 829 net/sched/act_api.c c->len = nla_len(tb[TCA_ACT_COOKIE]); c 831 net/sched/act_api.c return c; c 39 net/sched/act_connmark.c struct nf_conn *c; c 60 net/sched/act_connmark.c c = nf_ct_get(skb, &ctinfo); c 61 net/sched/act_connmark.c if (c) { c 62 net/sched/act_connmark.c skb->mark = c->mark; c 79 net/sched/act_connmark.c c = nf_ct_tuplehash_to_ctrack(thash); c 82 net/sched/act_connmark.c skb->mark = c->mark; c 83 net/sched/act_connmark.c nf_ct_put(c); c 385 net/sched/act_ct.c struct tcf_ct *c = to_ct(a); c 393 net/sched/act_ct.c p = rcu_dereference_bh(c->params); c 395 net/sched/act_ct.c retval = READ_ONCE(c->tcf_action); c 678 net/sched/act_ct.c struct tcf_ct *c; c 722 net/sched/act_ct.c c = to_ct(*a); c 734 net/sched/act_ct.c spin_lock_bh(&c->tcf_lock); c 736 net/sched/act_ct.c rcu_swap_protected(c->params, params, lockdep_is_held(&c->tcf_lock)); c 737 net/sched/act_ct.c spin_unlock_bh(&c->tcf_lock); c 759 net/sched/act_ct.c struct tcf_ct *c = to_ct(a); c 761 net/sched/act_ct.c params = rcu_dereference_protected(c->params, 1); c 833 net/sched/act_ct.c struct tcf_ct *c = to_ct(a); c 837 net/sched/act_ct.c .index = c->tcf_index, c 838 net/sched/act_ct.c .refcnt = refcount_read(&c->tcf_refcnt) - ref, c 839 net/sched/act_ct.c .bindcnt = atomic_read(&c->tcf_bindcnt) - bind, c 843 net/sched/act_ct.c spin_lock_bh(&c->tcf_lock); c 844 net/sched/act_ct.c p = rcu_dereference_protected(c->params, c 845 net/sched/act_ct.c lockdep_is_held(&c->tcf_lock)); c 846 net/sched/act_ct.c opt.action = c->tcf_action; c 885 net/sched/act_ct.c tcf_tm_dump(&t, &c->tcf_tm); c 888 net/sched/act_ct.c spin_unlock_bh(&c->tcf_lock); c 892 net/sched/act_ct.c spin_unlock_bh(&c->tcf_lock); c 917 net/sched/act_ct.c struct tcf_ct *c = to_ct(a); c 924 net/sched/act_ct.c c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse); c 282 net/sched/act_ipt.c struct tc_cnt c; c 294 net/sched/act_ipt.c c.bindcnt = 
atomic_read(&ipt->tcf_bindcnt) - bind; c 295 net/sched/act_ipt.c c.refcnt = refcount_read(&ipt->tcf_refcnt) - ref; c 301 net/sched/act_ipt.c nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) || c 2165 net/sched/sch_cake.c u32 c; c 2167 net/sched/sch_cake.c for (c = 0; c < CAKE_MAX_TINS; c++) c 2168 net/sched/sch_cake.c cake_clear_tin(sch, c); c 2474 net/sched/sch_cake.c int c, ft; c 2499 net/sched/sch_cake.c for (c = q->tin_cnt; c < CAKE_MAX_TINS; c++) { c 2500 net/sched/sch_cake.c cake_clear_tin(sch, c); c 2501 net/sched/sch_cake.c q->tins[c].cparams.mtu_time = q->tins[ft].cparams.mtu_time; c 933 net/sched/sch_cbq.c struct cbq_class *c; c 935 net/sched/sch_cbq.c hlist_for_each_entry(c, &q->clhash.hash[h], c 937 net/sched/sch_cbq.c if (c->split == split && c->level < level && c 938 net/sched/sch_cbq.c c->defmap & (1<<i)) { c 939 net/sched/sch_cbq.c split->defaults[i] = c; c 940 net/sched/sch_cbq.c level = c->level; c 280 net/sched/sch_htb.c struct htb_class *c; c 282 net/sched/sch_htb.c c = rb_entry(parent, struct htb_class, node[prio]); c 284 net/sched/sch_htb.c if (cl->common.classid > c->common.classid) c 314 net/sched/sch_htb.c struct htb_class *c; c 316 net/sched/sch_htb.c c = rb_entry(parent, struct htb_class, pq_node); c 317 net/sched/sch_htb.c if (cl->pq_key >= c->pq_key) c 808 net/sched/sch_netem.c const struct tc_netem_slot *c = nla_data(attr); c 810 net/sched/sch_netem.c q->slot_config = *c; c 826 net/sched/sch_netem.c const struct tc_netem_corr *c = nla_data(attr); c 828 net/sched/sch_netem.c init_crandom(&q->delay_cor, c->delay_corr); c 829 net/sched/sch_netem.c init_crandom(&q->loss_cor, c->loss_corr); c 830 net/sched/sch_netem.c init_crandom(&q->dup_cor, c->dup_corr); c 145 net/sctp/associola.c asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams; c 146 net/sctp/associola.c asoc->c.sinit_num_ostreams = sp->initmsg.sinit_num_ostreams; c 172 net/sctp/associola.c asoc->c.my_vtag = sctp_generate_tag(ep); c 173 net/sctp/associola.c asoc->c.my_port = ep->base.bind_addr.port; c 175 net/sctp/associola.c asoc->c.initial_tsn = sctp_generate_tsn(ep); c 177 net/sctp/associola.c asoc->next_tsn = asoc->c.initial_tsn; c 194 net/sctp/associola.c asoc->addip_serial = asoc->c.initial_tsn; c 195 net/sctp/associola.c asoc->strreset_outseq = asoc->c.initial_tsn; c 227 net/sctp/associola.c if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams, c 260 net/sctp/associola.c memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list, c 263 net/sctp/associola.c memcpy(asoc->c.auth_chunks, ep->auth_chunk_list, c 267 net/sctp/associola.c p = (struct sctp_paramhdr *)asoc->c.auth_random; c 1107 net/sctp/associola.c asoc->c = new->c; c 226 net/sctp/auth.c (struct sctp_random_param *)asoc->c.auth_random, c 227 net/sctp/auth.c (struct sctp_chunks_param *)asoc->c.auth_chunks, c 228 net/sctp/auth.c (struct sctp_hmac_algo_param *)asoc->c.auth_hmacs, gfp); c 592 net/sctp/auth.c hmacs = (struct sctp_hmac_algo_param *)asoc->c.auth_hmacs; c 694 net/sctp/auth.c (struct sctp_chunks_param *)asoc->c.auth_chunks); c 525 net/sctp/input.c ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) c 528 net/sctp/input.c } else if (vtag != asoc->c.peer_vtag) { c 913 net/sctp/outqueue.c ctx->packet->vtag = ctx->asoc->c.my_vtag; c 235 net/sctp/sm_make_chunk.c init.init_tag = htonl(asoc->c.my_vtag); c 237 net/sctp/sm_make_chunk.c init.num_outbound_streams = htons(asoc->c.sinit_num_ostreams); c 238 net/sctp/sm_make_chunk.c init.num_inbound_streams = htons(asoc->c.sinit_max_instreams); c 239 net/sctp/sm_make_chunk.c 
init.initial_tsn = htonl(asoc->c.initial_tsn); c 283 net/sctp/sm_make_chunk.c chunksize += sizeof(asoc->c.auth_random); c 286 net/sctp/sm_make_chunk.c auth_hmacs = (struct sctp_paramhdr *)asoc->c.auth_hmacs; c 293 net/sctp/sm_make_chunk.c auth_chunks = (struct sctp_paramhdr *)asoc->c.auth_chunks; c 365 net/sctp/sm_make_chunk.c sctp_addto_chunk(retval, sizeof(asoc->c.auth_random), c 366 net/sctp/sm_make_chunk.c asoc->c.auth_random); c 402 net/sctp/sm_make_chunk.c initack.init_tag = htonl(asoc->c.my_vtag); c 404 net/sctp/sm_make_chunk.c initack.num_outbound_streams = htons(asoc->c.sinit_num_ostreams); c 405 net/sctp/sm_make_chunk.c initack.num_inbound_streams = htons(asoc->c.sinit_max_instreams); c 406 net/sctp/sm_make_chunk.c initack.initial_tsn = htonl(asoc->c.initial_tsn); c 449 net/sctp/sm_make_chunk.c auth_random = (struct sctp_paramhdr *)asoc->c.auth_random; c 452 net/sctp/sm_make_chunk.c auth_hmacs = (struct sctp_paramhdr *)asoc->c.auth_hmacs; c 458 net/sctp/sm_make_chunk.c auth_chunks = (struct sctp_paramhdr *)asoc->c.auth_chunks; c 1599 net/sctp/sm_make_chunk.c SCTP_INPUT_CB(skb)->af->from_skb(&asoc->c.peer_addr, skb, 1); c 1650 net/sctp/sm_make_chunk.c cookie->c = asoc->c; c 1652 net/sctp/sm_make_chunk.c cookie->c.raw_addr_list_len = addrs_len; c 1655 net/sctp/sm_make_chunk.c cookie->c.prsctp_capable = asoc->peer.prsctp_capable; c 1658 net/sctp/sm_make_chunk.c cookie->c.adaptation_ind = asoc->peer.adaptation_ind; c 1661 net/sctp/sm_make_chunk.c cookie->c.expiration = ktime_add(asoc->cookie_life, c 1665 net/sctp/sm_make_chunk.c memcpy(&cookie->c.peer_init[0], init_chunk->chunk_hdr, c 1669 net/sctp/sm_make_chunk.c memcpy((__u8 *)&cookie->c.peer_init[0] + c 1681 net/sctp/sm_make_chunk.c crypto_shash_digest(desc, (u8 *)&cookie->c, bodysize, c 1737 net/sctp/sm_make_chunk.c bear_cookie = &cookie->c; c 1833 net/sctp/sm_make_chunk.c memcpy(&retval->c, bear_cookie, sizeof(*bear_cookie)); c 1848 net/sctp/sm_make_chunk.c retval->next_tsn = retval->c.initial_tsn; c 1850 net/sctp/sm_make_chunk.c retval->addip_serial = retval->c.initial_tsn; c 1851 net/sctp/sm_make_chunk.c retval->strreset_outseq = retval->c.initial_tsn; c 1853 net/sctp/sm_make_chunk.c retval->peer.prsctp_capable = retval->c.prsctp_capable; c 1854 net/sctp/sm_make_chunk.c retval->peer.adaptation_ind = retval->c.adaptation_ind; c 2406 net/sctp/sm_make_chunk.c if (asoc->c.sinit_num_ostreams > c 2408 net/sctp/sm_make_chunk.c asoc->c.sinit_num_ostreams = c 2412 net/sctp/sm_make_chunk.c if (asoc->c.sinit_max_instreams > c 2414 net/sctp/sm_make_chunk.c asoc->c.sinit_max_instreams = c 2419 net/sctp/sm_make_chunk.c asoc->c.peer_vtag = asoc->peer.i.init_tag; c 2446 net/sctp/sm_make_chunk.c if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams, c 2447 net/sctp/sm_make_chunk.c asoc->c.sinit_max_instreams, gfp)) c 1035 net/sctp/sm_sideeffect.c asoc->c.sinit_num_ostreams, c 1036 net/sctp/sm_sideeffect.c asoc->c.sinit_max_instreams, c 775 net/sctp/sm_statefuns.c peer_init = &chunk->subh.cookie_hdr->c.peer_init[0]; c 778 net/sctp/sm_statefuns.c &chunk->subh.cookie_hdr->c.peer_addr, c 806 net/sctp/sm_statefuns.c new_asoc->c.sinit_num_ostreams, c 807 net/sctp/sm_statefuns.c new_asoc->c.sinit_max_instreams, c 951 net/sctp/sm_statefuns.c 0, asoc->c.sinit_num_ostreams, c 952 net/sctp/sm_statefuns.c asoc->c.sinit_max_instreams, c 1376 net/sctp/sm_statefuns.c new_asoc->c.my_vtag = asoc->c.my_vtag; c 1377 net/sctp/sm_statefuns.c new_asoc->c.my_ttag = asoc->c.my_vtag; c 1378 net/sctp/sm_statefuns.c new_asoc->c.peer_ttag = 0; c 1382 
net/sctp/sm_statefuns.c new_asoc->c.my_vtag = asoc->c.my_vtag; c 1383 net/sctp/sm_statefuns.c new_asoc->c.my_ttag = asoc->c.my_vtag; c 1384 net/sctp/sm_statefuns.c new_asoc->c.peer_ttag = asoc->c.peer_vtag; c 1391 net/sctp/sm_statefuns.c new_asoc->c.my_ttag = asoc->c.my_vtag; c 1392 net/sctp/sm_statefuns.c new_asoc->c.peer_ttag = asoc->c.peer_vtag; c 1401 net/sctp/sm_statefuns.c new_asoc->c.sinit_num_ostreams = asoc->c.sinit_num_ostreams; c 1402 net/sctp/sm_statefuns.c new_asoc->c.sinit_max_instreams = asoc->c.sinit_max_instreams; c 1403 net/sctp/sm_statefuns.c new_asoc->c.initial_tsn = asoc->c.initial_tsn; c 1419 net/sctp/sm_statefuns.c if ((asoc->c.my_vtag != new_asoc->c.my_vtag) && c 1420 net/sctp/sm_statefuns.c (asoc->c.peer_vtag != new_asoc->c.peer_vtag) && c 1421 net/sctp/sm_statefuns.c (asoc->c.my_vtag == new_asoc->c.my_ttag) && c 1422 net/sctp/sm_statefuns.c (asoc->c.peer_vtag == new_asoc->c.peer_ttag)) c 1426 net/sctp/sm_statefuns.c if ((asoc->c.my_vtag == new_asoc->c.my_vtag) && c 1427 net/sctp/sm_statefuns.c ((asoc->c.peer_vtag != new_asoc->c.peer_vtag) || c 1428 net/sctp/sm_statefuns.c (0 == asoc->c.peer_vtag))) { c 1433 net/sctp/sm_statefuns.c if ((asoc->c.my_vtag == new_asoc->c.my_vtag) && c 1434 net/sctp/sm_statefuns.c (asoc->c.peer_vtag == new_asoc->c.peer_vtag)) c 1438 net/sctp/sm_statefuns.c if ((asoc->c.my_vtag != new_asoc->c.my_vtag) && c 1439 net/sctp/sm_statefuns.c (asoc->c.peer_vtag == new_asoc->c.peer_vtag) && c 1440 net/sctp/sm_statefuns.c (0 == new_asoc->c.my_ttag) && c 1441 net/sctp/sm_statefuns.c (0 == new_asoc->c.peer_ttag)) c 1787 net/sctp/sm_statefuns.c peer_init = &chunk->subh.cookie_hdr->c.peer_init[0]; c 1850 net/sctp/sm_statefuns.c new_asoc->c.sinit_num_ostreams, c 1851 net/sctp/sm_statefuns.c new_asoc->c.sinit_max_instreams, c 1906 net/sctp/sm_statefuns.c peer_init = &chunk->subh.cookie_hdr->c.peer_init[0]; c 2036 net/sctp/sm_statefuns.c asoc->c.sinit_num_ostreams, c 2037 net/sctp/sm_statefuns.c asoc->c.sinit_max_instreams, c 6310 net/sctp/sm_statefuns.c packet->vtag = cookie->c.peer_vtag; c 155 net/sctp/socket.c list_for_each_entry(c, &msg->chunks, frag_list) { \ c 156 net/sctp/socket.c if ((clear && asoc->base.sk == c->skb->sk) || \ c 157 net/sctp/socket.c (!clear && asoc->base.sk != c->skb->sk)) \ c 158 net/sctp/socket.c cb(c); \ c 170 net/sctp/socket.c struct sctp_chunk *chunk, *c; c 1105 net/sctp/socket.c asoc->c.sinit_num_ostreams = outcnt; c 1113 net/sctp/socket.c asoc->c.sinit_max_instreams = init->sinit_max_instreams; c 5233 net/sctp/socket.c info->sctpi_tag = asoc->c.my_vtag; c 5248 net/sctp/socket.c info->sctpi_peer_tag = asoc->c.peer_vtag; c 7052 net/sctp/socket.c ch = (struct sctp_chunks_param *)asoc->c.auth_chunks; c 237 net/smc/smc_diag.c struct netlink_dump_control c = { c 241 net/smc/smc_diag.c return netlink_dump_start(net->diag_nlsk, skb, h, &c); c 318 net/sunrpc/addr.c char *c, buf[RPCBIND_MAXUADDRLEN + sizeof('\0')]; c 328 net/sunrpc/addr.c c = strrchr(buf, '.'); c 329 net/sunrpc/addr.c if (unlikely(c == NULL)) c 331 net/sunrpc/addr.c if (unlikely(kstrtou8(c + 1, 10, &portlo) != 0)) c 334 net/sunrpc/addr.c *c = '\0'; c 335 net/sunrpc/addr.c c = strrchr(buf, '.'); c 336 net/sunrpc/addr.c if (unlikely(c == NULL)) c 338 net/sunrpc/addr.c if (unlikely(kstrtou8(c + 1, 10, &porthi) != 0)) c 343 net/sunrpc/addr.c *c = '\0'; c 480 net/sunrpc/auth_gss/auth_gss.c char *c = strchr(service_name, '@'); c 482 net/sunrpc/auth_gss/auth_gss.c if (!c) c 488 net/sunrpc/auth_gss/auth_gss.c (int)(c - service_name), c 489 net/sunrpc/auth_gss/auth_gss.c 
service_name, c + 1); c 359 net/sunrpc/auth_gss/gss_krb5_mech.c struct xdr_netobj c, keyin, keyout; c 363 net/sunrpc/auth_gss/gss_krb5_mech.c c.len = GSS_KRB5_K5CLENGTH; c 364 net/sunrpc/auth_gss/gss_krb5_mech.c c.data = cdata; c 384 net/sunrpc/auth_gss/gss_krb5_mech.c err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); c 476 net/sunrpc/auth_gss/gss_krb5_mech.c struct xdr_netobj c, keyin, keyout; c 480 net/sunrpc/auth_gss/gss_krb5_mech.c c.len = GSS_KRB5_K5CLENGTH; c 481 net/sunrpc/auth_gss/gss_krb5_mech.c c.data = cdata; c 490 net/sunrpc/auth_gss/gss_krb5_mech.c err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); c 505 net/sunrpc/auth_gss/gss_krb5_mech.c err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); c 520 net/sunrpc/auth_gss/gss_krb5_mech.c err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); c 530 net/sunrpc/auth_gss/gss_krb5_mech.c err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); c 540 net/sunrpc/auth_gss/gss_krb5_mech.c err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); c 550 net/sunrpc/auth_gss/gss_krb5_mech.c err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); c 231 net/sunrpc/auth_gss/gss_rpc_upcall.c char *c; c 237 net/sunrpc/auth_gss/gss_rpc_upcall.c c = strchr(*principal, '@'); c 238 net/sunrpc/auth_gss/gss_rpc_upcall.c if (c) { c 239 net/sunrpc/auth_gss/gss_rpc_upcall.c *c = '\0'; c 242 net/sunrpc/auth_gss/gss_rpc_upcall.c c = strchr(*principal, '/'); c 243 net/sunrpc/auth_gss/gss_rpc_upcall.c if (c) c 244 net/sunrpc/auth_gss/gss_rpc_upcall.c *c = '@'; c 246 net/sunrpc/auth_gss/gss_rpc_upcall.c if (!c) { c 80 net/sunrpc/sysctl.c char tmpbuf[20], c, *s = NULL; c 96 net/sunrpc/sysctl.c while (left && __get_user(c, p) >= 0 && isspace(c)) c 1339 net/sunrpc/xdr.c char *elem = NULL, *c; c 1363 net/sunrpc/xdr.c c = buf->head->iov_base + base; c 1369 net/sunrpc/xdr.c err = desc->xcode(desc, c); c 1372 net/sunrpc/xdr.c c += desc->elem_size; c 1386 net/sunrpc/xdr.c memcpy(c, elem, avail_here); c 1388 net/sunrpc/xdr.c memcpy(elem, c, avail_here); c 1407 net/sunrpc/xdr.c c = kmap(*ppages) + base; c 1427 net/sunrpc/xdr.c memcpy(c, elem + copied, l); c 1432 net/sunrpc/xdr.c memcpy(elem + copied, c, l); c 1442 net/sunrpc/xdr.c c += l; c 1445 net/sunrpc/xdr.c err = desc->xcode(desc, c); c 1448 net/sunrpc/xdr.c c += desc->elem_size; c 1467 net/sunrpc/xdr.c memcpy(c, elem + copied, l); c 1472 net/sunrpc/xdr.c memcpy(elem + copied, c, l); c 1485 net/sunrpc/xdr.c c = kmap(*ppages); c 1497 net/sunrpc/xdr.c c = buf->tail->iov_base + base; c 1502 net/sunrpc/xdr.c memcpy(c, elem + copied, l); c 1504 net/sunrpc/xdr.c memcpy(elem + copied, c, l); c 1510 net/sunrpc/xdr.c c += l; c 1513 net/sunrpc/xdr.c err = desc->xcode(desc, c); c 1516 net/sunrpc/xdr.c c += desc->elem_size; c 85 net/tipc/addr.c u8 c; c 89 net/tipc/addr.c c = id[i]; c 90 net/tipc/addr.c if (c >= '0' && c <= '9') c 92 net/tipc/addr.c if (c >= 'A' && c <= 'Z') c 94 net/tipc/addr.c if (c >= 'a' && c <= 'z') c 96 net/tipc/addr.c if (c == '.') c 98 net/tipc/addr.c if (c == ':') c 100 net/tipc/addr.c if (c == '_') c 102 net/tipc/addr.c if (c == '-') c 104 net/tipc/addr.c if (c == '@') c 106 net/tipc/addr.c if (c != 0) c 86 net/tipc/diag.c struct netlink_dump_control c = { c 91 net/tipc/diag.c netlink_dump_start(net->diag_nlsk, skb, h, &c); c 318 net/unix/diag.c struct netlink_dump_control c = { c 321 net/unix/diag.c return netlink_dump_start(net->diag_nlsk, skb, h, &c); c 150 net/vmw_vsock/diag.c struct netlink_dump_control c = { c 153 
net/vmw_vsock/diag.c return netlink_dump_start(net->diag_nlsk, skb, h, &c); c 220 net/wireless/chan.c static void chandef_primary_freqs(const struct cfg80211_chan_def *c, c 225 net/wireless/chan.c switch (c->width) { c 227 net/wireless/chan.c *pri40 = c->center_freq1; c 232 net/wireless/chan.c *pri80 = c->center_freq1; c 234 net/wireless/chan.c tmp = (30 + c->chan->center_freq - c->center_freq1)/20; c 238 net/wireless/chan.c *pri40 = c->center_freq1 - 20 + 40 * tmp; c 242 net/wireless/chan.c tmp = (70 + c->chan->center_freq - c->center_freq1)/20; c 246 net/wireless/chan.c *pri40 = c->center_freq1 - 60 + 40 * tmp; c 249 net/wireless/chan.c *pri80 = c->center_freq1 - 40 + 80 * tmp; c 256 net/wireless/chan.c static int cfg80211_chandef_get_width(const struct cfg80211_chan_def *c) c 260 net/wireless/chan.c switch (c->width) { c 347 net/wireless/chan.c struct ieee80211_channel *c; c 353 net/wireless/chan.c c = ieee80211_get_channel(wiphy, freq); c 354 net/wireless/chan.c if (!c || !(c->flags & IEEE80211_CHAN_RADAR)) c 357 net/wireless/chan.c c->dfs_state = dfs_state; c 358 net/wireless/chan.c c->dfs_state_entered = jiffies; c 414 net/wireless/chan.c struct ieee80211_channel *c; c 421 net/wireless/chan.c c = ieee80211_get_channel(wiphy, freq); c 422 net/wireless/chan.c if (!c) c 425 net/wireless/chan.c if (c->flags & IEEE80211_CHAN_RADAR) c 493 net/wireless/chan.c struct ieee80211_channel *c; c 507 net/wireless/chan.c c = ieee80211_get_channel(wiphy, freq); c 508 net/wireless/chan.c if (!c) c 511 net/wireless/chan.c if (c->flags & IEEE80211_CHAN_DISABLED) c 514 net/wireless/chan.c if (c->flags & IEEE80211_CHAN_RADAR) { c 515 net/wireless/chan.c if (c->dfs_state == NL80211_DFS_UNAVAILABLE) c 518 net/wireless/chan.c if (c->dfs_state == NL80211_DFS_USABLE) c 682 net/wireless/chan.c struct ieee80211_channel *c; c 698 net/wireless/chan.c c = ieee80211_get_channel(wiphy, freq); c 699 net/wireless/chan.c if (!c) c 702 net/wireless/chan.c if (c->flags & IEEE80211_CHAN_DISABLED) c 705 net/wireless/chan.c if ((c->flags & IEEE80211_CHAN_RADAR) && c 706 net/wireless/chan.c (c->dfs_state != NL80211_DFS_AVAILABLE) && c 707 net/wireless/chan.c !(c->dfs_state == NL80211_DFS_USABLE && dfs_offload)) c 753 net/wireless/chan.c struct ieee80211_channel *c; c 761 net/wireless/chan.c c = ieee80211_get_channel(wiphy, freq); c 762 net/wireless/chan.c if (!c) c 765 net/wireless/chan.c if (c->flags & IEEE80211_CHAN_DISABLED) c 768 net/wireless/chan.c if (!(c->flags & IEEE80211_CHAN_RADAR)) c 771 net/wireless/chan.c if (c->dfs_cac_ms > dfs_cac_ms) c 772 net/wireless/chan.c dfs_cac_ms = c->dfs_cac_ms; c 810 net/wireless/chan.c struct ieee80211_channel *c; c 817 net/wireless/chan.c c = ieee80211_get_channel(wiphy, freq); c 818 net/wireless/chan.c if (!c || c->flags & prohibited_flags) c 548 net/wireless/core.c const struct ieee80211_iface_combination *c; c 555 net/wireless/core.c c = &wiphy->iface_combinations[i]; c 561 net/wireless/core.c if (WARN_ON((c->max_interfaces < 2) && !c->radar_detect_widths)) c 565 net/wireless/core.c if (WARN_ON(!c->num_different_channels)) c 572 net/wireless/core.c if (WARN_ON(c->num_different_channels > c 577 net/wireless/core.c if (WARN_ON(c->radar_detect_widths && c 578 net/wireless/core.c (c->num_different_channels > 1))) c 581 net/wireless/core.c if (WARN_ON(!c->n_limits)) c 584 net/wireless/core.c for (j = 0; j < c->n_limits; j++) { c 585 net/wireless/core.c u16 types = c->limits[j].types; c 592 net/wireless/core.c if (WARN_ON(!c->limits[j].max)) c 601 net/wireless/core.c c->limits[j].max 
> 1)) c 606 net/wireless/core.c c->limits[j].max > 1)) c 620 net/wireless/core.c c->beacon_int_min_gcd)) { c 624 net/wireless/core.c cnt += c->limits[j].max; c 639 net/wireless/core.c if (WARN_ON(cnt < c->max_interfaces)) c 769 net/wireless/mlme.c struct ieee80211_channel *c; c 788 net/wireless/mlme.c c = &sband->channels[i]; c 790 net/wireless/mlme.c if (!(c->flags & IEEE80211_CHAN_RADAR)) c 793 net/wireless/mlme.c if (c->dfs_state != NL80211_DFS_UNAVAILABLE && c 794 net/wireless/mlme.c c->dfs_state != NL80211_DFS_AVAILABLE) c 797 net/wireless/mlme.c if (c->dfs_state == NL80211_DFS_UNAVAILABLE) { c 802 net/wireless/mlme.c cfg80211_any_wiphy_oper_chan(wiphy, c)) c 809 net/wireless/mlme.c timeout = c->dfs_state_entered + c 813 net/wireless/mlme.c c->dfs_state = NL80211_DFS_USABLE; c 814 net/wireless/mlme.c c->dfs_state_entered = jiffies; c 816 net/wireless/mlme.c cfg80211_chandef_create(&chandef, c, c 824 net/wireless/mlme.c c->dfs_state, c 1355 net/wireless/nl80211.c const struct ieee80211_iface_combination *c; c 1358 net/wireless/nl80211.c c = &wiphy->iface_combinations[i]; c 1369 net/wireless/nl80211.c for (j = 0; j < c->n_limits; j++) { c 1376 net/wireless/nl80211.c c->limits[j].max)) c 1379 net/wireless/nl80211.c c->limits[j].types)) c 1386 net/wireless/nl80211.c if (c->beacon_int_infra_match && c 1390 net/wireless/nl80211.c c->num_different_channels) || c 1392 net/wireless/nl80211.c c->max_interfaces)) c 1396 net/wireless/nl80211.c c->radar_detect_widths) || c 1398 net/wireless/nl80211.c c->radar_detect_regions))) c 1400 net/wireless/nl80211.c if (c->beacon_int_min_gcd && c 1402 net/wireless/nl80211.c c->beacon_int_min_gcd)) c 3713 net/wireless/nl80211.c static void get_key_callback(void *c, struct key_params *params) c 3716 net/wireless/nl80211.c struct get_key_cookie *cookie = c; c 2042 net/wireless/reg.c struct ieee80211_channel *c = &sband->channels[i]; c 2044 net/wireless/reg.c if (c->center_freq == (channel->center_freq - 20)) c 2045 net/wireless/reg.c channel_before = c; c 2046 net/wireless/reg.c if (c->center_freq == (channel->center_freq + 20)) c 2047 net/wireless/reg.c channel_after = c; c 1719 net/wireless/util.c void (*iter)(const struct ieee80211_iface_combination *c, c 1760 net/wireless/util.c const struct ieee80211_iface_combination *c; c 1764 net/wireless/util.c c = &wiphy->iface_combinations[i]; c 1766 net/wireless/util.c if (num_interfaces > c->max_interfaces) c 1768 net/wireless/util.c if (params->num_different_channels > c->num_different_channels) c 1771 net/wireless/util.c limits = kmemdup(c->limits, sizeof(limits[0]) * c->n_limits, c 1779 net/wireless/util.c for (j = 0; j < c->n_limits; j++) { c 1790 net/wireless/util.c (c->radar_detect_widths & params->radar_detect)) c 1793 net/wireless/util.c if (params->radar_detect && c->radar_detect_regions && c 1794 net/wireless/util.c !(c->radar_detect_regions & BIT(region))) c 1806 net/wireless/util.c if (c->beacon_int_min_gcd && c 1807 net/wireless/util.c beacon_int_gcd < c->beacon_int_min_gcd) c 1809 net/wireless/util.c if (!c->beacon_int_min_gcd && beacon_int_different) c 1817 net/wireless/util.c (*iter)(c, data); c 1827 net/wireless/util.c cfg80211_iter_sum_ifcombs(const struct ieee80211_iface_combination *c, c 114 net/wireless/wext-compat.c int i, c = 0; c 193 net/wireless/wext-compat.c for (i = 0; i < sband->n_channels && c < IW_MAX_FREQUENCIES; i++) { c 197 net/wireless/wext-compat.c range->freq[c].i = c 200 net/wireless/wext-compat.c range->freq[c].m = chan->center_freq; c 201 net/wireless/wext-compat.c 
range->freq[c].e = 6; c 202 net/wireless/wext-compat.c c++; c 206 net/wireless/wext-compat.c range->num_channels = c; c 207 net/wireless/wext-compat.c range->num_frequency = c; c 163 net/xdp/xsk_diag.c struct netlink_dump_control c = { .dump = xsk_diag_dump }; c 173 net/xdp/xsk_diag.c return netlink_dump_start(net->diag_nlsk, nlskb, hdr, &c); c 40 net/xfrm/xfrm_replay.c struct km_event c; c 75 net/xfrm/xfrm_replay.c c.event = XFRM_MSG_NEWAE; c 76 net/xfrm/xfrm_replay.c c.data.aevent = event; c 77 net/xfrm/xfrm_replay.c km_state_notify(x, &c); c 279 net/xfrm/xfrm_replay.c struct km_event c; c 319 net/xfrm/xfrm_replay.c c.event = XFRM_MSG_NEWAE; c 320 net/xfrm/xfrm_replay.c c.data.aevent = event; c 321 net/xfrm/xfrm_replay.c km_state_notify(x, &c); c 331 net/xfrm/xfrm_replay.c struct km_event c; c 385 net/xfrm/xfrm_replay.c c.event = XFRM_MSG_NEWAE; c 386 net/xfrm/xfrm_replay.c c.data.aevent = event; c 387 net/xfrm/xfrm_replay.c km_state_notify(x, &c); c 179 net/xfrm/xfrm_state.c static bool km_is_alive(const struct km_event *c); c 1054 net/xfrm/xfrm_state.c struct km_event c; c 1104 net/xfrm/xfrm_state.c c.net = net; c 1109 net/xfrm/xfrm_state.c if (!km_is_alive(&c)) { c 1781 net/xfrm/xfrm_state.c int c = cmp(src[i]); c 1783 net/xfrm/xfrm_state.c class[i] = c; c 1784 net/xfrm/xfrm_state.c count[c]++; c 2120 net/xfrm/xfrm_state.c void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c) c 2127 net/xfrm/xfrm_state.c km->notify_policy(xp, dir, c); c 2131 net/xfrm/xfrm_state.c void km_state_notify(struct xfrm_state *x, const struct km_event *c) c 2137 net/xfrm/xfrm_state.c km->notify(x, c); c 2146 net/xfrm/xfrm_state.c struct km_event c; c 2148 net/xfrm/xfrm_state.c c.data.hard = hard; c 2149 net/xfrm/xfrm_state.c c.portid = portid; c 2150 net/xfrm/xfrm_state.c c.event = XFRM_MSG_EXPIRE; c 2151 net/xfrm/xfrm_state.c km_state_notify(x, &c); c 2194 net/xfrm/xfrm_state.c struct km_event c; c 2196 net/xfrm/xfrm_state.c c.data.hard = hard; c 2197 net/xfrm/xfrm_state.c c.portid = portid; c 2198 net/xfrm/xfrm_state.c c.event = XFRM_MSG_POLEXPIRE; c 2199 net/xfrm/xfrm_state.c km_policy_notify(pol, dir, &c); c 2247 net/xfrm/xfrm_state.c static bool km_is_alive(const struct km_event *c) c 2254 net/xfrm/xfrm_state.c if (km->is_alive && km->is_alive(c)) { c 678 net/xfrm/xfrm_user.c struct km_event c; c 706 net/xfrm/xfrm_user.c c.seq = nlh->nlmsg_seq; c 707 net/xfrm/xfrm_user.c c.portid = nlh->nlmsg_pid; c 708 net/xfrm/xfrm_user.c c.event = nlh->nlmsg_type; c 710 net/xfrm/xfrm_user.c km_state_notify(x, &c); c 756 net/xfrm/xfrm_user.c struct km_event c; c 776 net/xfrm/xfrm_user.c c.seq = nlh->nlmsg_seq; c 777 net/xfrm/xfrm_user.c c.portid = nlh->nlmsg_pid; c 778 net/xfrm/xfrm_user.c c.event = nlh->nlmsg_type; c 779 net/xfrm/xfrm_user.c km_state_notify(x, &c); c 1651 net/xfrm/xfrm_user.c struct km_event c; c 1680 net/xfrm/xfrm_user.c c.event = nlh->nlmsg_type; c 1681 net/xfrm/xfrm_user.c c.seq = nlh->nlmsg_seq; c 1682 net/xfrm/xfrm_user.c c.portid = nlh->nlmsg_pid; c 1683 net/xfrm/xfrm_user.c km_policy_notify(xp, p->dir, &c); c 1863 net/xfrm/xfrm_user.c struct km_event c; c 1924 net/xfrm/xfrm_user.c c.data.byid = p->index; c 1925 net/xfrm/xfrm_user.c c.event = nlh->nlmsg_type; c 1926 net/xfrm/xfrm_user.c c.seq = nlh->nlmsg_seq; c 1927 net/xfrm/xfrm_user.c c.portid = nlh->nlmsg_pid; c 1928 net/xfrm/xfrm_user.c km_policy_notify(xp, p->dir, &c); c 1940 net/xfrm/xfrm_user.c struct km_event c; c 1950 net/xfrm/xfrm_user.c c.data.proto = p->proto; c 1951 net/xfrm/xfrm_user.c c.event = 
nlh->nlmsg_type; c 1952 net/xfrm/xfrm_user.c c.seq = nlh->nlmsg_seq; c 1953 net/xfrm/xfrm_user.c c.portid = nlh->nlmsg_pid; c 1954 net/xfrm/xfrm_user.c c.net = net; c 1955 net/xfrm/xfrm_user.c km_state_notify(NULL, &c); c 1974 net/xfrm/xfrm_user.c static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c) c 1980 net/xfrm/xfrm_user.c nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0); c 1992 net/xfrm/xfrm_user.c id->flags = c->data.aevent; c 2043 net/xfrm/xfrm_user.c struct km_event c; c 2067 net/xfrm/xfrm_user.c c.data.aevent = p->flags; c 2068 net/xfrm/xfrm_user.c c.seq = nlh->nlmsg_seq; c 2069 net/xfrm/xfrm_user.c c.portid = nlh->nlmsg_pid; c 2071 net/xfrm/xfrm_user.c err = build_aevent(r_skb, x, &c); c 2085 net/xfrm/xfrm_user.c struct km_event c; c 2120 net/xfrm/xfrm_user.c c.event = nlh->nlmsg_type; c 2121 net/xfrm/xfrm_user.c c.seq = nlh->nlmsg_seq; c 2122 net/xfrm/xfrm_user.c c.portid = nlh->nlmsg_pid; c 2123 net/xfrm/xfrm_user.c c.data.aevent = XFRM_AE_CU; c 2124 net/xfrm/xfrm_user.c km_state_notify(x, &c); c 2135 net/xfrm/xfrm_user.c struct km_event c; c 2150 net/xfrm/xfrm_user.c c.data.type = type; c 2151 net/xfrm/xfrm_user.c c.event = nlh->nlmsg_type; c 2152 net/xfrm/xfrm_user.c c.seq = nlh->nlmsg_seq; c 2153 net/xfrm/xfrm_user.c c.portid = nlh->nlmsg_pid; c 2154 net/xfrm/xfrm_user.c c.net = net; c 2155 net/xfrm/xfrm_user.c km_policy_notify(NULL, 0, &c); c 2662 net/xfrm/xfrm_user.c struct netlink_dump_control c = { c 2667 net/xfrm/xfrm_user.c return netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c); c 2698 net/xfrm/xfrm_user.c static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c) c 2704 net/xfrm/xfrm_user.c nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0); c 2710 net/xfrm/xfrm_user.c ue->hard = (c->data.hard != 0) ? 
1 : 0; c 2726 net/xfrm/xfrm_user.c static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c) c 2735 net/xfrm/xfrm_user.c if (build_expire(skb, x, c) < 0) { c 2743 net/xfrm/xfrm_user.c static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c) c 2753 net/xfrm/xfrm_user.c err = build_aevent(skb, x, c); c 2759 net/xfrm/xfrm_user.c static int xfrm_notify_sa_flush(const struct km_event *c) c 2761 net/xfrm/xfrm_user.c struct net *net = c->net; c 2771 net/xfrm/xfrm_user.c nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0); c 2778 net/xfrm/xfrm_user.c p->proto = c->data.proto; c 2829 net/xfrm/xfrm_user.c static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c) c 2841 net/xfrm/xfrm_user.c if (c->event == XFRM_MSG_DELSA) { c 2852 net/xfrm/xfrm_user.c nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0); c 2858 net/xfrm/xfrm_user.c if (c->event == XFRM_MSG_DELSA) { c 2888 net/xfrm/xfrm_user.c static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c) c 2891 net/xfrm/xfrm_user.c switch (c->event) { c 2893 net/xfrm/xfrm_user.c return xfrm_exp_state_notify(x, c); c 2895 net/xfrm/xfrm_user.c return xfrm_aevent_state_notify(x, c); c 2899 net/xfrm/xfrm_user.c return xfrm_notify_sa(x, c); c 2901 net/xfrm/xfrm_user.c return xfrm_notify_sa_flush(c); c 2904 net/xfrm/xfrm_user.c c->event); c 3049 net/xfrm/xfrm_user.c int dir, const struct km_event *c) c 3052 net/xfrm/xfrm_user.c int hard = c->data.hard; c 3056 net/xfrm/xfrm_user.c nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0); c 3081 net/xfrm/xfrm_user.c static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c) c 3091 net/xfrm/xfrm_user.c err = build_polexpire(skb, xp, dir, c); c 3097 net/xfrm/xfrm_user.c static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c) c 3109 net/xfrm/xfrm_user.c if (c->event == XFRM_MSG_DELPOLICY) { c 3121 net/xfrm/xfrm_user.c nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0); c 3127 net/xfrm/xfrm_user.c if (c->event == XFRM_MSG_DELPOLICY) { c 3133 net/xfrm/xfrm_user.c if (c->data.byid) c 3166 net/xfrm/xfrm_user.c static int xfrm_notify_policy_flush(const struct km_event *c) c 3168 net/xfrm/xfrm_user.c struct net *net = c->net; c 3177 net/xfrm/xfrm_user.c nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0); c 3181 net/xfrm/xfrm_user.c err = copy_to_user_policy_type(c->data.type, skb); c 3194 net/xfrm/xfrm_user.c static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c) c 3197 net/xfrm/xfrm_user.c switch (c->event) { c 3201 net/xfrm/xfrm_user.c return xfrm_notify_policy(xp, dir, c); c 3203 net/xfrm/xfrm_user.c return xfrm_notify_policy_flush(c); c 3205 net/xfrm/xfrm_user.c return xfrm_exp_policy_notify(xp, dir, c); c 3208 net/xfrm/xfrm_user.c c->event); c 3315 net/xfrm/xfrm_user.c static bool xfrm_is_alive(const struct km_event *c) c 3317 net/xfrm/xfrm_user.c return (bool)xfrm_acquire_is_on(c->net); c 81 samples/bpf/cpustat_user.c int c, i; c 83 samples/bpf/cpustat_user.c for (c = 0; c < MAX_CPU; c++) { c 85 samples/bpf/cpustat_user.c key = c * MAX_CSTATE_ENTRIES + i; c 87 samples/bpf/cpustat_user.c stat_data[c].cstate[i] = value; c 91 samples/bpf/cpustat_user.c key = c * MAX_PSTATE_ENTRIES + i; c 93 samples/bpf/cpustat_user.c stat_data[c].pstate[i] = value; c 65 samples/bpf/lathist_user.c int c, i; c 70 samples/bpf/lathist_user.c for (c = 0; c < MAX_CPU; c++) { c 
72 samples/bpf/lathist_user.c key = c * MAX_ENTRIES + i; c 75 samples/bpf/lathist_user.c cpu_hist[c].data[i] = value; c 76 samples/bpf/lathist_user.c if (value > cpu_hist[c].max) c 77 samples/bpf/lathist_user.c cpu_hist[c].max = value; c 99 samples/bpf/tracex3_user.c int c = num_colors * cnt[key] / (max_cnt + 1); c 102 samples/bpf/tracex3_user.c printf("%s", sym[c]); c 104 samples/bpf/tracex3_user.c printf("%s %s", color[c], nocolor); c 246 samples/bpf/xdpsock_user.c unsigned char c; c 265 samples/bpf/xdpsock_user.c c = *line++; c 266 samples/bpf/xdpsock_user.c printf("%c", (c < 33 || c == 255) ? 0x2E : c); c 396 samples/bpf/xdpsock_user.c int option_index, c; c 401 samples/bpf/xdpsock_user.c c = getopt_long(argc, argv, "Frtli:q:psSNn:czf:mu", c 403 samples/bpf/xdpsock_user.c if (c == -1) c 406 samples/bpf/xdpsock_user.c switch (c) { c 36 samples/seccomp/user-trap.c char buf[CMSG_SPACE(sizeof(int))] = {0}, c = 'c'; c 38 samples/seccomp/user-trap.c .iov_base = &c, c 65 samples/seccomp/user-trap.c char buf[CMSG_SPACE(sizeof(int))] = {0}, c = 'c'; c 67 samples/seccomp/user-trap.c .iov_base = &c, c 1212 scripts/asn1_compiler.c const struct element *c; c 1246 scripts/asn1_compiler.c for (c = e->children; c; c = c->next) c 1247 scripts/asn1_compiler.c dump_element(c, level + 3); c 132 scripts/basic/fixdep.c static void xputchar(int c) c 136 scripts/basic/fixdep.c ret = putchar(c); c 148 scripts/basic/fixdep.c int c, prev_c = '/', i; c 152 scripts/basic/fixdep.c c = m[i]; c 153 scripts/basic/fixdep.c if (c == '_') c 154 scripts/basic/fixdep.c c = '/'; c 156 scripts/basic/fixdep.c c = tolower(c); c 157 scripts/basic/fixdep.c if (c != '/' || prev_c != '/') c 158 scripts/basic/fixdep.c xputchar(c); c 159 scripts/basic/fixdep.c prev_c = c; c 10 scripts/dtc/checks.c #define TRACE(c, ...) \ c 12 scripts/dtc/checks.c fprintf(stderr, "=== %s: ", (c)->name); \ c 17 scripts/dtc/checks.c #define TRACE(c, fmt, ...) do { } while (0) c 29 scripts/dtc/checks.c typedef void (*check_fn)(struct check *c, struct dt_info *dti, struct node *node); c 61 scripts/dtc/checks.c static inline void PRINTF(5, 6) check_msg(struct check *c, struct dt_info *dti, c 71 scripts/dtc/checks.c if (!(c->warn && (quiet < 1)) && !(c->error && (quiet < 2))) c 90 scripts/dtc/checks.c (c->error) ? "ERROR" : "Warning", c->name); c 119 scripts/dtc/checks.c #define FAIL(c, dti, node, ...) \ c 121 scripts/dtc/checks.c TRACE((c), "\t\tFAILED at %s:%d", __FILE__, __LINE__); \ c 122 scripts/dtc/checks.c (c)->status = FAILED; \ c 123 scripts/dtc/checks.c check_msg((c), dti, node, NULL, __VA_ARGS__); \ c 126 scripts/dtc/checks.c #define FAIL_PROP(c, dti, node, prop, ...) 
\ c 128 scripts/dtc/checks.c TRACE((c), "\t\tFAILED at %s:%d", __FILE__, __LINE__); \ c 129 scripts/dtc/checks.c (c)->status = FAILED; \ c 130 scripts/dtc/checks.c check_msg((c), dti, node, prop, __VA_ARGS__); \ c 134 scripts/dtc/checks.c static void check_nodes_props(struct check *c, struct dt_info *dti, struct node *node) c 138 scripts/dtc/checks.c TRACE(c, "%s", node->fullpath); c 139 scripts/dtc/checks.c if (c->fn) c 140 scripts/dtc/checks.c c->fn(c, dti, node); c 143 scripts/dtc/checks.c check_nodes_props(c, dti, child); c 146 scripts/dtc/checks.c static bool run_check(struct check *c, struct dt_info *dti) c 152 scripts/dtc/checks.c assert(!c->inprogress); c 154 scripts/dtc/checks.c if (c->status != UNCHECKED) c 157 scripts/dtc/checks.c c->inprogress = true; c 159 scripts/dtc/checks.c for (i = 0; i < c->num_prereqs; i++) { c 160 scripts/dtc/checks.c struct check *prq = c->prereq[i]; c 163 scripts/dtc/checks.c c->status = PREREQ; c 164 scripts/dtc/checks.c check_msg(c, dti, NULL, NULL, "Failed prerequisite '%s'", c 165 scripts/dtc/checks.c c->prereq[i]->name); c 169 scripts/dtc/checks.c if (c->status != UNCHECKED) c 172 scripts/dtc/checks.c check_nodes_props(c, dti, dt); c 174 scripts/dtc/checks.c if (c->status == UNCHECKED) c 175 scripts/dtc/checks.c c->status = PASSED; c 177 scripts/dtc/checks.c TRACE(c, "\tCompleted, status %d", c->status); c 180 scripts/dtc/checks.c c->inprogress = false; c 181 scripts/dtc/checks.c if ((c->status != PASSED) && (c->error)) c 191 scripts/dtc/checks.c static inline void check_always_fail(struct check *c, struct dt_info *dti, c 194 scripts/dtc/checks.c FAIL(c, dti, node, "always_fail check"); c 198 scripts/dtc/checks.c static void check_is_string(struct check *c, struct dt_info *dti, c 202 scripts/dtc/checks.c char *propname = c->data; c 209 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, "property is not a string"); c 216 scripts/dtc/checks.c static void check_is_string_list(struct check *c, struct dt_info *dti, c 221 scripts/dtc/checks.c char *propname = c->data; c 233 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, "property is not a string list"); c 245 scripts/dtc/checks.c static void check_is_cell(struct check *c, struct dt_info *dti, c 249 scripts/dtc/checks.c char *propname = c->data; c 256 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, "property is not a single cell"); c 267 scripts/dtc/checks.c static void check_duplicate_node_names(struct check *c, struct dt_info *dti, c 277 scripts/dtc/checks.c FAIL(c, dti, child2, "Duplicate node name"); c 281 scripts/dtc/checks.c static void check_duplicate_property_names(struct check *c, struct dt_info *dti, c 291 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, "Duplicate property name"); c 303 scripts/dtc/checks.c static void check_node_name_chars(struct check *c, struct dt_info *dti, c 306 scripts/dtc/checks.c int n = strspn(node->name, c->data); c 309 scripts/dtc/checks.c FAIL(c, dti, node, "Bad character '%c' in node name", c 314 scripts/dtc/checks.c static void check_node_name_chars_strict(struct check *c, struct dt_info *dti, c 317 scripts/dtc/checks.c int n = strspn(node->name, c->data); c 320 scripts/dtc/checks.c FAIL(c, dti, node, "Character '%c' not recommended in node name", c 325 scripts/dtc/checks.c static void check_node_name_format(struct check *c, struct dt_info *dti, c 329 scripts/dtc/checks.c FAIL(c, dti, node, "multiple '@' characters in node name"); c 333 scripts/dtc/checks.c static void check_unit_address_vs_reg(struct check *c, struct dt_info *dti, c 352 
scripts/dtc/checks.c FAIL(c, dti, node, "node has a reg or ranges property, but no unit name"); c 355 scripts/dtc/checks.c FAIL(c, dti, node, "node has a unit name, but no reg property"); c 360 scripts/dtc/checks.c static void check_property_name_chars(struct check *c, struct dt_info *dti, c 366 scripts/dtc/checks.c int n = strspn(prop->name, c->data); c 369 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, "Bad character '%c' in property name", c 375 scripts/dtc/checks.c static void check_property_name_chars_strict(struct check *c, c 383 scripts/dtc/checks.c int n = strspn(name, c->data); c 398 scripts/dtc/checks.c n = strspn(name, c->data); c 401 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, "Character '%c' not recommended in property name", c 414 scripts/dtc/checks.c static void check_duplicate_label(struct check *c, struct dt_info *dti, c 435 scripts/dtc/checks.c FAIL(c, dti, node, "Duplicate label '%s' on " DESCLABEL_FMT c 441 scripts/dtc/checks.c static void check_duplicate_label_node(struct check *c, struct dt_info *dti, c 448 scripts/dtc/checks.c check_duplicate_label(c, dti, l->label, node, NULL, NULL); c 454 scripts/dtc/checks.c check_duplicate_label(c, dti, l->label, node, prop, NULL); c 457 scripts/dtc/checks.c check_duplicate_label(c, dti, m->ref, node, prop, m); c 462 scripts/dtc/checks.c static cell_t check_phandle_prop(struct check *c, struct dt_info *dti, c 475 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, "bad length (%d) %s property", c 487 scripts/dtc/checks.c FAIL(c, dti, node, "%s is a reference to another node", c 501 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, "bad value (0x%x) in %s property", c 509 scripts/dtc/checks.c static void check_explicit_phandles(struct check *c, struct dt_info *dti, c 519 scripts/dtc/checks.c phandle = check_phandle_prop(c, dti, node, "phandle"); c 521 scripts/dtc/checks.c linux_phandle = check_phandle_prop(c, dti, node, "linux,phandle"); c 528 scripts/dtc/checks.c FAIL(c, dti, node, "mismatching 'phandle' and 'linux,phandle'" c 536 scripts/dtc/checks.c FAIL(c, dti, node, "duplicated phandle 0x%x (seen before at %s)", c 545 scripts/dtc/checks.c static void check_name_properties(struct check *c, struct dt_info *dti, c 561 scripts/dtc/checks.c FAIL(c, dti, node, "\"name\" property is incorrect (\"%s\" instead" c 579 scripts/dtc/checks.c static void fixup_phandle_references(struct check *c, struct dt_info *dti, c 596 scripts/dtc/checks.c FAIL(c, dti, node, "Reference to non-existent node or " c 614 scripts/dtc/checks.c static void fixup_path_references(struct check *c, struct dt_info *dti, c 630 scripts/dtc/checks.c FAIL(c, dti, node, "Reference to non-existent node or label \"%s\"\n", c 645 scripts/dtc/checks.c static void fixup_omit_unused_nodes(struct check *c, struct dt_info *dti, c 669 scripts/dtc/checks.c static void check_names_is_string_list(struct check *c, struct dt_info *dti, c 679 scripts/dtc/checks.c c->data = prop->name; c 680 scripts/dtc/checks.c check_is_string_list(c, dti, node); c 685 scripts/dtc/checks.c static void check_alias_paths(struct check *c, struct dt_info *dti, c 695 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, "aliases property is not a valid node (%s)", c 700 scripts/dtc/checks.c FAIL(c, dti, node, "aliases property name must include only lowercase and '-'"); c 705 scripts/dtc/checks.c static void fixup_addr_size_cells(struct check *c, struct dt_info *dti, c 729 scripts/dtc/checks.c static void check_reg_format(struct check *c, struct dt_info *dti, c 740 scripts/dtc/checks.c FAIL(c, dti, 
node, "Root node has a \"reg\" property"); c 745 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, "property is empty"); c 752 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, "property has invalid length (%d bytes) " c 758 scripts/dtc/checks.c static void check_ranges_format(struct check *c, struct dt_info *dti, c 769 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, "Root node has a \"ranges\" property"); c 781 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, "empty \"ranges\" property but its " c 786 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, "empty \"ranges\" property but its " c 791 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, "\"ranges\" property has invalid length (%d bytes) " c 803 scripts/dtc/checks.c static void check_pci_bridge(struct check *c, struct dt_info *dti, struct node *node) c 816 scripts/dtc/checks.c FAIL(c, dti, node, "node name is not \"pci\" or \"pcie\""); c 820 scripts/dtc/checks.c FAIL(c, dti, node, "missing ranges for PCI bridge (or not a bridge)"); c 823 scripts/dtc/checks.c FAIL(c, dti, node, "incorrect #address-cells for PCI bridge"); c 825 scripts/dtc/checks.c FAIL(c, dti, node, "incorrect #size-cells for PCI bridge"); c 832 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, "value must be 2 cells"); c 837 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, "1st cell must be less than or equal to 2nd cell"); c 839 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, "maximum bus number must be less than 256"); c 844 scripts/dtc/checks.c static void check_pci_device_bus_num(struct check *c, struct dt_info *dti, struct node *node) c 869 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, "PCI bus number %d out of range, expected (%d - %d)", c 874 scripts/dtc/checks.c static void check_pci_device_reg(struct check *c, struct dt_info *dti, struct node *node) c 887 scripts/dtc/checks.c FAIL(c, dti, node, "missing PCI reg property"); c 893 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, "PCI reg config space address cells 2 and 3 must be 0"); c 900 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, "PCI reg address is not configuration space"); c 902 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, "PCI reg config space address register number must be 0"); c 914 scripts/dtc/checks.c FAIL(c, dti, node, "PCI unit address format error, expected \"%s\"", c 940 scripts/dtc/checks.c static void check_simple_bus_bridge(struct check *c, struct dt_info *dti, struct node *node) c 948 scripts/dtc/checks.c static void check_simple_bus_reg(struct check *c, struct dt_info *dti, struct node *node) c 972 scripts/dtc/checks.c FAIL(c, dti, node, "missing or empty reg/ranges property"); c 982 scripts/dtc/checks.c FAIL(c, dti, node, "simple-bus unit address format error, expected \"%s\"", c 991 scripts/dtc/checks.c static void check_i2c_bus_bridge(struct check *c, struct dt_info *dti, struct node *node) c 1010 scripts/dtc/checks.c FAIL(c, dti, node, "incorrect #address-cells for I2C bus"); c 1012 scripts/dtc/checks.c FAIL(c, dti, node, "incorrect #size-cells for I2C bus"); c 1017 scripts/dtc/checks.c static void check_i2c_bus_reg(struct check *c, struct dt_info *dti, struct node *node) c 1034 scripts/dtc/checks.c FAIL(c, dti, node, "missing or empty reg property"); c 1041 scripts/dtc/checks.c FAIL(c, dti, node, "I2C bus unit address format error, expected \"%s\"", c 1047 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, "I2C address must be less than 10-bits, got \"0x%x\"", c 1058 scripts/dtc/checks.c static void check_spi_bus_bridge(struct check *c, struct 
dt_info *dti, struct node *node) c 1084 scripts/dtc/checks.c FAIL(c, dti, node, "node name for SPI buses should be 'spi'"); c 1092 scripts/dtc/checks.c FAIL(c, dti, node, "incorrect #address-cells for SPI bus"); c 1094 scripts/dtc/checks.c FAIL(c, dti, node, "incorrect #size-cells for SPI bus"); c 1099 scripts/dtc/checks.c static void check_spi_bus_reg(struct check *c, struct dt_info *dti, struct node *node) c 1118 scripts/dtc/checks.c FAIL(c, dti, node, "missing or empty reg property"); c 1125 scripts/dtc/checks.c FAIL(c, dti, node, "SPI bus unit address format error, expected \"%s\"", c 1130 scripts/dtc/checks.c static void check_unit_address_format(struct check *c, struct dt_info *dti, c 1142 scripts/dtc/checks.c FAIL(c, dti, node, "unit name should not have leading \"0x\""); c 1147 scripts/dtc/checks.c FAIL(c, dti, node, "unit name should not have leading 0s"); c 1155 scripts/dtc/checks.c static void check_avoid_default_addr_size(struct check *c, struct dt_info *dti, c 1170 scripts/dtc/checks.c FAIL(c, dti, node, "Relying on default #address-cells value"); c 1173 scripts/dtc/checks.c FAIL(c, dti, node, "Relying on default #size-cells value"); c 1178 scripts/dtc/checks.c static void check_avoid_unnecessary_addr_size(struct check *c, struct dt_info *dti, c 1198 scripts/dtc/checks.c FAIL(c, dti, node, "unnecessary #address-cells/#size-cells without \"ranges\" or child \"reg\" property"); c 1216 scripts/dtc/checks.c static void check_unique_unit_address_common(struct check *c, c 1248 scripts/dtc/checks.c FAIL(c, dti, childb, "duplicate unit-address (also used in node %s)", childa->fullpath); c 1253 scripts/dtc/checks.c static void check_unique_unit_address(struct check *c, struct dt_info *dti, c 1256 scripts/dtc/checks.c check_unique_unit_address_common(c, dti, node, false); c 1260 scripts/dtc/checks.c static void check_unique_unit_address_if_enabled(struct check *c, struct dt_info *dti, c 1263 scripts/dtc/checks.c check_unique_unit_address_common(c, dti, node, true); c 1268 scripts/dtc/checks.c static void check_obsolete_chosen_interrupt_controller(struct check *c, c 1286 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, c 1292 scripts/dtc/checks.c static void check_chosen_node_is_root(struct check *c, struct dt_info *dti, c 1299 scripts/dtc/checks.c FAIL(c, dti, node, "chosen node must be at root node"); c 1303 scripts/dtc/checks.c static void check_chosen_node_bootargs(struct check *c, struct dt_info *dti, c 1315 scripts/dtc/checks.c c->data = prop->name; c 1316 scripts/dtc/checks.c check_is_string(c, dti, node); c 1320 scripts/dtc/checks.c static void check_chosen_node_stdout_path(struct check *c, struct dt_info *dti, c 1333 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, "Use 'stdout-path' instead"); c 1336 scripts/dtc/checks.c c->data = prop->name; c 1337 scripts/dtc/checks.c check_is_string(c, dti, node); c 1347 scripts/dtc/checks.c static void check_property_phandle_args(struct check *c, c 1357 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, c 1390 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, c 1397 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, c 1409 scripts/dtc/checks.c FAIL(c, dti, node, "Missing property '%s' in node %s or bad phandle (referred from %s[%d])", c 1417 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, c 1424 scripts/dtc/checks.c static void check_provider_cells_property(struct check *c, c 1428 scripts/dtc/checks.c struct provider *provider = c->data; c 1435 scripts/dtc/checks.c check_property_phandle_args(c, dti, node, prop, provider); c 1480 
scripts/dtc/checks.c static void check_gpios_property(struct check *c, c 1499 scripts/dtc/checks.c check_property_phandle_args(c, dti, node, prop, &provider); c 1505 scripts/dtc/checks.c static void check_deprecated_gpio_property(struct check *c, c 1521 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, c 1542 scripts/dtc/checks.c static void check_interrupts_property(struct check *c, c 1556 scripts/dtc/checks.c FAIL_PROP(c, dti, node, irq_prop, "size (%d) is invalid, expected multiple of %zu", c 1573 scripts/dtc/checks.c FAIL_PROP(c, dti, parent, prop, "Invalid phandle"); c 1579 scripts/dtc/checks.c FAIL_PROP(c, dti, parent, prop, "Bad phandle"); c 1583 scripts/dtc/checks.c FAIL(c, dti, irq_node, c 1593 scripts/dtc/checks.c FAIL(c, dti, node, "Missing interrupt-parent"); c 1599 scripts/dtc/checks.c FAIL(c, dti, irq_node, "Missing #interrupt-cells in interrupt-parent"); c 1605 scripts/dtc/checks.c FAIL_PROP(c, dti, node, prop, c 1620 scripts/dtc/checks.c static void check_graph_nodes(struct check *c, struct dt_info *dti, c 1643 scripts/dtc/checks.c static void check_graph_child_address(struct check *c, struct dt_info *dti, c 1663 scripts/dtc/checks.c FAIL(c, dti, node, "graph node has single child node '%s', #address-cells/#size-cells are not necessary", c 1668 scripts/dtc/checks.c static void check_graph_reg(struct check *c, struct dt_info *dti, c 1680 scripts/dtc/checks.c FAIL(c, dti, node, "graph node malformed 'reg' property"); c 1686 scripts/dtc/checks.c FAIL(c, dti, node, "graph node unit address error, expected \"%s\"", c 1690 scripts/dtc/checks.c FAIL_PROP(c, dti, node, get_property(node, "#address-cells"), c 1694 scripts/dtc/checks.c FAIL_PROP(c, dti, node, get_property(node, "#size-cells"), c 1699 scripts/dtc/checks.c static void check_graph_port(struct check *c, struct dt_info *dti, c 1706 scripts/dtc/checks.c FAIL(c, dti, node, "graph port node name should be 'port'"); c 1708 scripts/dtc/checks.c check_graph_reg(c, dti, node); c 1712 scripts/dtc/checks.c static struct node *get_remote_endpoint(struct check *c, struct dt_info *dti, c 1730 scripts/dtc/checks.c FAIL_PROP(c, dti, endpoint, prop, "graph phandle is not valid"); c 1735 scripts/dtc/checks.c static void check_graph_endpoint(struct check *c, struct dt_info *dti, c 1744 scripts/dtc/checks.c FAIL(c, dti, node, "graph endpoint node name should be 'endpoint'"); c 1746 scripts/dtc/checks.c check_graph_reg(c, dti, node); c 1748 scripts/dtc/checks.c remote_node = get_remote_endpoint(c, dti, node); c 1752 scripts/dtc/checks.c if (get_remote_endpoint(c, dti, remote_node) != node) c 1753 scripts/dtc/checks.c FAIL(c, dti, node, "graph connection to node '%s' is not bidirectional", c 1831 scripts/dtc/checks.c static void enable_warning_error(struct check *c, bool warn, bool error) c 1836 scripts/dtc/checks.c if ((warn && !c->warn) || (error && !c->error)) c 1837 scripts/dtc/checks.c for (i = 0; i < c->num_prereqs; i++) c 1838 scripts/dtc/checks.c enable_warning_error(c->prereq[i], warn, error); c 1840 scripts/dtc/checks.c c->warn = c->warn || warn; c 1841 scripts/dtc/checks.c c->error = c->error || error; c 1844 scripts/dtc/checks.c static void disable_warning_error(struct check *c, bool warn, bool error) c 1850 scripts/dtc/checks.c if ((warn && c->warn) || (error && c->error)) { c 1856 scripts/dtc/checks.c if (cc->prereq[j] == c) c 1861 scripts/dtc/checks.c c->warn = c->warn && !warn; c 1862 scripts/dtc/checks.c c->error = c->error && !error; c 1878 scripts/dtc/checks.c struct check *c = check_table[i]; c 1880 scripts/dtc/checks.c 
if (streq(c->name, name)) { c 1882 scripts/dtc/checks.c enable_warning_error(c, warn, error); c 1884 scripts/dtc/checks.c disable_warning_error(c, warn, error); c 1898 scripts/dtc/checks.c struct check *c = check_table[i]; c 1900 scripts/dtc/checks.c if (c->warn || c->error) c 1901 scripts/dtc/checks.c error = error || run_check(c, dti); c 67 scripts/dtc/data.c char c = s[i++]; c 69 scripts/dtc/data.c if (c == '\\') c 70 scripts/dtc/data.c c = get_escape_char(s, &i); c 72 scripts/dtc/data.c q[d.len++] = c; c 186 scripts/dtc/dtc.h #define for_each_child_withdel(n, c) \ c 187 scripts/dtc/dtc.h for ((c) = (n)->children; (c); (c) = (c)->next_sibling) c 189 scripts/dtc/dtc.h #define for_each_child(n, c) \ c 190 scripts/dtc/dtc.h for_each_child_withdel(n, c) \ c 191 scripts/dtc/dtc.h if (!(c)->deleted) c 302 scripts/dtc/fdtget.c int c = getopt(argc, argv, "d:hlpt:"); c 303 scripts/dtc/fdtget.c if (c == -1) c 306 scripts/dtc/fdtget.c switch (c) { c 295 scripts/dtc/fdtput.c int c = getopt(argc, argv, "chpt:v"); c 296 scripts/dtc/fdtput.c if (c == -1) c 308 scripts/dtc/fdtput.c switch (c) { c 16 scripts/dtc/libfdt/fdt_addresses.c const fdt32_t *c; c 20 scripts/dtc/libfdt/fdt_addresses.c c = fdt_getprop(fdt, nodeoffset, name, &len); c 21 scripts/dtc/libfdt/fdt_addresses.c if (!c) c 24 scripts/dtc/libfdt/fdt_addresses.c if (len != sizeof(*c)) c 27 scripts/dtc/libfdt/fdt_addresses.c val = fdt32_to_cpu(*c); c 451 scripts/dtc/livetree.c struct node *c; c 463 scripts/dtc/livetree.c for_each_child(tree, c) { c 464 scripts/dtc/livetree.c prop = get_property_by_label(c, label, node); c 478 scripts/dtc/livetree.c struct node *c; c 490 scripts/dtc/livetree.c for_each_child(tree, c) { c 491 scripts/dtc/livetree.c m = get_marker_label(c, label, node, prop); c 770 scripts/dtc/livetree.c struct node *c; c 774 scripts/dtc/livetree.c for_each_child_withdel(node, c) c 775 scripts/dtc/livetree.c sort_node(c); c 812 scripts/dtc/livetree.c struct node *c; c 817 scripts/dtc/livetree.c for_each_child(node, c) c 818 scripts/dtc/livetree.c if (any_label_tree(dti, c)) c 829 scripts/dtc/livetree.c struct node *c; c 861 scripts/dtc/livetree.c for_each_child(node, c) c 862 scripts/dtc/livetree.c generate_label_tree_internal(dti, an, c, allocph); c 867 scripts/dtc/livetree.c struct node *c; c 879 scripts/dtc/livetree.c for_each_child(node, c) { c 880 scripts/dtc/livetree.c if (any_fixup_tree(dti, c)) c 912 scripts/dtc/livetree.c struct node *c; c 926 scripts/dtc/livetree.c for_each_child(node, c) c 927 scripts/dtc/livetree.c generate_fixups_tree_internal(dti, fn, c); c 932 scripts/dtc/livetree.c struct node *c; c 944 scripts/dtc/livetree.c for_each_child(node, c) { c 945 scripts/dtc/livetree.c if (any_local_fixup_tree(dti, c)) c 993 scripts/dtc/livetree.c struct node *c; c 1007 scripts/dtc/livetree.c for_each_child(node, c) c 1008 scripts/dtc/livetree.c generate_local_fixups_tree_internal(dti, lfn, c); c 42 scripts/dtc/treesource.c static bool isstring(char c) c 44 scripts/dtc/treesource.c return (isprint((unsigned char)c) c 45 scripts/dtc/treesource.c || (c == '\0') c 46 scripts/dtc/treesource.c || strchr("\a\b\t\n\v\f\r", c)); c 60 scripts/dtc/treesource.c char c = *s++; c 61 scripts/dtc/treesource.c switch (c) { c 93 scripts/dtc/treesource.c if (isprint((unsigned char)c)) c 94 scripts/dtc/treesource.c fprintf(f, "%c", c); c 96 scripts/dtc/treesource.c fprintf(f, "\\x%02"PRIx8, c); c 185 scripts/dtc/util.c char c = s[*i]; c 189 scripts/dtc/util.c switch (c) { c 227 scripts/dtc/util.c val = c; c 62 
scripts/gcc-plugins/randomize_layout_plugin.c partial_name_hash(unsigned long c, unsigned long prevhash) c 64 scripts/gcc-plugins/randomize_layout_plugin.c return (prevhash + (c << 4) + (c >> 4)) * 11; c 142 scripts/gcc-plugins/randomize_layout_plugin.c typedef struct ranctx { u64 a; u64 b; u64 c; u64 d; } ranctx; c 147 scripts/gcc-plugins/randomize_layout_plugin.c x->a = x->b ^ rot(x->c, 13); c 148 scripts/gcc-plugins/randomize_layout_plugin.c x->b = x->c + rot(x->d, 37); c 149 scripts/gcc-plugins/randomize_layout_plugin.c x->c = x->d + e; c 159 scripts/gcc-plugins/randomize_layout_plugin.c x->c = seed[2]; c 120 scripts/genksyms/genksyms.c static unsigned long partial_crc32_one(unsigned char c, unsigned long crc) c 122 scripts/genksyms/genksyms.c return crctab32[(crc ^ c) & 0xff] ^ (crc >> 8); c 414 scripts/genksyms/genksyms.c int c, in_string = 0; c 416 scripts/genksyms/genksyms.c while ((c = fgetc(f)) != EOF) { c 417 scripts/genksyms/genksyms.c if (!in_string && c == ' ') { c 421 scripts/genksyms/genksyms.c } else if (c == '"') { c 423 scripts/genksyms/genksyms.c } else if (c == '\n') { c 426 scripts/genksyms/genksyms.c ungetc(c, f); c 433 scripts/genksyms/genksyms.c *node.string++ = c; c 302 scripts/kallsyms.c int c, rlen, total=0; c 305 scripts/kallsyms.c c = *data; c 308 scripts/kallsyms.c if (best_table[c][0]==c && best_table_len[c]==1) { c 309 scripts/kallsyms.c *result++ = c; c 313 scripts/kallsyms.c rlen = expand_symbol(best_table[c], best_table_len[c], result); c 602 scripts/kallsyms.c unsigned int i, j, c; c 606 scripts/kallsyms.c c = table[i].sym[j]; c 607 scripts/kallsyms.c best_table[c][0]=c; c 608 scripts/kallsyms.c best_table_len[c]=1; c 133 scripts/kconfig/confdata.c char *d, c; c 142 scripts/kconfig/confdata.c while ((c = *s++)) c 143 scripts/kconfig/confdata.c *d++ = (c == '_') ? 
'/' : tolower(c); c 296 scripts/kconfig/confdata.c static int add_byte(int c, char **lineptr, size_t slen, size_t *n) c 311 scripts/kconfig/confdata.c (*lineptr)[slen] = c; c 322 scripts/kconfig/confdata.c int c = getc(stream); c 324 scripts/kconfig/confdata.c switch (c) { c 326 scripts/kconfig/confdata.c if (add_byte(c, &line, slen, n) < 0) c 338 scripts/kconfig/confdata.c if (add_byte(c, &line, slen, n) < 0) c 521 scripts/kconfig/lxdialog/util.c int i, in_paren = 0, c; c 524 scripts/kconfig/lxdialog/util.c c = tolower(string[i]); c 526 scripts/kconfig/lxdialog/util.c if (strchr("<[(", c)) c 528 scripts/kconfig/lxdialog/util.c if (strchr(">])", c) && in_paren > 0) c 531 scripts/kconfig/lxdialog/util.c if ((!in_paren) && isalpha(c) && strchr(exempt, c) == 0) c 288 scripts/kconfig/mconf.c static int show_textbox_ext(const char *title, char *text, int r, int c, c 291 scripts/kconfig/mconf.c static void show_textbox(const char *title, const char *text, int r, int c); c 754 scripts/kconfig/mconf.c static int show_textbox_ext(const char *title, char *text, int r, int c, int c 759 scripts/kconfig/mconf.c return dialog_textbox(title, text, r, c, keys, vscroll, hscroll, c 763 scripts/kconfig/mconf.c static void show_textbox(const char *title, const char *text, int r, int c) c 765 scripts/kconfig/mconf.c show_textbox_ext(title, (char *) text, r, c, (int []) {0}, NULL, NULL, c 1023 scripts/kconfig/nconf.c char c = (char) key; c 1037 scripts/kconfig/nconf.c if (isalnum(c) || isgraph(c) || c == ' ') { c 1038 scripts/kconfig/nconf.c state->pattern[strlen(state->pattern)] = c; c 495 scripts/kconfig/preprocess.c static char *__expand_string(const char **str, bool (*is_end)(char c), c 539 scripts/kconfig/preprocess.c static bool is_end_of_str(char c) c 541 scripts/kconfig/preprocess.c return !c; c 559 scripts/kconfig/preprocess.c static bool is_end_of_token(char c) c 561 scripts/kconfig/preprocess.c return !(isalnum(c) || c == '_' || c == '-'); c 233 scripts/mod/file2alias.c unsigned long long c, dec = 0; c 243 scripts/mod/file2alias.c c = (*bcd >> (i << 2)) & 0xf; c 244 scripts/mod/file2alias.c c = c > 9 ? 
9 : c; /* force to bcd just in case */ c 246 scripts/mod/file2alias.c c = c * 10; c 247 scripts/mod/file2alias.c dec += c; c 256 scripts/mod/file2alias.c for (c=1,j=0 ; j < i ; j++) c 257 scripts/mod/file2alias.c c = c * 10; c 258 scripts/mod/file2alias.c c = (dec / c) % 10; c 259 scripts/mod/file2alias.c *bcd += c << (i << 2); c 11 scripts/mod/mk_elfconfig.c union { short s; char c[2]; } endian_test; c 49 scripts/mod/mk_elfconfig.c if (memcmp(endian_test.c, "\x01\x02", 2) == 0) c 51 scripts/mod/mk_elfconfig.c else if (memcmp(endian_test.c, "\x02\x01", 2) == 0) c 67 scripts/mod/sumversion.c #define ROUND1(a,b,c,d,k,s) (a = lshift(a + F(b,c,d) + k, s)) c 68 scripts/mod/sumversion.c #define ROUND2(a,b,c,d,k,s) (a = lshift(a + G(b,c,d) + k + (uint32_t)0x5A827999,s)) c 69 scripts/mod/sumversion.c #define ROUND3(a,b,c,d,k,s) (a = lshift(a + H(b,c,d) + k + (uint32_t)0x6ED9EBA1,s)) c 90 scripts/mod/sumversion.c uint32_t a, b, c, d; c 94 scripts/mod/sumversion.c c = hash[2]; c 97 scripts/mod/sumversion.c ROUND1(a, b, c, d, in[0], 3); c 98 scripts/mod/sumversion.c ROUND1(d, a, b, c, in[1], 7); c 99 scripts/mod/sumversion.c ROUND1(c, d, a, b, in[2], 11); c 100 scripts/mod/sumversion.c ROUND1(b, c, d, a, in[3], 19); c 101 scripts/mod/sumversion.c ROUND1(a, b, c, d, in[4], 3); c 102 scripts/mod/sumversion.c ROUND1(d, a, b, c, in[5], 7); c 103 scripts/mod/sumversion.c ROUND1(c, d, a, b, in[6], 11); c 104 scripts/mod/sumversion.c ROUND1(b, c, d, a, in[7], 19); c 105 scripts/mod/sumversion.c ROUND1(a, b, c, d, in[8], 3); c 106 scripts/mod/sumversion.c ROUND1(d, a, b, c, in[9], 7); c 107 scripts/mod/sumversion.c ROUND1(c, d, a, b, in[10], 11); c 108 scripts/mod/sumversion.c ROUND1(b, c, d, a, in[11], 19); c 109 scripts/mod/sumversion.c ROUND1(a, b, c, d, in[12], 3); c 110 scripts/mod/sumversion.c ROUND1(d, a, b, c, in[13], 7); c 111 scripts/mod/sumversion.c ROUND1(c, d, a, b, in[14], 11); c 112 scripts/mod/sumversion.c ROUND1(b, c, d, a, in[15], 19); c 114 scripts/mod/sumversion.c ROUND2(a, b, c, d,in[ 0], 3); c 115 scripts/mod/sumversion.c ROUND2(d, a, b, c, in[4], 5); c 116 scripts/mod/sumversion.c ROUND2(c, d, a, b, in[8], 9); c 117 scripts/mod/sumversion.c ROUND2(b, c, d, a, in[12], 13); c 118 scripts/mod/sumversion.c ROUND2(a, b, c, d, in[1], 3); c 119 scripts/mod/sumversion.c ROUND2(d, a, b, c, in[5], 5); c 120 scripts/mod/sumversion.c ROUND2(c, d, a, b, in[9], 9); c 121 scripts/mod/sumversion.c ROUND2(b, c, d, a, in[13], 13); c 122 scripts/mod/sumversion.c ROUND2(a, b, c, d, in[2], 3); c 123 scripts/mod/sumversion.c ROUND2(d, a, b, c, in[6], 5); c 124 scripts/mod/sumversion.c ROUND2(c, d, a, b, in[10], 9); c 125 scripts/mod/sumversion.c ROUND2(b, c, d, a, in[14], 13); c 126 scripts/mod/sumversion.c ROUND2(a, b, c, d, in[3], 3); c 127 scripts/mod/sumversion.c ROUND2(d, a, b, c, in[7], 5); c 128 scripts/mod/sumversion.c ROUND2(c, d, a, b, in[11], 9); c 129 scripts/mod/sumversion.c ROUND2(b, c, d, a, in[15], 13); c 131 scripts/mod/sumversion.c ROUND3(a, b, c, d,in[ 0], 3); c 132 scripts/mod/sumversion.c ROUND3(d, a, b, c, in[8], 9); c 133 scripts/mod/sumversion.c ROUND3(c, d, a, b, in[4], 11); c 134 scripts/mod/sumversion.c ROUND3(b, c, d, a, in[12], 15); c 135 scripts/mod/sumversion.c ROUND3(a, b, c, d, in[2], 3); c 136 scripts/mod/sumversion.c ROUND3(d, a, b, c, in[10], 9); c 137 scripts/mod/sumversion.c ROUND3(c, d, a, b, in[6], 11); c 138 scripts/mod/sumversion.c ROUND3(b, c, d, a, in[14], 15); c 139 scripts/mod/sumversion.c ROUND3(a, b, c, d, in[1], 3); c 140 scripts/mod/sumversion.c ROUND3(d, a, 
b, c, in[9], 9); c 141 scripts/mod/sumversion.c ROUND3(c, d, a, b, in[5], 11); c 142 scripts/mod/sumversion.c ROUND3(b, c, d, a, in[13], 15); c 143 scripts/mod/sumversion.c ROUND3(a, b, c, d, in[3], 3); c 144 scripts/mod/sumversion.c ROUND3(d, a, b, c, in[11], 9); c 145 scripts/mod/sumversion.c ROUND3(c, d, a, b, in[7], 11); c 146 scripts/mod/sumversion.c ROUND3(b, c, d, a, in[15], 15); c 150 scripts/mod/sumversion.c hash[2] += c; c 225 scripts/mod/sumversion.c static inline void add_char(unsigned char c, struct md4_ctx *md) c 227 scripts/mod/sumversion.c md4_update(md, &c, 1); c 86 scripts/pnmtologo.c int c, val; c 90 scripts/pnmtologo.c c = fgetc(fp); c 91 scripts/pnmtologo.c if (c == EOF) c 93 scripts/pnmtologo.c if (c == '#') { c 96 scripts/pnmtologo.c c = fgetc(fp); c 97 scripts/pnmtologo.c if (c == EOF) c 99 scripts/pnmtologo.c } while (c != '\n'); c 101 scripts/pnmtologo.c } while (isspace(c)); c 105 scripts/pnmtologo.c while (isdigit(c)) { c 106 scripts/pnmtologo.c val = 10*val+c-'0'; c 112 scripts/pnmtologo.c c = fgetc(fp); c 113 scripts/pnmtologo.c if (c == EOF) c 208 scripts/pnmtologo.c static inline int is_black(struct color c) c 210 scripts/pnmtologo.c return c.red == 0 && c.green == 0 && c.blue == 0; c 213 scripts/pnmtologo.c static inline int is_white(struct color c) c 215 scripts/pnmtologo.c return c.red == 255 && c.green == 255 && c.blue == 255; c 218 scripts/pnmtologo.c static inline int is_gray(struct color c) c 220 scripts/pnmtologo.c return c.red == c.green && c.red == c.blue; c 623 scripts/recordmcount.c int c; c 626 scripts/recordmcount.c while ((c = getopt(argc, argv, "w")) >= 0) { c 627 scripts/recordmcount.c switch (c) { c 235 scripts/unifdef.c #define endsym(c) (!isalnum((unsigned char)c) && c != '_') c 366 scripts/unifdef.c const char *c = copyright; c 368 scripts/unifdef.c while (*++c != '$') c 369 scripts/unifdef.c if (*c == '\0') c 371 scripts/unifdef.c while (*++c != '$') c 372 scripts/unifdef.c putc(*c, stderr); c 398 security/apparmor/include/label.h struct aa_label *c; c 402 security/apparmor/include/label.h c = rcu_dereference(*l); c 403 security/apparmor/include/label.h } while (c && !kref_get_unless_zero(&c->count)); c 406 security/apparmor/include/label.h return c; c 129 security/apparmor/include/match.h const char c); c 275 security/apparmor/include/policy.h struct aa_profile *c; c 279 security/apparmor/include/policy.h c = rcu_dereference(*p); c 280 security/apparmor/include/policy.h } while (c && !kref_get_unless_zero(&c->label.count)); c 283 security/apparmor/include/policy.h return c; c 480 security/apparmor/match.c const char c) c 491 security/apparmor/match.c match_char(state, def, base, next, check, equiv[(u8) c]); c 493 security/apparmor/match.c match_char(state, def, base, next, check, (u8) c); c 494 security/apparmor/policy_unpack.c int c, j, pos, size2 = unpack_strdup(e, &str, NULL); c 506 security/apparmor/policy_unpack.c for (c = j = 0; j < size2 - 1; j++) { c 509 security/apparmor/policy_unpack.c c++; c 522 security/apparmor/policy_unpack.c if (c == 1) c 524 security/apparmor/policy_unpack.c else if (c > 1) c 526 security/apparmor/policy_unpack.c } else if (c) c 167 security/integrity/ima/ima_template.c char c; c 172 security/integrity/ima/ima_template.c c = template_fmt[i]; c 173 security/integrity/ima/ima_template.c if (c == '|') c 42 security/keys/keyctl_pkey.c char *c = params->info, *p, *q; c 45 security/keys/keyctl_pkey.c while ((p = strsep(&c, " \t"))) { c 130 security/keys/trusted.c unsigned char c; c 143 security/keys/trusted.c 
c = !!h3; c 167 security/keys/trusted.c TPM_NONCE_SIZE, h2, 1, &c, 0, 0); c 745 security/keys/trusted.c static int getoptions(char *c, struct trusted_key_payload *pay, c 749 security/keys/trusted.c char *p = c; c 765 security/keys/trusted.c while ((p = strsep(&c, " \t"))) { c 871 security/keys/trusted.c char *c; c 874 security/keys/trusted.c c = strsep(&datablob, " \t"); c 875 security/keys/trusted.c if (!c) c 877 security/keys/trusted.c key_cmd = match_token(c, key_tokens, args); c 881 security/keys/trusted.c c = strsep(&datablob, " \t"); c 882 security/keys/trusted.c if (!c) c 884 security/keys/trusted.c ret = kstrtol(c, 10, &keylen); c 895 security/keys/trusted.c c = strsep(&datablob, " \t"); c 896 security/keys/trusted.c if (!c) c 898 security/keys/trusted.c p->blob_len = strlen(c) / 2; c 901 security/keys/trusted.c ret = hex2bin(p->blob, c, p->blob_len); c 1570 security/security.c void security_cred_getsecid(const struct cred *c, u32 *secid) c 1573 security/security.c call_void_hook(cred_getsecid, c, secid); c 797 security/selinux/avc.c struct avc_callback_node *c; c 800 security/selinux/avc.c c = kmalloc(sizeof(*c), GFP_KERNEL); c 801 security/selinux/avc.c if (!c) { c 806 security/selinux/avc.c c->callback = callback; c 807 security/selinux/avc.c c->events = events; c 808 security/selinux/avc.c c->next = avc_callbacks; c 809 security/selinux/avc.c avc_callbacks = c; c 966 security/selinux/avc.c struct avc_callback_node *c; c 971 security/selinux/avc.c for (c = avc_callbacks; c; c = c->next) { c 972 security/selinux/avc.c if (c->events & AVC_CALLBACK_RESET) { c 973 security/selinux/avc.c tmprc = c->callback(AVC_CALLBACK_RESET); c 2611 security/selinux/hooks.c char c; c 2613 security/selinux/hooks.c for (len = 0; (c = s[len]) != '\0'; len++) { c 2614 security/selinux/hooks.c if (c == '"') c 2616 security/selinux/hooks.c if (c == ',' && !open_quote) c 2642 security/selinux/hooks.c char c = *p; c 2643 security/selinux/hooks.c if (c != '"') c 2644 security/selinux/hooks.c *q++ = c; c 3916 security/selinux/hooks.c static void selinux_cred_getsecid(const struct cred *c, u32 *secid) c 3918 security/selinux/hooks.c *secid = cred_sid(c); c 36 security/selinux/ss/context.h static inline void mls_context_init(struct context *c) c 38 security/selinux/ss/context.h memset(&c->range, 0, sizeof(c->range)); c 106 security/selinux/ss/context.h static inline void mls_context_destroy(struct context *c) c 108 security/selinux/ss/context.h ebitmap_destroy(&c->range.level[0].cat); c 109 security/selinux/ss/context.h ebitmap_destroy(&c->range.level[1].cat); c 110 security/selinux/ss/context.h mls_context_init(c); c 113 security/selinux/ss/context.h static inline void context_init(struct context *c) c 115 security/selinux/ss/context.h memset(c, 0, sizeof(*c)); c 142 security/selinux/ss/context.h static inline void context_destroy(struct context *c) c 144 security/selinux/ss/context.h c->user = c->role = c->type = 0; c 145 security/selinux/ss/context.h kfree(c->str); c 146 security/selinux/ss/context.h c->str = NULL; c 147 security/selinux/ss/context.h c->len = 0; c 148 security/selinux/ss/context.h mls_context_destroy(c); c 193 security/selinux/ss/mls.c int mls_context_isvalid(struct policydb *p, struct context *c) c 200 security/selinux/ss/mls.c if (!mls_range_isvalid(p, &c->range)) c 203 security/selinux/ss/mls.c if (c->role == OBJECT_R_VAL) c 209 security/selinux/ss/mls.c if (!c->user || c->user > p->p_users.nprim) c 211 security/selinux/ss/mls.c usrdatum = p->user_val_to_struct[c->user - 1]; c 212 
security/selinux/ss/mls.c if (!mls_range_contains(usrdatum->range, c->range)) c 31 security/selinux/ss/mls.h int mls_context_isvalid(struct policydb *p, struct context *c); c 355 security/selinux/ss/policydb.c static void ocontext_destroy(struct ocontext *c, int i) c 357 security/selinux/ss/policydb.c if (!c) c 360 security/selinux/ss/policydb.c context_destroy(&c->context[0]); c 361 security/selinux/ss/policydb.c context_destroy(&c->context[1]); c 364 security/selinux/ss/policydb.c kfree(c->u.name); c 365 security/selinux/ss/policydb.c kfree(c); c 764 security/selinux/ss/policydb.c struct ocontext *c, *ctmp; c 788 security/selinux/ss/policydb.c c = p->ocontexts[i]; c 789 security/selinux/ss/policydb.c while (c) { c 790 security/selinux/ss/policydb.c ctmp = c; c 791 security/selinux/ss/policydb.c c = c->next; c 801 security/selinux/ss/policydb.c c = g->head; c 802 security/selinux/ss/policydb.c while (c) { c 803 security/selinux/ss/policydb.c ctmp = c; c 804 security/selinux/ss/policydb.c c = c->next; c 852 security/selinux/ss/policydb.c struct ocontext *head, *c; c 862 security/selinux/ss/policydb.c for (c = head; c; c = c->next) { c 864 security/selinux/ss/policydb.c if (!c->context[0].user) { c 866 security/selinux/ss/policydb.c c->u.name); c 870 security/selinux/ss/policydb.c if (c->sid[0] == SECSID_NULL || c->sid[0] > SECINITSID_NUM) { c 872 security/selinux/ss/policydb.c c->u.name); c 877 security/selinux/ss/policydb.c rc = sidtab_set_initial(s, c->sid[0], &c->context[0]); c 880 security/selinux/ss/policydb.c c->u.name); c 915 security/selinux/ss/policydb.c int policydb_context_isvalid(struct policydb *p, struct context *c) c 920 security/selinux/ss/policydb.c if (!c->role || c->role > p->p_roles.nprim) c 923 security/selinux/ss/policydb.c if (!c->user || c->user > p->p_users.nprim) c 926 security/selinux/ss/policydb.c if (!c->type || c->type > p->p_types.nprim) c 929 security/selinux/ss/policydb.c if (c->role != OBJECT_R_VAL) { c 933 security/selinux/ss/policydb.c role = p->role_val_to_struct[c->role - 1]; c 934 security/selinux/ss/policydb.c if (!role || !ebitmap_get_bit(&role->types, c->type - 1)) c 941 security/selinux/ss/policydb.c usrdatum = p->user_val_to_struct[c->user - 1]; c 945 security/selinux/ss/policydb.c if (!ebitmap_get_bit(&usrdatum->roles, c->role - 1)) c 950 security/selinux/ss/policydb.c if (!mls_context_isvalid(p, c)) c 1019 security/selinux/ss/policydb.c static int context_read_and_validate(struct context *c, c 1031 security/selinux/ss/policydb.c c->user = le32_to_cpu(buf[0]); c 1032 security/selinux/ss/policydb.c c->role = le32_to_cpu(buf[1]); c 1033 security/selinux/ss/policydb.c c->type = le32_to_cpu(buf[2]); c 1035 security/selinux/ss/policydb.c rc = mls_read_range_helper(&c->range, fp); c 1043 security/selinux/ss/policydb.c if (!policydb_context_isvalid(p, c)) { c 1045 security/selinux/ss/policydb.c context_destroy(c); c 1188 security/selinux/ss/policydb.c struct constraint_node *c, *lc; c 1196 security/selinux/ss/policydb.c c = kzalloc(sizeof(*c), GFP_KERNEL); c 1197 security/selinux/ss/policydb.c if (!c) c 1201 security/selinux/ss/policydb.c lc->next = c; c 1203 security/selinux/ss/policydb.c *nodep = c; c 1208 security/selinux/ss/policydb.c c->permissions = le32_to_cpu(buf[0]); c 1220 security/selinux/ss/policydb.c c->expr = e; c 1274 security/selinux/ss/policydb.c lc = c; c 1960 security/selinux/ss/policydb.c struct ocontext *l, *c; c 2033 security/selinux/ss/policydb.c for (l = NULL, c = genfs->head; c; c 2034 security/selinux/ss/policydb.c l = c, c = 
c->next) { c 2036 security/selinux/ss/policydb.c if (!strcmp(newc->u.name, c->u.name) && c 2037 security/selinux/ss/policydb.c (!c->v.sclass || !newc->v.sclass || c 2038 security/selinux/ss/policydb.c newc->v.sclass == c->v.sclass)) { c 2040 security/selinux/ss/policydb.c genfs->fstype, c->u.name); c 2044 security/selinux/ss/policydb.c len2 = strlen(c->u.name); c 2049 security/selinux/ss/policydb.c newc->next = c; c 2075 security/selinux/ss/policydb.c struct ocontext *l, *c; c 2087 security/selinux/ss/policydb.c c = kzalloc(sizeof(*c), GFP_KERNEL); c 2088 security/selinux/ss/policydb.c if (!c) c 2091 security/selinux/ss/policydb.c l->next = c; c 2093 security/selinux/ss/policydb.c p->ocontexts[i] = c; c 2094 security/selinux/ss/policydb.c l = c; c 2102 security/selinux/ss/policydb.c c->sid[0] = le32_to_cpu(buf[0]); c 2103 security/selinux/ss/policydb.c rc = context_read_and_validate(&c->context[0], p, fp); c 2114 security/selinux/ss/policydb.c rc = str_read(&c->u.name, GFP_KERNEL, fp, len); c 2118 security/selinux/ss/policydb.c rc = context_read_and_validate(&c->context[0], p, fp); c 2121 security/selinux/ss/policydb.c rc = context_read_and_validate(&c->context[1], p, fp); c 2129 security/selinux/ss/policydb.c c->u.port.protocol = le32_to_cpu(buf[0]); c 2130 security/selinux/ss/policydb.c c->u.port.low_port = le32_to_cpu(buf[1]); c 2131 security/selinux/ss/policydb.c c->u.port.high_port = le32_to_cpu(buf[2]); c 2132 security/selinux/ss/policydb.c rc = context_read_and_validate(&c->context[0], p, fp); c 2140 security/selinux/ss/policydb.c c->u.node.addr = nodebuf[0]; /* network order */ c 2141 security/selinux/ss/policydb.c c->u.node.mask = nodebuf[1]; /* network order */ c 2142 security/selinux/ss/policydb.c rc = context_read_and_validate(&c->context[0], p, fp); c 2152 security/selinux/ss/policydb.c c->v.behavior = le32_to_cpu(buf[0]); c 2154 security/selinux/ss/policydb.c if (c->v.behavior == SECURITY_FS_USE_MNTPOINT) c 2156 security/selinux/ss/policydb.c if (c->v.behavior > SECURITY_FS_USE_MAX) c 2160 security/selinux/ss/policydb.c rc = str_read(&c->u.name, GFP_KERNEL, fp, len); c 2164 security/selinux/ss/policydb.c rc = context_read_and_validate(&c->context[0], p, fp); c 2175 security/selinux/ss/policydb.c c->u.node6.addr[k] = nodebuf[k]; c 2177 security/selinux/ss/policydb.c c->u.node6.mask[k] = nodebuf[k+4]; c 2178 security/selinux/ss/policydb.c rc = context_read_and_validate(&c->context[0], p, fp); c 2191 security/selinux/ss/policydb.c c->u.ibpkey.subnet_prefix = be64_to_cpu(prefixbuf[0]); c 2205 security/selinux/ss/policydb.c c->u.ibpkey.low_pkey = pkey_lo; c 2206 security/selinux/ss/policydb.c c->u.ibpkey.high_pkey = pkey_hi; c 2208 security/selinux/ss/policydb.c rc = context_read_and_validate(&c->context[0], c 2223 security/selinux/ss/policydb.c rc = str_read(&c->u.ibendport.dev_name, GFP_KERNEL, fp, len); c 2233 security/selinux/ss/policydb.c c->u.ibendport.port = port; c 2235 security/selinux/ss/policydb.c rc = context_read_and_validate(&c->context[0], c 2709 security/selinux/ss/policydb.c static int context_write(struct policydb *p, struct context *c, c 2715 security/selinux/ss/policydb.c buf[0] = cpu_to_le32(c->user); c 2716 security/selinux/ss/policydb.c buf[1] = cpu_to_le32(c->role); c 2717 security/selinux/ss/policydb.c buf[2] = cpu_to_le32(c->type); c 2723 security/selinux/ss/policydb.c rc = mls_write_range_helper(&c->range, fp); c 2809 security/selinux/ss/policydb.c struct constraint_node *c; c 2815 security/selinux/ss/policydb.c for (c = node; c; c = c->next) { c 2817 
security/selinux/ss/policydb.c for (e = c->expr; e; e = e->next) c 2819 security/selinux/ss/policydb.c buf[0] = cpu_to_le32(c->permissions); c 2824 security/selinux/ss/policydb.c for (e = c->expr; e; e = e->next) { c 2860 security/selinux/ss/policydb.c struct constraint_node *c; c 2873 security/selinux/ss/policydb.c for (c = cladatum->constraints; c; c = c->next) c 2909 security/selinux/ss/policydb.c for (c = cladatum->validatetrans; c; c = c->next) c 3083 security/selinux/ss/policydb.c struct ocontext *c; c 3086 security/selinux/ss/policydb.c for (c = p->ocontexts[i]; c; c = c->next) c 3092 security/selinux/ss/policydb.c for (c = p->ocontexts[i]; c; c = c->next) { c 3095 security/selinux/ss/policydb.c buf[0] = cpu_to_le32(c->sid[0]); c 3099 security/selinux/ss/policydb.c rc = context_write(p, &c->context[0], fp); c 3105 security/selinux/ss/policydb.c len = strlen(c->u.name); c 3110 security/selinux/ss/policydb.c rc = put_entry(c->u.name, 1, len, fp); c 3113 security/selinux/ss/policydb.c rc = context_write(p, &c->context[0], fp); c 3116 security/selinux/ss/policydb.c rc = context_write(p, &c->context[1], fp); c 3121 security/selinux/ss/policydb.c buf[0] = cpu_to_le32(c->u.port.protocol); c 3122 security/selinux/ss/policydb.c buf[1] = cpu_to_le32(c->u.port.low_port); c 3123 security/selinux/ss/policydb.c buf[2] = cpu_to_le32(c->u.port.high_port); c 3127 security/selinux/ss/policydb.c rc = context_write(p, &c->context[0], fp); c 3132 security/selinux/ss/policydb.c nodebuf[0] = c->u.node.addr; /* network order */ c 3133 security/selinux/ss/policydb.c nodebuf[1] = c->u.node.mask; /* network order */ c 3137 security/selinux/ss/policydb.c rc = context_write(p, &c->context[0], fp); c 3142 security/selinux/ss/policydb.c buf[0] = cpu_to_le32(c->v.behavior); c 3143 security/selinux/ss/policydb.c len = strlen(c->u.name); c 3148 security/selinux/ss/policydb.c rc = put_entry(c->u.name, 1, len, fp); c 3151 security/selinux/ss/policydb.c rc = context_write(p, &c->context[0], fp); c 3157 security/selinux/ss/policydb.c nodebuf[j] = c->u.node6.addr[j]; /* network order */ c 3159 security/selinux/ss/policydb.c nodebuf[j + 4] = c->u.node6.mask[j]; /* network order */ c 3163 security/selinux/ss/policydb.c rc = context_write(p, &c->context[0], fp); c 3169 security/selinux/ss/policydb.c prefixbuf[0] = cpu_to_be64(c->u.ibpkey.subnet_prefix); c 3175 security/selinux/ss/policydb.c buf[0] = cpu_to_le32(c->u.ibpkey.low_pkey); c 3176 security/selinux/ss/policydb.c buf[1] = cpu_to_le32(c->u.ibpkey.high_pkey); c 3181 security/selinux/ss/policydb.c rc = context_write(p, &c->context[0], fp); c 3186 security/selinux/ss/policydb.c len = strlen(c->u.ibendport.dev_name); c 3188 security/selinux/ss/policydb.c buf[1] = cpu_to_le32(c->u.ibendport.port); c 3192 security/selinux/ss/policydb.c rc = put_entry(c->u.ibendport.dev_name, 1, len, fp); c 3195 security/selinux/ss/policydb.c rc = context_write(p, &c->context[0], fp); c 3208 security/selinux/ss/policydb.c struct ocontext *c; c 3230 security/selinux/ss/policydb.c for (c = genfs->head; c; c = c->next) c 3236 security/selinux/ss/policydb.c for (c = genfs->head; c; c = c->next) { c 3237 security/selinux/ss/policydb.c len = strlen(c->u.name); c 3242 security/selinux/ss/policydb.c rc = put_entry(c->u.name, 1, len, fp); c 3245 security/selinux/ss/policydb.c buf[0] = cpu_to_le32(c->v.sclass); c 3249 security/selinux/ss/policydb.c rc = context_write(p, &c->context[0], fp); c 313 security/selinux/ss/policydb.h extern int policydb_context_isvalid(struct policydb *p, struct context 
*c); c 269 security/selinux/ss/services.c struct context *c; c 394 security/selinux/ss/services.c c = scontext; c 396 security/selinux/ss/services.c c = tcontext; c 398 security/selinux/ss/services.c c = xcontext; c 399 security/selinux/ss/services.c if (!c) { c 405 security/selinux/ss/services.c val1 = c->user; c 407 security/selinux/ss/services.c val1 = c->role; c 409 security/selinux/ss/services.c val1 = c->type; c 2264 security/selinux/ss/services.c struct ocontext *c; c 2272 security/selinux/ss/services.c c = policydb->ocontexts[OCON_PORT]; c 2273 security/selinux/ss/services.c while (c) { c 2274 security/selinux/ss/services.c if (c->u.port.protocol == protocol && c 2275 security/selinux/ss/services.c c->u.port.low_port <= port && c 2276 security/selinux/ss/services.c c->u.port.high_port >= port) c 2278 security/selinux/ss/services.c c = c->next; c 2281 security/selinux/ss/services.c if (c) { c 2282 security/selinux/ss/services.c if (!c->sid[0]) { c 2284 security/selinux/ss/services.c &c->context[0], c 2285 security/selinux/ss/services.c &c->sid[0]); c 2289 security/selinux/ss/services.c *out_sid = c->sid[0]; c 2310 security/selinux/ss/services.c struct ocontext *c; c 2318 security/selinux/ss/services.c c = policydb->ocontexts[OCON_IBPKEY]; c 2319 security/selinux/ss/services.c while (c) { c 2320 security/selinux/ss/services.c if (c->u.ibpkey.low_pkey <= pkey_num && c 2321 security/selinux/ss/services.c c->u.ibpkey.high_pkey >= pkey_num && c 2322 security/selinux/ss/services.c c->u.ibpkey.subnet_prefix == subnet_prefix) c 2325 security/selinux/ss/services.c c = c->next; c 2328 security/selinux/ss/services.c if (c) { c 2329 security/selinux/ss/services.c if (!c->sid[0]) { c 2331 security/selinux/ss/services.c &c->context[0], c 2332 security/selinux/ss/services.c &c->sid[0]); c 2336 security/selinux/ss/services.c *out_sid = c->sid[0]; c 2356 security/selinux/ss/services.c struct ocontext *c; c 2364 security/selinux/ss/services.c c = policydb->ocontexts[OCON_IBENDPORT]; c 2365 security/selinux/ss/services.c while (c) { c 2366 security/selinux/ss/services.c if (c->u.ibendport.port == port_num && c 2367 security/selinux/ss/services.c !strncmp(c->u.ibendport.dev_name, c 2372 security/selinux/ss/services.c c = c->next; c 2375 security/selinux/ss/services.c if (c) { c 2376 security/selinux/ss/services.c if (!c->sid[0]) { c 2378 security/selinux/ss/services.c &c->context[0], c 2379 security/selinux/ss/services.c &c->sid[0]); c 2383 security/selinux/ss/services.c *out_sid = c->sid[0]; c 2403 security/selinux/ss/services.c struct ocontext *c; c 2410 security/selinux/ss/services.c c = policydb->ocontexts[OCON_NETIF]; c 2411 security/selinux/ss/services.c while (c) { c 2412 security/selinux/ss/services.c if (strcmp(name, c->u.name) == 0) c 2414 security/selinux/ss/services.c c = c->next; c 2417 security/selinux/ss/services.c if (c) { c 2418 security/selinux/ss/services.c if (!c->sid[0] || !c->sid[1]) { c 2420 security/selinux/ss/services.c &c->context[0], c 2421 security/selinux/ss/services.c &c->sid[0]); c 2425 security/selinux/ss/services.c &c->context[1], c 2426 security/selinux/ss/services.c &c->sid[1]); c 2430 security/selinux/ss/services.c *if_sid = c->sid[0]; c 2468 security/selinux/ss/services.c struct ocontext *c; c 2485 security/selinux/ss/services.c c = policydb->ocontexts[OCON_NODE]; c 2486 security/selinux/ss/services.c while (c) { c 2487 security/selinux/ss/services.c if (c->u.node.addr == (addr & c->u.node.mask)) c 2489 security/selinux/ss/services.c c = c->next; c 2498 
security/selinux/ss/services.c c = policydb->ocontexts[OCON_NODE6]; c 2499 security/selinux/ss/services.c while (c) { c 2500 security/selinux/ss/services.c if (match_ipv6_addrmask(addrp, c->u.node6.addr, c 2501 security/selinux/ss/services.c c->u.node6.mask)) c 2503 security/selinux/ss/services.c c = c->next; c 2513 security/selinux/ss/services.c if (c) { c 2514 security/selinux/ss/services.c if (!c->sid[0]) { c 2516 security/selinux/ss/services.c &c->context[0], c 2517 security/selinux/ss/services.c &c->sid[0]); c 2521 security/selinux/ss/services.c *out_sid = c->sid[0]; c 2679 security/selinux/ss/services.c struct ocontext *c; c 2698 security/selinux/ss/services.c for (c = genfs->head; c; c = c->next) { c 2699 security/selinux/ss/services.c len = strlen(c->u.name); c 2700 security/selinux/ss/services.c if ((!c->v.sclass || sclass == c->v.sclass) && c 2701 security/selinux/ss/services.c (strncmp(c->u.name, path, len) == 0)) c 2706 security/selinux/ss/services.c if (!c) c 2709 security/selinux/ss/services.c if (!c->sid[0]) { c 2710 security/selinux/ss/services.c rc = sidtab_context_to_sid(sidtab, &c->context[0], &c->sid[0]); c 2715 security/selinux/ss/services.c *sid = c->sid[0]; c 2754 security/selinux/ss/services.c struct ocontext *c; c 2763 security/selinux/ss/services.c c = policydb->ocontexts[OCON_FSUSE]; c 2764 security/selinux/ss/services.c while (c) { c 2765 security/selinux/ss/services.c if (strcmp(fstype, c->u.name) == 0) c 2767 security/selinux/ss/services.c c = c->next; c 2770 security/selinux/ss/services.c if (c) { c 2771 security/selinux/ss/services.c sbsec->behavior = c->v.behavior; c 2772 security/selinux/ss/services.c if (!c->sid[0]) { c 2773 security/selinux/ss/services.c rc = sidtab_context_to_sid(sidtab, &c->context[0], c 2774 security/selinux/ss/services.c &c->sid[0]); c 2778 security/selinux/ss/services.c sbsec->sid = c->sid[0]; c 52 security/tomoyo/audit.c const unsigned char c = kaddr[offset++]; c 59 security/tomoyo/audit.c } else if (c == '\\') { c 62 security/tomoyo/audit.c } else if (c > ' ' && c < 127) { c 63 security/tomoyo/audit.c *cp++ = c; c 64 security/tomoyo/audit.c } else if (!c) { c 70 security/tomoyo/audit.c *cp++ = (c >> 6) + '0'; c 71 security/tomoyo/audit.c *cp++ = ((c >> 3) & 7) + '0'; c 72 security/tomoyo/audit.c *cp++ = (c & 7) + '0'; c 74 security/tomoyo/audit.c if (c) c 610 security/tomoyo/common.c const u8 c = tomoyo_index2category[i]; c 612 security/tomoyo/common.c tomoyo_category_keywords[c]; c 2673 security/tomoyo/common.c char c; c 2689 security/tomoyo/common.c if (get_user(c, buffer)) { c 2695 security/tomoyo/common.c cp0[head->w.avail++] = c; c 2696 security/tomoyo/common.c if (c != '\n') c 138 security/tomoyo/condition.c const unsigned char c = kaddr[offset++]; c 140 security/tomoyo/condition.c if (c && arg_len < TOMOYO_EXEC_TMPSIZE - 10) { c 141 security/tomoyo/condition.c if (c == '\\') { c 144 security/tomoyo/condition.c } else if (c > ' ' && c < 127) { c 145 security/tomoyo/condition.c arg_ptr[arg_len++] = c; c 148 security/tomoyo/condition.c arg_ptr[arg_len++] = (c >> 6) + '0'; c 150 security/tomoyo/condition.c ((c >> 3) & 7) + '0'; c 151 security/tomoyo/condition.c arg_ptr[arg_len++] = (c & 7) + '0'; c 156 security/tomoyo/condition.c if (c) c 652 security/tomoyo/domain.c const unsigned char c = env_page.data[offset++]; c 654 security/tomoyo/domain.c if (c && arg_len < TOMOYO_EXEC_TMPSIZE - 10) { c 655 security/tomoyo/domain.c if (c == '=') { c 657 security/tomoyo/domain.c } else if (c == '\\') { c 660 security/tomoyo/domain.c } 
else if (c > ' ' && c < 127) { c 661 security/tomoyo/domain.c arg_ptr[arg_len++] = c; c 664 security/tomoyo/domain.c arg_ptr[arg_len++] = (c >> 6) + '0'; c 666 security/tomoyo/domain.c = ((c >> 3) & 7) + '0'; c 667 security/tomoyo/domain.c arg_ptr[arg_len++] = (c & 7) + '0'; c 672 security/tomoyo/domain.c if (c) c 33 security/tomoyo/realpath.c const unsigned char c = p[i]; c 35 security/tomoyo/realpath.c if (c == '\\') c 37 security/tomoyo/realpath.c else if (c > ' ' && c < 127) c 50 security/tomoyo/realpath.c const unsigned char c = p[i]; c 52 security/tomoyo/realpath.c if (c == '\\') { c 55 security/tomoyo/realpath.c } else if (c > ' ' && c < 127) { c 56 security/tomoyo/realpath.c *cp++ = c; c 59 security/tomoyo/realpath.c *cp++ = (c >> 6) + '0'; c 60 security/tomoyo/realpath.c *cp++ = ((c >> 3) & 7) + '0'; c 61 security/tomoyo/realpath.c *cp++ = (c & 7) + '0'; c 190 security/tomoyo/util.c char c = *(cp + 1); c 192 security/tomoyo/util.c if (c == 'x' || c == 'X') { c 195 security/tomoyo/util.c } else if (c >= '0' && c <= '7') { c 328 security/tomoyo/util.c static inline bool tomoyo_alphabet_char(const char c) c 330 security/tomoyo/util.c return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z'); c 354 security/tomoyo/util.c static inline bool tomoyo_valid(const unsigned char c) c 356 security/tomoyo/util.c return c > ' ' && c < 127; c 366 security/tomoyo/util.c static inline bool tomoyo_invalid(const unsigned char c) c 368 security/tomoyo/util.c return c && (c <= ' ' || c >= 127); c 437 security/tomoyo/util.c unsigned char c; c 444 security/tomoyo/util.c c = *string++; c 445 security/tomoyo/util.c if (c == '\\') { c 448 security/tomoyo/util.c c = *string++; c 449 security/tomoyo/util.c switch (c) { c 485 security/tomoyo/util.c c = tomoyo_make_byte(c, d, e); c 486 security/tomoyo/util.c if (c <= ' ' || c >= 127) c 490 security/tomoyo/util.c } else if (in_repetition && c == '/') { c 492 security/tomoyo/util.c } else if (c <= ' ' || c >= 127) { c 615 security/tomoyo/util.c char c; c 620 security/tomoyo/util.c while ((c = *filename++) != '\0') { c 621 security/tomoyo/util.c if (c != '\\') { c 625 security/tomoyo/util.c c = *filename++; c 626 security/tomoyo/util.c switch (c) { c 634 security/tomoyo/util.c c = *filename++; c 635 security/tomoyo/util.c if (c < '0' || c > '7') c 637 security/tomoyo/util.c c = *filename++; c 638 security/tomoyo/util.c if (c < '0' || c > '7') c 682 security/tomoyo/util.c char c; c 691 security/tomoyo/util.c c = *filename; c 695 security/tomoyo/util.c if (c == '/') { c 697 security/tomoyo/util.c } else if (c == '\\') { c 707 security/tomoyo/util.c if (c != '\\') c 713 security/tomoyo/util.c if (!isdigit(c)) c 717 security/tomoyo/util.c if (!isxdigit(c)) c 721 security/tomoyo/util.c if (!tomoyo_alphabet_char(c)) c 728 security/tomoyo/util.c if (c == '\\' && tomoyo_byte_range(filename + 1) c 742 security/tomoyo/util.c c = filename[i]; c 743 security/tomoyo/util.c if (c == '.' 
&& *pattern == '@') c 745 security/tomoyo/util.c if (c != '\\') c 757 security/tomoyo/util.c c = *pattern; c 758 security/tomoyo/util.c if (c == '$') { c 761 security/tomoyo/util.c } else if (c == 'X') { c 764 security/tomoyo/util.c } else if (c == 'A') { c 62 sound/aoa/codecs/onyx.c #define codec_to_onyx(c) container_of(c, struct onyx, codec) c 312 sound/aoa/codecs/onyx.c u8 c; c 315 sound/aoa/codecs/onyx.c onyx_read_register(onyx, ONYX_REG_DAC_CONTROL, &c); c 318 sound/aoa/codecs/onyx.c ucontrol->value.integer.value[0] = !(c & ONYX_MUTE_LEFT); c 319 sound/aoa/codecs/onyx.c ucontrol->value.integer.value[1] = !(c & ONYX_MUTE_RIGHT); c 328 sound/aoa/codecs/onyx.c u8 v = 0, c = 0; c 336 sound/aoa/codecs/onyx.c c = v; c 337 sound/aoa/codecs/onyx.c c &= ~(ONYX_MUTE_RIGHT | ONYX_MUTE_LEFT); c 339 sound/aoa/codecs/onyx.c c |= ONYX_MUTE_LEFT; c 341 sound/aoa/codecs/onyx.c c |= ONYX_MUTE_RIGHT; c 342 sound/aoa/codecs/onyx.c err = onyx_write_register(onyx, ONYX_REG_DAC_CONTROL, c); c 347 sound/aoa/codecs/onyx.c return !err ? (v != c) : err; c 369 sound/aoa/codecs/onyx.c u8 c; c 376 sound/aoa/codecs/onyx.c onyx_read_register(onyx, address, &c); c 379 sound/aoa/codecs/onyx.c ucontrol->value.integer.value[0] = !!(c & mask) ^ polarity; c 388 sound/aoa/codecs/onyx.c u8 v = 0, c = 0; c 403 sound/aoa/codecs/onyx.c c = v; c 404 sound/aoa/codecs/onyx.c c &= ~(mask); c 406 sound/aoa/codecs/onyx.c c |= mask; c 407 sound/aoa/codecs/onyx.c err = onyx_write_register(onyx, address, c); c 412 sound/aoa/codecs/onyx.c return !err ? (v != c) : err; c 26 sound/aoa/codecs/toonie.c #define codec_to_toonie(c) container_of(c, struct toonie, codec) c 23 sound/aoa/core/core.c static int attach_codec_to_fabric(struct aoa_codec *c) c 27 sound/aoa/core/core.c if (!try_module_get(c->owner)) c 32 sound/aoa/core/core.c err = fabric->found_codec(c); c 34 sound/aoa/core/core.c module_put(c->owner); c 36 sound/aoa/core/core.c c->name); c 39 sound/aoa/core/core.c c->fabric = fabric; c 42 sound/aoa/core/core.c if (c->init) c 43 sound/aoa/core/core.c err = c->init(c); c 45 sound/aoa/core/core.c printk(KERN_ERR "snd-aoa: codec %s didn't init\n", c->name); c 46 sound/aoa/core/core.c c->fabric = NULL; c 48 sound/aoa/core/core.c fabric->remove_codec(c); c 49 sound/aoa/core/core.c module_put(c->owner); c 53 sound/aoa/core/core.c fabric->attached_codec(c); c 86 sound/aoa/core/core.c struct aoa_codec *c; c 107 sound/aoa/core/core.c list_for_each_entry(c, &codec_list, list) { c 108 sound/aoa/core/core.c if (c->fabric != fabric) c 109 sound/aoa/core/core.c attach_codec_to_fabric(c); c 117 sound/aoa/core/core.c struct aoa_codec *c; c 122 sound/aoa/core/core.c list_for_each_entry(c, &codec_list, list) { c 123 sound/aoa/core/core.c if (c->fabric) c 124 sound/aoa/core/core.c aoa_fabric_unlink_codec(c); c 855 sound/aoa/fabrics/layout.c struct snd_kcontrol *detected, *c; c 883 sound/aoa/fabrics/layout.c c = ldev->headphone_ctrl; c 884 sound/aoa/fabrics/layout.c if (c) c 885 sound/aoa/fabrics/layout.c snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &c->id); c 886 sound/aoa/fabrics/layout.c c = ldev->speaker_ctrl; c 887 sound/aoa/fabrics/layout.c if (c) c 888 sound/aoa/fabrics/layout.c snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &c->id); c 889 sound/aoa/fabrics/layout.c c = ldev->lineout_ctrl; c 890 sound/aoa/fabrics/layout.c if (c) c 891 sound/aoa/fabrics/layout.c snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &c->id); c 21 sound/aoa/soundbus/i2sbus/control.c int i2sbus_control_init(struct macio_dev* dev, struct i2sbus_control **c) c 23 
sound/aoa/soundbus/i2sbus/control.c *c = kzalloc(sizeof(struct i2sbus_control), GFP_KERNEL); c 24 sound/aoa/soundbus/i2sbus/control.c if (!*c) c 27 sound/aoa/soundbus/i2sbus/control.c INIT_LIST_HEAD(&(*c)->list); c 29 sound/aoa/soundbus/i2sbus/control.c (*c)->macio = dev->bus->chip; c 33 sound/aoa/soundbus/i2sbus/control.c void i2sbus_control_destroy(struct i2sbus_control *c) c 35 sound/aoa/soundbus/i2sbus/control.c kfree(c); c 39 sound/aoa/soundbus/i2sbus/control.c int i2sbus_control_add_dev(struct i2sbus_control *c, c 68 sound/aoa/soundbus/i2sbus/control.c list_add(&i2sdev->item, &c->list); c 73 sound/aoa/soundbus/i2sbus/control.c void i2sbus_control_remove_dev(struct i2sbus_control *c, c 78 sound/aoa/soundbus/i2sbus/control.c if (list_empty(&c->list)) c 79 sound/aoa/soundbus/i2sbus/control.c i2sbus_control_destroy(c); c 82 sound/aoa/soundbus/i2sbus/control.c int i2sbus_control_enable(struct i2sbus_control *c, c 86 sound/aoa/soundbus/i2sbus/control.c struct macio_chip *macio = c->macio; c 109 sound/aoa/soundbus/i2sbus/control.c int i2sbus_control_cell(struct i2sbus_control *c, c 114 sound/aoa/soundbus/i2sbus/control.c struct macio_chip *macio = c->macio; c 152 sound/aoa/soundbus/i2sbus/control.c int i2sbus_control_clock(struct i2sbus_control *c, c 157 sound/aoa/soundbus/i2sbus/control.c struct macio_chip *macio = c->macio; c 111 sound/aoa/soundbus/i2sbus/i2sbus.h struct i2sbus_control **c); c 112 sound/aoa/soundbus/i2sbus/i2sbus.h extern void i2sbus_control_destroy(struct i2sbus_control *c); c 113 sound/aoa/soundbus/i2sbus/i2sbus.h extern int i2sbus_control_add_dev(struct i2sbus_control *c, c 115 sound/aoa/soundbus/i2sbus/i2sbus.h extern void i2sbus_control_remove_dev(struct i2sbus_control *c, c 117 sound/aoa/soundbus/i2sbus/i2sbus.h extern int i2sbus_control_enable(struct i2sbus_control *c, c 119 sound/aoa/soundbus/i2sbus/i2sbus.h extern int i2sbus_control_cell(struct i2sbus_control *c, c 122 sound/aoa/soundbus/i2sbus/i2sbus.h extern int i2sbus_control_clock(struct i2sbus_control *c, c 1330 sound/core/control.c kctl->tlv.c = snd_ctl_elem_user_tlv; c 1430 sound/core/control.c if (kctl->tlv.c == NULL) c 1438 sound/core/control.c return kctl->tlv.c(kctl, op_flag, size, buf); c 497 sound/core/hwdep.c entry->c.text.read = snd_hwdep_proc_read; c 114 sound/core/info.c if (entry->c.ops->llseek) { c 115 sound/core/info.c offset = entry->c.ops->llseek(entry, c 162 sound/core/info.c size = entry->c.ops->read(entry, data->file_private_data, c 183 sound/core/info.c size = entry->c.ops->write(entry, data->file_private_data, c 197 sound/core/info.c if (entry->c.ops->poll) c 198 sound/core/info.c return entry->c.ops->poll(entry, c 201 sound/core/info.c if (entry->c.ops->read) c 203 sound/core/info.c if (entry->c.ops->write) c 214 sound/core/info.c if (!entry->c.ops->ioctl) c 216 sound/core/info.c return entry->c.ops->ioctl(entry, data->file_private_data, c 230 sound/core/info.c if (!entry->c.ops->mmap) c 232 sound/core/info.c return entry->c.ops->mmap(entry, data->file_private_data, c 248 sound/core/info.c if (((mode == O_RDONLY || mode == O_RDWR) && !entry->c.ops->read) || c 249 sound/core/info.c ((mode == O_WRONLY || mode == O_RDWR) && !entry->c.ops->write)) { c 254 sound/core/info.c if (entry->c.ops->open) { c 255 sound/core/info.c err = entry->c.ops->open(entry, mode, &data->file_private_data); c 277 sound/core/info.c if (entry->c.ops->release) c 278 sound/core/info.c entry->c.ops->release(entry, file->f_flags & O_ACCMODE, c 313 sound/core/info.c if (!entry->c.text.write) c 359 sound/core/info.c if 
(!entry->c.text.read) { c 363 sound/core/info.c entry->c.text.read(entry, data->rbuffer); c 409 sound/core/info.c if (data->wbuffer && entry->c.text.write) c 410 sound/core/info.c entry->c.text.write(entry, data->wbuffer); c 609 sound/core/info.c int c = -1; c 616 sound/core/info.c c = buffer->buffer[buffer->curr++]; c 619 sound/core/info.c if (c == '\n') c 623 sound/core/info.c *line++ = c; c 645 sound/core/info.c int c; c 650 sound/core/info.c c = *src++; c 651 sound/core/info.c while (--len > 0 && *src && *src != c) { c 654 sound/core/info.c if (*src == c) c 888 sound/core/info.c entry->c.text.write = write; c 912 sound/core/info.c entry->c.text.read = snd_info_version_read; c 105 sound/core/info_oss.c entry->c.text.read = snd_sndstat_proc_read; c 663 sound/core/init.c int c; c 666 sound/core/init.c c = buf[idx]; c 667 sound/core/init.c if (!isalnum(c) && c != '_' && c != '-') c 858 sound/core/init.c entry->c.text.read = snd_card_info_read; c 866 sound/core/init.c entry->c.text.read = snd_card_module_info_read; c 30 sound/core/memory.c size_t c = count; c 31 sound/core/memory.c if (c > sizeof(buf)) c 32 sound/core/memory.c c = sizeof(buf); c 33 sound/core/memory.c memcpy_fromio(buf, (void __iomem *)src, c); c 34 sound/core/memory.c if (copy_to_user(dst, buf, c)) c 36 sound/core/memory.c count -= c; c 37 sound/core/memory.c dst += c; c 38 sound/core/memory.c src += c; c 62 sound/core/memory.c size_t c = count; c 63 sound/core/memory.c if (c > sizeof(buf)) c 64 sound/core/memory.c c = sizeof(buf); c 65 sound/core/memory.c if (copy_from_user(buf, src, c)) c 67 sound/core/memory.c memcpy_toio(dst, buf, c); c 68 sound/core/memory.c count -= c; c 69 sound/core/memory.c dst += c; c 70 sound/core/memory.c src += c; c 1236 sound/core/oss/mixer_oss.c entry->c.text.read = snd_mixer_oss_proc_read; c 1237 sound/core/oss/mixer_oss.c entry->c.text.write = snd_mixer_oss_proc_write; c 319 sound/core/oss/pcm_oss.c int *c, int *cdir) c 323 sound/core/oss/pcm_oss.c *c = a - b; c 326 sound/core/oss/pcm_oss.c (*c)--; c 328 sound/core/oss/pcm_oss.c (*c)++; c 3034 sound/core/oss/pcm_oss.c entry->c.text.read = snd_pcm_oss_proc_read; c 3035 sound/core/oss/pcm_oss.c entry->c.text.write = snd_pcm_oss_proc_write; c 55 sound/core/oss/pcm_plugin.c struct snd_pcm_plugin_channel *c; c 77 sound/core/oss/pcm_plugin.c c = plugin->buf_channels; c 79 sound/core/oss/pcm_plugin.c for (channel = 0; channel < format->channels; channel++, c++) { c 80 sound/core/oss/pcm_plugin.c c->frames = frames; c 81 sound/core/oss/pcm_plugin.c c->enabled = 1; c 82 sound/core/oss/pcm_plugin.c c->wanted = 0; c 83 sound/core/oss/pcm_plugin.c c->area.addr = plugin->buf; c 84 sound/core/oss/pcm_plugin.c c->area.first = channel * width; c 85 sound/core/oss/pcm_plugin.c c->area.step = format->channels * width; c 91 sound/core/oss/pcm_plugin.c for (channel = 0; channel < format->channels; channel++, c++) { c 92 sound/core/oss/pcm_plugin.c c->frames = frames; c 93 sound/core/oss/pcm_plugin.c c->enabled = 1; c 94 sound/core/oss/pcm_plugin.c c->wanted = 0; c 95 sound/core/oss/pcm_plugin.c c->area.addr = plugin->buf + (channel * size); c 96 sound/core/oss/pcm_plugin.c c->area.first = 0; c 97 sound/core/oss/pcm_plugin.c c->area.step = width; c 525 sound/core/pcm.c entry->c.text.write = snd_pcm_xrun_debug_write; c 582 sound/core/pcm.c entry->c.text.write = snd_pcm_xrun_injection_write; c 463 sound/core/pcm_compat.c } c; c 482 sound/core/pcm_compat.c get_user(scontrol.appl_ptr, &src->c.control.appl_ptr) || c 483 sound/core/pcm_compat.c 
get_user(scontrol.avail_min, &src->c.control.avail_min)) c 517 sound/core/pcm_compat.c put_user(scontrol.appl_ptr, &src->c.control.appl_ptr) || c 518 sound/core/pcm_compat.c put_user(scontrol.avail_min, &src->c.control.avail_min)) c 552 sound/core/pcm_compat.c } c; c 571 sound/core/pcm_compat.c get_user(scontrol.appl_ptr, &src->c.control.appl_ptr) || c 572 sound/core/pcm_compat.c get_user(scontrol.avail_min, &src->c.control.avail_min)) c 605 sound/core/pcm_compat.c put_user(scontrol.appl_ptr, &src->c.control.appl_ptr) || c 606 sound/core/pcm_compat.c put_user(scontrol.avail_min, &src->c.control.avail_min)) c 29 sound/core/pcm_drm_eld.c const struct snd_interval *c; c 35 sound/core/pcm_drm_eld.c c = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS); c 44 sound/core/pcm_drm_eld.c if (c->min <= max_channels) c 56 sound/core/pcm_drm_eld.c struct snd_interval *c = hw_param_interval(params, rule->var); c 77 sound/core/pcm_drm_eld.c return snd_interval_refine(c, &t); c 543 sound/core/pcm_lib.c unsigned int c, unsigned int *r) c 546 sound/core/pcm_lib.c if (c == 0) { c 550 sound/core/pcm_lib.c n = div_u64_rem(n, c, r); c 646 sound/core/pcm_lib.c void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c) c 649 sound/core/pcm_lib.c snd_interval_none(c); c 652 sound/core/pcm_lib.c c->empty = 0; c 653 sound/core/pcm_lib.c c->min = mul(a->min, b->min); c 654 sound/core/pcm_lib.c c->openmin = (a->openmin || b->openmin); c 655 sound/core/pcm_lib.c c->max = mul(a->max, b->max); c 656 sound/core/pcm_lib.c c->openmax = (a->openmax || b->openmax); c 657 sound/core/pcm_lib.c c->integer = (a->integer && b->integer); c 670 sound/core/pcm_lib.c void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c) c 674 sound/core/pcm_lib.c snd_interval_none(c); c 677 sound/core/pcm_lib.c c->empty = 0; c 678 sound/core/pcm_lib.c c->min = div32(a->min, b->max, &r); c 679 sound/core/pcm_lib.c c->openmin = (r || a->openmin || b->openmax); c 681 sound/core/pcm_lib.c c->max = div32(a->max, b->min, &r); c 683 sound/core/pcm_lib.c c->max++; c 684 sound/core/pcm_lib.c c->openmax = 1; c 686 sound/core/pcm_lib.c c->openmax = (a->openmax || b->openmin); c 688 sound/core/pcm_lib.c c->max = UINT_MAX; c 689 sound/core/pcm_lib.c c->openmax = 0; c 691 sound/core/pcm_lib.c c->integer = 0; c 706 sound/core/pcm_lib.c unsigned int k, struct snd_interval *c) c 710 sound/core/pcm_lib.c snd_interval_none(c); c 713 sound/core/pcm_lib.c c->empty = 0; c 714 sound/core/pcm_lib.c c->min = muldiv32(a->min, b->min, k, &r); c 715 sound/core/pcm_lib.c c->openmin = (r || a->openmin || b->openmin); c 716 sound/core/pcm_lib.c c->max = muldiv32(a->max, b->max, k, &r); c 718 sound/core/pcm_lib.c c->max++; c 719 sound/core/pcm_lib.c c->openmax = 1; c 721 sound/core/pcm_lib.c c->openmax = (a->openmax || b->openmax); c 722 sound/core/pcm_lib.c c->integer = 0; c 737 sound/core/pcm_lib.c const struct snd_interval *b, struct snd_interval *c) c 741 sound/core/pcm_lib.c snd_interval_none(c); c 744 sound/core/pcm_lib.c c->empty = 0; c 745 sound/core/pcm_lib.c c->min = muldiv32(a->min, k, b->max, &r); c 746 sound/core/pcm_lib.c c->openmin = (r || a->openmin || b->openmax); c 748 sound/core/pcm_lib.c c->max = muldiv32(a->max, k, b->min, &r); c 750 sound/core/pcm_lib.c c->max++; c 751 sound/core/pcm_lib.c c->openmax = 1; c 753 sound/core/pcm_lib.c c->openmax = (a->openmax || b->openmin); c 755 sound/core/pcm_lib.c c->max = UINT_MAX; c 756 sound/core/pcm_lib.c c->openmax = 
0; c 758 sound/core/pcm_lib.c c->integer = 0; c 1115 sound/core/pcm_lib.c struct snd_pcm_hw_rule *c; c 1122 sound/core/pcm_lib.c new = krealloc(constrs->rules, new_rules * sizeof(*c), c 1131 sound/core/pcm_lib.c c = &constrs->rules[constrs->rules_num]; c 1132 sound/core/pcm_lib.c c->cond = cond; c 1133 sound/core/pcm_lib.c c->func = func; c 1134 sound/core/pcm_lib.c c->var = var; c 1135 sound/core/pcm_lib.c c->private = private; c 1138 sound/core/pcm_lib.c if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) { c 1142 sound/core/pcm_lib.c c->deps[k++] = dep; c 2012 sound/core/pcm_lib.c int c, err; c 2021 sound/core/pcm_lib.c for (c = 0; c < channels; ++c, ++bufs) { c 2023 sound/core/pcm_lib.c err = fill_silence(substream, c, hwoff, NULL, frames); c 2025 sound/core/pcm_lib.c err = transfer(substream, c, hwoff, *bufs + off, c 2360 sound/core/pcm_lib.c int c, count = 0; c 2386 sound/core/pcm_lib.c for (c = 0; c < map->channels; c++) { c 2387 sound/core/pcm_lib.c if (put_user(map->map[c], dst)) c 2430 sound/core/pcm_lib.c .tlv.c = pcm_chmap_ctl_tlv, c 14 sound/core/pcm_local.h const struct snd_interval *b, struct snd_interval *c); c 16 sound/core/pcm_local.h const struct snd_interval *b, struct snd_interval *c); c 19 sound/core/pcm_local.h unsigned int k, struct snd_interval *c); c 21 sound/core/pcm_local.h const struct snd_interval *b, struct snd_interval *c); c 179 sound/core/pcm_memory.c entry->c.text.write = snd_pcm_lib_preallocate_proc_write; c 2773 sound/core/pcm_native.c if (copy_from_user(&sync_ptr.c.control, &(_sync_ptr->c.control), sizeof(struct snd_pcm_mmap_control))) c 2785 sound/core/pcm_native.c sync_ptr.c.control.appl_ptr); c 2791 sound/core/pcm_native.c sync_ptr.c.control.appl_ptr = control->appl_ptr; c 2794 sound/core/pcm_native.c control->avail_min = sync_ptr.c.control.avail_min; c 2796 sound/core/pcm_native.c sync_ptr.c.control.avail_min = control->avail_min; c 1736 sound/core/rawmidi.c entry->c.text.read = snd_rawmidi_proc_info_read; c 281 sound/core/seq/oss/seq_oss.c entry->c.text.read = info_read; c 83 sound/core/seq/oss/seq_oss_event.c return snd_seq_oss_synth_raw_event(dp, q->c[1], q->c, ev); c 167 sound/core/seq/oss/seq_oss_event.c return snd_seq_oss_synth_raw_event(dp, q->e.dev, q->c, ev); c 88 sound/core/seq/oss/seq_oss_event.h unsigned char c[LONG_EVENT_SIZE]; c 455 sound/core/seq/oss/seq_oss_midi.c int c; c 466 sound/core/seq/oss/seq_oss_midi.c for (c = 0; c < 16; c++) { c 468 sound/core/seq/oss/seq_oss_midi.c ev.data.control.channel = c; c 622 sound/core/seq/oss/seq_oss_midi.c snd_seq_oss_midi_putc(struct seq_oss_devinfo *dp, int dev, unsigned char c, struct snd_seq_event *ev) c 628 sound/core/seq/oss/seq_oss_midi.c if (snd_midi_event_encode_byte(mdev->coder, c, ev)) { c 28 sound/core/seq/oss/seq_oss_midi.h int snd_seq_oss_midi_putc(struct seq_oss_devinfo *dp, int dev, unsigned char c, c 95 sound/core/seq/oss/seq_oss_readq.c rec.c[0] = SEQ_MIDIPUTC; c 96 sound/core/seq/oss/seq_oss_readq.c rec.c[2] = dev; c 99 sound/core/seq/oss/seq_oss_readq.c rec.c[1] = *data++; c 103 sound/core/seq/oss/seq_oss_rw.c fmt = (*(unsigned short *)rec.c) & 0xffff; c 119 sound/core/seq/oss/seq_oss_rw.c if (copy_from_user(rec.c + SHORT_EVENT_SIZE, c 442 sound/core/seq/oss/seq_oss_synth.c const char __user *buf, int p, int c) c 460 sound/core/seq/oss/seq_oss_synth.c rc = rec->oper.load_patch(&info->arg, fmt, buf, p, c); c 26 sound/core/seq/oss/seq_oss_synth.h const char __user *buf, int p, int c); c 226 sound/core/seq/seq_clientmgr.c int c; c 248 sound/core/seq/seq_clientmgr.c for (c = 
SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN; c 249 sound/core/seq/seq_clientmgr.c c < SNDRV_SEQ_MAX_CLIENTS; c 250 sound/core/seq/seq_clientmgr.c c++) { c 251 sound/core/seq/seq_clientmgr.c if (clienttab[c] || clienttablock[c]) c 253 sound/core/seq/seq_clientmgr.c clienttab[client->number = c] = client; c 322 sound/core/seq/seq_clientmgr.c int c, mode; /* client id */ c 363 sound/core/seq/seq_clientmgr.c c = client->number; c 368 sound/core/seq/seq_clientmgr.c sprintf(client->name, "Client-%d", c); c 372 sound/core/seq/seq_clientmgr.c snd_seq_system_client_ev_client_start(c); c 2448 sound/core/seq/seq_clientmgr.c int c; c 2458 sound/core/seq/seq_clientmgr.c for (c = 0; c < SNDRV_SEQ_MAX_CLIENTS; c++) { c 2459 sound/core/seq/seq_clientmgr.c client = snd_seq_client_use_ptr(c); c 2468 sound/core/seq/seq_clientmgr.c c, client->name, c 30 sound/core/seq/seq_info.c entry->c.text.read = read; c 176 sound/core/seq/seq_midi_event.c bool snd_midi_event_encode_byte(struct snd_midi_event *dev, unsigned char c, c 182 sound/core/seq/seq_midi_event.c if (c >= MIDI_CMD_COMMON_CLOCK) { c 184 sound/core/seq/seq_midi_event.c ev->type = status_event[ST_SPECIAL + c - 0xf0].event; c 191 sound/core/seq/seq_midi_event.c if ((c & 0x80) && c 192 sound/core/seq/seq_midi_event.c (c != MIDI_CMD_COMMON_SYSEX_END || dev->type != ST_SYSEX)) { c 194 sound/core/seq/seq_midi_event.c dev->buf[0] = c; c 195 sound/core/seq/seq_midi_event.c if ((c & 0xf0) == 0xf0) /* system messages */ c 196 sound/core/seq/seq_midi_event.c dev->type = (c & 0x0f) + ST_SPECIAL; c 198 sound/core/seq/seq_midi_event.c dev->type = (c >> 4) & 0x07; c 204 sound/core/seq/seq_midi_event.c dev->buf[dev->read++] = c; c 209 sound/core/seq/seq_midi_event.c dev->buf[1] = c; c 224 sound/core/seq/seq_midi_event.c if (c == MIDI_CMD_COMMON_SYSEX_END || c 231 sound/core/seq/seq_midi_event.c if (c != MIDI_CMD_COMMON_SYSEX_END) c 216 sound/core/seq/seq_ports.c struct snd_seq_client *c; c 221 sound/core/seq/seq_ports.c aport = get_client_port(&subs->info.dest, &c); c 223 sound/core/seq/seq_ports.c aport = get_client_port(&subs->info.sender, &c); c 237 sound/core/seq/seq_ports.c delete_and_unsubscribe_port(c, aport, subs, !is_src, true); c 240 sound/core/seq/seq_ports.c snd_seq_client_unlock(c); c 269 sound/core/seq_device.c info_entry->c.text.read = snd_seq_device_info; c 377 sound/core/sound.c entry->c.text.read = snd_minor_info_read; c 241 sound/core/sound_oss.c entry->c.text.read = snd_minor_info_oss_read; c 1270 sound/core/timer.c entry->c.text.read = snd_timer_proc_read; c 227 sound/core/vmaster.c return slave->slave.tlv.c(&slave->slave, op_flag, size, tlv); c 273 sound/core/vmaster.c slave->tlv.c = slave_tlv_cmd; c 133 sound/drivers/mts64.c static u8 mts64_map_midi_input(u8 c); c 142 sound/drivers/mts64.c static void mts64_write_command(struct parport *p, u8 c); c 143 sound/drivers/mts64.c static void mts64_write_data(struct parport *p, u8 c); c 144 sound/drivers/mts64.c static void mts64_write_midi(struct mts64 *mts, u8 c, int midiport); c 154 sound/drivers/mts64.c u8 c; c 156 sound/drivers/mts64.c c = parport_read_control(p); c 157 sound/drivers/mts64.c c |= MTS64_CTL_READOUT; c 158 sound/drivers/mts64.c parport_write_control(p, c); c 167 sound/drivers/mts64.c u8 c; c 169 sound/drivers/mts64.c c = parport_read_control(p); c 170 sound/drivers/mts64.c c &= ~MTS64_CTL_READOUT; c 171 sound/drivers/mts64.c parport_write_control(p, c); c 183 sound/drivers/mts64.c u8 c; c 186 sound/drivers/mts64.c c = parport_read_status(p); c 187 sound/drivers/mts64.c c &= MTS64_STAT_BSY; c 
188 sound/drivers/mts64.c if (c != 0) c 262 sound/drivers/mts64.c static u8 mts64_map_midi_input(u8 c) c 266 sound/drivers/mts64.c return map[c]; c 279 sound/drivers/mts64.c u8 c; c 286 sound/drivers/mts64.c c = mts64_read(p); c 288 sound/drivers/mts64.c c &= 0x00ff; c 289 sound/drivers/mts64.c if (c != MTS64_CMD_PROBE) c 327 sound/drivers/mts64.c u8 c = 0; c 333 sound/drivers/mts64.c c >>= 1; c 336 sound/drivers/mts64.c c |= 0x80; c 339 sound/drivers/mts64.c return c; c 381 sound/drivers/mts64.c static void mts64_write_command(struct parport *p, u8 c) c 385 sound/drivers/mts64.c parport_write_data(p, c); c 394 sound/drivers/mts64.c static void mts64_write_data(struct parport *p, u8 c) c 398 sound/drivers/mts64.c parport_write_data(p, c); c 410 sound/drivers/mts64.c static void mts64_write_midi(struct mts64 *mts, u8 c, c 420 sound/drivers/mts64.c mts64_write_data(p, c); c 101 sound/drivers/opl4/opl4_proc.c entry->c.ops = &snd_opl4_mem_proc_ops; c 196 sound/drivers/serial-u16550.c unsigned char c, status; c 205 sound/drivers/serial-u16550.c c = inb(uart->base + UART_RX); c 208 sound/drivers/serial-u16550.c if (c & 0x80) c 209 sound/drivers/serial-u16550.c uart->rstatus = c; c 214 sound/drivers/serial-u16550.c if (c <= SNDRV_SERIAL_MAX_INS && c > 0) c 215 sound/drivers/serial-u16550.c substream = c - 1; c 216 sound/drivers/serial-u16550.c if (c != 0xf5) c 223 sound/drivers/serial-u16550.c &c, 1); c 226 sound/drivers/serial-u16550.c snd_rawmidi_receive(uart->midi_input[substream], &c, 1); c 320 sound/drivers/serial-u16550.c unsigned char c; c 337 sound/drivers/serial-u16550.c c = inb(io_base + UART_IER); c 339 sound/drivers/serial-u16550.c if ((c & 0xf0) != 0) c 344 sound/drivers/serial-u16550.c c = inb(io_base + UART_SCR); c 346 sound/drivers/serial-u16550.c if (c != 0xaa) c 351 sound/drivers/serial-u16550.c c = inb(io_base + UART_SCR); c 353 sound/drivers/serial-u16550.c if (c != 0x55) c 422 sound/drivers/vx/vx_core.c unsigned int c = ((u32)boot->data[0] << 16) | ((u32)boot->data[1] << 8) | boot->data[2]; c 423 sound/drivers/vx/vx_core.c if (boot->size != (c + 2) * 3) c 318 sound/drivers/vx/vx_mixer.c unsigned int i, c; c 326 sound/drivers/vx/vx_mixer.c for (c = 0; c < 2; c++) { c 329 sound/drivers/vx/vx_mixer.c if (c == 0) { c 336 sound/drivers/vx/vx_mixer.c vx_adjust_audio_level(chip, i, c, &info); c 337 sound/drivers/vx/vx_mixer.c chip->audio_gain[c][i] = CVAL_0DB; c 900 sound/drivers/vx/vx_mixer.c unsigned int i, c; c 963 sound/drivers/vx/vx_mixer.c for (c = 0; c < 2; c++) { c 966 sound/drivers/vx/vx_mixer.c int val = (i * 2) | (c << 8); c 967 sound/drivers/vx/vx_mixer.c if (c == 1) { c 974 sound/drivers/vx/vx_mixer.c sprintf(name, "%s VU Meter", dir[c]); c 981 sound/drivers/vx/vx_mixer.c sprintf(name, "%s Peak Meter", dir[c]); c 159 sound/firewire/amdtp-am824.c int i, c; c 169 sound/firewire/amdtp-am824.c for (c = 0; c < channels; ++c) { c 170 sound/firewire/amdtp-am824.c buffer[p->pcm_positions[c]] = c 190 sound/firewire/amdtp-am824.c int i, c; c 200 sound/firewire/amdtp-am824.c for (c = 0; c < channels; ++c) { c 201 sound/firewire/amdtp-am824.c *dst = be32_to_cpu(buffer[p->pcm_positions[c]]) << 8; c 214 sound/firewire/amdtp-am824.c unsigned int i, c, channels = p->pcm_channels; c 217 sound/firewire/amdtp-am824.c for (c = 0; c < channels; ++c) c 218 sound/firewire/amdtp-am824.c buffer[p->pcm_positions[c]] = cpu_to_be32(0x40000000); c 632 sound/firewire/bebob/bebob_maudio.c unsigned int i, c, channels; c 650 sound/firewire/bebob/bebob_maudio.c for (c = 2; c < channels + 2; c++) c 651 
sound/firewire/bebob/bebob_maudio.c target[i++] = be16_to_cpu(buf[c]) << 16; c 691 sound/firewire/bebob/bebob_maudio.c unsigned int c, channels; c 702 sound/firewire/bebob/bebob_maudio.c for (c = 0; c < channels; c++) c 703 sound/firewire/bebob/bebob_maudio.c be32_to_cpus(&buf[c]); c 16 sound/firewire/bebob/bebob_pcm.c const struct snd_interval *c = c 28 sound/firewire/bebob/bebob_pcm.c if (!snd_interval_test(c, formations[i].pcm)) c 42 sound/firewire/bebob/bebob_pcm.c struct snd_interval *c = c 64 sound/firewire/bebob/bebob_pcm.c return snd_interval_refine(c, &t); c 77 sound/firewire/bebob/bebob_proc.c unsigned int i, c, channels, size; c 91 sound/firewire/bebob/bebob_proc.c for (i = 0, c = 1; i < channels; i++) { c 93 sound/firewire/bebob/bebob_proc.c spec->labels[i / 2], c++, buf[i]); c 97 sound/firewire/bebob/bebob_proc.c c = 1; c 46 sound/firewire/cmp.c void cmp_error(struct cmp_connection *c, const char *fmt, ...) c 51 sound/firewire/cmp.c dev_err(&c->resources.unit->device, "%cPCR%u: %pV", c 52 sound/firewire/cmp.c (c->direction == CMP_INPUT) ? 'i' : 'o', c 53 sound/firewire/cmp.c c->pcr_index, &(struct va_format){ fmt, &va }); c 57 sound/firewire/cmp.c static u64 mpr_address(struct cmp_connection *c) c 59 sound/firewire/cmp.c if (c->direction == CMP_INPUT) c 65 sound/firewire/cmp.c static u64 pcr_address(struct cmp_connection *c) c 67 sound/firewire/cmp.c if (c->direction == CMP_INPUT) c 68 sound/firewire/cmp.c return CSR_REGISTER_BASE + CSR_IPCR(c->pcr_index); c 70 sound/firewire/cmp.c return CSR_REGISTER_BASE + CSR_OPCR(c->pcr_index); c 73 sound/firewire/cmp.c static int pcr_modify(struct cmp_connection *c, c 74 sound/firewire/cmp.c __be32 (*modify)(struct cmp_connection *c, __be32 old), c 75 sound/firewire/cmp.c int (*check)(struct cmp_connection *c, __be32 pcr), c 81 sound/firewire/cmp.c buffer[0] = c->last_pcr_value; c 84 sound/firewire/cmp.c buffer[1] = modify(c, buffer[0]); c 87 sound/firewire/cmp.c c->resources.unit, TCODE_LOCK_COMPARE_SWAP, c 88 sound/firewire/cmp.c pcr_address(c), buffer, 8, c 89 sound/firewire/cmp.c FW_FIXED_GENERATION | c->resources.generation); c 102 sound/firewire/cmp.c err = check(c, buffer[0]); c 107 sound/firewire/cmp.c c->last_pcr_value = buffer[1]; c 120 sound/firewire/cmp.c int cmp_connection_init(struct cmp_connection *c, c 129 sound/firewire/cmp.c c->direction = direction; c 131 sound/firewire/cmp.c mpr_address(c), &mpr_be, 4, 0); c 139 sound/firewire/cmp.c err = fw_iso_resources_init(&c->resources, unit); c 143 sound/firewire/cmp.c c->connected = false; c 144 sound/firewire/cmp.c mutex_init(&c->mutex); c 145 sound/firewire/cmp.c c->last_pcr_value = cpu_to_be32(0x80000000); c 146 sound/firewire/cmp.c c->pcr_index = pcr_index; c 147 sound/firewire/cmp.c c->max_speed = (mpr & MPR_SPEED_MASK) >> MPR_SPEED_SHIFT; c 148 sound/firewire/cmp.c if (c->max_speed == SCODE_BETA) c 149 sound/firewire/cmp.c c->max_speed += (mpr & MPR_XSPEED_MASK) >> MPR_XSPEED_SHIFT; c 160 sound/firewire/cmp.c int cmp_connection_check_used(struct cmp_connection *c, bool *used) c 166 sound/firewire/cmp.c c->resources.unit, TCODE_READ_QUADLET_REQUEST, c 167 sound/firewire/cmp.c pcr_address(c), &pcr, 4, 0); c 180 sound/firewire/cmp.c void cmp_connection_destroy(struct cmp_connection *c) c 182 sound/firewire/cmp.c WARN_ON(c->connected); c 183 sound/firewire/cmp.c mutex_destroy(&c->mutex); c 184 sound/firewire/cmp.c fw_iso_resources_destroy(&c->resources); c 188 sound/firewire/cmp.c int cmp_connection_reserve(struct cmp_connection *c, c 193 sound/firewire/cmp.c 
mutex_lock(&c->mutex); c 195 sound/firewire/cmp.c if (WARN_ON(c->resources.allocated)) { c 200 sound/firewire/cmp.c c->speed = min(c->max_speed, c 201 sound/firewire/cmp.c fw_parent_device(c->resources.unit)->max_speed); c 203 sound/firewire/cmp.c err = fw_iso_resources_allocate(&c->resources, max_payload_bytes, c 204 sound/firewire/cmp.c c->speed); c 206 sound/firewire/cmp.c mutex_unlock(&c->mutex); c 212 sound/firewire/cmp.c void cmp_connection_release(struct cmp_connection *c) c 214 sound/firewire/cmp.c mutex_lock(&c->mutex); c 215 sound/firewire/cmp.c fw_iso_resources_free(&c->resources); c 216 sound/firewire/cmp.c mutex_unlock(&c->mutex); c 220 sound/firewire/cmp.c static __be32 ipcr_set_modify(struct cmp_connection *c, __be32 ipcr) c 226 sound/firewire/cmp.c ipcr |= cpu_to_be32(c->resources.channel << PCR_CHANNEL_SHIFT); c 231 sound/firewire/cmp.c static int get_overhead_id(struct cmp_connection *c) c 241 sound/firewire/cmp.c if (c->resources.bandwidth_overhead < (id << 5)) c 250 sound/firewire/cmp.c static __be32 opcr_set_modify(struct cmp_connection *c, __be32 opcr) c 255 sound/firewire/cmp.c if (c->speed > SCODE_400) { c 257 sound/firewire/cmp.c xspd = c->speed - SCODE_800; c 259 sound/firewire/cmp.c spd = c->speed; c 271 sound/firewire/cmp.c opcr |= cpu_to_be32(c->resources.channel << PCR_CHANNEL_SHIFT); c 273 sound/firewire/cmp.c opcr |= cpu_to_be32(get_overhead_id(c) << OPCR_OVERHEAD_ID_SHIFT); c 278 sound/firewire/cmp.c static int pcr_set_check(struct cmp_connection *c, __be32 pcr) c 282 sound/firewire/cmp.c cmp_error(c, "plug is already in use\n"); c 286 sound/firewire/cmp.c cmp_error(c, "plug is not on-line\n"); c 304 sound/firewire/cmp.c int cmp_connection_establish(struct cmp_connection *c) c 308 sound/firewire/cmp.c mutex_lock(&c->mutex); c 310 sound/firewire/cmp.c if (WARN_ON(c->connected)) { c 311 sound/firewire/cmp.c mutex_unlock(&c->mutex); c 316 sound/firewire/cmp.c if (c->direction == CMP_OUTPUT) c 317 sound/firewire/cmp.c err = pcr_modify(c, opcr_set_modify, pcr_set_check, c 320 sound/firewire/cmp.c err = pcr_modify(c, ipcr_set_modify, pcr_set_check, c 324 sound/firewire/cmp.c err = fw_iso_resources_update(&c->resources); c 329 sound/firewire/cmp.c c->connected = true; c 331 sound/firewire/cmp.c mutex_unlock(&c->mutex); c 347 sound/firewire/cmp.c int cmp_connection_update(struct cmp_connection *c) c 351 sound/firewire/cmp.c mutex_lock(&c->mutex); c 353 sound/firewire/cmp.c if (!c->connected) { c 354 sound/firewire/cmp.c mutex_unlock(&c->mutex); c 358 sound/firewire/cmp.c err = fw_iso_resources_update(&c->resources); c 362 sound/firewire/cmp.c if (c->direction == CMP_OUTPUT) c 363 sound/firewire/cmp.c err = pcr_modify(c, opcr_set_modify, pcr_set_check, c 366 sound/firewire/cmp.c err = pcr_modify(c, ipcr_set_modify, pcr_set_check, c 372 sound/firewire/cmp.c mutex_unlock(&c->mutex); c 377 sound/firewire/cmp.c c->connected = false; c 378 sound/firewire/cmp.c mutex_unlock(&c->mutex); c 384 sound/firewire/cmp.c static __be32 pcr_break_modify(struct cmp_connection *c, __be32 pcr) c 397 sound/firewire/cmp.c void cmp_connection_break(struct cmp_connection *c) c 401 sound/firewire/cmp.c mutex_lock(&c->mutex); c 403 sound/firewire/cmp.c if (!c->connected) { c 404 sound/firewire/cmp.c mutex_unlock(&c->mutex); c 408 sound/firewire/cmp.c err = pcr_modify(c, pcr_break_modify, NULL, SUCCEED_ON_BUS_RESET); c 410 sound/firewire/cmp.c cmp_error(c, "plug is still connected\n"); c 412 sound/firewire/cmp.c c->connected = false; c 414 sound/firewire/cmp.c mutex_unlock(&c->mutex); c 18 
sound/firewire/dice/dice-pcm.c const struct snd_interval *c = c 39 sound/firewire/dice/dice-pcm.c if (!snd_interval_test(c, pcm_channels[mode])) c 58 sound/firewire/dice/dice-pcm.c struct snd_interval *c = c 84 sound/firewire/dice/dice-pcm.c return snd_interval_refine(c, &channels); c 155 sound/firewire/digi00x/amdtp-dot.c int i, c; c 166 sound/firewire/digi00x/amdtp-dot.c for (c = 0; c < channels; ++c) { c 167 sound/firewire/digi00x/amdtp-dot.c buffer[c] = cpu_to_be32((*src >> 8) | 0x40000000); c 168 sound/firewire/digi00x/amdtp-dot.c dot_encode_step(&p->state, &buffer[c]); c 187 sound/firewire/digi00x/amdtp-dot.c int i, c; c 198 sound/firewire/digi00x/amdtp-dot.c for (c = 0; c < channels; ++c) { c 199 sound/firewire/digi00x/amdtp-dot.c *dst = be32_to_cpu(buffer[c]) << 8; c 212 sound/firewire/digi00x/amdtp-dot.c unsigned int channels, i, c; c 218 sound/firewire/digi00x/amdtp-dot.c for (c = 0; c < channels; ++c) c 219 sound/firewire/digi00x/amdtp-dot.c buffer[c] = cpu_to_be32(0x40000000); c 15 sound/firewire/digi00x/digi00x-pcm.c const struct snd_interval *c = c 23 sound/firewire/digi00x/digi00x-pcm.c if (!snd_interval_test(c, c 37 sound/firewire/digi00x/digi00x-pcm.c struct snd_interval *c = c 54 sound/firewire/digi00x/digi00x-pcm.c return snd_interval_refine(c, &t); c 40 sound/firewire/fireface/amdtp-ff.c int i, c; c 50 sound/firewire/fireface/amdtp-ff.c for (c = 0; c < channels; ++c) { c 51 sound/firewire/fireface/amdtp-ff.c buffer[c] = cpu_to_le32(*src); c 70 sound/firewire/fireface/amdtp-ff.c int i, c; c 80 sound/firewire/fireface/amdtp-ff.c for (c = 0; c < channels; ++c) { c 81 sound/firewire/fireface/amdtp-ff.c *dst = le32_to_cpu(buffer[c]) & 0xffffff00; c 94 sound/firewire/fireface/amdtp-ff.c unsigned int i, c, channels = p->pcm_channels; c 97 sound/firewire/fireface/amdtp-ff.c for (c = 0; c < channels; ++c) c 98 sound/firewire/fireface/amdtp-ff.c buffer[c] = cpu_to_le32(0x00000000); c 16 sound/firewire/fireface/ff-pcm.c const struct snd_interval *c = c 31 sound/firewire/fireface/ff-pcm.c if (!snd_interval_test(c, pcm_channels[mode])) c 45 sound/firewire/fireface/ff-pcm.c struct snd_interval *c = c 69 sound/firewire/fireface/ff-pcm.c return snd_interval_refine(c, &t); c 69 sound/firewire/fireworks/fireworks_pcm.c const struct snd_interval *c = c 78 sound/firewire/fireworks/fireworks_pcm.c if (!snd_interval_test(c, pcm_channels[mode])) c 92 sound/firewire/fireworks/fireworks_pcm.c struct snd_interval *c = c 110 sound/firewire/fireworks/fireworks_pcm.c return snd_interval_refine(c, &t); c 132 sound/firewire/fireworks/fireworks_proc.c unsigned int g, c, m, max, size; c 155 sound/firewire/fireworks/fireworks_proc.c for (c = 0; c < efw->phys_out_grps[g].count; c++) { c 158 sound/firewire/fireworks/fireworks_proc.c name, c, linear[m++]); c 168 sound/firewire/fireworks/fireworks_proc.c for (c = 0; c < efw->phys_in_grps[g].count; c++) c 171 sound/firewire/fireworks/fireworks_proc.c name, c, linear[m++]); c 131 sound/firewire/motu/amdtp-motu.c int i, c; c 143 sound/firewire/motu/amdtp-motu.c for (c = 0; c < channels; ++c) { c 167 sound/firewire/motu/amdtp-motu.c int i, c; c 179 sound/firewire/motu/amdtp-motu.c for (c = 0; c < channels; ++c) { c 197 sound/firewire/motu/amdtp-motu.c unsigned int channels, i, c; c 205 sound/firewire/motu/amdtp-motu.c for (c = 0; c < channels; ++c) { c 16 sound/firewire/motu/motu-pcm.c const struct snd_interval *c = c 31 sound/firewire/motu/motu-pcm.c if (!snd_interval_test(c, pcm_channels)) c 48 sound/firewire/motu/motu-pcm.c struct snd_interval *c = c 68 
sound/firewire/motu/motu-pcm.c return snd_interval_refine(c, &channels); c 16 sound/firewire/oxfw/oxfw-pcm.c const struct snd_interval *c = c 31 sound/firewire/oxfw/oxfw-pcm.c if (!snd_interval_test(c, formation.pcm)) c 45 sound/firewire/oxfw/oxfw-pcm.c struct snd_interval *c = c 77 sound/firewire/oxfw/oxfw-pcm.c return snd_interval_list(c, count, list, 0); c 45 sound/firewire/tascam/amdtp-tascam.c int i, c; c 55 sound/firewire/tascam/amdtp-tascam.c for (c = 0; c < channels; ++c) { c 56 sound/firewire/tascam/amdtp-tascam.c buffer[c] = cpu_to_be32(*src); c 75 sound/firewire/tascam/amdtp-tascam.c int i, c; c 88 sound/firewire/tascam/amdtp-tascam.c for (c = 0; c < channels; ++c) { c 89 sound/firewire/tascam/amdtp-tascam.c *dst = be32_to_cpu(buffer[c]); c 102 sound/firewire/tascam/amdtp-tascam.c unsigned int channels, i, c; c 107 sound/firewire/tascam/amdtp-tascam.c for (c = 0; c < channels; ++c) c 108 sound/firewire/tascam/amdtp-tascam.c buffer[c] = 0x00000000; c 53 sound/firewire/tascam/tascam.c u8 c; c 63 sound/firewire/tascam/tascam.c c = config_rom[28 + i / 4] >> (24 - 8 * (i % 4)); c 64 sound/firewire/tascam/tascam.c if (c == '\0') c 66 sound/firewire/tascam/tascam.c model[i] = c; c 668 sound/hda/hdac_device.c const struct hda_vendor_id *c; c 671 sound/hda/hdac_device.c for (c = hda_vendor_ids; c->id; c++) { c 672 sound/hda/hdac_device.c if (c->id == vendor_id) { c 673 sound/hda/hdac_device.c codec->vendor_name = kstrdup(c->name, GFP_KERNEL); c 417 sound/hda/hdmi_chmap.c int snd_hdac_chmap_to_spk_mask(unsigned char c) c 422 sound/hda/hdmi_chmap.c if (t->map == c) c 637 sound/hda/hdmi_chmap.c int c; c 639 sound/hda/hdmi_chmap.c for (c = 7; c >= 0; c--) { c 640 sound/hda/hdmi_chmap.c int spk = cap->speakers[c]; c 846 sound/hda/hdmi_chmap.c kctl->tlv.c = hdmi_chmap_ctl_tlv; c 62 sound/isa/gus/gus_mem_proc.c entry->c.ops = &snd_gf1_mem_proc_ops; c 80 sound/isa/gus/gus_mem_proc.c entry->c.ops = &snd_gf1_mem_proc_ops; c 760 sound/isa/sb/emu8000.c #define AWE_INIT1(c) EMU8000_CMD(2,c), DATA1 c 761 sound/isa/sb/emu8000.c #define AWE_INIT2(c) EMU8000_CMD(2,c), DATA2 c 762 sound/isa/sb/emu8000.c #define AWE_INIT3(c) EMU8000_CMD(3,c), DATA1 c 763 sound/isa/sb/emu8000.c #define AWE_INIT4(c) EMU8000_CMD(3,c), DATA2 c 76 sound/isa/sb/emu8000_patch.c unsigned short c; c 80 sound/isa/sb/emu8000_patch.c c = cc << 8; /* convert 8bit -> 16bit */ c 83 sound/isa/sb/emu8000_patch.c get_user(c, (unsigned short __user *)buf + offset); c 87 sound/isa/sb/emu8000_patch.c c = swab16(cc); c 91 sound/isa/sb/emu8000_patch.c c ^= 0x8000; /* unsigned -> signed */ c 92 sound/isa/sb/emu8000_patch.c return c; c 32 sound/isa/sb/sb16_csp.c #define CSP_HDR_VALUE(a,b,c,d) ((a) | ((b)<<8) | ((c)<<16) | ((d)<<24)) c 34 sound/isa/sb/sb16_csp.c #define CSP_HDR_VALUE(a,b,c,d) ((d) | ((c)<<8) | ((b)<<16) | ((a)<<24)) c 65 sound/isa/sb/sb8_main.c struct snd_interval *c = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); c 66 sound/isa/sb/sb8_main.c if (c->min > 1) { c 146 sound/isa/sscape.c static inline struct soundscape *get_card_soundscape(struct snd_card *c) c 148 sound/isa/sscape.c return (struct soundscape *) (c->private_data); c 333 sound/isa/sscape.c static void soundscape_free(struct snd_card *c) c 335 sound/isa/sscape.c struct soundscape *sscape = get_card_soundscape(c); c 339 sound/isa/wavefront/wavefront_synth.c int c; c 394 sound/isa/wavefront/wavefront_synth.c if ((c = wavefront_read (dev)) == -1) { c 403 sound/isa/wavefront/wavefront_synth.c if (c == 0xff) { c 404 sound/isa/wavefront/wavefront_synth.c if ((c = 
wavefront_read (dev)) == -1) { c 416 sound/isa/wavefront/wavefront_synth.c if (c == 1 && c 421 sound/isa/wavefront/wavefront_synth.c } else if (c == 3 && c 426 sound/isa/wavefront/wavefront_synth.c } else if (c == 1 && c 438 sound/isa/wavefront/wavefront_synth.c c, c 439 sound/isa/wavefront/wavefront_synth.c wavefront_errorstr (c), c 447 sound/isa/wavefront/wavefront_synth.c rbuf[i] = c; c 402 sound/oss/dmasound/dmasound_atari.c u_char c; c 406 sound/oss/dmasound/dmasound_atari.c if (get_user(c, userPtr++)) c 408 sound/oss/dmasound/dmasound_atari.c data = table[c]; c 421 sound/oss/dmasound/dmasound_atari.c u_char c; c 425 sound/oss/dmasound/dmasound_atari.c if (get_user(c, userPtr++)) c 427 sound/oss/dmasound/dmasound_atari.c data = table[c] << 8; c 428 sound/oss/dmasound/dmasound_atari.c if (get_user(c, userPtr++)) c 430 sound/oss/dmasound/dmasound_atari.c data |= table[c]; c 133 sound/oss/dmasound/dmasound_q40.c u_char c; c 137 sound/oss/dmasound/dmasound_q40.c if (get_user(c, userPtr++)) c 139 sound/oss/dmasound/dmasound_q40.c data = table[c]; c 170 sound/oss/dmasound/dmasound_q40.c u_char c; c 174 sound/oss/dmasound/dmasound_q40.c if (get_user(c, userPtr++)) c 176 sound/oss/dmasound/dmasound_q40.c data = c ; c 206 sound/oss/dmasound/dmasound_q40.c u_char c; c 210 sound/oss/dmasound/dmasound_q40.c if (get_user(c, userPtr++)) c 212 sound/oss/dmasound/dmasound_q40.c data = c ; c 243 sound/oss/dmasound/dmasound_q40.c u_char c; c 248 sound/oss/dmasound/dmasound_q40.c if (get_user(c, userPtr)) c 250 sound/oss/dmasound/dmasound_q40.c data = 0x80 + table[c]; c 282 sound/oss/dmasound/dmasound_q40.c u_char c; c 287 sound/oss/dmasound/dmasound_q40.c if (get_user(c, userPtr)) c 289 sound/oss/dmasound/dmasound_q40.c data = c + 0x80; c 321 sound/oss/dmasound/dmasound_q40.c u_char c; c 326 sound/oss/dmasound/dmasound_q40.c if (get_user(c, userPtr)) c 328 sound/oss/dmasound/dmasound_q40.c data = c ; c 436 sound/pci/ac97/ac97_proc.c entry->c.text.write = snd_ac97_proc_regs_write; c 247 sound/pci/asihpi/hpi6205.c } else if (phm->u.c.attribute == HPI_METER_PEAK) { c 1012 sound/pci/asihpi/hpi_internal.h struct hpi_control_msg c; /* mixer control; */ c 1074 sound/pci/asihpi/hpi_internal.h struct hpi_control_res c; /* mixer control; */ c 1183 sound/pci/asihpi/hpi_internal.h struct hpi_control_msg c; c 1202 sound/pci/asihpi/hpi_internal.h struct hpi_control_res c; c 323 sound/pci/asihpi/hpicmn.c if (phm->u.c.attribute == HPI_METER_PEAK) { c 324 sound/pci/asihpi/hpicmn.c phr->u.c.an_log_value[0] = pC->u.meter.an_log_peak[0]; c 325 sound/pci/asihpi/hpicmn.c phr->u.c.an_log_value[1] = pC->u.meter.an_log_peak[1]; c 326 sound/pci/asihpi/hpicmn.c } else if (phm->u.c.attribute == HPI_METER_RMS) { c 331 sound/pci/asihpi/hpicmn.c phr->u.c.an_log_value[0] = HPI_METER_MINIMUM; c 332 sound/pci/asihpi/hpicmn.c phr->u.c.an_log_value[1] = HPI_METER_MINIMUM; c 334 sound/pci/asihpi/hpicmn.c phr->u.c.an_log_value[0] = c 336 sound/pci/asihpi/hpicmn.c phr->u.c.an_log_value[1] = c 343 sound/pci/asihpi/hpicmn.c if (phm->u.c.attribute == HPI_VOLUME_GAIN) { c 344 sound/pci/asihpi/hpicmn.c phr->u.c.an_log_value[0] = pC->u.vol.an_log[0]; c 345 sound/pci/asihpi/hpicmn.c phr->u.c.an_log_value[1] = pC->u.vol.an_log[1]; c 346 sound/pci/asihpi/hpicmn.c } else if (phm->u.c.attribute == HPI_VOLUME_MUTE) { c 349 sound/pci/asihpi/hpicmn.c phr->u.c.param1 = c 352 sound/pci/asihpi/hpicmn.c phr->u.c.param1 = 0; c 356 sound/pci/asihpi/hpicmn.c phr->u.c.param1 = 0; c 363 sound/pci/asihpi/hpicmn.c if (phm->u.c.attribute == HPI_MULTIPLEXER_SOURCE) { 
c 364 sound/pci/asihpi/hpicmn.c phr->u.c.param1 = pC->u.mux.source_node_type; c 365 sound/pci/asihpi/hpicmn.c phr->u.c.param2 = pC->u.mux.source_node_index; c 371 sound/pci/asihpi/hpicmn.c if (phm->u.c.attribute == HPI_CHANNEL_MODE_MODE) c 372 sound/pci/asihpi/hpicmn.c phr->u.c.param1 = pC->u.mode.mode; c 377 sound/pci/asihpi/hpicmn.c if (phm->u.c.attribute == HPI_LEVEL_GAIN) { c 378 sound/pci/asihpi/hpicmn.c phr->u.c.an_log_value[0] = pC->u.level.an_log[0]; c 379 sound/pci/asihpi/hpicmn.c phr->u.c.an_log_value[1] = pC->u.level.an_log[1]; c 384 sound/pci/asihpi/hpicmn.c if (phm->u.c.attribute == HPI_TUNER_FREQ) c 385 sound/pci/asihpi/hpicmn.c phr->u.c.param1 = pC->u.tuner.freq_ink_hz; c 386 sound/pci/asihpi/hpicmn.c else if (phm->u.c.attribute == HPI_TUNER_BAND) c 387 sound/pci/asihpi/hpicmn.c phr->u.c.param1 = pC->u.tuner.band; c 388 sound/pci/asihpi/hpicmn.c else if (phm->u.c.attribute == HPI_TUNER_LEVEL_AVG) c 401 sound/pci/asihpi/hpicmn.c if (phm->u.c.attribute == HPI_AESEBURX_ERRORSTATUS) c 402 sound/pci/asihpi/hpicmn.c phr->u.c.param1 = pC->u.aes3rx.error_status; c 403 sound/pci/asihpi/hpicmn.c else if (phm->u.c.attribute == HPI_AESEBURX_FORMAT) c 404 sound/pci/asihpi/hpicmn.c phr->u.c.param1 = pC->u.aes3rx.format; c 409 sound/pci/asihpi/hpicmn.c if (phm->u.c.attribute == HPI_AESEBUTX_FORMAT) c 410 sound/pci/asihpi/hpicmn.c phr->u.c.param1 = pC->u.aes3tx.format; c 415 sound/pci/asihpi/hpicmn.c if (phm->u.c.attribute == HPI_TONEDETECTOR_STATE) c 416 sound/pci/asihpi/hpicmn.c phr->u.c.param1 = pC->u.tone.state; c 421 sound/pci/asihpi/hpicmn.c if (phm->u.c.attribute == HPI_SILENCEDETECTOR_STATE) { c 422 sound/pci/asihpi/hpicmn.c phr->u.c.param1 = pC->u.silence.state; c 427 sound/pci/asihpi/hpicmn.c if (phm->u.c.attribute == HPI_MICROPHONE_PHANTOM_POWER) c 428 sound/pci/asihpi/hpicmn.c phr->u.c.param1 = pC->u.microphone.phantom_state; c 433 sound/pci/asihpi/hpicmn.c if (phm->u.c.attribute == HPI_SAMPLECLOCK_SOURCE) c 434 sound/pci/asihpi/hpicmn.c phr->u.c.param1 = pC->u.clk.source; c 435 sound/pci/asihpi/hpicmn.c else if (phm->u.c.attribute == HPI_SAMPLECLOCK_SOURCE_INDEX) { c 438 sound/pci/asihpi/hpicmn.c phr->u.c.param1 = 0; c 442 sound/pci/asihpi/hpicmn.c phr->u.c.param1 = pC->u.clk.source_index; c 443 sound/pci/asihpi/hpicmn.c } else if (phm->u.c.attribute == HPI_SAMPLECLOCK_SAMPLERATE) c 444 sound/pci/asihpi/hpicmn.c phr->u.c.param1 = pC->u.clk.sample_rate; c 453 sound/pci/asihpi/hpicmn.c HPI_CTL_ATTR_INDEX(phm->u.c. c 460 sound/pci/asihpi/hpicmn.c if (phm->u.c.attribute == HPI_PAD_PROGRAM_ID) c 461 sound/pci/asihpi/hpicmn.c phr->u.c.param1 = p_pad->pI; c 462 sound/pci/asihpi/hpicmn.c else if (phm->u.c.attribute == HPI_PAD_PROGRAM_TYPE) c 463 sound/pci/asihpi/hpicmn.c phr->u.c.param1 = p_pad->pTY; c 466 sound/pci/asihpi/hpicmn.c HPI_CTL_ATTR_INDEX(phm->u.c. 
c 468 sound/pci/asihpi/hpicmn.c unsigned int offset = phm->u.c.param1; c 515 sound/pci/asihpi/hpicmn.c phm->u.c.attribute); c 558 sound/pci/asihpi/hpicmn.c if (phm->u.c.attribute == HPI_VOLUME_GAIN) { c 559 sound/pci/asihpi/hpicmn.c pC->u.vol.an_log[0] = phr->u.c.an_log_value[0]; c 560 sound/pci/asihpi/hpicmn.c pC->u.vol.an_log[1] = phr->u.c.an_log_value[1]; c 561 sound/pci/asihpi/hpicmn.c } else if (phm->u.c.attribute == HPI_VOLUME_MUTE) { c 562 sound/pci/asihpi/hpicmn.c if (phm->u.c.param1) c 570 sound/pci/asihpi/hpicmn.c if (phm->u.c.attribute == HPI_MULTIPLEXER_SOURCE) { c 571 sound/pci/asihpi/hpicmn.c pC->u.mux.source_node_type = (u16)phm->u.c.param1; c 572 sound/pci/asihpi/hpicmn.c pC->u.mux.source_node_index = (u16)phm->u.c.param2; c 577 sound/pci/asihpi/hpicmn.c if (phm->u.c.attribute == HPI_CHANNEL_MODE_MODE) c 578 sound/pci/asihpi/hpicmn.c pC->u.mode.mode = (u16)phm->u.c.param1; c 581 sound/pci/asihpi/hpicmn.c if (phm->u.c.attribute == HPI_LEVEL_GAIN) { c 582 sound/pci/asihpi/hpicmn.c pC->u.vol.an_log[0] = phr->u.c.an_log_value[0]; c 583 sound/pci/asihpi/hpicmn.c pC->u.vol.an_log[1] = phr->u.c.an_log_value[1]; c 587 sound/pci/asihpi/hpicmn.c if (phm->u.c.attribute == HPI_MICROPHONE_PHANTOM_POWER) c 588 sound/pci/asihpi/hpicmn.c pC->u.microphone.phantom_state = (u16)phm->u.c.param1; c 591 sound/pci/asihpi/hpicmn.c if (phm->u.c.attribute == HPI_AESEBUTX_FORMAT) c 592 sound/pci/asihpi/hpicmn.c pC->u.aes3tx.format = phm->u.c.param1; c 595 sound/pci/asihpi/hpicmn.c if (phm->u.c.attribute == HPI_AESEBURX_FORMAT) c 596 sound/pci/asihpi/hpicmn.c pC->u.aes3rx.format = phm->u.c.param1; c 599 sound/pci/asihpi/hpicmn.c if (phm->u.c.attribute == HPI_SAMPLECLOCK_SOURCE) c 600 sound/pci/asihpi/hpicmn.c pC->u.clk.source = (u16)phm->u.c.param1; c 601 sound/pci/asihpi/hpicmn.c else if (phm->u.c.attribute == HPI_SAMPLECLOCK_SOURCE_INDEX) c 602 sound/pci/asihpi/hpicmn.c pC->u.clk.source_index = (u16)phm->u.c.param1; c 603 sound/pci/asihpi/hpicmn.c else if (phm->u.c.attribute == HPI_SAMPLECLOCK_SAMPLERATE) c 604 sound/pci/asihpi/hpicmn.c pC->u.clk.sample_rate = phm->u.c.param1; c 42 sound/pci/asihpi/hpidebug.c phm->u.c.attribute); c 1318 sound/pci/asihpi/hpifunc.c hm.u.c.attribute = attrib; c 1319 sound/pci/asihpi/hpifunc.c hm.u.c.param1 = param1; c 1320 sound/pci/asihpi/hpifunc.c hm.u.c.param2 = param2; c 1335 sound/pci/asihpi/hpifunc.c hm.u.c.attribute = attrib; c 1336 sound/pci/asihpi/hpifunc.c hm.u.c.an_log_value[0] = sv0; c 1337 sound/pci/asihpi/hpifunc.c hm.u.c.an_log_value[1] = sv1; c 1353 sound/pci/asihpi/hpifunc.c hm.u.c.attribute = attrib; c 1354 sound/pci/asihpi/hpifunc.c hm.u.c.param1 = param1; c 1355 sound/pci/asihpi/hpifunc.c hm.u.c.param2 = param2; c 1358 sound/pci/asihpi/hpifunc.c *pparam1 = hr.u.c.param1; c 1360 sound/pci/asihpi/hpifunc.c *pparam2 = hr.u.c.param2; c 1379 sound/pci/asihpi/hpifunc.c hm.u.c.attribute = attrib; c 1382 sound/pci/asihpi/hpifunc.c *sv0 = hr.u.c.an_log_value[0]; c 1384 sound/pci/asihpi/hpifunc.c *sv1 = hr.u.c.an_log_value[1]; c 1400 sound/pci/asihpi/hpifunc.c hm.u.c.attribute = attrib; c 1401 sound/pci/asihpi/hpifunc.c hm.u.c.param1 = index; c 1402 sound/pci/asihpi/hpifunc.c hm.u.c.param2 = param; c 1405 sound/pci/asihpi/hpifunc.c *psetting = hr.u.c.param1; c 1414 sound/pci/asihpi/hpifunc.c char c = 0; c 1430 sound/pci/asihpi/hpifunc.c hm.u.c.attribute = attribute; c 1431 sound/pci/asihpi/hpifunc.c hm.u.c.param1 = sub_string_index; c 1432 sound/pci/asihpi/hpifunc.c hm.u.c.param2 = 0; c 1445 sound/pci/asihpi/hpifunc.c c = hr.u.cu.chars8.sz_data[j]; c 1446 
sound/pci/asihpi/hpifunc.c psz_string[sub_string_index + j] = c; c 1453 sound/pci/asihpi/hpifunc.c if (c == 0) c 1459 sound/pci/asihpi/hpifunc.c && (c != 0)) { c 1460 sound/pci/asihpi/hpifunc.c c = 0; c 1461 sound/pci/asihpi/hpifunc.c psz_string[sub_string_index + j] = c; c 1463 sound/pci/asihpi/hpifunc.c if (c == 0) c 1512 sound/pci/asihpi/hpifunc.c hm.u.c.attribute = HPI_AESEBURX_USERDATA; c 1513 sound/pci/asihpi/hpifunc.c hm.u.c.param1 = index; c 1518 sound/pci/asihpi/hpifunc.c *pw_data = (u16)hr.u.c.param2; c 1531 sound/pci/asihpi/hpifunc.c hm.u.c.attribute = HPI_AESEBURX_CHANNELSTATUS; c 1532 sound/pci/asihpi/hpifunc.c hm.u.c.param1 = index; c 1537 sound/pci/asihpi/hpifunc.c *pw_data = (u16)hr.u.c.param2; c 1628 sound/pci/asihpi/hpifunc.c hm.u.c.attribute = HPI_BITSTREAM_ACTIVITY; c 1631 sound/pci/asihpi/hpifunc.c *pw_clk_activity = (u16)hr.u.c.param1; c 1633 sound/pci/asihpi/hpifunc.c *pw_data_activity = (u16)hr.u.c.param2; c 1738 sound/pci/asihpi/hpifunc.c hm.u.c.attribute = HPI_COBRANET_GET_STATUS; c 1924 sound/pci/asihpi/hpifunc.c hm.u.c.attribute = HPI_COMPANDER_THRESHOLD; c 1925 sound/pci/asihpi/hpifunc.c hm.u.c.param2 = index; c 1926 sound/pci/asihpi/hpifunc.c hm.u.c.an_log_value[0] = threshold0_01dB; c 1943 sound/pci/asihpi/hpifunc.c hm.u.c.attribute = HPI_COMPANDER_THRESHOLD; c 1944 sound/pci/asihpi/hpifunc.c hm.u.c.param2 = index; c 1947 sound/pci/asihpi/hpifunc.c *threshold0_01dB = hr.u.c.an_log_value[0]; c 1974 sound/pci/asihpi/hpifunc.c hm.u.c.attribute = HPI_LEVEL_RANGE; c 1978 sound/pci/asihpi/hpifunc.c hr.u.c.an_log_value[0] = 0; c 1979 sound/pci/asihpi/hpifunc.c hr.u.c.an_log_value[1] = 0; c 1980 sound/pci/asihpi/hpifunc.c hr.u.c.param1 = 0; c 1983 sound/pci/asihpi/hpifunc.c *min_gain_01dB = hr.u.c.an_log_value[0]; c 1985 sound/pci/asihpi/hpifunc.c *max_gain_01dB = hr.u.c.an_log_value[1]; c 1987 sound/pci/asihpi/hpifunc.c *step_gain_01dB = (short)hr.u.c.param1; c 2024 sound/pci/asihpi/hpifunc.c hm.u.c.attribute = HPI_METER_PEAK; c 2029 sound/pci/asihpi/hpifunc.c memcpy(an_peakdB, hr.u.c.an_log_value, c 2049 sound/pci/asihpi/hpifunc.c hm.u.c.attribute = HPI_METER_RMS; c 2054 sound/pci/asihpi/hpifunc.c memcpy(an_rmsdB, hr.u.c.an_log_value, c 2157 sound/pci/asihpi/hpifunc.c hm.u.c.attribute = HPI_MULTIPLEXER_QUERYSOURCE; c 2158 sound/pci/asihpi/hpifunc.c hm.u.c.param1 = index; c 2163 sound/pci/asihpi/hpifunc.c *source_node_type = (u16)hr.u.c.param1; c 2165 sound/pci/asihpi/hpifunc.c *source_node_index = (u16)hr.u.c.param2; c 2201 sound/pci/asihpi/hpifunc.c hm.u.c.attribute = HPI_EQUALIZER_FILTER; c 2202 sound/pci/asihpi/hpifunc.c hm.u.c.param2 = index; c 2207 sound/pci/asihpi/hpifunc.c *pfrequency_hz = hr.u.c.param1; c 2209 sound/pci/asihpi/hpifunc.c *pn_type = (u16)(hr.u.c.param2 >> 16); c 2211 sound/pci/asihpi/hpifunc.c *pnQ100 = hr.u.c.an_log_value[1]; c 2213 sound/pci/asihpi/hpifunc.c *pn_gain0_01dB = hr.u.c.an_log_value[0]; c 2229 sound/pci/asihpi/hpifunc.c hm.u.c.param1 = frequency_hz; c 2230 sound/pci/asihpi/hpifunc.c hm.u.c.param2 = (index & 0xFFFFL) + ((u32)type << 16); c 2231 sound/pci/asihpi/hpifunc.c hm.u.c.an_log_value[0] = gain0_01dB; c 2232 sound/pci/asihpi/hpifunc.c hm.u.c.an_log_value[1] = q100; c 2233 sound/pci/asihpi/hpifunc.c hm.u.c.attribute = HPI_EQUALIZER_FILTER; c 2250 sound/pci/asihpi/hpifunc.c hm.u.c.attribute = HPI_EQUALIZER_COEFFICIENTS; c 2251 sound/pci/asihpi/hpifunc.c hm.u.c.param2 = index; c 2255 sound/pci/asihpi/hpifunc.c coeffs[0] = (short)hr.u.c.an_log_value[0]; c 2256 sound/pci/asihpi/hpifunc.c coeffs[1] = 
(short)hr.u.c.an_log_value[1]; c 2257 sound/pci/asihpi/hpifunc.c coeffs[2] = (short)hr.u.c.param1; c 2258 sound/pci/asihpi/hpifunc.c coeffs[3] = (short)(hr.u.c.param1 >> 16); c 2259 sound/pci/asihpi/hpifunc.c coeffs[4] = (short)hr.u.c.param2; c 2693 sound/pci/asihpi/hpifunc.c hm.u.c.attribute = HPI_TUNER_RDS; c 2779 sound/pci/asihpi/hpifunc.c hm.u.c.attribute = HPI_VOLUME_RANGE; c 2783 sound/pci/asihpi/hpifunc.c hr.u.c.an_log_value[0] = 0; c 2784 sound/pci/asihpi/hpifunc.c hr.u.c.an_log_value[1] = 0; c 2785 sound/pci/asihpi/hpifunc.c hr.u.c.param1 = 0; c 2788 sound/pci/asihpi/hpifunc.c *min_gain_01dB = hr.u.c.an_log_value[0]; c 2790 sound/pci/asihpi/hpifunc.c *max_gain_01dB = hr.u.c.an_log_value[1]; c 2792 sound/pci/asihpi/hpifunc.c *step_gain_01dB = (short)hr.u.c.param1; c 2808 sound/pci/asihpi/hpifunc.c memcpy(hm.u.c.an_log_value, an_stop_gain0_01dB, c 2811 sound/pci/asihpi/hpifunc.c hm.u.c.attribute = HPI_VOLUME_AUTOFADE; c 2812 sound/pci/asihpi/hpifunc.c hm.u.c.param1 = duration_ms; c 2813 sound/pci/asihpi/hpifunc.c hm.u.c.param2 = profile; c 2845 sound/pci/asihpi/hpifunc.c hm.u.c.attribute = HPI_VOX_THRESHOLD; c 2847 sound/pci/asihpi/hpifunc.c hm.u.c.an_log_value[0] = an_gain0_01dB; c 2862 sound/pci/asihpi/hpifunc.c hm.u.c.attribute = HPI_VOX_THRESHOLD; c 2866 sound/pci/asihpi/hpifunc.c *an_gain0_01dB = hr.u.c.an_log_value[0]; c 47 sound/pci/au88x0/au88x0_a3d.c a3dsrc_SetAtmosTarget(a3dsrc_t * a, short aa, short b, short c, short d, c 58 sound/pci/au88x0/au88x0_a3d.c a3d_addrB(a->slice, a->source, A3D_B_B2Target), c); c 62 sound/pci/au88x0/au88x0_a3d.c a3dsrc_SetAtmosCurrent(a3dsrc_t * a, short aa, short b, short c, short d, c 73 sound/pci/au88x0/au88x0_a3d.c a3d_addrB(a->slice, a->source, A3D_B_B2Current), c); c 88 sound/pci/au88x0/au88x0_a3d.c a3dsrc_GetAtmosTarget(a3dsrc_t * a, short *aa, short *b, short *c, c 1648 sound/pci/ca0106/ca0106_main.c struct snd_ca0106_details *c; c 1710 sound/pci/ca0106/ca0106_main.c for (c = ca0106_chip_details; c->serial; c++) { c 1712 sound/pci/ca0106/ca0106_main.c if (c->serial == subsystem[dev]) c 1714 sound/pci/ca0106/ca0106_main.c } else if (c->serial == chip->serial) c 1717 sound/pci/ca0106/ca0106_main.c chip->details = c; c 1721 sound/pci/ca0106/ca0106_main.c c->name, chip->serial, subsystem[dev]); c 1725 sound/pci/ca0106/ca0106_main.c c->name, chip->port, chip->irq); c 778 sound/pci/ca0106/ca0106_mixer.c char **c; c 818 sound/pci/ca0106/ca0106_mixer.c for (c = ca0106_remove_ctls; *c; c++) c 819 sound/pci/ca0106/ca0106_mixer.c remove_ctl(card, *c); c 820 sound/pci/ca0106/ca0106_mixer.c for (c = ca0106_rename_ctls; *c; c += 2) c 821 sound/pci/ca0106/ca0106_mixer.c rename_ctl(card, c[0], c[1]); c 59 sound/pci/cs4281.c #define BA0_HISR_FIFO(c) (1<<(12+(c))) /* FIFO channel interrupt */ c 60 sound/pci/cs4281.c #define BA0_HISR_DMA(c) (1<<(8+(c))) /* DMA channel interrupt */ c 1165 sound/pci/cs4281.c entry->c.ops = &snd_cs4281_proc_ops_BA0; c 1171 sound/pci/cs4281.c entry->c.ops = &snd_cs4281_proc_ops_BA1; c 1829 sound/pci/cs4281.c unsigned char c; c 1833 sound/pci/cs4281.c c = snd_cs4281_peekBA0(chip, BA0_MIDRP); c 1836 sound/pci/cs4281.c snd_rawmidi_receive(chip->midi_input, &c, 1); c 1841 sound/pci/cs4281.c if (snd_rawmidi_transmit(chip->midi_output, &c, 1) != 1) { c 1846 sound/pci/cs4281.c snd_cs4281_pokeBA0(chip, BA0_MIDWP, c); c 1398 sound/pci/cs46xx/cs46xx_lib.c unsigned char c; c 1402 sound/pci/cs46xx/cs46xx_lib.c c = snd_cs46xx_peekBA0(chip, BA0_MIDRP); c 1405 sound/pci/cs46xx/cs46xx_lib.c snd_rawmidi_receive(chip->midi_input, &c, 1); c 
1410 sound/pci/cs46xx/cs46xx_lib.c if (snd_rawmidi_transmit(chip->midi_output, &c, 1) != 1) { c 1415 sound/pci/cs46xx/cs46xx_lib.c snd_cs46xx_pokeBA0(chip, BA0_MIDWP, c); c 2838 sound/pci/cs46xx/cs46xx_lib.c entry->c.ops = &snd_cs46xx_proc_io_ops; c 1784 sound/pci/ctxfi/cthw20k1.c #define CTLBITS(a, b, c, d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d)) c 109 sound/pci/echoaudio/echoaudio.c struct snd_interval *c = hw_param_interval(params, c 118 sound/pci/echoaudio/echoaudio.c if (c->min == 2) { c 124 sound/pci/echoaudio/echoaudio.c if (c->min > 2) { c 137 sound/pci/echoaudio/echoaudio.c struct snd_interval *c = hw_param_interval(params, c 153 sound/pci/echoaudio/echoaudio.c return snd_interval_refine(c, &ch); c 160 sound/pci/echoaudio/echoaudio.c return snd_interval_refine(c, &ch); c 171 sound/pci/echoaudio/echoaudio.c struct snd_interval *c = hw_param_interval(params, c 181 sound/pci/echoaudio/echoaudio.c if (c->min > 2) { c 186 sound/pci/echoaudio/echoaudio.c } else if (c->max == 1) c 190 sound/pci/echoaudio/echoaudio.c else if (c->min == 2 && c->max == 2) c 206 sound/pci/echoaudio/echoaudio.c struct snd_interval *c = hw_param_interval(params, c 234 sound/pci/echoaudio/echoaudio.c return snd_interval_refine(c, &ch); c 994 sound/pci/echoaudio/echoaudio.c int c; c 997 sound/pci/echoaudio/echoaudio.c for (c = 0; c < num_busses_out(chip); c++) c 998 sound/pci/echoaudio/echoaudio.c ucontrol->value.integer.value[c] = chip->output_gain[c]; c 1006 sound/pci/echoaudio/echoaudio.c int c, changed, gain; c 1011 sound/pci/echoaudio/echoaudio.c for (c = 0; c < num_busses_out(chip); c++) { c 1012 sound/pci/echoaudio/echoaudio.c gain = ucontrol->value.integer.value[c]; c 1016 sound/pci/echoaudio/echoaudio.c if (chip->output_gain[c] != gain) { c 1017 sound/pci/echoaudio/echoaudio.c set_output_gain(chip, c, gain); c 1075 sound/pci/echoaudio/echoaudio.c int c; c 1078 sound/pci/echoaudio/echoaudio.c for (c = 0; c < num_analog_busses_in(chip); c++) c 1079 sound/pci/echoaudio/echoaudio.c ucontrol->value.integer.value[c] = chip->input_gain[c]; c 1087 sound/pci/echoaudio/echoaudio.c int c, gain, changed; c 1092 sound/pci/echoaudio/echoaudio.c for (c = 0; c < num_analog_busses_in(chip); c++) { c 1093 sound/pci/echoaudio/echoaudio.c gain = ucontrol->value.integer.value[c]; c 1097 sound/pci/echoaudio/echoaudio.c if (chip->input_gain[c] != gain) { c 1098 sound/pci/echoaudio/echoaudio.c set_input_gain(chip, c, gain); c 1144 sound/pci/echoaudio/echoaudio.c int c; c 1147 sound/pci/echoaudio/echoaudio.c for (c = 0; c < num_analog_busses_out(chip); c++) c 1148 sound/pci/echoaudio/echoaudio.c ucontrol->value.integer.value[c] = chip->nominal_level[c]; c 1156 sound/pci/echoaudio/echoaudio.c int c, changed; c 1161 sound/pci/echoaudio/echoaudio.c for (c = 0; c < num_analog_busses_out(chip); c++) { c 1162 sound/pci/echoaudio/echoaudio.c if (chip->nominal_level[c] != ucontrol->value.integer.value[c]) { c 1163 sound/pci/echoaudio/echoaudio.c set_nominal_level(chip, c, c 1164 sound/pci/echoaudio/echoaudio.c ucontrol->value.integer.value[c]); c 1206 sound/pci/echoaudio/echoaudio.c int c; c 1209 sound/pci/echoaudio/echoaudio.c for (c = 0; c < num_analog_busses_in(chip); c++) c 1210 sound/pci/echoaudio/echoaudio.c ucontrol->value.integer.value[c] = c 1211 sound/pci/echoaudio/echoaudio.c chip->nominal_level[bx_analog_in(chip) + c]; c 1219 sound/pci/echoaudio/echoaudio.c int c, changed; c 1224 sound/pci/echoaudio/echoaudio.c for (c = 0; c < num_analog_busses_in(chip); c++) { c 1225 sound/pci/echoaudio/echoaudio.c if 
(chip->nominal_level[bx_analog_in(chip) + c] != c 1226 sound/pci/echoaudio/echoaudio.c ucontrol->value.integer.value[c]) { c 1227 sound/pci/echoaudio/echoaudio.c set_nominal_level(chip, bx_analog_in(chip) + c, c 1228 sound/pci/echoaudio/echoaudio.c ucontrol->value.integer.value[c]); c 1793 sound/pci/emu10k1/emu10k1_main.c const struct snd_emu_chip_details *c; c 1834 sound/pci/emu10k1/emu10k1_main.c for (c = emu_chip_details; c->vendor; c++) { c 1835 sound/pci/emu10k1/emu10k1_main.c if (c->vendor == pci->vendor && c->device == pci->device) { c 1837 sound/pci/emu10k1/emu10k1_main.c if (c->subsystem && (c->subsystem == subsystem)) c 1842 sound/pci/emu10k1/emu10k1_main.c if (c->subsystem && (c->subsystem != emu->serial)) c 1844 sound/pci/emu10k1/emu10k1_main.c if (c->revision && c->revision != emu->revision) c 1850 sound/pci/emu10k1/emu10k1_main.c if (c->vendor == 0) { c 1856 sound/pci/emu10k1/emu10k1_main.c emu->card_capabilities = c; c 1857 sound/pci/emu10k1/emu10k1_main.c if (c->subsystem && !subsystem) c 1858 sound/pci/emu10k1/emu10k1_main.c dev_dbg(card->dev, "Sound card name = %s\n", c->name); c 1862 sound/pci/emu10k1/emu10k1_main.c "Forced to subsystem = 0x%x\n", c->name, c 1863 sound/pci/emu10k1/emu10k1_main.c pci->vendor, pci->device, emu->serial, c->subsystem); c 1867 sound/pci/emu10k1/emu10k1_main.c c->name, pci->vendor, pci->device, c 1870 sound/pci/emu10k1/emu10k1_main.c if (!*card->id && c->id) c 1871 sound/pci/emu10k1/emu10k1_main.c strlcpy(card->id, c->id, sizeof(card->id)); c 1873 sound/pci/emu10k1/emu10k1_main.c is_audigy = emu->audigy = c->emu10k2_chip; c 1781 sound/pci/emu10k1/emumixer.c char **c; c 1933 sound/pci/emu10k1/emumixer.c c = audigy_remove_ctls_1361t_adc; c 1935 sound/pci/emu10k1/emumixer.c c = audigy_remove_ctls; c 1952 sound/pci/emu10k1/emumixer.c c = emu10k1_remove_ctls; c 1954 sound/pci/emu10k1/emumixer.c for (; *c; c++) c 1955 sound/pci/emu10k1/emumixer.c remove_ctl(card, *c); c 1957 sound/pci/emu10k1/emumixer.c c = audigy_remove_ctls_i2c_adc; c 1958 sound/pci/emu10k1/emumixer.c for (; *c; c++) c 1959 sound/pci/emu10k1/emumixer.c remove_ctl(card, *c); c 1972 sound/pci/emu10k1/emumixer.c c = audigy_rename_ctls_1361t_adc; c 1974 sound/pci/emu10k1/emumixer.c c = audigy_rename_ctls_i2c_adc; c 1976 sound/pci/emu10k1/emumixer.c c = audigy_rename_ctls; c 1978 sound/pci/emu10k1/emumixer.c c = emu10k1_rename_ctls; c 1979 sound/pci/emu10k1/emumixer.c for (; *c; c += 2) c 1980 sound/pci/emu10k1/emumixer.c rename_ctl(card, c[0], c[1]); c 597 sound/pci/emu10k1/emuproc.c entry->c.ops = &snd_emu10k1_proc_ops_fx8010; c 604 sound/pci/emu10k1/emuproc.c entry->c.ops = &snd_emu10k1_proc_ops_fx8010; c 611 sound/pci/emu10k1/emuproc.c entry->c.ops = &snd_emu10k1_proc_ops_fx8010; c 618 sound/pci/emu10k1/emuproc.c entry->c.ops = &snd_emu10k1_proc_ops_fx8010; c 737 sound/pci/hda/hda_auto_parser.c unsigned int c; c 740 sound/pci/hda/hda_auto_parser.c c = snd_hda_codec_get_pincfg(codec, pin); c 741 sound/pci/hda/hda_auto_parser.c if (hdmi == is_hdmi_cfg(c)) c 1081 sound/pci/hda/hda_codec.c struct hda_codec *c; c 1109 sound/pci/hda/hda_codec.c list_for_each_codec(c, codec->bus) { c 1110 sound/pci/hda/hda_codec.c snd_array_for_each(&c->cvt_setups, i, p) { c 1112 sound/pci/hda/hda_codec.c get_wcaps_type(get_wcaps(c, p->nid)) == type) c 1172 sound/pci/hda/hda_codec.c struct hda_codec *c; c 1176 sound/pci/hda/hda_codec.c list_for_each_codec(c, codec->bus) { c 1177 sound/pci/hda/hda_codec.c snd_array_for_each(&c->cvt_setups, i, p) { c 1179 sound/pci/hda/hda_codec.c really_cleanup_stream(c, 
p); c 1869 sound/pci/hda/hda_codec.c if (kctl->tlv.c != snd_hda_mixer_amp_tlv) { c 3401 sound/pci/hda/hda_codec.c struct hda_codec *c; c 3403 sound/pci/hda/hda_codec.c list_for_each_codec(c, bus) c 3404 sound/pci/hda/hda_codec.c codec_set_power_save(c, delay); c 1265 sound/pci/hda/hda_controller.c int c, codecs, err; c 1272 sound/pci/hda/hda_controller.c for (c = 0; c < max_slots; c++) { c 1273 sound/pci/hda/hda_controller.c if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) { c 1274 sound/pci/hda/hda_controller.c if (probe_codec(chip, c) < 0) { c 1279 sound/pci/hda/hda_controller.c "Codec #%d probe error; disabling it...\n", c); c 1280 sound/pci/hda/hda_controller.c bus->codec_mask &= ~(1 << c); c 1295 sound/pci/hda/hda_controller.c for (c = 0; c < max_slots; c++) { c 1296 sound/pci/hda/hda_controller.c if ((bus->codec_mask & (1 << c)) & chip->codec_probe_mask) { c 1298 sound/pci/hda/hda_controller.c err = snd_hda_codec_new(&chip->bus, chip->card, c, &codec); c 3239 sound/pci/hda/hda_generic.c int c; c 3241 sound/pci/hda/hda_generic.c for (c = 0; c < num_adcs; c++) { c 3243 sound/pci/hda/hda_generic.c hda_nid_t adc = spec->adc_nids[c]; c 3251 sound/pci/hda/hda_generic.c spec->input_paths[imux_idx][c] = c 3261 sound/pci/hda/hda_generic.c spec->dyn_adc_idx[imux_idx] = c; c 3493 sound/pci/hda/hda_generic.c .tlv = { .c = cap_vol_tlv }, c 5941 sound/pci/hda/hda_generic.c int i, c, nums; c 5948 sound/pci/hda/hda_generic.c for (c = 0; c < nums; c++) { c 5950 sound/pci/hda/hda_generic.c path = get_input_path(codec, c, i); c 5953 sound/pci/hda/hda_generic.c if (i == spec->cur_mux[c]) c 5959 sound/pci/hda/hda_generic.c update_hp_mic(codec, c, true); c 41 sound/pci/hda/hda_local.h .tlv = { .c = snd_hda_mixer_amp_tlv }, \ c 603 sound/pci/hda/hda_proc.c int c, curr = -1; c 616 sound/pci/hda/hda_proc.c for (c = 0; c < conn_len; c++) { c 617 sound/pci/hda/hda_proc.c snd_iprintf(buffer, " 0x%02x", conn[c]); c 618 sound/pci/hda/hda_proc.c if (c == curr) c 631 sound/pci/hda/hda_proc.c for (c = 0; c < cache_len; c++) c 632 sound/pci/hda/hda_proc.c snd_iprintf(buffer, " 0x%02x", list[c]); c 3654 sound/pci/hda/patch_ca0132.c .tlv = { .c = ca0132_volume_tlv }, \ c 3672 sound/pci/hda/patch_ca0132.c .tlv = { .c = snd_hda_mixer_amp_tlv }, \ c 3984 sound/pci/hda/patch_ca0132.c knew.tlv.c = 0; c 6066 sound/pci/hda/patch_ca0132.c knew.tlv.c = NULL; c 516 sound/pci/hda/patch_hdmi.c entry->c.text.write = write_eld_info; c 3863 sound/pci/hda/patch_hdmi.c int c; c 3877 sound/pci/hda/patch_hdmi.c for (c = 0; c < 7; c += 2) { c 3878 sound/pci/hda/patch_hdmi.c if (cap->speakers[c] || cap->speakers[c+1]) c 3894 sound/pci/hda/patch_hdmi.c int c; c 3896 sound/pci/hda/patch_hdmi.c for (c = 7; c >= 0; c--) { c 3897 sound/pci/hda/patch_hdmi.c int chan = 7 - atihdmi_paired_swap_fc_lfe(7 - c); c 42 sound/pci/ice1712/hoontech.h #define ICE1712_STDSP24_CLOCK(r, a, c) r[a&3] = ((r[a&3] & ~0x20) | (((c)&1)<<5)) c 2273 sound/pci/ice1712/ice1712.c struct snd_ice1712_card_info * const *tbl, *c; c 2297 sound/pci/ice1712/ice1712.c for (c = *tbl; c->subvendor; c++) { c 2298 sound/pci/ice1712/ice1712.c if (modelname && c->model && !strcmp(modelname, c->model)) { c 2300 sound/pci/ice1712/ice1712.c "Using board model %s\n", c->name); c 2301 sound/pci/ice1712/ice1712.c ice->eeprom.subvendor = c->subvendor; c 2302 sound/pci/ice1712/ice1712.c } else if (c->subvendor != ice->eeprom.subvendor) c 2304 sound/pci/ice1712/ice1712.c if (!c->eeprom_size || !c->eeprom_data) c 2309 sound/pci/ice1712/ice1712.c ice->eeprom.size = c->eeprom_size + 6; 
c 2310 sound/pci/ice1712/ice1712.c memcpy(ice->eeprom.data, c->eeprom_data, c->eeprom_size); c 2619 sound/pci/ice1712/ice1712.c struct snd_ice1712_card_info * const *tbl, *c; c 2644 sound/pci/ice1712/ice1712.c for (c = *tbl; c->subvendor; c++) { c 2645 sound/pci/ice1712/ice1712.c if (c->subvendor == ice->eeprom.subvendor) { c 2646 sound/pci/ice1712/ice1712.c ice->card_info = c; c 2647 sound/pci/ice1712/ice1712.c strcpy(card->shortname, c->name); c 2648 sound/pci/ice1712/ice1712.c if (c->driver) /* specific driver? */ c 2649 sound/pci/ice1712/ice1712.c strcpy(card->driver, c->driver); c 2650 sound/pci/ice1712/ice1712.c if (c->chip_init) { c 2651 sound/pci/ice1712/ice1712.c err = c->chip_init(ice); c 2661 sound/pci/ice1712/ice1712.c c = &no_matched; c 2690 sound/pci/ice1712/ice1712.c if (c->build_controls) { c 2691 sound/pci/ice1712/ice1712.c err = c->build_controls(ice); c 2706 sound/pci/ice1712/ice1712.c if (!c->no_mpu401) { c 2709 sound/pci/ice1712/ice1712.c c->mpu401_1_info_flags | c 2716 sound/pci/ice1712/ice1712.c if (c->mpu401_1_name) c 2720 sound/pci/ice1712/ice1712.c "%s %d", c->mpu401_1_name, card->number); c 2726 sound/pci/ice1712/ice1712.c c->mpu401_2_info_flags | c 2734 sound/pci/ice1712/ice1712.c if (c->mpu401_2_name) c 2738 sound/pci/ice1712/ice1712.c "%s %d", c->mpu401_2_name, c 2294 sound/pci/ice1712/ice1724.c struct snd_ice1712_card_info * const *tbl, *c; c 2324 sound/pci/ice1712/ice1724.c for (c = *tbl; c->name; c++) { c 2325 sound/pci/ice1712/ice1724.c if (modelname && c->model && c 2326 sound/pci/ice1712/ice1724.c !strcmp(modelname, c->model)) { c 2329 sound/pci/ice1712/ice1724.c c->name); c 2330 sound/pci/ice1712/ice1724.c ice->eeprom.subvendor = c->subvendor; c 2331 sound/pci/ice1712/ice1724.c } else if (c->subvendor != ice->eeprom.subvendor) c 2333 sound/pci/ice1712/ice1724.c ice->card_info = c; c 2334 sound/pci/ice1712/ice1724.c if (!c->eeprom_size || !c->eeprom_data) c 2339 sound/pci/ice1712/ice1724.c ice->eeprom.size = c->eeprom_size + 6; c 2340 sound/pci/ice1712/ice1724.c memcpy(ice->eeprom.data, c->eeprom_data, c->eeprom_size); c 2608 sound/pci/ice1712/ice1724.c struct snd_ice1712_card_info * const *tbl, *c; c 2635 sound/pci/ice1712/ice1724.c for (c = *tbl; c->name; c++) { c 2636 sound/pci/ice1712/ice1724.c if ((model[dev] && c->model && c 2637 sound/pci/ice1712/ice1724.c !strcmp(model[dev], c->model)) || c 2638 sound/pci/ice1712/ice1724.c (c->subvendor == ice->eeprom.subvendor)) { c 2639 sound/pci/ice1712/ice1724.c strcpy(card->shortname, c->name); c 2640 sound/pci/ice1712/ice1724.c if (c->driver) /* specific driver? 
*/ c 2641 sound/pci/ice1712/ice1724.c strcpy(card->driver, c->driver); c 2642 sound/pci/ice1712/ice1724.c if (c->chip_init) { c 2643 sound/pci/ice1712/ice1724.c err = c->chip_init(ice); c 2653 sound/pci/ice1712/ice1724.c c = &no_matched; c 2722 sound/pci/ice1712/ice1724.c if (c->build_controls) { c 2723 sound/pci/ice1712/ice1724.c err = c->build_controls(ice); c 2730 sound/pci/ice1712/ice1724.c if (!c->no_mpu401) { c 410 sound/pci/ice1712/se.c int c; c 415 sound/pci/ice1712/se.c for (c = 0; member[c]; c++) c 417 sound/pci/ice1712/se.c return c; c 435 sound/pci/ice1712/se.c int n, c; c 438 sound/pci/ice1712/se.c c = se200pci_get_enum_count(n); c 439 sound/pci/ice1712/se.c if (!c) c 441 sound/pci/ice1712/se.c return snd_ctl_enum_info(uinfo, 1, c, se200pci_cont[n].member); c 467 sound/pci/korg1212/korg1212.c union swap_u32 { unsigned char c[4]; u32 i; }; c 478 sound/pci/korg1212/korg1212.c retVal.c[2] = swapper.c[3]; c 479 sound/pci/korg1212/korg1212.c retVal.c[3] = swapper.c[2]; c 480 sound/pci/korg1212/korg1212.c retVal.c[1] = swapper.c[1]; c 481 sound/pci/korg1212/korg1212.c retVal.c[0] = swapper.c[0]; c 495 sound/pci/korg1212/korg1212.c retVal.c[2] = swapper.c[2]; c 496 sound/pci/korg1212/korg1212.c retVal.c[3] = swapper.c[3]; c 497 sound/pci/korg1212/korg1212.c retVal.c[1] = swapper.c[0]; c 498 sound/pci/korg1212/korg1212.c retVal.c[0] = swapper.c[1]; c 578 sound/pci/lola/lola_mixer.c .tlv.c = lola_analog_vol_tlv, c 1216 sound/pci/mixart/mixart.c entry->c.ops = &snd_mixart_proc_ops_BA0; c 1222 sound/pci/mixart/mixart.c entry->c.ops = &snd_mixart_proc_ops_BA1; c 990 sound/pci/oxygen/xonar_wm87x6.c #define WM8776_FIELD_CTL_VOLUME(a, b, c, d, e, f, g, h, tlv_p) { \ c 991 sound/pci/oxygen/xonar_wm87x6.c _WM8776_FIELD_CTL(a " Capture Volume", b, c, d, e, f, g, h), \ c 230 sound/pci/riptide/riptide.c #define SEND_GETC(p,b,c) sendcmd(p,PARM|RESP,GETC,c,RET(b)) c 233 sound/pci/riptide/riptide.c #define SEND_RMEM(p,b,c,d) sendcmd(p,PARM|RESP,RMEM|BYTE1(b),LONG0(c),RET(d)) /* memory access for firmware write */ c 234 sound/pci/riptide/riptide.c #define SEND_SMEM(p,b,c) sendcmd(p,PARM,SMEM|BYTE1(b),LONG0(c),RET(0)) /* memory access for firmware write */ c 235 sound/pci/riptide/riptide.c #define SEND_WMEM(p,b,c) sendcmd(p,PARM,WMEM|BYTE1(b),LONG0(c),RET(0)) /* memory access for firmware write */ c 236 sound/pci/riptide/riptide.c #define SEND_SDTM(p,b,c) sendcmd(p,PARM|RESP,SDTM|TRINIB1(b),0,RET(c)) /* memory access for firmware write */ c 239 sound/pci/riptide/riptide.c #define SEND_SSTR(p,b,c) sendcmd(p,PARM,SSTR|BYTE3(b),LONG0(c),RET(0)) /* start stream */ c 243 sound/pci/riptide/riptide.c #define SEND_GPOS(p,b,c,d) sendcmd(p,PARM|RESP,GPOS,BYTE3(c)|BYTE2(b),RET(d)) /* get position in dma */ c 244 sound/pci/riptide/riptide.c #define SEND_SETF(p,b,c,d,e,f,g) sendcmd(p,PARM,SETF|WORD1(b)|BYTE3(c),d|BYTE1(e)|BYTE2(f)|BYTE3(g),RET(0)) /* set sample format at mixer */ c 245 sound/pci/riptide/riptide.c #define SEND_GSTS(p,b,c,d) sendcmd(p,PARM|RESP,GSTS,BYTE3(c)|BYTE2(b),RET(d)) c 246 sound/pci/riptide/riptide.c #define SEND_NGPOS(p,b,c,d) sendcmd(p,PARM|RESP,NGPOS,BYTE3(c)|BYTE2(b),RET(d)) c 247 sound/pci/riptide/riptide.c #define SEND_PSEL(p,b,c) sendcmd(p,PARM,PSEL,BYTE2(b)|BYTE3(c),RET(0)) /* activate lbus path */ c 248 sound/pci/riptide/riptide.c #define SEND_PCLR(p,b,c) sendcmd(p,PARM,PCLR,BYTE2(b)|BYTE3(c),RET(0)) /* deactivate lbus path */ c 250 sound/pci/riptide/riptide.c #define SEND_RSSV(p,b,c,d) sendcmd(p,PARM|RESP,RSSV,BYTE2(b)|BYTE3(c),RET(d)) c 251 sound/pci/riptide/riptide.c #define 
SEND_LSEL(p,b,c,d,e,f,g,h) sendcmd(p,PARM,LSEL|BYTE1(b)|BYTE2(c)|BYTE3(d),BYTE0(e)|BYTE1(f)|BYTE2(g)|BYTE3(h),RET(0)) /* select paths for internal connections */ c 252 sound/pci/riptide/riptide.c #define SEND_SSRC(p,b,c,d,e) sendcmd(p,PARM,SSRC|BYTE1(b)|WORD2(c),WORD0(d)|WORD2(e),RET(0)) /* configure source */ c 254 sound/pci/riptide/riptide.c #define SEND_RSRC(p,b,c) sendcmd(p,RESP,RSRC|BYTE1(b),0,RET(c)) /* read source config */ c 255 sound/pci/riptide/riptide.c #define SEND_SSRB(p,b,c) sendcmd(p,PARM,SSRB|BYTE1(b),WORD2(c),RET(0)) c 256 sound/pci/riptide/riptide.c #define SEND_SDGV(p,b,c,d,e) sendcmd(p,PARM,SDGV|BYTE2(b)|BYTE3(c),WORD0(d)|WORD2(e),RET(0)) /* set digital mixer */ c 257 sound/pci/riptide/riptide.c #define SEND_RDGV(p,b,c,d) sendcmd(p,PARM|RESP,RDGV|BYTE2(b)|BYTE3(c),0,RET(d)) /* read digital mixer */ c 259 sound/pci/riptide/riptide.c #define SEND_SACR(p,b,c) sendcmd(p,PARM,SACR,WORD0(b)|WORD2(c),RET(0)) /* set AC97 register */ c 260 sound/pci/riptide/riptide.c #define SEND_RACR(p,b,c) sendcmd(p,PARM|RESP,RACR,WORD2(b),RET(c)) /* get AC97 register */ c 262 sound/pci/riptide/riptide.c #define SEND_TXAC(p,b,c,d,e,f) sendcmd(p,PARM,TXAC|BYTE1(b)|WORD2(c),WORD0(d)|BYTE2(e)|BYTE3(f),RET(0)) c 263 sound/pci/riptide/riptide.c #define SEND_RXAC(p,b,c,d) sendcmd(p,PARM|RESP,RXAC,BYTE2(b)|BYTE3(c),RET(d)) c 655 sound/pci/riptide/riptide.c unsigned char c; c 660 sound/pci/riptide/riptide.c c = in[len - 1]; c 661 sound/pci/riptide/riptide.c value = hex_to_bin(c); c 1081 sound/pci/riptide/riptide.c struct sgd *c; c 1097 sound/pci/riptide/riptide.c c = &data->sgdbuf[j]; c 1098 sound/pci/riptide/riptide.c flag = le32_to_cpu(c->dwStat_Ctl); c 1100 sound/pci/riptide/riptide.c pos += le32_to_cpu(c->dwSegLen); c 1102 sound/pci/riptide/riptide.c pos += le32_to_cpu(c->dwSegLen); c 1109 sound/pci/riptide/riptide.c c->dwStat_Ctl = c 1477 sound/pci/riptide/riptide.c struct sgd *c, *p = NULL; c 1494 sound/pci/riptide/riptide.c c = &data->sgdbuf[i]; c 1500 sound/pci/riptide/riptide.c c->dwNextLink = cpu_to_le32(data->sgdlist.addr); c 1503 sound/pci/riptide/riptide.c c->dwSegPtrPhys = cpu_to_le32(addr); c 1507 sound/pci/riptide/riptide.c c->dwSegLen = cpu_to_le32(f); c 1508 sound/pci/riptide/riptide.c c->dwStat_Ctl = c 1511 sound/pci/riptide/riptide.c p = c; c 4274 sound/pci/rme9652/hdsp.c struct snd_interval *c = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); c 4280 sound/pci/rme9652/hdsp.c return snd_interval_list(c, 3, list, 0); c 4285 sound/pci/rme9652/hdsp.c return snd_interval_list(c, 2, list, 0); c 4294 sound/pci/rme9652/hdsp.c struct snd_interval *c = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); c 4299 sound/pci/rme9652/hdsp.c return snd_interval_list(c, 3, list, 0); c 4304 sound/pci/rme9652/hdsp.c return snd_interval_list(c, 2, list, 0); c 4311 sound/pci/rme9652/hdsp.c struct snd_interval *c = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); c 4319 sound/pci/rme9652/hdsp.c return snd_interval_refine(c, &t); c 4326 sound/pci/rme9652/hdsp.c return snd_interval_refine(c, &t); c 4333 sound/pci/rme9652/hdsp.c return snd_interval_refine(c, &t); c 4342 sound/pci/rme9652/hdsp.c struct snd_interval *c = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); c 4350 sound/pci/rme9652/hdsp.c return snd_interval_refine(c, &t); c 4357 sound/pci/rme9652/hdsp.c return snd_interval_refine(c, &t); c 4364 sound/pci/rme9652/hdsp.c return snd_interval_refine(c, &t); c 4373 sound/pci/rme9652/hdsp.c struct snd_interval *c = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); c 4375 
sound/pci/rme9652/hdsp.c if (c->min >= hdsp->ss_out_channels) { c 4382 sound/pci/rme9652/hdsp.c } else if (c->max <= hdsp->qs_out_channels && hdsp->io_type == H9632) { c 4389 sound/pci/rme9652/hdsp.c } else if (c->max <= hdsp->ds_out_channels) { c 4404 sound/pci/rme9652/hdsp.c struct snd_interval *c = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); c 4406 sound/pci/rme9652/hdsp.c if (c->min >= hdsp->ss_in_channels) { c 4413 sound/pci/rme9652/hdsp.c } else if (c->max <= hdsp->qs_in_channels && hdsp->io_type == H9632) { c 4420 sound/pci/rme9652/hdsp.c } else if (c->max <= hdsp->ds_in_channels) { c 5581 sound/pci/rme9652/hdspm.c int c = hdspm->channel_map_out[i]; c 5583 sound/pci/rme9652/hdspm.c if (c < 0) c 5587 sound/pci/rme9652/hdspm.c c); c 5588 sound/pci/rme9652/hdspm.c snd_hdspm_enable_out(hdspm, c, 1); c 5598 sound/pci/rme9652/hdspm.c int c = hdspm->channel_map_in[i]; c 5600 sound/pci/rme9652/hdspm.c if (c < 0) c 5604 sound/pci/rme9652/hdspm.c c); c 5605 sound/pci/rme9652/hdspm.c snd_hdspm_enable_in(hdspm, c, 1); c 5877 sound/pci/rme9652/hdspm.c struct snd_interval *c = c 5888 sound/pci/rme9652/hdspm.c return snd_interval_refine(c, &t); c 5895 sound/pci/rme9652/hdspm.c return snd_interval_refine(c, &t); c 5902 sound/pci/rme9652/hdspm.c return snd_interval_refine(c, &t); c 5912 sound/pci/rme9652/hdspm.c struct snd_interval *c = c 5923 sound/pci/rme9652/hdspm.c return snd_interval_refine(c, &t); c 5930 sound/pci/rme9652/hdspm.c return snd_interval_refine(c, &t); c 5937 sound/pci/rme9652/hdspm.c return snd_interval_refine(c, &t); c 5947 sound/pci/rme9652/hdspm.c struct snd_interval *c = c 5952 sound/pci/rme9652/hdspm.c if (c->min >= hdspm->ss_in_channels) { c 5959 sound/pci/rme9652/hdspm.c } else if (c->max <= hdspm->qs_in_channels) { c 5966 sound/pci/rme9652/hdspm.c } else if (c->max <= hdspm->ds_in_channels) { c 5981 sound/pci/rme9652/hdspm.c struct snd_interval *c = c 5986 sound/pci/rme9652/hdspm.c if (c->min >= hdspm->ss_out_channels) { c 5993 sound/pci/rme9652/hdspm.c } else if (c->max <= hdspm->qs_out_channels) { c 6000 sound/pci/rme9652/hdspm.c } else if (c->max <= hdspm->ds_out_channels) { c 6017 sound/pci/rme9652/hdspm.c struct snd_interval *c = hw_param_interval(params, c 6023 sound/pci/rme9652/hdspm.c return snd_interval_list(c, 3, list, 0); c 6031 sound/pci/rme9652/hdspm.c struct snd_interval *c = hw_param_interval(params, c 6037 sound/pci/rme9652/hdspm.c return snd_interval_list(c, 3, list, 0); c 2228 sound/pci/rme9652/rme9652.c struct snd_interval *c = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); c 2230 sound/pci/rme9652/rme9652.c return snd_interval_list(c, 2, list, 0); c 2237 sound/pci/rme9652/rme9652.c struct snd_interval *c = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); c 2245 sound/pci/rme9652/rme9652.c return snd_interval_refine(c, &t); c 2252 sound/pci/rme9652/rme9652.c return snd_interval_refine(c, &t); c 2261 sound/pci/rme9652/rme9652.c struct snd_interval *c = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); c 2263 sound/pci/rme9652/rme9652.c if (c->min >= rme9652->ss_channels) { c 2270 sound/pci/rme9652/rme9652.c } else if (c->max <= rme9652->ds_channels) { c 80 sound/pcmcia/vx/vxp_ops.c int c; c 82 sound/pcmcia/vx/vxp_ops.c c = vx_inb(chip, CDSP); c 83 sound/pcmcia/vx/vxp_ops.c if (c == CDSP_MAGIC) c 87 sound/pcmcia/vx/vxp_ops.c snd_printk(KERN_ERR "cannot find xilinx magic word (%x)\n", c); c 139 sound/pcmcia/vx/vxp_ops.c int c; c 171 sound/pcmcia/vx/vxp_ops.c c = vx_inb(chip, RXL); c 172 sound/pcmcia/vx/vxp_ops.c if (c != 
(int)data) c 173 sound/pcmcia/vx/vxp_ops.c snd_printk(KERN_ERR "vxpocket: load xilinx mismatch at %d: 0x%x != 0x%x\n", i, c, (int)data); c 187 sound/pcmcia/vx/vxp_ops.c c = (int)vx_inb(chip, RXH) << 16; c 188 sound/pcmcia/vx/vxp_ops.c c |= (int)vx_inb(chip, RXM) << 8; c 189 sound/pcmcia/vx/vxp_ops.c c |= vx_inb(chip, RXL); c 191 sound/pcmcia/vx/vxp_ops.c snd_printdd(KERN_DEBUG "xilinx: dsp size received 0x%x, orig 0x%zx\n", c, fw->size); c 452 sound/ppc/pmac.c int c; c 457 sound/ppc/pmac.c for (c = 0; c < rec->nperiods; c++) { /* at most all fragments */ c 260 sound/soc/au1x/dbdma2.c u32 c = to_dmadata(substream)->ddma_chan; c 265 sound/soc/au1x/dbdma2.c au1xxx_dbdma_start(c); c 269 sound/soc/au1x/dbdma2.c au1xxx_dbdma_stop(c); c 84 sound/soc/au1x/i2sc.c unsigned long c; c 88 sound/soc/au1x/i2sc.c c = ctx->cfg; c 90 sound/soc/au1x/i2sc.c c &= ~CFG_FM_MASK; c 93 sound/soc/au1x/i2sc.c c |= CFG_FM_I2S; c 96 sound/soc/au1x/i2sc.c c |= CFG_FM_RJ; c 99 sound/soc/au1x/i2sc.c c |= CFG_FM_LJ; c 105 sound/soc/au1x/i2sc.c c &= ~(CFG_IC | CFG_ICK); /* IB-IF */ c 108 sound/soc/au1x/i2sc.c c |= CFG_IC | CFG_ICK; c 111 sound/soc/au1x/i2sc.c c |= CFG_IC; c 114 sound/soc/au1x/i2sc.c c |= CFG_ICK; c 131 sound/soc/au1x/i2sc.c ctx->cfg = c; c 60 sound/soc/au1x/psc-ac97.c struct snd_soc_card *c = x->bus->card->private_data; c 61 sound/soc/au1x/psc-ac97.c return snd_soc_dai_get_drvdata(c->rtd->cpu_dai); c 649 sound/soc/codecs/cs4271.c struct cs4271_private **c) c 684 sound/soc/codecs/cs4271.c *c = cs4271; c 105 sound/soc/codecs/es7134.c static int es7134_component_probe(struct snd_soc_component *c) c 107 sound/soc/codecs/es7134.c struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(c); c 108 sound/soc/codecs/es7134.c struct es7134_data *priv = snd_soc_component_get_drvdata(c); c 116 sound/soc/codecs/es7134.c dev_err(c->dev, "failed to add extra widgets\n"); c 125 sound/soc/codecs/es7134.c dev_err(c->dev, "failed to add extra routes\n"); c 28 sound/soc/codecs/max9759.c struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm); c 29 sound/soc/codecs/max9759.c struct max9759 *priv = snd_soc_component_get_drvdata(c); c 45 sound/soc/codecs/max9759.c struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol); c 46 sound/soc/codecs/max9759.c struct max9759 *priv = snd_soc_component_get_drvdata(c); c 64 sound/soc/codecs/max9759.c struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol); c 65 sound/soc/codecs/max9759.c struct max9759 *priv = snd_soc_component_get_drvdata(c); c 85 sound/soc/codecs/max9759.c struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol); c 86 sound/soc/codecs/max9759.c struct max9759 *priv = snd_soc_component_get_drvdata(c); c 96 sound/soc/codecs/max9759.c struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol); c 97 sound/soc/codecs/max9759.c struct max9759 *priv = snd_soc_component_get_drvdata(c); c 43 sound/soc/codecs/max9768.c struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol); c 44 sound/soc/codecs/max9768.c struct max9768 *max9768 = snd_soc_component_get_drvdata(c); c 55 sound/soc/codecs/max9768.c struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol); c 56 sound/soc/codecs/max9768.c struct max9768 *max9768 = snd_soc_component_get_drvdata(c); c 107 sound/soc/codecs/max98504.c struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm); c 108 sound/soc/codecs/max98504.c struct max98504_priv *max98504 = snd_soc_component_get_drvdata(c); c 123 sound/soc/codecs/max98504.c static int 
max98504_component_probe(struct snd_soc_component *c) c 125 sound/soc/codecs/max98504.c struct max98504_priv *max98504 = snd_soc_component_get_drvdata(c); c 157 sound/soc/codecs/max98504.c static void max98504_component_remove(struct snd_soc_component *c) c 159 sound/soc/codecs/max98504.c struct max98504_priv *max98504 = snd_soc_component_get_drvdata(c); c 21 sound/soc/codecs/simple-amplifier.c struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm); c 22 sound/soc/codecs/simple-amplifier.c struct simple_amp *priv = snd_soc_component_get_drvdata(c); c 22 sound/soc/codecs/ssm2305.c struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm); c 23 sound/soc/codecs/ssm2305.c struct ssm2305 *data = snd_soc_component_get_drvdata(c); c 96 sound/soc/codecs/tpa6130a2.c struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm); c 97 sound/soc/codecs/tpa6130a2.c struct tpa6130a2_data *data = snd_soc_component_get_drvdata(c); c 419 sound/soc/codecs/wcd9335.h #define WCD9335_CDC_COMPANDER1_CTL(c) WCD9335_REG(0x0b, (0x001 + c * 0x8)) c 458 sound/soc/codecs/wcd9335.h #define WCD9335_CDC_RX1_RX_PATH_CFG(c) WCD9335_REG(0x0b, (0x056 + c * 0x14)) c 106 sound/soc/codecs/wm8510.c #define wm8510_reset(c) snd_soc_component_write(c, WM8510_RESET, 0) c 58 sound/soc/codecs/wm8711.c #define wm8711_reset(c) snd_soc_component_write(c, WM8711_RESET, 0) c 86 sound/soc/codecs/wm8750.c #define wm8750_reset(c) snd_soc_component_write(c, WM8750_RESET, 0) c 148 sound/soc/codecs/wm8753.c #define wm8753_reset(c) snd_soc_component_write(c, WM8753_RESET, 0) c 334 sound/soc/codecs/wm8940.c #define wm8940_reset(c) snd_soc_component_write(c, WM8940_SOFTRESET, 0); c 138 sound/soc/codecs/wm8960.c #define wm8960_reset(c) regmap_write(c, WM8960_RESET, 0) c 88 sound/soc/codecs/wm8971.c #define wm8971_reset(c) snd_soc_component_write(c, WM8971_RESET, 0) c 53 sound/soc/codecs/wm8974.c #define wm8974_reset(c) snd_soc_component_write(c, WM8974_RESET, 0) c 119 sound/soc/codecs/wm8988.c #define wm8988_reset(c) snd_soc_component_write(c, WM8988_RESET, 0) c 110 sound/soc/codecs/wm8990.c #define wm8990_reset(c) snd_soc_component_write(c, WM8990_RESET, 0) c 1289 sound/soc/codecs/wm_adsp.c kcontrol->tlv.c = snd_soc_bytes_tlv_callback; c 155 sound/soc/intel/atom/sst-atom-controls.c struct snd_soc_component *c = snd_kcontrol_chip(kcontrol); c 156 sound/soc/intel/atom/sst-atom-controls.c struct sst_data *drv = snd_soc_component_get_drvdata(c); c 172 sound/soc/intel/atom/sst-atom-controls.c dev_dbg(c->dev, "%s - %s map = %#x\n", c 212 sound/soc/intel/atom/sst-atom-controls.c struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol); c 213 sound/soc/intel/atom/sst-atom-controls.c struct sst_data *drv = snd_soc_component_get_drvdata(c); c 246 sound/soc/intel/atom/sst-atom-controls.c dev_dbg(c->dev, "%s %s map = %#x\n", c 505 sound/soc/intel/atom/sst-atom-controls.c struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm); c 506 sound/soc/intel/atom/sst-atom-controls.c struct sst_data *drv = snd_soc_component_get_drvdata(c); c 963 sound/soc/intel/atom/sst-atom-controls.c struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm); c 964 sound/soc/intel/atom/sst-atom-controls.c struct sst_data *drv = snd_soc_component_get_drvdata(c); c 966 sound/soc/intel/atom/sst-atom-controls.c dev_dbg(c->dev, "Enter: widget=%s\n", w->name); c 984 sound/soc/intel/atom/sst-atom-controls.c struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm); c 985 sound/soc/intel/atom/sst-atom-controls.c struct sst_data *drv = 
snd_soc_component_get_drvdata(c); c 988 sound/soc/intel/atom/sst-atom-controls.c dev_dbg(c->dev, "widget=%s\n", w->name); c 989 sound/soc/intel/atom/sst-atom-controls.c dev_dbg(c->dev, "task=%u, location=%#x\n", c 1021 sound/soc/intel/atom/sst-atom-controls.c struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm); c 1022 sound/soc/intel/atom/sst-atom-controls.c struct sst_data *drv = snd_soc_component_get_drvdata(c); c 1025 sound/soc/intel/atom/sst-atom-controls.c dev_dbg(c->dev, "Enter:widget=%s\n", w->name); c 1373 sound/soc/intel/atom/sst-atom-controls.c struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm); c 1377 sound/soc/intel/atom/sst-atom-controls.c module = devm_kzalloc(c->dev, sizeof(*module), GFP_KERNEL); c 1394 sound/soc/intel/atom/sst-atom-controls.c dev_err(c->dev, "invoked for unknown type %d module %s", c 78 sound/soc/intel/skylake/skl-sst-dsp.h #define SKL_DSP_CORE_MASK(c) BIT(c) c 745 sound/soc/intel/skylake/skl.c int c, max_slots; c 750 sound/soc/intel/skylake/skl.c for (c = 0; c < max_slots; c++) { c 751 sound/soc/intel/skylake/skl.c if ((bus->codec_mask & (1 << c))) { c 752 sound/soc/intel/skylake/skl.c if (probe_codec(bus, c) < 0) { c 758 sound/soc/intel/skylake/skl.c "Codec #%d probe error; disabling it...\n", c); c 759 sound/soc/intel/skylake/skl.c bus->codec_mask &= ~(1 << c); c 628 sound/soc/meson/axg-card.c int (*func)(struct snd_soc_card *c, c 298 sound/soc/meson/axg-spdifin.c struct snd_soc_component *c = snd_kcontrol_chip(kcontrol); c 299 sound/soc/meson/axg-spdifin.c struct axg_spdifin *priv = snd_soc_component_get_drvdata(c); c 363 sound/soc/meson/axg-spdifin.c struct snd_soc_component *c = snd_kcontrol_chip(kcontrol); c 364 sound/soc/meson/axg-spdifin.c struct axg_spdifin *priv = snd_soc_component_get_drvdata(c); c 229 sound/soc/meson/axg-tdm-formatter.c struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm); c 230 sound/soc/meson/axg-tdm-formatter.c struct axg_tdm_formatter *formatter = snd_soc_component_get_drvdata(c); c 243 sound/soc/meson/axg-tdm-formatter.c dev_err(c->dev, "Unexpected event %d\n", event); c 335 sound/soc/meson/g12a-tohdmitx.c static int g12a_tohdmi_component_probe(struct snd_soc_component *c) c 338 sound/soc/meson/g12a-tohdmitx.c return snd_soc_component_write(c, TOHDMITX_CTRL0, c 93 sound/soc/qcom/qdsp6/q6adm.c struct q6copp *c = NULL; c 98 sound/soc/qcom/qdsp6/q6adm.c list_for_each_entry(c, &adm->copps_list, node) { c 99 sound/soc/qcom/qdsp6/q6adm.c if ((port_idx == c->afe_port) && (copp_idx == c->copp_idx)) { c 100 sound/soc/qcom/qdsp6/q6adm.c ret = c; c 101 sound/soc/qcom/qdsp6/q6adm.c kref_get(&c->refcount); c 114 sound/soc/qcom/qdsp6/q6adm.c struct q6copp *c = container_of(ref, struct q6copp, refcount); c 115 sound/soc/qcom/qdsp6/q6adm.c struct q6adm *adm = c->adm; c 119 sound/soc/qcom/qdsp6/q6adm.c clear_bit(c->copp_idx, &adm->copp_bitmap[c->afe_port]); c 120 sound/soc/qcom/qdsp6/q6adm.c list_del(&c->node); c 122 sound/soc/qcom/qdsp6/q6adm.c kfree(c); c 214 sound/soc/qcom/qdsp6/q6adm.c struct q6copp *c; c 223 sound/soc/qcom/qdsp6/q6adm.c c = kzalloc(sizeof(*c), GFP_ATOMIC); c 224 sound/soc/qcom/qdsp6/q6adm.c if (!c) c 228 sound/soc/qcom/qdsp6/q6adm.c c->copp_idx = idx; c 229 sound/soc/qcom/qdsp6/q6adm.c c->afe_port = port_idx; c 230 sound/soc/qcom/qdsp6/q6adm.c c->adm = adm; c 232 sound/soc/qcom/qdsp6/q6adm.c init_waitqueue_head(&c->wait); c 234 sound/soc/qcom/qdsp6/q6adm.c return c; c 302 sound/soc/qcom/qdsp6/q6adm.c struct q6copp *c = NULL; c 308 sound/soc/qcom/qdsp6/q6adm.c 
list_for_each_entry(c, &adm->copps_list, node) { c 309 sound/soc/qcom/qdsp6/q6adm.c if ((port_id == c->afe_port) && (topology == c->topology) && c 310 sound/soc/qcom/qdsp6/q6adm.c (mode == c->mode) && (rate == c->rate) && c 311 sound/soc/qcom/qdsp6/q6adm.c (bit_width == c->bit_width) && (app_type == c->app_type)) { c 312 sound/soc/qcom/qdsp6/q6adm.c ret = c; c 313 sound/soc/qcom/qdsp6/q6adm.c kref_get(&c->refcount); c 214 sound/soc/qcom/qdsp6/q6asm-dai.c struct snd_soc_component *c = snd_soc_rtdcom_lookup(soc_prtd, DRV_NAME); c 218 sound/soc/qcom/qdsp6/q6asm-dai.c pdata = snd_soc_component_get_drvdata(c); c 330 sound/soc/qcom/qdsp6/q6asm-dai.c struct snd_soc_component *c = snd_soc_rtdcom_lookup(soc_prtd, DRV_NAME); c 333 sound/soc/qcom/qdsp6/q6asm-dai.c struct device *dev = c->dev; c 339 sound/soc/qcom/qdsp6/q6asm-dai.c pdata = snd_soc_component_get_drvdata(c); c 456 sound/soc/qcom/qdsp6/q6asm-dai.c struct snd_soc_component *c = snd_soc_rtdcom_lookup(soc_prtd, DRV_NAME); c 457 sound/soc/qcom/qdsp6/q6asm-dai.c struct device *dev = c->dev; c 550 sound/soc/qcom/qdsp6/q6asm-dai.c struct snd_soc_component *c = snd_soc_rtdcom_lookup(rtd, DRV_NAME); c 554 sound/soc/qcom/qdsp6/q6asm-dai.c struct device *dev = c->dev; c 559 sound/soc/qcom/qdsp6/q6asm-dai.c pdata = snd_soc_component_get_drvdata(c); c 635 sound/soc/qcom/qdsp6/q6asm-dai.c struct snd_soc_component *c = snd_soc_rtdcom_lookup(rtd, DRV_NAME); c 638 sound/soc/qcom/qdsp6/q6asm-dai.c struct device *dev = c->dev; c 643 sound/soc/qcom/qdsp6/q6asm-dai.c pdata = snd_soc_component_get_drvdata(c); c 755 sound/soc/qcom/qdsp6/q6asm-dai.c struct snd_soc_component *c = snd_soc_rtdcom_lookup(rtd, DRV_NAME); c 756 sound/soc/qcom/qdsp6/q6asm-dai.c struct device *dev = c->dev; c 806 sound/soc/qcom/qdsp6/q6asm-dai.c struct snd_soc_component *c = snd_soc_rtdcom_lookup(rtd, DRV_NAME); c 811 sound/soc/qcom/qdsp6/q6asm-dai.c dev = c->dev; c 742 sound/soc/qcom/qdsp6/q6asm.c int q6asm_get_session_id(struct audio_client *c) c 744 sound/soc/qcom/qdsp6/q6asm.c return c->session; c 416 sound/soc/qcom/qdsp6/q6routing.c struct snd_soc_component *c = snd_soc_dapm_to_component(dapm); c 417 sound/soc/qcom/qdsp6/q6routing.c struct msm_routing_data *priv = dev_get_drvdata(c->dev); c 433 sound/soc/qcom/qdsp6/q6routing.c struct snd_soc_component *c = snd_soc_dapm_to_component(dapm); c 434 sound/soc/qcom/qdsp6/q6routing.c struct msm_routing_data *data = dev_get_drvdata(c->dev); c 946 sound/soc/qcom/qdsp6/q6routing.c struct snd_soc_component *c = snd_soc_rtdcom_lookup(rtd, DRV_NAME); c 947 sound/soc/qcom/qdsp6/q6routing.c struct msm_routing_data *data = dev_get_drvdata(c->dev); c 987 sound/soc/qcom/qdsp6/q6routing.c static int msm_routing_probe(struct snd_soc_component *c) c 1633 sound/soc/sh/rcar/core.c struct rsnd_kctrl_cfg *c = kctrl->private_data; c 1635 sound/soc/sh/rcar/core.c if (c == cfg) c 2822 sound/soc/soc-core.c struct snd_soc_card *card, *c; c 2824 sound/soc/soc-core.c list_for_each_entry_safe(card, c, &unbind_card_list, list) c 602 sound/soc/soc-topology.c k->tlv.c = snd_soc_bytes_tlv_callback; c 292 sound/soc/sof/intel/hda.h #define HDA_DSP_CORE_MASK(c) BIT(c) c 344 sound/soc/ti/ams-delta.c const unsigned char *c; c 372 sound/soc/ti/ams-delta.c for (c = &cp[count - 1]; c >= cp; c--) { c 373 sound/soc/ti/ams-delta.c if (*c != '\r') c 2013 sound/sparc/dbri.c struct snd_interval *c = hw_param_interval(params, c 2019 sound/sparc/dbri.c if (c->min > 1) { c 2029 sound/sparc/dbri.c struct snd_interval *c = hw_param_interval(params, c 2039 sound/sparc/dbri.c return 
snd_interval_refine(c, &ch); c 104 sound/synth/emux/emux_proc.c entry->c.text.read = snd_emux_proc_info_read; c 286 sound/usb/6fire/firmware.c const char *c; c 301 sound/usb/6fire/firmware.c c = fw->data; c 313 sound/usb/6fire/firmware.c while (c != end) { c 314 sound/usb/6fire/firmware.c for (i = 0; c != end && i < FPGA_BUFSIZE; i++, c++) c 315 sound/usb/6fire/firmware.c buffer[i] = bitrev8((u8)*c); c 454 sound/usb/caiaq/audio.c int c, n, sz = 0; c 462 sound/usb/caiaq/audio.c for (c = 0; c < CHANNELS_PER_STREAM; c++) { c 477 sound/usb/caiaq/audio.c if (usb_buf[i] != ((stream << 1) | c) && c 481 sound/usb/caiaq/audio.c ((stream << 1) | c), usb_buf[i], c, stream, i); c 571 sound/usb/caiaq/audio.c int c, n, sz = 0; c 579 sound/usb/caiaq/audio.c for (c = 0; c < CHANNELS_PER_STREAM; c++) { c 597 sound/usb/caiaq/audio.c usb_buf[i++] = (stream << 1) | c; c 571 sound/usb/caiaq/control.c static int add_controls(struct caiaq_controller *c, int num, c 577 sound/usb/caiaq/control.c for (i = 0; i < num; i++, c++) { c 578 sound/usb/caiaq/control.c kcontrol_template.name = c->name; c 579 sound/usb/caiaq/control.c kcontrol_template.private_value = c->index; c 428 sound/usb/caiaq/device.c char *c, usbpath[32]; c 493 sound/usb/caiaq/device.c for (c = card->shortname, len = 0; c 494 sound/usb/caiaq/device.c *c && len < sizeof(card->id); c++) c 495 sound/usb/caiaq/device.c if (*c != ' ') c 496 sound/usb/caiaq/device.c id[len++] = *c; c 123 sound/usb/caiaq/device.h #define caiaqdev(c) ((struct snd_usb_caiaqdev*)(c)->private_data) c 1338 sound/usb/mixer.c int c, cnt, val, err; c 1343 sound/usb/mixer.c for (c = 0; c < MAX_CHANNELS; c++) { c 1344 sound/usb/mixer.c if (!(cval->cmask & (1 << c))) c 1346 sound/usb/mixer.c err = snd_usb_get_cur_mix_value(cval, c + 1, cnt, &val); c 1370 sound/usb/mixer.c int c, cnt, val, oval, err; c 1375 sound/usb/mixer.c for (c = 0; c < MAX_CHANNELS; c++) { c 1376 sound/usb/mixer.c if (!(cval->cmask & (1 << c))) c 1378 sound/usb/mixer.c err = snd_usb_get_cur_mix_value(cval, c + 1, cnt, &oval); c 1384 sound/usb/mixer.c snd_usb_set_cur_mix_value(cval, c + 1, cnt, val); c 1602 sound/usb/mixer.c int i, c = 0; c 1605 sound/usb/mixer.c c++; c 1606 sound/usb/mixer.c cval->channels = c; c 1688 sound/usb/mixer.c kctl->tlv.c = snd_usb_mixer_vol_tlv; c 2006 sound/usb/mixer.c u8 *c = uac_mixer_unit_bmControls(desc, protocol); c 2023 sound/usb/mixer.c return c + (num_ins * num_outs + 7) / 8 + rest > hdr + hdr[0]; c 2054 sound/usb/mixer.c __u8 *c = uac_mixer_unit_bmControls(desc, state->mixer->protocol); c 2056 sound/usb/mixer.c if (check_matrix_bitmap(c, in_ch, i, num_outs)) { c 2158 sound/usb/mixer.c __u8 *c = uac_mixer_unit_bmControls(desc, c 2161 sound/usb/mixer.c if (check_matrix_bitmap(c, ich, och, num_outs)) { c 3582 sound/usb/mixer.c int c, err, idx; c 3586 sound/usb/mixer.c for (c = 0; c < MAX_CHANNELS; c++) { c 3587 sound/usb/mixer.c if (!(cval->cmask & (1 << c))) c 3589 sound/usb/mixer.c if (cval->cached & (1 << (c + 1))) { c 3590 sound/usb/mixer.c err = snd_usb_set_cur_mix_value(cval, c + 1, idx, c 97 sound/usb/mixer_quirks.c kctl->tlv.c = tlv_callback; c 233 sound/usb/stream.c kctl->tlv.c = usb_chmap_ctl_tlv; c 289 sound/usb/stream.c int c; c 300 sound/usb/stream.c c = 0; c 305 sound/usb/stream.c chmap->map[c++] = *maps; c 310 sound/usb/stream.c chmap->map[c++] = SNDRV_CHMAP_MONO; c 312 sound/usb/stream.c for (; c < channels && *maps; maps++) c 313 sound/usb/stream.c chmap->map[c++] = *maps; c 316 sound/usb/stream.c for (; c < channels; c++) c 317 sound/usb/stream.c chmap->map[c] = 
SNDRV_CHMAP_UNKNOWN; c 330 sound/usb/stream.c int len, c; c 340 sound/usb/stream.c c = 0; c 343 sound/usb/stream.c while (((p - (void *)cluster) < len) && (c < channels)) { c 453 sound/usb/stream.c chmap->map[c++] = map; c 458 sound/usb/stream.c if (channels < c) c 463 sound/usb/stream.c for (; c < channels; c++) c 464 sound/usb/stream.c chmap->map[c] = SNDRV_CHMAP_UNKNOWN; c 25 sound/usb/usx2y/us122l.h #define US122L(c) ((struct us122l *)(c)->private_data) c 80 sound/usb/usx2y/usbusx2y.h #define usX2Y(c) ((struct usX2Ydev *)(c)->private_data) c 470 sound/x86/intel_hdmi_audio.c int i, c; c 516 sound/x86/intel_hdmi_audio.c for (c = 0; c < channel_allocations[i].channels; c++) { c 517 sound/x86/intel_hdmi_audio.c chmap->map[c] = spk_to_chmap( c 519 sound/x86/intel_hdmi_audio.c (MAX_SPEAKERS - 1) - c]); c 195 tools/accounting/getdelays.c #define average_ms(t, c) (t / 1000000ULL / (c ? c : 1)) c 242 tools/accounting/getdelays.c static void print_cgroupstats(struct cgroupstats *c) c 245 tools/accounting/getdelays.c "uninterruptible %llu\n", (unsigned long long)c->nr_sleeping, c 246 tools/accounting/getdelays.c (unsigned long long)c->nr_io_wait, c 247 tools/accounting/getdelays.c (unsigned long long)c->nr_running, c 248 tools/accounting/getdelays.c (unsigned long long)c->nr_stopped, c 249 tools/accounting/getdelays.c (unsigned long long)c->nr_uninterruptible); c 264 tools/accounting/getdelays.c int c, rc, rep_len, aggr_len, len2; c 290 tools/accounting/getdelays.c c = getopt(argc, argv, "qdiw:r:m:t:p:vlC:c:"); c 291 tools/accounting/getdelays.c if (c < 0) c 294 tools/accounting/getdelays.c switch (c) { c 27 tools/arch/x86/include/asm/rmwcc.h char c; \ c 29 tools/arch/x86/include/asm/rmwcc.h : "+m" (var), "=qm" (c) \ c 31 tools/arch/x86/include/asm/rmwcc.h return c != 0; \ c 123 tools/bpf/bpftool/json_writer.c static void jsonw_begin(json_writer_t *self, int c) c 126 tools/bpf/bpftool/json_writer.c putc(c, self->out); c 131 tools/bpf/bpftool/json_writer.c static void jsonw_end(json_writer_t *self, int c) c 138 tools/bpf/bpftool/json_writer.c putc(c, self->out); c 838 tools/bpf/bpftool/prog.c char c; c 854 tools/bpf/bpftool/prog.c c = *(char *)(data + j); c 855 tools/bpf/bpftool/prog.c if (c < ' ' || c > '~') c 856 tools/bpf/bpftool/prog.c c = '.'; c 857 tools/bpf/bpftool/prog.c fprintf(f, "%c%s", c, j == i + 7 ? 
" " : ""); c 5 tools/build/tests/ex/ex.c int c(void); c 15 tools/build/tests/ex/ex.c c(); c 903 tools/firewire/nosy-dump.c char c; c 981 tools/firewire/nosy-dump.c read(STDIN_FILENO, &c, sizeof c); c 982 tools/firewire/nosy-dump.c switch (c) { c 149 tools/gpio/gpio-event-mon.c int c; c 151 tools/gpio/gpio-event-mon.c while ((c = getopt(argc, argv, "c:n:o:dsrf?")) != -1) { c 152 tools/gpio/gpio-event-mon.c switch (c) { c 125 tools/gpio/gpio-hammer.c int c; c 129 tools/gpio/gpio-hammer.c while ((c = getopt(argc, argv, "c:n:o:?")) != -1) { c 130 tools/gpio/gpio-hammer.c switch (c) { c 149 tools/gpio/lsgpio.c int c; c 151 tools/gpio/lsgpio.c while ((c = getopt(argc, argv, "n:")) != -1) { c 152 tools/gpio/lsgpio.c switch (c) { c 336 tools/iio/iio_generic_buffer.c int ret, c; c 356 tools/iio/iio_generic_buffer.c while ((c = getopt_long(argc, argv, "aAc:egl:n:N:t:T:w:?", longopts, c 358 tools/iio/iio_generic_buffer.c switch (c) { c 166 tools/iio/lsiio.c int c, err = 0; c 168 tools/iio/lsiio.c while ((c = getopt(argc, argv, "v")) != EOF) { c 169 tools/iio/lsiio.c switch (c) { c 23 tools/include/linux/ctype.h #define isalnum(c) ((__ismask(c)&(_U|_L|_D)) != 0) c 24 tools/include/linux/ctype.h #define isalpha(c) ((__ismask(c)&(_U|_L)) != 0) c 25 tools/include/linux/ctype.h #define iscntrl(c) ((__ismask(c)&(_C)) != 0) c 26 tools/include/linux/ctype.h static inline int __isdigit(int c) c 28 tools/include/linux/ctype.h return '0' <= c && c <= '9'; c 30 tools/include/linux/ctype.h #define isdigit(c) __isdigit(c) c 31 tools/include/linux/ctype.h #define isgraph(c) ((__ismask(c)&(_P|_U|_L|_D)) != 0) c 32 tools/include/linux/ctype.h #define islower(c) ((__ismask(c)&(_L)) != 0) c 33 tools/include/linux/ctype.h #define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0) c 34 tools/include/linux/ctype.h #define ispunct(c) ((__ismask(c)&(_P)) != 0) c 36 tools/include/linux/ctype.h #define isspace(c) ((__ismask(c)&(_S)) != 0) c 37 tools/include/linux/ctype.h #define isupper(c) ((__ismask(c)&(_U)) != 0) c 38 tools/include/linux/ctype.h #define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0) c 40 tools/include/linux/ctype.h #define isascii(c) (((unsigned char)(c))<=0x7f) c 41 tools/include/linux/ctype.h #define toascii(c) (((unsigned char)(c))&0x7f) c 43 tools/include/linux/ctype.h static inline unsigned char __tolower(unsigned char c) c 45 tools/include/linux/ctype.h if (isupper(c)) c 46 tools/include/linux/ctype.h c -= 'A'-'a'; c 47 tools/include/linux/ctype.h return c; c 50 tools/include/linux/ctype.h static inline unsigned char __toupper(unsigned char c) c 52 tools/include/linux/ctype.h if (islower(c)) c 53 tools/include/linux/ctype.h c -= 'a'-'A'; c 54 tools/include/linux/ctype.h return c; c 57 tools/include/linux/ctype.h #define tolower(c) __tolower(c) c 58 tools/include/linux/ctype.h #define toupper(c) __toupper(c) c 64 tools/include/linux/ctype.h static inline char _tolower(const char c) c 66 tools/include/linux/ctype.h return c | 0x20; c 70 tools/include/linux/ctype.h static inline int isodigit(const char c) c 72 tools/include/linux/ctype.h return c >= '0' && c <= '7'; c 37 tools/include/linux/irqflags.h #define trace_lock_acquire(a, b, c, d, e, f, g) c 35 tools/include/linux/jhash.h #define __jhash_mix(a, b, c) \ c 37 tools/include/linux/jhash.h a -= c; a ^= rol32(c, 4); c += b; \ c 38 tools/include/linux/jhash.h b -= a; b ^= rol32(a, 6); a += c; \ c 39 tools/include/linux/jhash.h c -= b; c ^= rol32(b, 8); b += a; \ c 40 tools/include/linux/jhash.h a -= c; a ^= rol32(c, 16); c += b; \ c 41 
tools/include/linux/jhash.h b -= a; b ^= rol32(a, 19); a += c; \ c 42 tools/include/linux/jhash.h c -= b; c ^= rol32(b, 4); b += a; \ c 46 tools/include/linux/jhash.h #define __jhash_final(a, b, c) \ c 48 tools/include/linux/jhash.h c ^= b; c -= rol32(b, 14); \ c 49 tools/include/linux/jhash.h a ^= c; a -= rol32(c, 11); \ c 51 tools/include/linux/jhash.h c ^= b; c -= rol32(b, 16); \ c 52 tools/include/linux/jhash.h a ^= c; a -= rol32(c, 4); \ c 54 tools/include/linux/jhash.h c ^= b; c -= rol32(b, 24); \ c 72 tools/include/linux/jhash.h u32 a, b, c; c 76 tools/include/linux/jhash.h a = b = c = JHASH_INITVAL + length + initval; c 82 tools/include/linux/jhash.h c += __get_unaligned_cpu32(k + 8); c 83 tools/include/linux/jhash.h __jhash_mix(a, b, c); c 90 tools/include/linux/jhash.h case 12: c += (u32)k[11]<<24; c 91 tools/include/linux/jhash.h case 11: c += (u32)k[10]<<16; c 92 tools/include/linux/jhash.h case 10: c += (u32)k[9]<<8; c 93 tools/include/linux/jhash.h case 9: c += k[8]; c 102 tools/include/linux/jhash.h __jhash_final(a, b, c); c 107 tools/include/linux/jhash.h return c; c 119 tools/include/linux/jhash.h u32 a, b, c; c 122 tools/include/linux/jhash.h a = b = c = JHASH_INITVAL + (length<<2) + initval; c 128 tools/include/linux/jhash.h c += k[2]; c 129 tools/include/linux/jhash.h __jhash_mix(a, b, c); c 136 tools/include/linux/jhash.h case 3: c += k[2]; c 139 tools/include/linux/jhash.h __jhash_final(a, b, c); c 144 tools/include/linux/jhash.h return c; c 149 tools/include/linux/jhash.h static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval) c 153 tools/include/linux/jhash.h c += initval; c 155 tools/include/linux/jhash.h __jhash_final(a, b, c); c 157 tools/include/linux/jhash.h return c; c 160 tools/include/linux/jhash.h static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval) c 162 tools/include/linux/jhash.h return __jhash_nwords(a, b, c, initval + JHASH_INITVAL + (3 << 2)); c 238 tools/include/linux/overflow.h static inline __must_check size_t array3_size(size_t a, size_t b, size_t c) c 244 tools/include/linux/overflow.h if (check_mul_overflow(bytes, c, &bytes)) c 250 tools/include/linux/overflow.h static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c) c 256 tools/include/linux/overflow.h if (check_add_overflow(bytes, c, &bytes)) c 2328 tools/include/nolibc/nolibc.h char *strchr(const char *s, int c) c 2331 tools/include/nolibc/nolibc.h if (*s == (char)c) c 2339 tools/include/nolibc/nolibc.h char *strrchr(const char *s, int c) c 2344 tools/include/nolibc/nolibc.h if (*s == (char)c) c 2367 tools/include/nolibc/nolibc.h int isdigit(int c) c 2369 tools/include/nolibc/nolibc.h return (unsigned int)(c - '0') <= 9; c 504 tools/include/uapi/sound/asound.h } c; c 17 tools/lib/bpf/libbpf_errno.c #define ERRCODE_OFFSET(c) ERRNO_OFFSET(LIBBPF_ERRNO__##c) c 7 tools/lib/lockdep/tests/ABBCCA.c pthread_mutex_t a, b, c; c 11 tools/lib/lockdep/tests/ABBCCA.c pthread_mutex_init(&c, NULL); c 14 tools/lib/lockdep/tests/ABBCCA.c LOCK_UNLOCK_2(b, c); c 15 tools/lib/lockdep/tests/ABBCCA.c LOCK_UNLOCK_2(c, a); c 17 tools/lib/lockdep/tests/ABBCCA.c pthread_mutex_destroy(&c); c 7 tools/lib/lockdep/tests/ABBCCDDA.c pthread_mutex_t a, b, c, d; c 11 tools/lib/lockdep/tests/ABBCCDDA.c pthread_mutex_init(&c, NULL); c 15 tools/lib/lockdep/tests/ABBCCDDA.c LOCK_UNLOCK_2(b, c); c 16 tools/lib/lockdep/tests/ABBCCDDA.c LOCK_UNLOCK_2(c, d); c 20 tools/lib/lockdep/tests/ABBCCDDA.c pthread_mutex_destroy(&c); c 7 tools/lib/lockdep/tests/ABCABC.c pthread_mutex_t a, b, c; c 11 
tools/lib/lockdep/tests/ABCABC.c pthread_mutex_init(&c, NULL); c 14 tools/lib/lockdep/tests/ABCABC.c LOCK_UNLOCK_2(c, a); c 15 tools/lib/lockdep/tests/ABCABC.c LOCK_UNLOCK_2(b, c); c 17 tools/lib/lockdep/tests/ABCABC.c pthread_mutex_destroy(&c); c 7 tools/lib/lockdep/tests/ABCDBCDA.c pthread_mutex_t a, b, c, d; c 11 tools/lib/lockdep/tests/ABCDBCDA.c pthread_mutex_init(&c, NULL); c 15 tools/lib/lockdep/tests/ABCDBCDA.c LOCK_UNLOCK_2(c, d); c 16 tools/lib/lockdep/tests/ABCDBCDA.c LOCK_UNLOCK_2(b, c); c 20 tools/lib/lockdep/tests/ABCDBCDA.c pthread_mutex_destroy(&c); c 7 tools/lib/lockdep/tests/ABCDBDDA.c pthread_mutex_t a, b, c, d; c 11 tools/lib/lockdep/tests/ABCDBDDA.c pthread_mutex_init(&c, NULL); c 15 tools/lib/lockdep/tests/ABCDBDDA.c LOCK_UNLOCK_2(c, d); c 20 tools/lib/lockdep/tests/ABCDBDDA.c pthread_mutex_destroy(&c); c 29 tools/lib/subcmd/exec-cmd.c #define is_dir_sep(c) ((c) == '/') c 262 tools/lib/subcmd/help.c int is_in_cmdlist(struct cmdnames *c, const char *s) c 266 tools/lib/subcmd/help.c for (i = 0; i < c->cnt; i++) c 267 tools/lib/subcmd/help.c if (!strcmp(s, c->names[i]->name)) c 17 tools/lib/subcmd/help.h static inline void mput_char(char c, unsigned int num) c 20 tools/lib/subcmd/help.h putchar(c); c 32 tools/lib/subcmd/help.h int is_in_cmdlist(struct cmdnames *c, const char *s); c 207 tools/lib/traceevent/trace-seq.c int trace_seq_putc(struct trace_seq *s, unsigned char c) c 216 tools/lib/traceevent/trace-seq.c s->buffer[s->len++] = c; c 48 tools/lib/traceevent/trace-seq.h extern int trace_seq_putc(struct trace_seq *s, unsigned char c); c 138 tools/pci/pcitest.c int c; c 156 tools/pci/pcitest.c while ((c = getopt(argc, argv, "D:b:m:x:i:Ilhrwcs:")) != EOF) c 157 tools/pci/pcitest.c switch (c) { c 44 tools/perf/arch/s390/util/machine.c void arch__symbols__fixup_end(struct symbol *p, struct symbol *c) c 46 tools/perf/arch/s390/util/machine.c if (strchr(p->name, '[') == NULL && strchr(c->name, '[')) c 50 tools/perf/arch/s390/util/machine.c p->end = c->start; c 14 tools/perf/arch/x86/util/header.c cpuid(unsigned int op, unsigned int *a, unsigned int *b, unsigned int *c, c 21 tools/perf/arch/x86/util/header.c "=c" (*c), c 29 tools/perf/arch/x86/util/header.c unsigned int a, b, c, d, lvl; c 34 tools/perf/arch/x86/util/header.c cpuid(0, &lvl, &b, &c, &d); c 37 tools/perf/arch/x86/util/header.c strncpy(&vendor[8], (char *)(&c), 4); c 41 tools/perf/arch/x86/util/header.c cpuid(1, &a, &b, &c, &d); c 190 tools/perf/arch/x86/util/intel-pt.c char c; c 224 tools/perf/arch/x86/util/intel-pt.c if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 && c 225 tools/perf/arch/x86/util/intel-pt.c perf_pmu__scan_file(intel_pt_pmu, "format/branch", "%c", &c) == 1) c 522 tools/perf/arch/x86/util/intel-pt.c char c; c 531 tools/perf/arch/x86/util/intel-pt.c if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 && c 2696 tools/perf/builtin-c2c.c const char *c = coalesce ?: coalesce_default; c 2698 tools/perf/builtin-c2c.c if (asprintf(&c2c.cl_sort, "offset,%s", c) < 0) c 801 tools/perf/builtin-diff.c int c) c 803 tools/perf/builtin-diff.c switch (c) { c 841 tools/perf/builtin-diff.c int c, int sort_idx) c 866 tools/perf/builtin-diff.c return __hist_entry__cmp_compute(p_left, p_right, c); c 871 tools/perf/builtin-diff.c int c, int sort_idx) c 884 tools/perf/builtin-diff.c if (c != COMPUTE_DELTA && c != COMPUTE_DELTA_ABS) { c 897 tools/perf/builtin-diff.c return __hist_entry__cmp_compute(p_left, p_right, c); c 879 tools/perf/builtin-kvm.c uint64_t c; c 882 
tools/perf/builtin-kvm.c rc = read(kvm->timerfd, &c, sizeof(uint64_t)); c 896 tools/perf/builtin-kvm.c if (c != 1) c 897 tools/perf/builtin-kvm.c pr_debug("Missed timer beats: %" PRIu64 "\n", c-1); c 932 tools/perf/builtin-kvm.c int c; c 934 tools/perf/builtin-kvm.c c = getc(stdin); c 935 tools/perf/builtin-kvm.c if (c == 'q') c 591 tools/perf/builtin-probe.c # define set_nobuild(s, l, c) set_option_nobuild(options, s, l, "NO_DWARF=1", c) c 1831 tools/perf/builtin-record.c #define CLOCKID_MAP(n, c) \ c 1832 tools/perf/builtin-record.c { .name = n, .clockid = (c), } c 2279 tools/perf/builtin-record.c # define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c) c 2293 tools/perf/builtin-record.c # define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c) c 2016 tools/perf/builtin-sched.c char c; c 2022 tools/perf/builtin-sched.c c = (thread->tid == 0) ? 'i' : 's'; c 2024 tools/perf/builtin-sched.c c = ' '; c 2025 tools/perf/builtin-sched.c printf("%c", c); c 2459 tools/perf/builtin-sched.c char c; c 2463 tools/perf/builtin-sched.c c = (i == sample->cpu) ? 'm' : ' '; c 2464 tools/perf/builtin-sched.c printf("%c", c); c 219 tools/perf/builtin-timechart.c struct per_pidcomm *c; c 221 tools/perf/builtin-timechart.c c = p->all; c 222 tools/perf/builtin-timechart.c while (c) { c 223 tools/perf/builtin-timechart.c if (c->comm && strcmp(c->comm, comm) == 0) { c 224 tools/perf/builtin-timechart.c p->current = c; c 227 tools/perf/builtin-timechart.c if (!c->comm) { c 228 tools/perf/builtin-timechart.c c->comm = strdup(comm); c 229 tools/perf/builtin-timechart.c p->current = c; c 232 tools/perf/builtin-timechart.c c = c->next; c 234 tools/perf/builtin-timechart.c c = zalloc(sizeof(*c)); c 235 tools/perf/builtin-timechart.c assert(c != NULL); c 236 tools/perf/builtin-timechart.c c->comm = strdup(comm); c 237 tools/perf/builtin-timechart.c p->current = c; c 238 tools/perf/builtin-timechart.c c->next = p->all; c 239 tools/perf/builtin-timechart.c p->all = c; c 272 tools/perf/builtin-timechart.c struct per_pidcomm *c; c 276 tools/perf/builtin-timechart.c c = p->current; c 277 tools/perf/builtin-timechart.c if (!c) { c 278 tools/perf/builtin-timechart.c c = zalloc(sizeof(*c)); c 279 tools/perf/builtin-timechart.c assert(c != NULL); c 280 tools/perf/builtin-timechart.c p->current = c; c 281 tools/perf/builtin-timechart.c c->next = p->all; c 282 tools/perf/builtin-timechart.c p->all = c; c 290 tools/perf/builtin-timechart.c sample->next = c->samples; c 293 tools/perf/builtin-timechart.c c->samples = sample; c 296 tools/perf/builtin-timechart.c c->total_time += (end-start); c 300 tools/perf/builtin-timechart.c if (c->start_time == 0 || c->start_time > start) c 301 tools/perf/builtin-timechart.c c->start_time = start; c 722 tools/perf/builtin-timechart.c struct per_pidcomm *c = p->current; c 726 tools/perf/builtin-timechart.c if (!c) { c 727 tools/perf/builtin-timechart.c c = zalloc(sizeof(*c)); c 728 tools/perf/builtin-timechart.c if (!c) c 730 tools/perf/builtin-timechart.c p->current = c; c 731 tools/perf/builtin-timechart.c c->next = p->all; c 732 tools/perf/builtin-timechart.c p->all = c; c 735 tools/perf/builtin-timechart.c prev = c->io_samples; c 743 tools/perf/builtin-timechart.c c->io_samples = prev->next; c 754 tools/perf/builtin-timechart.c sample->next = c->io_samples; c 755 tools/perf/builtin-timechart.c c->io_samples = sample; c 757 tools/perf/builtin-timechart.c if (c->start_time == 0 || c->start_time > start) c 758 tools/perf/builtin-timechart.c 
c->start_time = start; c 767 tools/perf/builtin-timechart.c struct per_pidcomm *c = p->current; c 770 tools/perf/builtin-timechart.c if (!c) { c 775 tools/perf/builtin-timechart.c sample = c->io_samples; c 810 tools/perf/builtin-timechart.c if ((u64)ret > c->max_bytes) c 811 tools/perf/builtin-timechart.c c->max_bytes = ret; c 813 tools/perf/builtin-timechart.c c->total_bytes += ret; c 832 tools/perf/builtin-timechart.c if (!sample->err && sample->bytes > c->max_bytes) c 833 tools/perf/builtin-timechart.c c->max_bytes = sample->bytes; c 1039 tools/perf/builtin-timechart.c struct per_pidcomm *c; c 1050 tools/perf/builtin-timechart.c c = p->all; c 1051 tools/perf/builtin-timechart.c while (c) { c 1052 tools/perf/builtin-timechart.c if (c->Y && c->start_time <= we->time && c->end_time >= we->time) { c 1054 tools/perf/builtin-timechart.c from = c->Y; c 1055 tools/perf/builtin-timechart.c task_from = strdup(c->comm); c 1058 tools/perf/builtin-timechart.c to = c->Y; c 1059 tools/perf/builtin-timechart.c task_to = strdup(c->comm); c 1062 tools/perf/builtin-timechart.c c = c->next; c 1064 tools/perf/builtin-timechart.c c = p->all; c 1065 tools/perf/builtin-timechart.c while (c) { c 1067 tools/perf/builtin-timechart.c from = c->Y; c 1068 tools/perf/builtin-timechart.c task_from = strdup(c->comm); c 1071 tools/perf/builtin-timechart.c to = c->Y; c 1072 tools/perf/builtin-timechart.c task_to = strdup(c->comm); c 1074 tools/perf/builtin-timechart.c c = c->next; c 1106 tools/perf/builtin-timechart.c struct per_pidcomm *c; c 1110 tools/perf/builtin-timechart.c c = p->all; c 1111 tools/perf/builtin-timechart.c while (c) { c 1112 tools/perf/builtin-timechart.c sample = c->samples; c 1119 tools/perf/builtin-timechart.c c->comm, c 1125 tools/perf/builtin-timechart.c c = c->next; c 1137 tools/perf/builtin-timechart.c struct per_pidcomm *c; c 1143 tools/perf/builtin-timechart.c c = p->all; c 1144 tools/perf/builtin-timechart.c while (c) { c 1145 tools/perf/builtin-timechart.c if (!c->display) { c 1146 tools/perf/builtin-timechart.c c->Y = 0; c 1147 tools/perf/builtin-timechart.c c = c->next; c 1151 tools/perf/builtin-timechart.c svg_box(Y, c->start_time, c->end_time, "process3"); c 1152 tools/perf/builtin-timechart.c sample = c->io_samples; c 1153 tools/perf/builtin-timechart.c for (sample = c->io_samples; sample; sample = sample->next) { c 1154 tools/perf/builtin-timechart.c double h = (double)sample->bytes / c->max_bytes; c 1220 tools/perf/builtin-timechart.c bytes = c->total_bytes; c 1235 tools/perf/builtin-timechart.c sprintf(comm, "%s:%i (%3.1f %sbytes)", c->comm ?: "", p->pid, bytes, suf); c 1236 tools/perf/builtin-timechart.c svg_text(Y, c->start_time, comm); c 1238 tools/perf/builtin-timechart.c c->Y = Y; c 1240 tools/perf/builtin-timechart.c c = c->next; c 1249 tools/perf/builtin-timechart.c struct per_pidcomm *c; c 1257 tools/perf/builtin-timechart.c c = p->all; c 1258 tools/perf/builtin-timechart.c while (c) { c 1259 tools/perf/builtin-timechart.c if (!c->display) { c 1260 tools/perf/builtin-timechart.c c->Y = 0; c 1261 tools/perf/builtin-timechart.c c = c->next; c 1265 tools/perf/builtin-timechart.c svg_box(Y, c->start_time, c->end_time, "process"); c 1266 tools/perf/builtin-timechart.c sample = c->samples; c 1286 tools/perf/builtin-timechart.c if (c->comm) { c 1288 tools/perf/builtin-timechart.c if (c->total_time > 5000000000) /* 5 seconds */ c 1289 tools/perf/builtin-timechart.c sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / (double)NSEC_PER_SEC); c 1291 
tools/perf/builtin-timechart.c sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / (double)NSEC_PER_MSEC); c 1293 tools/perf/builtin-timechart.c svg_text(Y, c->start_time, comm); c 1295 tools/perf/builtin-timechart.c c->Y = Y; c 1297 tools/perf/builtin-timechart.c c = c->next; c 1318 tools/perf/builtin-timechart.c static int passes_filter(struct per_pid *p, struct per_pidcomm *c) c 1328 tools/perf/builtin-timechart.c if (strcmp(filt->name, c->comm) == 0) c 1338 tools/perf/builtin-timechart.c struct per_pidcomm *c; c 1351 tools/perf/builtin-timechart.c c = p->all; c 1353 tools/perf/builtin-timechart.c while (c) { c 1354 tools/perf/builtin-timechart.c c->display = 0; c 1356 tools/perf/builtin-timechart.c if (c->start_time == 1) c 1357 tools/perf/builtin-timechart.c c->start_time = tchart->first_time; c 1359 tools/perf/builtin-timechart.c if (passes_filter(p, c)) { c 1360 tools/perf/builtin-timechart.c c->display = 1; c 1365 tools/perf/builtin-timechart.c if (c->end_time == 0) c 1366 tools/perf/builtin-timechart.c c->end_time = tchart->last_time; c 1368 tools/perf/builtin-timechart.c c = c->next; c 1378 tools/perf/builtin-timechart.c struct per_pidcomm *c; c 1393 tools/perf/builtin-timechart.c c = p->all; c 1395 tools/perf/builtin-timechart.c while (c) { c 1396 tools/perf/builtin-timechart.c c->display = 0; c 1398 tools/perf/builtin-timechart.c if (c->start_time == 1) c 1399 tools/perf/builtin-timechart.c c->start_time = tchart->first_time; c 1401 tools/perf/builtin-timechart.c if (c->total_time >= threshold) { c 1402 tools/perf/builtin-timechart.c c->display = 1; c 1406 tools/perf/builtin-timechart.c if (c->end_time == 0) c 1407 tools/perf/builtin-timechart.c c->end_time = tchart->last_time; c 1409 tools/perf/builtin-timechart.c c = c->next; c 1419 tools/perf/builtin-timechart.c struct per_pidcomm *c; c 1428 tools/perf/builtin-timechart.c c = p->all; c 1430 tools/perf/builtin-timechart.c while (c) { c 1431 tools/perf/builtin-timechart.c c->display = 0; c 1433 tools/perf/builtin-timechart.c if (c->total_bytes >= threshold) { c 1434 tools/perf/builtin-timechart.c c->display = 1; c 1438 tools/perf/builtin-timechart.c if (c->end_time == 0) c 1439 tools/perf/builtin-timechart.c c->end_time = timechart->last_time; c 1441 tools/perf/builtin-timechart.c c = c->next; c 461 tools/perf/builtin-top.c static int perf_top__key_mapped(struct perf_top *top, int c) c 463 tools/perf/builtin-top.c switch (c) { c 485 tools/perf/builtin-top.c static bool perf_top__handle_keypress(struct perf_top *top, int c) c 489 tools/perf/builtin-top.c if (!perf_top__key_mapped(top, c)) { c 500 tools/perf/builtin-top.c c = getc(stdin); c 503 tools/perf/builtin-top.c if (!perf_top__key_mapped(top, c)) c 507 tools/perf/builtin-top.c switch (c) { c 669 tools/perf/builtin-top.c int delay_msecs, c; c 702 tools/perf/builtin-top.c c = getc(stdin); c 705 tools/perf/builtin-top.c if (perf_top__handle_keypress(top, c)) c 71 tools/perf/jvmti/libjvmti.c PCStackInfo *c; c 86 tools/perf/jvmti/libjvmti.c c = rec->pcinfo + i; c 91 tools/perf/jvmti/libjvmti.c ret = (*jvmti)->GetLineNumberTable(jvmti, c->methods[0], &nr, &lne); c 117 tools/perf/jvmti/libjvmti.c c = rec->pcinfo + i; c 119 tools/perf/jvmti/libjvmti.c ret = do_get_line_numbers(jvmti, c->pc, c 120 tools/perf/jvmti/libjvmti.c c->methods[0], c 121 tools/perf/jvmti/libjvmti.c c->bcis[0], c 100 tools/perf/perf.c struct pager_config *c = data; c 101 tools/perf/perf.c if (strstarts(var, "pager.") && !strcmp(var + 6, c->cmd)) c 102 tools/perf/perf.c c->val = 
perf_config_bool(var, value); c 110 tools/perf/perf.c struct pager_config c; c 111 tools/perf/perf.c c.cmd = cmd; c 112 tools/perf/perf.c c.val = -1; c 113 tools/perf/perf.c err = perf_config(pager_command_config, &c); c 114 tools/perf/perf.c return err ?: c.val; c 119 tools/perf/perf.c struct pager_config *c = data; c 120 tools/perf/perf.c if (strstarts(var, "tui.") && !strcmp(var + 4, c->cmd)) c 121 tools/perf/perf.c c->val = perf_config_bool(var, value); c 122 tools/perf/perf.c if (strstarts(var, "gtk.") && !strcmp(var + 4, c->cmd)) c 123 tools/perf/perf.c c->val = perf_config_bool(var, value) ? 2 : 0; c 134 tools/perf/perf.c struct pager_config c; c 135 tools/perf/perf.c c.cmd = cmd; c 136 tools/perf/perf.c c.val = -1; c 137 tools/perf/perf.c err = perf_config(browser_command_config, &c); c 138 tools/perf/perf.c return err ?: c.val; c 283 tools/perf/pmu-events/jevents.c char c = tp[i]; c 285 tools/perf/pmu-events/jevents.c if (c == '-') c 287 tools/perf/pmu-events/jevents.c else if (c == '.') { c 697 tools/perf/pmu-events/jevents.c int c; c 713 tools/perf/pmu-events/jevents.c c = tblname[i]; c 715 tools/perf/pmu-events/jevents.c if (c == '-' || c == '/') c 717 tools/perf/pmu-events/jevents.c else if (c == '.') { c 720 tools/perf/pmu-events/jevents.c } else if (!isalnum(c) && c != '_') { c 722 tools/perf/pmu-events/jevents.c prog, c, basename(fname)); c 127 tools/perf/pmu-events/jsmn.c char c = js[parser->pos]; c 130 tools/perf/pmu-events/jsmn.c if (c == '\"') { c 142 tools/perf/pmu-events/jsmn.c if (c == '\\') { c 181 tools/perf/pmu-events/jsmn.c char c; c 184 tools/perf/pmu-events/jsmn.c c = js[parser->pos]; c 185 tools/perf/pmu-events/jsmn.c switch (c) { c 193 tools/perf/pmu-events/jsmn.c token->type = (c == '{' ? JSMN_OBJECT : JSMN_ARRAY); c 199 tools/perf/pmu-events/jsmn.c type = (c == '}' ? 
JSMN_OBJECT : JSMN_ARRAY); c 121 tools/perf/pmu-events/json.c static int countchar(char *map, char c, int end) c 126 tools/perf/pmu-events/json.c if (map[i] == c) c 43 tools/perf/tests/code-reading.c static unsigned int hex(char c) c 45 tools/perf/tests/code-reading.c if (c >= '0' && c <= '9') c 46 tools/perf/tests/code-reading.c return c - '0'; c 47 tools/perf/tests/code-reading.c if (c >= 'a' && c <= 'f') c 48 tools/perf/tests/code-reading.c return c - 'a' + 10; c 49 tools/perf/tests/code-reading.c return c - 'A' + 10; c 149 tools/perf/tests/dso-data.c int c; c 156 tools/perf/tests/dso-data.c for (c = 0; c < 2; c++) { c 181 tools/perf/tests/hists_cumulate.c size_t i, c; c 227 tools/perf/tests/hists_cumulate.c c = 0; c 229 tools/perf/tests/hists_cumulate.c scnprintf(buf, sizeof(buf), "Invalid callchain entry #%zd/%zd", i, c); c 232 tools/perf/tests/hists_cumulate.c c < expected_callchain[i].nr); c 234 tools/perf/tests/hists_cumulate.c !strcmp(CDSO(clist), expected_callchain[i].node[c].dso) && c 235 tools/perf/tests/hists_cumulate.c !strcmp(CSYM(clist), expected_callchain[i].node[c].sym)); c 236 tools/perf/tests/hists_cumulate.c c++; c 240 tools/perf/tests/hists_cumulate.c c <= expected_callchain[i].nr); c 44 tools/perf/tests/kmod-path.c #define T(path, an, k, c, n) \ c 45 tools/perf/tests/kmod-path.c TEST_ASSERT_VAL("failed", !test(path, an, k, c, n)) c 47 tools/perf/tests/kmod-path.c #define M(path, c, e) \ c 48 tools/perf/tests/kmod-path.c TEST_ASSERT_VAL("failed", !test_is_kernel_module(path, c, e)) c 173 tools/perf/tests/time-utils-test.c u64 c = 7654321987654321ULL; c 179 tools/perf/tests/time-utils-test.c .ptime = { {b, b + 1}, {c, c + 123}, {e, e + 5}, }, c 181 tools/perf/tests/time-utils-test.c .skip = { b - 1, b + 2, c - 1, c + 124, e - 1, e + 6 }, c 182 tools/perf/tests/time-utils-test.c .noskip = { b, b + 1, c, c + 123, e, e + 5 }, c 795 tools/perf/ui/browser.c struct ui_browser_colorset *c = &ui_browser__colorsets[i++]; c 796 tools/perf/ui/browser.c sltt_set_color(c->colorset, c->name, c->fg, c->bg); c 50 tools/perf/ui/browsers/scripts.c struct script_config *c) c 52 tools/perf/ui/browsers/scripts.c c->names[c->index] = name; c 53 tools/perf/ui/browsers/scripts.c if (asprintf(&c->paths[c->index], c 55 tools/perf/ui/browsers/scripts.c c->perf, opt, symbol_conf.inline_name ? 
" --inline" : "", c 56 tools/perf/ui/browsers/scripts.c c->extra_format) < 0) c 58 tools/perf/ui/browsers/scripts.c c->index++; c 64 tools/perf/ui/browsers/scripts.c struct script_config *c = data; c 68 tools/perf/ui/browsers/scripts.c if (c->index >= SCRIPT_MAX_NO) c 70 tools/perf/ui/browsers/scripts.c c->names[c->index] = strdup(var + 7); c 71 tools/perf/ui/browsers/scripts.c if (!c->names[c->index]) c 73 tools/perf/ui/browsers/scripts.c if (asprintf(&c->paths[c->index], "%s %s", value, c 74 tools/perf/ui/browsers/scripts.c c->extra_format) < 0) c 76 tools/perf/ui/browsers/scripts.c c->index++; c 321 tools/perf/util/annotate.c static inline const char *validate_comma(const char *c, struct ins_operands *ops) c 323 tools/perf/util/annotate.c if (ops->raw_comment && c > ops->raw_comment) c 326 tools/perf/util/annotate.c return c; c 336 tools/perf/util/annotate.c const char *c = strchr(ops->raw, ','); c 340 tools/perf/util/annotate.c c = validate_comma(c, ops); c 357 tools/perf/util/annotate.c if (c++ != NULL) { c 358 tools/perf/util/annotate.c ops->target.addr = strtoull(c, NULL, 16); c 360 tools/perf/util/annotate.c c = strchr(c, ','); c 361 tools/perf/util/annotate.c c = validate_comma(c, ops); c 362 tools/perf/util/annotate.c if (c++ != NULL) c 363 tools/perf/util/annotate.c ops->target.addr = strtoull(c, NULL, 16); c 410 tools/perf/util/annotate.c const char *c; c 418 tools/perf/util/annotate.c c = strchr(ops->raw, ','); c 419 tools/perf/util/annotate.c c = validate_comma(c, ops); c 421 tools/perf/util/annotate.c if (c != NULL) { c 422 tools/perf/util/annotate.c const char *c2 = strchr(c + 1, ','); c 427 tools/perf/util/annotate.c c = c2; c 428 tools/perf/util/annotate.c c++; c 431 tools/perf/util/annotate.c if (*c == ' ') c 432 tools/perf/util/annotate.c c++; c 436 tools/perf/util/annotate.c ins->name, c ? 
c - ops->raw : 0, ops->raw, c 1378 tools/perf/util/auxtrace.c struct auxtrace_cache *c; c 1382 tools/perf/util/auxtrace.c c = zalloc(sizeof(struct auxtrace_cache)); c 1383 tools/perf/util/auxtrace.c if (!c) c 1395 tools/perf/util/auxtrace.c c->hashtable = ht; c 1396 tools/perf/util/auxtrace.c c->sz = sz; c 1397 tools/perf/util/auxtrace.c c->entry_size = entry_size; c 1398 tools/perf/util/auxtrace.c c->limit = (c->sz * limit_percent) / 100; c 1399 tools/perf/util/auxtrace.c c->bits = bits; c 1401 tools/perf/util/auxtrace.c return c; c 1404 tools/perf/util/auxtrace.c free(c); c 1408 tools/perf/util/auxtrace.c static void auxtrace_cache__drop(struct auxtrace_cache *c) c 1414 tools/perf/util/auxtrace.c if (!c) c 1417 tools/perf/util/auxtrace.c for (i = 0; i < c->sz; i++) { c 1418 tools/perf/util/auxtrace.c hlist_for_each_entry_safe(entry, tmp, &c->hashtable[i], hash) { c 1420 tools/perf/util/auxtrace.c auxtrace_cache__free_entry(c, entry); c 1424 tools/perf/util/auxtrace.c c->cnt = 0; c 1427 tools/perf/util/auxtrace.c void auxtrace_cache__free(struct auxtrace_cache *c) c 1429 tools/perf/util/auxtrace.c if (!c) c 1432 tools/perf/util/auxtrace.c auxtrace_cache__drop(c); c 1433 tools/perf/util/auxtrace.c zfree(&c->hashtable); c 1434 tools/perf/util/auxtrace.c free(c); c 1437 tools/perf/util/auxtrace.c void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c) c 1439 tools/perf/util/auxtrace.c return malloc(c->entry_size); c 1442 tools/perf/util/auxtrace.c void auxtrace_cache__free_entry(struct auxtrace_cache *c __maybe_unused, c 1448 tools/perf/util/auxtrace.c int auxtrace_cache__add(struct auxtrace_cache *c, u32 key, c 1451 tools/perf/util/auxtrace.c if (c->limit && ++c->cnt > c->limit) c 1452 tools/perf/util/auxtrace.c auxtrace_cache__drop(c); c 1455 tools/perf/util/auxtrace.c hlist_add_head(&entry->hash, &c->hashtable[hash_32(key, c->bits)]); c 1460 tools/perf/util/auxtrace.c void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key) c 1465 tools/perf/util/auxtrace.c if (!c) c 1468 tools/perf/util/auxtrace.c hlist = &c->hashtable[hash_32(key, c->bits)]; c 488 tools/perf/util/auxtrace.h void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c); c 489 tools/perf/util/auxtrace.h void auxtrace_cache__free_entry(struct auxtrace_cache *c, void *entry); c 490 tools/perf/util/auxtrace.h int auxtrace_cache__add(struct auxtrace_cache *c, u32 key, c 492 tools/perf/util/auxtrace.h void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key); c 1613 tools/perf/util/bpf-loader.c #define ERRCODE_OFFSET(c) ERRNO_OFFSET(BPF_LOADER_ERRNO__##c) c 49 tools/perf/util/config.c int c; c 52 tools/perf/util/config.c c = '\n'; c 54 tools/perf/util/config.c c = fgetc(f); c 55 tools/perf/util/config.c if (c == '\r') { c 57 tools/perf/util/config.c c = fgetc(f); c 58 tools/perf/util/config.c if (c != '\n') { c 59 tools/perf/util/config.c ungetc(c, f); c 60 tools/perf/util/config.c c = '\r'; c 63 tools/perf/util/config.c if (c == '\n') c 65 tools/perf/util/config.c if (c == EOF) { c 67 tools/perf/util/config.c c = '\n'; c 70 tools/perf/util/config.c return c; c 80 tools/perf/util/config.c int c = get_next_char(); c 84 tools/perf/util/config.c if (c == '\n') { c 92 tools/perf/util/config.c if (isspace(c) && !quote) { c 97 tools/perf/util/config.c if (c == ';' || c == '#') { c 107 tools/perf/util/config.c if (c == '\\') { c 108 tools/perf/util/config.c c = get_next_char(); c 109 tools/perf/util/config.c switch (c) { c 113 tools/perf/util/config.c c = '\t'; c 116 tools/perf/util/config.c c = '\b'; c 119 
tools/perf/util/config.c c = '\n'; c 128 tools/perf/util/config.c value[len++] = c; c 131 tools/perf/util/config.c if (c == '"') { c 135 tools/perf/util/config.c value[len++] = c; c 139 tools/perf/util/config.c static inline int iskeychar(int c) c 141 tools/perf/util/config.c return isalnum(c) || c == '-' || c == '_'; c 146 tools/perf/util/config.c int c; c 151 tools/perf/util/config.c c = get_next_char(); c 154 tools/perf/util/config.c if (!iskeychar(c)) c 156 tools/perf/util/config.c name[len++] = c; c 161 tools/perf/util/config.c while (c == ' ' || c == '\t') c 162 tools/perf/util/config.c c = get_next_char(); c 165 tools/perf/util/config.c if (c != '\n') { c 166 tools/perf/util/config.c if (c != '=') c 175 tools/perf/util/config.c static int get_extended_base_var(char *name, int baselen, int c) c 178 tools/perf/util/config.c if (c == '\n') c 180 tools/perf/util/config.c c = get_next_char(); c 181 tools/perf/util/config.c } while (isspace(c)); c 184 tools/perf/util/config.c if (c != '"') c 216 tools/perf/util/config.c int c = get_next_char(); c 219 tools/perf/util/config.c if (c == ']') c 221 tools/perf/util/config.c if (isspace(c)) c 222 tools/perf/util/config.c return get_extended_base_var(name, baselen, c); c 223 tools/perf/util/config.c if (!iskeychar(c) && c != '.') c 227 tools/perf/util/config.c name[baselen++] = tolower(c); c 242 tools/perf/util/config.c int line, c = get_next_char(); c 248 tools/perf/util/config.c if ((unsigned char) c == *bomptr) { c 259 tools/perf/util/config.c if (c == '\n') { c 265 tools/perf/util/config.c if (comment || isspace(c)) c 267 tools/perf/util/config.c if (c == '#' || c == ';') { c 271 tools/perf/util/config.c if (c == '[') { c 279 tools/perf/util/config.c if (!isalpha(c)) c 281 tools/perf/util/config.c var[baselen] = tolower(c); c 135 tools/perf/util/cpumap.c struct perf_cpu_map *c; c 140 tools/perf/util/cpumap.c c = calloc(1, sizeof(*c) + nr * sizeof(int)); c 141 tools/perf/util/cpumap.c if (!c) c 146 tools/perf/util/cpumap.c for (s2 = 0; s2 < c->nr; s2++) { c 147 tools/perf/util/cpumap.c if (s1 == c->map[s2]) c 150 tools/perf/util/cpumap.c if (s2 == c->nr) { c 151 tools/perf/util/cpumap.c c->map[c->nr] = s1; c 152 tools/perf/util/cpumap.c c->nr++; c 156 tools/perf/util/cpumap.c qsort(c->map, c->nr, sizeof(int), cmp_ids); c 158 tools/perf/util/cpumap.c refcount_set(&c->refcnt, 1); c 159 tools/perf/util/cpumap.c *res = c; c 302 tools/perf/util/cputopo.c char *c; c 314 tools/perf/util/cputopo.c c = strchr(buf, '\n'); c 315 tools/perf/util/cputopo.c if (c) c 316 tools/perf/util/cputopo.c *c = '\0'; c 792 tools/perf/util/data-convert-bt.c struct convert *c = container_of(tool, struct convert, tool); c 794 tools/perf/util/data-convert-bt.c struct ctf_writer *cw = &c->writer; c 807 tools/perf/util/data-convert-bt.c c->events_count++; c 808 tools/perf/util/data-convert-bt.c c->events_size += _event->header.size; c 810 tools/perf/util/data-convert-bt.c pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count); c 870 tools/perf/util/data-convert-bt.c struct convert *c = container_of(tool, struct convert, tool);\ c 871 tools/perf/util/data-convert-bt.c struct ctf_writer *cw = &c->writer; \ c 877 tools/perf/util/data-convert-bt.c c->non_sample_count++; \ c 878 tools/perf/util/data-convert-bt.c c->events_size += _event->header.size; \ c 1568 tools/perf/util/data-convert-bt.c struct convert *c = cb; c 1571 tools/perf/util/data-convert-bt.c return perf_config_u64(&c->queue_size, var, value); c 1585 tools/perf/util/data-convert-bt.c struct convert c = { 
c 1601 tools/perf/util/data-convert-bt.c struct ctf_writer *cw = &c.writer; c 1605 tools/perf/util/data-convert-bt.c c.tool.comm = process_comm_event; c 1606 tools/perf/util/data-convert-bt.c c.tool.exit = process_exit_event; c 1607 tools/perf/util/data-convert-bt.c c.tool.fork = process_fork_event; c 1608 tools/perf/util/data-convert-bt.c c.tool.mmap = process_mmap_event; c 1609 tools/perf/util/data-convert-bt.c c.tool.mmap2 = process_mmap2_event; c 1612 tools/perf/util/data-convert-bt.c err = perf_config(convert__config, &c); c 1622 tools/perf/util/data-convert-bt.c session = perf_session__new(&data, 0, &c.tool); c 1628 tools/perf/util/data-convert-bt.c if (c.queue_size) { c 1630 tools/perf/util/data-convert-bt.c c.queue_size); c 1659 tools/perf/util/data-convert-bt.c (double) c.events_size / 1024.0 / 1024.0, c 1660 tools/perf/util/data-convert-bt.c c.events_count); c 1662 tools/perf/util/data-convert-bt.c if (!c.non_sample_count) c 1665 tools/perf/util/data-convert-bt.c fprintf(stderr, ", %" PRIu64 " non-samples) ]\n", c.non_sample_count); c 21 tools/perf/util/demangle-java.c #define BASE_ENT(c, n) [c - 'A']=n c 252 tools/perf/util/evsel.h #define perf_evsel__match(evsel, t, c) \ c 254 tools/perf/util/evsel.h evsel->core.attr.config == PERF_COUNT_##c) c 527 tools/perf/util/genelf.c int c, fd, ret; c 529 tools/perf/util/genelf.c while ((c = getopt(argc, argv, "o:h")) != -1) { c 530 tools/perf/util/genelf.c switch (c) { c 1087 tools/perf/util/header.c static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c) c 1089 tools/perf/util/header.c fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map); c 1104 tools/perf/util/header.c struct cpu_cache_level c; c 1107 tools/perf/util/header.c err = cpu_cache_level__read(&c, cpu, level); c 1115 tools/perf/util/header.c if (cpu_cache_level__cmp(&c, &caches[i])) c 1120 tools/perf/util/header.c caches[cnt++] = c; c 1122 tools/perf/util/header.c cpu_cache_level__free(&c); c 1152 tools/perf/util/header.c struct cpu_cache_level *c = &caches[i]; c 1155 tools/perf/util/header.c ret = do_write(ff, &c->v, sizeof(u32)); \ c 1166 tools/perf/util/header.c ret = do_write_string(ff, (const char *) c->v); \ c 2539 tools/perf/util/header.c struct cpu_cache_level c; c 2542 tools/perf/util/header.c if (do_read_u32(ff, &c.v))\ c 2552 tools/perf/util/header.c c.v = do_read_string(ff); \ c 2553 tools/perf/util/header.c if (!c.v) \ c 2561 tools/perf/util/header.c caches[i] = c; c 450 tools/perf/util/intel-pt.c struct auxtrace_cache *c; c 459 tools/perf/util/intel-pt.c c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200); c 461 tools/perf/util/intel-pt.c dso->auxtrace_cache = c; c 463 tools/perf/util/intel-pt.c return c; c 470 tools/perf/util/intel-pt.c struct auxtrace_cache *c = intel_pt_cache(dso, machine); c 474 tools/perf/util/intel-pt.c if (!c) c 477 tools/perf/util/intel-pt.c e = auxtrace_cache__alloc_entry(c); c 489 tools/perf/util/intel-pt.c err = auxtrace_cache__add(c, offset, &e->entry); c 491 tools/perf/util/intel-pt.c auxtrace_cache__free_entry(c, e); c 499 tools/perf/util/intel-pt.c struct auxtrace_cache *c = intel_pt_cache(dso, machine); c 501 tools/perf/util/intel-pt.c if (!c) c 332 tools/perf/util/mmap.c int c, cpu, nr_cpus; c 340 tools/perf/util/mmap.c for (c = 0; c < nr_cpus; c++) { c 341 tools/perf/util/mmap.c cpu = cpu_map->map[c]; /* map c index to online cpu index */ c 1249 tools/perf/util/probe-event.c const char c = *range++; c 1255 tools/perf/util/probe-event.c if (c == '+') { c 1342 
tools/perf/util/probe-event.c char c, nc = 0; c 1456 tools/perf/util/probe-event.c c = nc; c 1457 tools/perf/util/probe-event.c if (c == ';') { /* Lazy pattern must be the last part */ c 1468 tools/perf/util/probe-event.c switch (c) { c 73 tools/perf/util/strbuf.c int strbuf_addch(struct strbuf *sb, int c) c 79 tools/perf/util/strbuf.c sb->buf[sb->len++] = c; c 83 tools/perf/util/strbuf.h int strbuf_addch(struct strbuf *sb, int c); c 16 tools/perf/util/strfilter.c #define is_operator(c) ((c) == '|' || (c) == '&' || (c) == '!') c 17 tools/perf/util/strfilter.c #define is_separator(c) (is_operator(c) || (c) == '(' || (c) == ')') c 28 tools/perf/util/string.c char c; c 34 tools/perf/util/string.c switch (c = *p++) { c 59 tools/perf/util/string.c if (islower(c)) { c 73 tools/perf/util/string.c static bool __match_charclass(const char *pat, char c, const char **npat) c 81 tools/perf/util/string.c if (*pat++ == c) /* First character is special */ c 86 tools/perf/util/string.c if (*(pat - 1) <= c && c <= *(pat + 1)) c 91 tools/perf/util/string.c } else if (*pat++ == c) c 731 tools/perf/util/svghelper.c int c; c 738 tools/perf/util/svghelper.c c = m->map[i]; c 739 tools/perf/util/svghelper.c if (c >= nr_cpus) { c 744 tools/perf/util/svghelper.c set_bit(c, cpumask_bits(b)); c 56 tools/perf/util/symbol-elf.c static inline char *bfd_demangle(void __maybe_unused *v, const char *c, int i) c 58 tools/perf/util/symbol-elf.c return cplus_demangle(c, i); c 63 tools/perf/util/symbol-elf.c const char __maybe_unused *c, c 103 tools/perf/util/symbol.c void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c) c 105 tools/perf/util/symbol.c p->end = c->start; c 236 tools/perf/util/symbol.h void arch__symbols__fixup_end(struct symbol *p, struct symbol *c); c 182 tools/perf/util/time-utils.c char *c, *endptr; c 185 tools/perf/util/time-utils.c c = strchr(str, '%'); c 186 tools/perf/util/time-utils.c if (c) c 187 tools/perf/util/time-utils.c *c = '\0'; c 328 tools/perf/util/time-utils.c const char *ostr, u64 start, u64 end, char *c) c 337 tools/perf/util/time-utils.c if (ostr + len - 1 != c) c 361 tools/perf/util/time-utils.c char *c; c 372 tools/perf/util/time-utils.c c = strchr(ostr, '/'); c 373 tools/perf/util/time-utils.c if (c) { c 378 tools/perf/util/time-utils.c c = strchr(ostr, '-'); c 379 tools/perf/util/time-utils.c if (c) { c 384 tools/perf/util/time-utils.c c = strchr(ostr, '%'); c 385 tools/perf/util/time-utils.c if (c) c 386 tools/perf/util/time-utils.c return one_percent_convert(ptime_buf, ostr, start, end, c); c 105 tools/perf/util/trace-event-read.c char c; c 108 tools/perf/util/trace-event-read.c r = read(input_fd, &c, 1); c 120 tools/perf/util/trace-event-read.c int retw = write(STDOUT_FILENO, &c, 1); c 128 tools/perf/util/trace-event-read.c buf[size++] = c; c 130 tools/perf/util/trace-event-read.c if (!c) c 646 tools/perf/util/unwind-libunwind-local.c unw_cursor_t c; c 667 tools/perf/util/unwind-libunwind-local.c ret = unw_init_remote(&c, addr_space, ui); c 671 tools/perf/util/unwind-libunwind-local.c while (!ret && (unw_step(&c) > 0) && i < max_stack) { c 672 tools/perf/util/unwind-libunwind-local.c unw_get_reg(&c, UNW_REG_IP, &ips[i]); c 681 tools/perf/util/unwind-libunwind-local.c if (unw_is_signal_frame(&c) <= 0) c 55 tools/power/acpi/tools/ec/ec_access.c int c; c 57 tools/power/acpi/tools/ec/ec_access.c while ((c = getopt(argc, argv, "rs:b:w:v:h")) != -1) { c 59 tools/power/acpi/tools/ec/ec_access.c switch (c) { c 65 tools/power/cpupower/bench/main.c int c; c 75 
tools/power/cpupower/bench/main.c c = getopt_long (argc, argv, "hg:o:s:l:vc:p:f:n:r:x:y:", c 77 tools/power/cpupower/bench/main.c if (c == -1) c 80 tools/power/cpupower/bench/main.c switch (c) { c 331 tools/power/x86/turbostat/turbostat.c struct core_data *c; c 341 tools/power/x86/turbostat/turbostat.c c = GET_CORE(core_base, core_no, c 345 tools/power/x86/turbostat/turbostat.c retval = func(t, c, p); c 799 tools/power/x86/turbostat/turbostat.c int dump_counters(struct thread_data *t, struct core_data *c, c 805 tools/power/x86/turbostat/turbostat.c outp += sprintf(outp, "t %p, c %p, p %p\n", t, c, p); c 826 tools/power/x86/turbostat/turbostat.c if (c) { c 827 tools/power/x86/turbostat/turbostat.c outp += sprintf(outp, "core: %d\n", c->core_id); c 828 tools/power/x86/turbostat/turbostat.c outp += sprintf(outp, "c3: %016llX\n", c->c3); c 829 tools/power/x86/turbostat/turbostat.c outp += sprintf(outp, "c6: %016llX\n", c->c6); c 830 tools/power/x86/turbostat/turbostat.c outp += sprintf(outp, "c7: %016llX\n", c->c7); c 831 tools/power/x86/turbostat/turbostat.c outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c); c 832 tools/power/x86/turbostat/turbostat.c outp += sprintf(outp, "Joules: %0X\n", c->core_energy); c 836 tools/power/x86/turbostat/turbostat.c i, mp->msr_num, c->counter[i]); c 838 tools/power/x86/turbostat/turbostat.c outp += sprintf(outp, "mc6_us: %016llX\n", c->mc6_us); c 885 tools/power/x86/turbostat/turbostat.c int format_counters(struct thread_data *t, struct core_data *c, c 948 tools/power/x86/turbostat/turbostat.c if (c) c 963 tools/power/x86/turbostat/turbostat.c if (c) c 964 tools/power/x86/turbostat/turbostat.c outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), c->core_id); c 1036 tools/power/x86/turbostat/turbostat.c outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c3/tsc); c 1038 tools/power/x86/turbostat/turbostat.c outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c6/tsc); c 1040 tools/power/x86/turbostat/turbostat.c outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c7/tsc); c 1044 tools/power/x86/turbostat/turbostat.c outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->mc6_us / tsc); c 1047 tools/power/x86/turbostat/turbostat.c outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), c->core_temp_c); c 1052 tools/power/x86/turbostat/turbostat.c outp += sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int) c->counter[i]); c 1054 tools/power/x86/turbostat/turbostat.c outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), c->counter[i]); c 1057 tools/power/x86/turbostat/turbostat.c outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), c->counter[i]); c 1059 tools/power/x86/turbostat/turbostat.c outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), c->counter[i]); c 1061 tools/power/x86/turbostat/turbostat.c outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->counter[i]/tsc); c 1075 tools/power/x86/turbostat/turbostat.c outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units / interval_float); c 1077 tools/power/x86/turbostat/turbostat.c outp += sprintf(outp, fmt8, (printed++ ? 
delim : ""), c->core_energy * rapl_energy_units); c 1195 tools/power/x86/turbostat/turbostat.c void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) c 1209 tools/power/x86/turbostat/turbostat.c for_all_cpus(format_counters, t, c, p); c 1392 tools/power/x86/turbostat/turbostat.c int delta_cpu(struct thread_data *t, struct core_data *c, c 1400 tools/power/x86/turbostat/turbostat.c delta_core(c, c2); c 1414 tools/power/x86/turbostat/turbostat.c void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) c 1437 tools/power/x86/turbostat/turbostat.c c->c3 = 0; c 1438 tools/power/x86/turbostat/turbostat.c c->c6 = 0; c 1439 tools/power/x86/turbostat/turbostat.c c->c7 = 0; c 1440 tools/power/x86/turbostat/turbostat.c c->mc6_us = 0; c 1441 tools/power/x86/turbostat/turbostat.c c->core_temp_c = 0; c 1442 tools/power/x86/turbostat/turbostat.c c->core_energy = 0; c 1476 tools/power/x86/turbostat/turbostat.c c->counter[i] = 0; c 1481 tools/power/x86/turbostat/turbostat.c int sum_counters(struct thread_data *t, struct core_data *c, c 1518 tools/power/x86/turbostat/turbostat.c average.cores.c3 += c->c3; c 1519 tools/power/x86/turbostat/turbostat.c average.cores.c6 += c->c6; c 1520 tools/power/x86/turbostat/turbostat.c average.cores.c7 += c->c7; c 1521 tools/power/x86/turbostat/turbostat.c average.cores.mc6_us += c->mc6_us; c 1523 tools/power/x86/turbostat/turbostat.c average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c); c 1525 tools/power/x86/turbostat/turbostat.c average.cores.core_energy += c->core_energy; c 1530 tools/power/x86/turbostat/turbostat.c average.cores.counter[i] += c->counter[i]; c 1584 tools/power/x86/turbostat/turbostat.c void compute_average(struct thread_data *t, struct core_data *c, c 1592 tools/power/x86/turbostat/turbostat.c for_all_cpus(sum_counters, t, c, p); c 1778 tools/power/x86/turbostat/turbostat.c int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) c 1877 tools/power/x86/turbostat/turbostat.c if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) c 1882 tools/power/x86/turbostat/turbostat.c if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6)) c 1885 tools/power/x86/turbostat/turbostat.c if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6)) c 1890 tools/power/x86/turbostat/turbostat.c if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7)) c 1894 tools/power/x86/turbostat/turbostat.c if (get_msr(cpu, MSR_MODULE_C6_RES_MS, &c->mc6_us)) c 1900 tools/power/x86/turbostat/turbostat.c c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F); c 1906 tools/power/x86/turbostat/turbostat.c c->core_energy = msr & 0xFFFFFFFF; c 1910 tools/power/x86/turbostat/turbostat.c if (get_mp(cpu, mp, &c->counter[i])) c 2698 tools/power/x86/turbostat/turbostat.c struct core_data *c, *c2; c 2712 tools/power/x86/turbostat/turbostat.c c = GET_CORE(core_base, core_no, c 2721 tools/power/x86/turbostat/turbostat.c retval = func(t, c, p, t2, c2, p2); c 3594 tools/power/x86/turbostat/turbostat.c int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p) c 3639 tools/power/x86/turbostat/turbostat.c int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p) c 3730 tools/power/x86/turbostat/turbostat.c int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data *p) c 4097 tools/power/x86/turbostat/turbostat.c int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p) c 4170 tools/power/x86/turbostat/turbostat.c int print_rapl(struct 
thread_data *t, struct core_data *c, struct pkg_data *p) c 4456 tools/power/x86/turbostat/turbostat.c int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p) c 5101 tools/power/x86/turbostat/turbostat.c allocate_counters(struct thread_data **t, struct core_data **c, c 5116 tools/power/x86/turbostat/turbostat.c *c = calloc(num_cores, sizeof(struct core_data)); c 5117 tools/power/x86/turbostat/turbostat.c if (*c == NULL) c 5121 tools/power/x86/turbostat/turbostat.c (*c)[i].core_id = -1; c 5147 tools/power/x86/turbostat/turbostat.c struct core_data *c; c 5158 tools/power/x86/turbostat/turbostat.c c = GET_CORE(core_base, core_id, node_id, pkg_id); c 5168 tools/power/x86/turbostat/turbostat.c c->core_id = core_id; c 108 tools/spi/spidev_fdx.c int c; c 114 tools/spi/spidev_fdx.c while ((c = getopt(argc, argv, "hm:r:v")) != EOF) { c 115 tools/spi/spidev_fdx.c switch (c) { c 63 tools/spi/spidev_test.c unsigned char c; c 75 tools/spi/spidev_test.c c = *line++; c 76 tools/spi/spidev_test.c printf("%c", (c < 32 || c > 126) ? '.' : c); c 215 tools/spi/spidev_test.c int c; c 217 tools/spi/spidev_test.c c = getopt_long(argc, argv, "D:s:d:b:i:o:lHOLC3NR24p:vS:I:", c 220 tools/spi/spidev_test.c if (c == -1) c 223 tools/spi/spidev_test.c switch (c) { c 142 tools/testing/radix-tree/benchmark.c int c, s; c 147 tools/testing/radix-tree/benchmark.c for (c = 0; size[c]; c++) c 149 tools/testing/radix-tree/benchmark.c benchmark_size(size[c], step[s]); c 1 tools/testing/radix-tree/linux/cpu.h #define cpuhp_setup_state_nocalls(a, b, c, d) (0) c 119 tools/testing/scatterlist/linux/mm.h #define kmemleak_alloc(a, b, c, d) c 48 tools/testing/selftests/bpf/cgroup_helpers.c char *c, *c2; c 79 tools/testing/selftests/bpf/cgroup_helpers.c for (c = strtok_r(buf, " ", &c2); c; c = strtok_r(NULL, " ", &c2)) { c 80 tools/testing/selftests/bpf/cgroup_helpers.c if (dprintf(cfd, "+%s\n", c) <= 0) { c 81 tools/testing/selftests/bpf/cgroup_helpers.c log_err("Enabling controller %s: %s", c, path); c 62 tools/testing/selftests/bpf/flow_dissector_load.c int c; c 64 tools/testing/selftests/bpf/flow_dissector_load.c while ((c = getopt(argc, argv, "adp:s:")) != -1) { c 65 tools/testing/selftests/bpf/flow_dissector_load.c switch (c) { c 10 tools/testing/selftests/bpf/prog_tests/core_reloc.c .c = 0xbeef, \ c 57 tools/testing/selftests/bpf/prog_tests/core_reloc.c .c = { [1] = { .c = 3 } }, \ c 87 tools/testing/selftests/bpf/prog_tests/core_reloc.c .c = 3, \ c 117 tools/testing/selftests/bpf/prog_tests/core_reloc.c .c = (void *)3, \ c 126 tools/testing/selftests/bpf/prog_tests/core_reloc.c .a = 1, .b = 2, .c = 3, .d = 4, \ c 278 tools/testing/selftests/bpf/prog_tests/core_reloc.c .c = 0, /* BUG in clang, should be 3 */ c 71 tools/testing/selftests/bpf/prog_tests/global_data.c __u64 c; c 98 tools/testing/selftests/bpf/prog_tests/global_data.c err, val.a, val.b, val.c, tests[i].val.a, tests[i].val.b, tests[i].val.c); c 31 tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c bool c: 1; /* it's really a _Bool type */ c 61 tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c long c; c 63 tools/testing/selftests/bpf/progs/btf_dump_test_case_namespacing.c enum C c; c 34 tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c char c; c 42 tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c char c: 1; c 50 tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c int c; c 13 tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c char c; c 81 
tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c int c; c 102 tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c we_need_to_go_deeper_ptr_t c; c 165 tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c int c; c 182 tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c struct struct_with_embedded_stuff *c; c 10 tools/testing/selftests/bpf/progs/core_reloc_types.h int c; c 17 tools/testing/selftests/bpf/progs/core_reloc_types.h int c; c 324 tools/testing/selftests/bpf/progs/core_reloc_types.h int c; c 331 tools/testing/selftests/bpf/progs/core_reloc_types.h struct core_reloc_arrays_substruct c[3]; c 339 tools/testing/selftests/bpf/progs/core_reloc_types.h struct core_reloc_arrays_substruct c[4]; c 349 tools/testing/selftests/bpf/progs/core_reloc_types.h int c; c 351 tools/testing/selftests/bpf/progs/core_reloc_types.h } c[3]; c 362 tools/testing/selftests/bpf/progs/core_reloc_types.h struct core_reloc_arrays_substruct c[3]; c 369 tools/testing/selftests/bpf/progs/core_reloc_types.h struct core_reloc_arrays_substruct c[3]; c 376 tools/testing/selftests/bpf/progs/core_reloc_types.h struct core_reloc_arrays_substruct c[3]; c 383 tools/testing/selftests/bpf/progs/core_reloc_types.h struct core_reloc_arrays_substruct c[3]; c 390 tools/testing/selftests/bpf/progs/core_reloc_types.h int c[3]; /* value is not a struct */ c 405 tools/testing/selftests/bpf/progs/core_reloc_types.h enum core_reloc_primitives_enum c; c 418 tools/testing/selftests/bpf/progs/core_reloc_types.h } c; /* inline enum def with differing set of values */ c 424 tools/testing/selftests/bpf/progs/core_reloc_types.h enum core_reloc_primitives_enum c; c 433 tools/testing/selftests/bpf/progs/core_reloc_types.h enum core_reloc_primitives_enum c; c 440 tools/testing/selftests/bpf/progs/core_reloc_types.h int c; /* int instead of enum */ c 448 tools/testing/selftests/bpf/progs/core_reloc_types.h enum core_reloc_primitives_enum c; c 456 tools/testing/selftests/bpf/progs/core_reloc_types.h enum core_reloc_primitives_enum c; c 465 tools/testing/selftests/bpf/progs/core_reloc_types.h int a, b, c, d, e, f, g, h; c 485 tools/testing/selftests/bpf/progs/core_reloc_types.h char *c; c 498 tools/testing/selftests/bpf/progs/core_reloc_types.h char_ptr_t c; c 528 tools/testing/selftests/bpf/progs/core_reloc_types.h fancy_char_ptr_t c; c 648 tools/testing/selftests/bpf/progs/core_reloc_types.h int a, b, c; c 665 tools/testing/selftests/bpf/progs/core_reloc_types.h int c; c 23 tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c int c; c 30 tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c struct core_reloc_arrays_substruct c[3]; c 47 tools/testing/selftests/bpf/progs/test_core_reloc_arrays.c if (BPF_CORE_READ(&out->c1c, &in->c[1].c)) c 18 tools/testing/selftests/bpf/progs/test_core_reloc_flavors.c int c; c 23 tools/testing/selftests/bpf/progs/test_core_reloc_flavors.c int c; c 38 tools/testing/selftests/bpf/progs/test_core_reloc_flavors.c int c; c 57 tools/testing/selftests/bpf/progs/test_core_reloc_flavors.c if (BPF_CORE_READ(&out->c, &in_orig->c)) c 16 tools/testing/selftests/bpf/progs/test_core_reloc_misc.c int a, b, c; c 52 tools/testing/selftests/bpf/progs/test_core_reloc_misc.c if (BPF_CORE_READ(&out->c, &in_ext[2])) /* accessor: 2 */ c 16 tools/testing/selftests/bpf/progs/test_core_reloc_mods.c int a, b, c, d, e, f, g, h; c 36 tools/testing/selftests/bpf/progs/test_core_reloc_mods.c char *c; c 52 tools/testing/selftests/bpf/progs/test_core_reloc_mods.c BPF_CORE_READ(&out->c, &in->c) || c 
23 tools/testing/selftests/bpf/progs/test_core_reloc_primitives.c enum core_reloc_primitives_enum c; c 36 tools/testing/selftests/bpf/progs/test_core_reloc_primitives.c BPF_CORE_READ(&out->c, &in->c) || c 27 tools/testing/selftests/bpf/progs/test_global_data.c __u64 c; c 55 tools/testing/selftests/bpf/progs/test_global_data.c .c = 0x1111111111111111ULL, c 62 tools/testing/selftests/bpf/progs/test_global_data.c .c = 0x2111111111111111ULL, c 100 tools/testing/selftests/bpf/progs/test_global_data.c test_reloc(number, 9, &struct0.c); c 12 tools/testing/selftests/bpf/progs/test_jhash.h #define __jhash_mix(a, b, c) \ c 14 tools/testing/selftests/bpf/progs/test_jhash.h a -= c; a ^= rol32(c, 4); c += b; \ c 15 tools/testing/selftests/bpf/progs/test_jhash.h b -= a; b ^= rol32(a, 6); a += c; \ c 16 tools/testing/selftests/bpf/progs/test_jhash.h c -= b; c ^= rol32(b, 8); b += a; \ c 17 tools/testing/selftests/bpf/progs/test_jhash.h a -= c; a ^= rol32(c, 16); c += b; \ c 18 tools/testing/selftests/bpf/progs/test_jhash.h b -= a; b ^= rol32(a, 19); a += c; \ c 19 tools/testing/selftests/bpf/progs/test_jhash.h c -= b; c ^= rol32(b, 4); b += a; \ c 22 tools/testing/selftests/bpf/progs/test_jhash.h #define __jhash_final(a, b, c) \ c 24 tools/testing/selftests/bpf/progs/test_jhash.h c ^= b; c -= rol32(b, 14); \ c 25 tools/testing/selftests/bpf/progs/test_jhash.h a ^= c; a -= rol32(c, 11); \ c 27 tools/testing/selftests/bpf/progs/test_jhash.h c ^= b; c -= rol32(b, 16); \ c 28 tools/testing/selftests/bpf/progs/test_jhash.h a ^= c; a -= rol32(c, 4); \ c 30 tools/testing/selftests/bpf/progs/test_jhash.h c ^= b; c -= rol32(b, 24); \ c 38 tools/testing/selftests/bpf/progs/test_jhash.h u32 a, b, c; c 41 tools/testing/selftests/bpf/progs/test_jhash.h a = b = c = JHASH_INITVAL + length + initval; c 46 tools/testing/selftests/bpf/progs/test_jhash.h c += *(volatile u32 *)(k + 8); c 47 tools/testing/selftests/bpf/progs/test_jhash.h __jhash_mix(a, b, c); c 52 tools/testing/selftests/bpf/progs/test_jhash.h case 12: c += (u32)k[11]<<24; c 53 tools/testing/selftests/bpf/progs/test_jhash.h case 11: c += (u32)k[10]<<16; c 54 tools/testing/selftests/bpf/progs/test_jhash.h case 10: c += (u32)k[9]<<8; c 55 tools/testing/selftests/bpf/progs/test_jhash.h case 9: c += k[8]; c 64 tools/testing/selftests/bpf/progs/test_jhash.h c ^= a; c 65 tools/testing/selftests/bpf/progs/test_jhash.h __jhash_final(a, b, c); c 70 tools/testing/selftests/bpf/progs/test_jhash.h return c; c 34 tools/testing/selftests/bpf/progs/test_l4lb.c #define __jhash_mix(a, b, c) \ c 36 tools/testing/selftests/bpf/progs/test_l4lb.c a -= c; a ^= rol32(c, 4); c += b; \ c 37 tools/testing/selftests/bpf/progs/test_l4lb.c b -= a; b ^= rol32(a, 6); a += c; \ c 38 tools/testing/selftests/bpf/progs/test_l4lb.c c -= b; c ^= rol32(b, 8); b += a; \ c 39 tools/testing/selftests/bpf/progs/test_l4lb.c a -= c; a ^= rol32(c, 16); c += b; \ c 40 tools/testing/selftests/bpf/progs/test_l4lb.c b -= a; b ^= rol32(a, 19); a += c; \ c 41 tools/testing/selftests/bpf/progs/test_l4lb.c c -= b; c ^= rol32(b, 4); b += a; \ c 44 tools/testing/selftests/bpf/progs/test_l4lb.c #define __jhash_final(a, b, c) \ c 46 tools/testing/selftests/bpf/progs/test_l4lb.c c ^= b; c -= rol32(b, 14); \ c 47 tools/testing/selftests/bpf/progs/test_l4lb.c a ^= c; a -= rol32(c, 11); \ c 49 tools/testing/selftests/bpf/progs/test_l4lb.c c ^= b; c -= rol32(b, 16); \ c 50 tools/testing/selftests/bpf/progs/test_l4lb.c a ^= c; a -= rol32(c, 4); \ c 52 tools/testing/selftests/bpf/progs/test_l4lb.c c ^= b; c -= rol32(b, 
24); \ c 61 tools/testing/selftests/bpf/progs/test_l4lb.c u32 a, b, c; c 64 tools/testing/selftests/bpf/progs/test_l4lb.c a = b = c = JHASH_INITVAL + length + initval; c 69 tools/testing/selftests/bpf/progs/test_l4lb.c c += *(u32 *)(k + 8); c 70 tools/testing/selftests/bpf/progs/test_l4lb.c __jhash_mix(a, b, c); c 75 tools/testing/selftests/bpf/progs/test_l4lb.c case 12: c += (u32)k[11]<<24; c 76 tools/testing/selftests/bpf/progs/test_l4lb.c case 11: c += (u32)k[10]<<16; c 77 tools/testing/selftests/bpf/progs/test_l4lb.c case 10: c += (u32)k[9]<<8; c 78 tools/testing/selftests/bpf/progs/test_l4lb.c case 9: c += k[8]; c 87 tools/testing/selftests/bpf/progs/test_l4lb.c __jhash_final(a, b, c); c 92 tools/testing/selftests/bpf/progs/test_l4lb.c return c; c 95 tools/testing/selftests/bpf/progs/test_l4lb.c static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval) c 99 tools/testing/selftests/bpf/progs/test_l4lb.c c += initval; c 100 tools/testing/selftests/bpf/progs/test_l4lb.c __jhash_final(a, b, c); c 101 tools/testing/selftests/bpf/progs/test_l4lb.c return c; c 30 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c #define __jhash_mix(a, b, c) \ c 32 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c a -= c; a ^= rol32(c, 4); c += b; \ c 33 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c b -= a; b ^= rol32(a, 6); a += c; \ c 34 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c c -= b; c ^= rol32(b, 8); b += a; \ c 35 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c a -= c; a ^= rol32(c, 16); c += b; \ c 36 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c b -= a; b ^= rol32(a, 19); a += c; \ c 37 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c c -= b; c ^= rol32(b, 4); b += a; \ c 40 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c #define __jhash_final(a, b, c) \ c 42 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c c ^= b; c -= rol32(b, 14); \ c 43 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c a ^= c; a -= rol32(c, 11); \ c 45 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c c ^= b; c -= rol32(b, 16); \ c 46 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c a ^= c; a -= rol32(c, 4); \ c 48 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c c ^= b; c -= rol32(b, 24); \ c 57 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c u32 a, b, c; c 60 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c a = b = c = JHASH_INITVAL + length + initval; c 65 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c c += *(u32 *)(k + 8); c 66 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c __jhash_mix(a, b, c); c 71 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c case 12: c += (u32)k[11]<<24; c 72 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c case 11: c += (u32)k[10]<<16; c 73 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c case 10: c += (u32)k[9]<<8; c 74 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c case 9: c += k[8]; c 83 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c __jhash_final(a, b, c); c 88 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c return c; c 91 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c static u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval) c 95 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c c += initval; c 96 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c __jhash_final(a, b, c); c 97 tools/testing/selftests/bpf/progs/test_l4lb_noinline.c return c; c 27 tools/testing/selftests/bpf/progs/test_xdp_noinline.c #define 
__jhash_mix(a, b, c) \ c 29 tools/testing/selftests/bpf/progs/test_xdp_noinline.c a -= c; a ^= rol32(c, 4); c += b; \ c 30 tools/testing/selftests/bpf/progs/test_xdp_noinline.c b -= a; b ^= rol32(a, 6); a += c; \ c 31 tools/testing/selftests/bpf/progs/test_xdp_noinline.c c -= b; c ^= rol32(b, 8); b += a; \ c 32 tools/testing/selftests/bpf/progs/test_xdp_noinline.c a -= c; a ^= rol32(c, 16); c += b; \ c 33 tools/testing/selftests/bpf/progs/test_xdp_noinline.c b -= a; b ^= rol32(a, 19); a += c; \ c 34 tools/testing/selftests/bpf/progs/test_xdp_noinline.c c -= b; c ^= rol32(b, 4); b += a; \ c 37 tools/testing/selftests/bpf/progs/test_xdp_noinline.c #define __jhash_final(a, b, c) \ c 39 tools/testing/selftests/bpf/progs/test_xdp_noinline.c c ^= b; c -= rol32(b, 14); \ c 40 tools/testing/selftests/bpf/progs/test_xdp_noinline.c a ^= c; a -= rol32(c, 11); \ c 42 tools/testing/selftests/bpf/progs/test_xdp_noinline.c c ^= b; c -= rol32(b, 16); \ c 43 tools/testing/selftests/bpf/progs/test_xdp_noinline.c a ^= c; a -= rol32(c, 4); \ c 45 tools/testing/selftests/bpf/progs/test_xdp_noinline.c c ^= b; c -= rol32(b, 24); \ c 55 tools/testing/selftests/bpf/progs/test_xdp_noinline.c u32 a, b, c; c 58 tools/testing/selftests/bpf/progs/test_xdp_noinline.c a = b = c = JHASH_INITVAL + length + initval; c 63 tools/testing/selftests/bpf/progs/test_xdp_noinline.c c += *(u32 *)(k + 8); c 64 tools/testing/selftests/bpf/progs/test_xdp_noinline.c __jhash_mix(a, b, c); c 69 tools/testing/selftests/bpf/progs/test_xdp_noinline.c case 12: c += (u32)k[11]<<24; c 70 tools/testing/selftests/bpf/progs/test_xdp_noinline.c case 11: c += (u32)k[10]<<16; c 71 tools/testing/selftests/bpf/progs/test_xdp_noinline.c case 10: c += (u32)k[9]<<8; c 72 tools/testing/selftests/bpf/progs/test_xdp_noinline.c case 9: c += k[8]; c 81 tools/testing/selftests/bpf/progs/test_xdp_noinline.c __jhash_final(a, b, c); c 86 tools/testing/selftests/bpf/progs/test_xdp_noinline.c return c; c 90 tools/testing/selftests/bpf/progs/test_xdp_noinline.c u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval) c 94 tools/testing/selftests/bpf/progs/test_xdp_noinline.c c += initval; c 95 tools/testing/selftests/bpf/progs/test_xdp_noinline.c __jhash_final(a, b, c); c 96 tools/testing/selftests/bpf/progs/test_xdp_noinline.c return c; c 624 tools/testing/selftests/bpf/test_flow_dissector.c int c; c 626 tools/testing/selftests/bpf/test_flow_dissector.c while ((c = getopt(argc, argv, "d:D:e:f:Fhi:l:n:o:O:Rs:S:t:Tx:X:")) != -1) { c 627 tools/testing/selftests/bpf/test_flow_dissector.c switch (c) { c 1237 tools/testing/selftests/bpf/test_maps.c long long c; c 1249 tools/testing/selftests/bpf/test_maps.c key = (struct bigkey) { .c = i }; c 1255 tools/testing/selftests/bpf/test_maps.c key.c = -1; c 1261 tools/testing/selftests/bpf/test_maps.c key.c = -1; c 1266 tools/testing/selftests/bpf/test_maps.c key.c = 0; c 269 tools/testing/selftests/cgroup/test_memcontrol.c long c[4]; c 358 tools/testing/selftests/cgroup/test_memcontrol.c c[i] = cg_read_long(children[i], "memory.current"); c 360 tools/testing/selftests/cgroup/test_memcontrol.c if (!values_close(c[0], MB(33), 10)) c 363 tools/testing/selftests/cgroup/test_memcontrol.c if (!values_close(c[1], MB(17), 10)) c 366 tools/testing/selftests/cgroup/test_memcontrol.c if (!values_close(c[2], 0, 1)) c 426 tools/testing/selftests/cgroup/test_memcontrol.c long c[4]; c 505 tools/testing/selftests/cgroup/test_memcontrol.c c[i] = cg_read_long(children[i], "memory.current"); c 507 tools/testing/selftests/cgroup/test_memcontrol.c if 
(!values_close(c[0], MB(33), 10)) c 510 tools/testing/selftests/cgroup/test_memcontrol.c if (!values_close(c[1], MB(17), 10)) c 513 tools/testing/selftests/cgroup/test_memcontrol.c if (!values_close(c[2], 0, 1)) c 361 tools/testing/selftests/futex/functional/futex_requeue_pi.c int c, ret; c 363 tools/testing/selftests/futex/functional/futex_requeue_pi.c while ((c = getopt(argc, argv, "bchlot:v:")) != -1) { c 364 tools/testing/selftests/futex/functional/futex_requeue_pi.c switch (c) { c 58 tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c int c; c 60 tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c while ((c = getopt(argc, argv, "chv:")) != -1) { c 61 tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c switch (c) { c 123 tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c int c, res, ret = RET_PASS; c 125 tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c while ((c = getopt(argc, argv, "chv:")) != -1) { c 126 tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c switch (c) { c 77 tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c int c; c 79 tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c while ((c = getopt(argc, argv, "chv:")) != -1) { c 80 tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c switch (c) { c 45 tools/testing/selftests/futex/functional/futex_wait_timeout.c int c; c 47 tools/testing/selftests/futex/functional/futex_wait_timeout.c while ((c = getopt(argc, argv, "cht:v:")) != -1) { c 48 tools/testing/selftests/futex/functional/futex_wait_timeout.c switch (c) { c 68 tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c int c, ret = RET_PASS; c 72 tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c while ((c = getopt(argc, argv, "chv:")) != -1) { c 73 tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c switch (c) { c 44 tools/testing/selftests/futex/functional/futex_wait_wouldblock.c int c; c 46 tools/testing/selftests/futex/functional/futex_wait_wouldblock.c while ((c = getopt(argc, argv, "cht:v:")) != -1) { c 47 tools/testing/selftests/futex/functional/futex_wait_wouldblock.c switch (c) { c 28 tools/testing/selftests/ia64/aliasing-test.c int *c; c 47 tools/testing/selftests/ia64/aliasing-test.c c = (int *) addr; c 48 tools/testing/selftests/ia64/aliasing-test.c while (c < (int *) (addr + length)) c 49 tools/testing/selftests/ia64/aliasing-test.c sum += *c++; c 39 tools/testing/selftests/kvm/lib/assert.c char *c; c 42 tools/testing/selftests/kvm/lib/assert.c c = &cmd[0]; c 43 tools/testing/selftests/kvm/lib/assert.c c += sprintf(c, "%s", addr2line); c 50 tools/testing/selftests/kvm/lib/assert.c c += sprintf(c, " %lx", ((unsigned long) stack[i]) - 1); c 51 tools/testing/selftests/kvm/lib/assert.c c += sprintf(c, "%s", pipeline); c 433 tools/testing/selftests/net/ip_defrag.c int c; c 435 tools/testing/selftests/net/ip_defrag.c while ((c = getopt(argc, argv, "46opv")) != -1) { c 436 tools/testing/selftests/net/ip_defrag.c switch (c) { c 166 tools/testing/selftests/net/ipv6_flowlabel.c int c; c 168 tools/testing/selftests/net/ipv6_flowlabel.c while ((c = getopt(argc, argv, "l:")) != -1) { c 169 tools/testing/selftests/net/ipv6_flowlabel.c switch (c) { c 167 tools/testing/selftests/net/ipv6_flowlabel_mgr.c int c; c 169 tools/testing/selftests/net/ipv6_flowlabel_mgr.c while ((c = getopt(argc, argv, "lv")) != -1) { c 
170 tools/testing/selftests/net/ipv6_flowlabel_mgr.c switch (c) { c 706 tools/testing/selftests/net/msg_zerocopy.c int c; c 712 tools/testing/selftests/net/msg_zerocopy.c while ((c = getopt(argc, argv, "46c:C:D:i:mp:rs:S:t:vz")) != -1) { c 713 tools/testing/selftests/net/msg_zerocopy.c switch (c) { c 310 tools/testing/selftests/net/psock_snd.c int c; c 312 tools/testing/selftests/net/psock_snd.c while ((c = getopt(argc, argv, "bcCdgl:qt:vV")) != -1) { c 313 tools/testing/selftests/net/psock_snd.c switch (c) { c 309 tools/testing/selftests/net/so_txtime.c int c, ilen, olen; c 311 tools/testing/selftests/net/so_txtime.c while ((c = getopt(argc, argv, "46c:")) != -1) { c 312 tools/testing/selftests/net/so_txtime.c switch (c) { c 298 tools/testing/selftests/net/tcp_fastopen_backup_key.c int c; c 300 tools/testing/selftests/net/tcp_fastopen_backup_key.c while ((c = getopt(argc, argv, "46sr")) != -1) { c 301 tools/testing/selftests/net/tcp_fastopen_backup_key.c switch (c) { c 99 tools/testing/selftests/net/tcp_inq.c int c, one = 1, inq = -1; c 108 tools/testing/selftests/net/tcp_inq.c while ((c = getopt(argc, argv, "46p:")) != -1) { c 109 tools/testing/selftests/net/tcp_inq.c switch (c) { c 307 tools/testing/selftests/net/tcp_mmap.c int fd, c, on = 1; c 312 tools/testing/selftests/net/tcp_mmap.c while ((c = getopt(argc, argv, "46p:svr:w:H:zxkP:M:")) != -1) { c 313 tools/testing/selftests/net/tcp_mmap.c switch (c) { c 643 tools/testing/selftests/net/udpgso.c int c; c 645 tools/testing/selftests/net/udpgso.c while ((c = getopt(argc, argv, "46cCmst:")) != -1) { c 646 tools/testing/selftests/net/udpgso.c switch (c) { c 293 tools/testing/selftests/net/udpgso_bench_rx.c int c; c 297 tools/testing/selftests/net/udpgso_bench_rx.c while ((c = getopt(argc, argv, "4b:C:Gl:n:p:rR:S:tv")) != -1) { c 298 tools/testing/selftests/net/udpgso_bench_rx.c switch (c) { c 423 tools/testing/selftests/net/udpgso_bench_tx.c int c; c 425 tools/testing/selftests/net/udpgso_bench_tx.c while ((c = getopt(argc, argv, "46acC:D:Hl:mM:p:s:PS:tTuvz")) != -1) { c 426 tools/testing/selftests/net/udpgso_bench_tx.c switch (c) { c 573 tools/testing/selftests/networking/timestamping/txtimestamp.c int c; c 575 tools/testing/selftests/networking/timestamping/txtimestamp.c while ((c = getopt(argc, argv, "46c:CDFhIl:Lnp:PrRuv:V:x")) != -1) { c 576 tools/testing/selftests/networking/timestamping/txtimestamp.c switch (c) { c 32 tools/testing/selftests/nsfs/owner.c char c; c 51 tools/testing/selftests/nsfs/owner.c if (read(pfd[0], &c, 1) != 0) c 208 tools/testing/selftests/pidfd/pidfd_test.c char c; c 223 tools/testing/selftests/pidfd/pidfd_test.c (void)read(pipe_fds[0], &c, 1); c 359 tools/testing/selftests/pidfd/pidfd_test.c int c; c 377 tools/testing/selftests/pidfd/pidfd_test.c c = epoll_wait(epoll_fd, events, MAX_EVENTS, 5000); c 378 tools/testing/selftests/pidfd/pidfd_test.c if (c != 1 || !(events[0].events & EPOLLIN)) c 381 tools/testing/selftests/pidfd/pidfd_test.c test_name, c, events[0].events, errno); c 118 tools/testing/selftests/powerpc/alignment/alignment_handler.c char *c = dst; c 121 tools/testing/selftests/powerpc/alignment/alignment_handler.c c += offset; c 124 tools/testing/selftests/powerpc/alignment/alignment_handler.c c[i] = i; c 39 tools/testing/selftests/powerpc/benchmarks/context_switch.c vector int a, b, c; c 50 tools/testing/selftests/powerpc/benchmarks/context_switch.c c = a + b; c 68 tools/testing/selftests/powerpc/benchmarks/context_switch.c c = a + b; c 70 
tools/testing/selftests/powerpc/benchmarks/context_switch.c asm volatile("# %0 %1 %2": : "r"(&tv), "r"(&fp), "r"(&c)); c 177 tools/testing/selftests/powerpc/benchmarks/context_switch.c assert(read(pipe_fd1[READ], &c, 1) == 1); c 180 tools/testing/selftests/powerpc/benchmarks/context_switch.c assert(write(pipe_fd2[WRITE], &c, 1) == 1); c 192 tools/testing/selftests/powerpc/benchmarks/context_switch.c assert(write(pipe_fd1[WRITE], &c, 1) == 1); c 195 tools/testing/selftests/powerpc/benchmarks/context_switch.c assert(read(pipe_fd2[READ], &c, 1) == 1); c 272 tools/testing/selftests/powerpc/benchmarks/context_switch.c int c; c 277 tools/testing/selftests/powerpc/benchmarks/context_switch.c c = cmpxchg(m, 0, 1); c 278 tools/testing/selftests/powerpc/benchmarks/context_switch.c if (!c) c 281 tools/testing/selftests/powerpc/benchmarks/context_switch.c if (c == 1) c 282 tools/testing/selftests/powerpc/benchmarks/context_switch.c c = xchg(m, 2); c 284 tools/testing/selftests/powerpc/benchmarks/context_switch.c while (c) { c 286 tools/testing/selftests/powerpc/benchmarks/context_switch.c c = xchg(m, 2); c 406 tools/testing/selftests/powerpc/benchmarks/context_switch.c signed char c; c 415 tools/testing/selftests/powerpc/benchmarks/context_switch.c c = getopt_long(argc, argv, "", options, &option_index); c 417 tools/testing/selftests/powerpc/benchmarks/context_switch.c if (c == -1) c 420 tools/testing/selftests/powerpc/benchmarks/context_switch.c switch (c) { c 231 tools/testing/selftests/powerpc/benchmarks/fork.c signed char c; c 236 tools/testing/selftests/powerpc/benchmarks/fork.c c = getopt_long(argc, argv, "", options, &option_index); c 238 tools/testing/selftests/powerpc/benchmarks/fork.c if (c == -1) c 241 tools/testing/selftests/powerpc/benchmarks/fork.c switch (c) { c 42 tools/testing/selftests/powerpc/benchmarks/mmap_bench.c char *c = mmap(NULL, MEMSIZE, PROT_READ|PROT_WRITE, c 44 tools/testing/selftests/powerpc/benchmarks/mmap_bench.c FAIL_IF(c == MAP_FAILED); c 48 tools/testing/selftests/powerpc/benchmarks/mmap_bench.c c[count << 16] = 'c'; c 50 tools/testing/selftests/powerpc/benchmarks/mmap_bench.c munmap(c, MEMSIZE); c 62 tools/testing/selftests/powerpc/benchmarks/mmap_bench.c signed char c; c 66 tools/testing/selftests/powerpc/benchmarks/mmap_bench.c c = getopt_long(argc, argv, "", options, &option_index); c 68 tools/testing/selftests/powerpc/benchmarks/mmap_bench.c if (c == -1) c 71 tools/testing/selftests/powerpc/benchmarks/mmap_bench.c switch (c) { c 26 tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c int p2c[2], c2p[2], rc, status, c, *p; c 49 tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c FAIL_IF(read(p2c[0], &c, 1) != 1); c 55 tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c FAIL_IF(write(c2p[1], &c, 1) != 1); c 56 tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c FAIL_IF(read(p2c[0], &c, 1) != 1); c 60 tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c c = 0; c 61 tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c FAIL_IF(write(p2c[1], &c, 1) != 1); c 62 tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c FAIL_IF(read(c2p[0], &c, 1) != 1); c 74 tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c FAIL_IF(write(p2c[1], &c, 1) != 1); c 41 tools/testing/selftests/powerpc/mm/segv_errors.c char c, *p = NULL; c 56 tools/testing/selftests/powerpc/mm/segv_errors.c c = *p; c 66 tools/testing/selftests/powerpc/mm/segv_errors.c *p = c; c 615 tools/testing/selftests/powerpc/mm/tlbie_test.c int c; c 
626 tools/testing/selftests/powerpc/mm/tlbie_test.c while ((c = getopt(argc, argv, "r:hn:l:t:")) != -1) { c 627 tools/testing/selftests/powerpc/mm/tlbie_test.c switch(c) { c 35 tools/testing/selftests/powerpc/pmu/lib.c char c = PARENT_TOKEN; c 37 tools/testing/selftests/powerpc/pmu/lib.c FAIL_IF(write(write_pipe.write_fd, &c, 1) != 1); c 38 tools/testing/selftests/powerpc/pmu/lib.c FAIL_IF(read(read_pipe.read_fd, &c, 1) != 1); c 39 tools/testing/selftests/powerpc/pmu/lib.c if (c != CHILD_TOKEN) /* sometimes expected */ c 47 tools/testing/selftests/powerpc/pmu/lib.c char c; c 49 tools/testing/selftests/powerpc/pmu/lib.c FAIL_IF(read(read_pipe.read_fd, &c, 1) != 1); c 50 tools/testing/selftests/powerpc/pmu/lib.c FAIL_IF(c != PARENT_TOKEN); c 57 tools/testing/selftests/powerpc/pmu/lib.c char c = CHILD_TOKEN; c 59 tools/testing/selftests/powerpc/pmu/lib.c FAIL_IF(write(write_pipe.write_fd, &c, 1) != 1); c 66 tools/testing/selftests/powerpc/pmu/lib.c char c = ~CHILD_TOKEN; c 68 tools/testing/selftests/powerpc/pmu/lib.c FAIL_IF(write(write_pipe.write_fd, &c, 1) != 1); c 21 tools/testing/selftests/powerpc/primitives/word-at-a-time.h static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c) c 23 tools/testing/selftests/powerpc/primitives/word-at-a-time.h unsigned long mask = (val & c->low_bits) + c->low_bits; c 37 tools/testing/selftests/powerpc/primitives/word-at-a-time.h static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c) c 39 tools/testing/selftests/powerpc/primitives/word-at-a-time.h unsigned long rhs = val | c->low_bits; c 41 tools/testing/selftests/powerpc/primitives/word-at-a-time.h return (val + c->high_bits) & ~rhs; c 60 tools/testing/selftests/powerpc/primitives/word-at-a-time.h static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) c 71 tools/testing/selftests/powerpc/primitives/word-at-a-time.h static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c) c 139 tools/testing/selftests/powerpc/primitives/word-at-a-time.h static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) c 141 tools/testing/selftests/powerpc/primitives/word-at-a-time.h unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; c 146 tools/testing/selftests/powerpc/primitives/word-at-a-time.h static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c) c 17 tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c float c = FPR_3; c 49 tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c if (validate_fpr_float(fpr_buf, c)) c 17 tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c float c = FPR_3; c 78 tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c if (validate_fpr_float(fpr_buf, c)) c 17 tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c float c = FPR_3; c 85 tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c if (validate_fpr_float(fpr_buf, c)) c 66 tools/testing/selftests/powerpc/stringloops/strlen.c char c; c 69 tools/testing/selftests/powerpc/stringloops/strlen.c c = random() & 0x7f; c 70 tools/testing/selftests/powerpc/stringloops/strlen.c } while (!c); c 71 tools/testing/selftests/powerpc/stringloops/strlen.c s[i] = c; c 79 tools/testing/selftests/powerpc/stringloops/strlen.c char c; c 82 tools/testing/selftests/powerpc/stringloops/strlen.c c = random() & 0x7f; c 83 
tools/testing/selftests/powerpc/stringloops/strlen.c } while (!c); c 84 tools/testing/selftests/powerpc/stringloops/strlen.c s[j] = c; c 93 tools/testing/selftests/powerpc/stringloops/strlen.c char c; c 96 tools/testing/selftests/powerpc/stringloops/strlen.c c = random() & 0x7f; c 97 tools/testing/selftests/powerpc/stringloops/strlen.c } while (!c); c 98 tools/testing/selftests/powerpc/stringloops/strlen.c s[i] = c; c 56 tools/testing/selftests/proc/fd-001-lookup.c unsigned int c; c 64 tools/testing/selftests/proc/fd-001-lookup.c for (c = 1; c <= 255; c++) { c 65 tools/testing/selftests/proc/fd-001-lookup.c if (c == '/') c 67 tools/testing/selftests/proc/fd-001-lookup.c snprintf(buf, sizeof(buf), "/proc/self/fd/%c%u", c, fd); c 72 tools/testing/selftests/proc/fd-001-lookup.c for (c = 1; c <= 255; c++) { c 73 tools/testing/selftests/proc/fd-001-lookup.c if (c == '/') c 75 tools/testing/selftests/proc/fd-001-lookup.c snprintf(buf, sizeof(buf), "/proc/self/fd/%u%c", fd, c); c 158 tools/testing/selftests/ptp/testptp.c int c, cnt, fd; c 183 tools/testing/selftests/ptp/testptp.c while (EOF != (c = getopt(argc, argv, "cd:e:f:ghi:k:lL:p:P:sSt:T:z"))) { c 184 tools/testing/selftests/ptp/testptp.c switch (c) { c 185 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h static inline void init_completion(struct completion *c) c 187 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h c->count = 0; c 190 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h static inline void wait_for_completion(struct completion *c) c 192 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h unsigned int prev_count = __sync_fetch_and_sub(&c->count, 1); c 197 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h static inline void complete(struct completion *c) c 199 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h unsigned int prev_count = __sync_fetch_and_add(&c->count, 1); c 205 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h static inline bool try_wait_for_completion(struct completion *c) c 210 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h static inline bool completion_done(struct completion *c) c 212 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h return c->count; c 216 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h static inline void complete_all(struct completion *c) c 33 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.h #define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \ c 48 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.h #define rcu_lockdep_assert(c, s) do { } while (0) c 49 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.h #define RCU_LOCKDEP_WARN(c, s) do { } while (0) c 21 tools/testing/selftests/rseq/basic_percpu_ops_test.c struct percpu_lock_entry c[CPU_SETSIZE]; c 30 tools/testing/selftests/rseq/basic_percpu_ops_test.c struct test_data_entry c[CPU_SETSIZE]; c 44 tools/testing/selftests/rseq/basic_percpu_ops_test.c struct percpu_list_entry c[CPU_SETSIZE]; c 56 tools/testing/selftests/rseq/basic_percpu_ops_test.c ret = rseq_cmpeqv_storev(&lock->c[cpu].v, c 72 tools/testing/selftests/rseq/basic_percpu_ops_test.c assert(lock->c[cpu].v == 1); c 77 tools/testing/selftests/rseq/basic_percpu_ops_test.c rseq_smp_store_release(&lock->c[cpu].v, 0); c 92 tools/testing/selftests/rseq/basic_percpu_ops_test.c data->c[cpu].count++; c 130 tools/testing/selftests/rseq/basic_percpu_ops_test.c sum += data.c[i].count; c 147 
tools/testing/selftests/rseq/basic_percpu_ops_test.c expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head); c 149 tools/testing/selftests/rseq/basic_percpu_ops_test.c targetptr = (intptr_t *)&list->c[cpu].head; c 175 tools/testing/selftests/rseq/basic_percpu_ops_test.c targetptr = (intptr_t *)&list->c[cpu].head; c 200 tools/testing/selftests/rseq/basic_percpu_ops_test.c node = list->c[cpu].head; c 203 tools/testing/selftests/rseq/basic_percpu_ops_test.c list->c[cpu].head = node->next; c 260 tools/testing/selftests/rseq/basic_percpu_ops_test.c node->next = list.c[i].head; c 261 tools/testing/selftests/rseq/basic_percpu_ops_test.c list.c[i].head = node; c 251 tools/testing/selftests/rseq/param_test.c struct percpu_lock_entry c[CPU_SETSIZE]; c 260 tools/testing/selftests/rseq/param_test.c struct test_data_entry c[CPU_SETSIZE]; c 270 tools/testing/selftests/rseq/param_test.c struct test_data_entry c[CPU_SETSIZE]; c 289 tools/testing/selftests/rseq/param_test.c struct percpu_list_entry c[CPU_SETSIZE]; c 305 tools/testing/selftests/rseq/param_test.c struct percpu_buffer_entry c[CPU_SETSIZE]; c 322 tools/testing/selftests/rseq/param_test.c struct percpu_memcpy_buffer_entry c[CPU_SETSIZE]; c 334 tools/testing/selftests/rseq/param_test.c ret = rseq_cmpeqv_storev(&lock->c[cpu].v, c 350 tools/testing/selftests/rseq/param_test.c assert(lock->c[cpu].v == 1); c 355 tools/testing/selftests/rseq/param_test.c rseq_smp_store_release(&lock->c[cpu].v, 0); c 372 tools/testing/selftests/rseq/param_test.c data->c[cpu].count++; c 432 tools/testing/selftests/rseq/param_test.c sum += data.c[i].count; c 454 tools/testing/selftests/rseq/param_test.c ret = rseq_addv(&data->c[cpu].count, 1, cpu); c 508 tools/testing/selftests/rseq/param_test.c sum += data.c[i].count; c 525 tools/testing/selftests/rseq/param_test.c expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head); c 527 tools/testing/selftests/rseq/param_test.c targetptr = (intptr_t *)&list->c[cpu].head; c 556 tools/testing/selftests/rseq/param_test.c targetptr = (intptr_t *)&list->c[cpu].head; c 583 tools/testing/selftests/rseq/param_test.c node = list->c[cpu].head; c 586 tools/testing/selftests/rseq/param_test.c list->c[cpu].head = node->next; c 642 tools/testing/selftests/rseq/param_test.c node->next = list.c[i].head; c 643 tools/testing/selftests/rseq/param_test.c list.c[i].head = node; c 700 tools/testing/selftests/rseq/param_test.c offset = RSEQ_READ_ONCE(buffer->c[cpu].offset); c 701 tools/testing/selftests/rseq/param_test.c if (offset == buffer->c[cpu].buflen) c 704 tools/testing/selftests/rseq/param_test.c targetptr_spec = (intptr_t *)&buffer->c[cpu].array[offset]; c 706 tools/testing/selftests/rseq/param_test.c targetptr_final = &buffer->c[cpu].offset; c 739 tools/testing/selftests/rseq/param_test.c offset = RSEQ_READ_ONCE(buffer->c[cpu].offset); c 744 tools/testing/selftests/rseq/param_test.c head = RSEQ_READ_ONCE(buffer->c[cpu].array[offset - 1]); c 746 tools/testing/selftests/rseq/param_test.c targetptr = (intptr_t *)&buffer->c[cpu].offset; c 748 tools/testing/selftests/rseq/param_test.c (intptr_t *)&buffer->c[cpu].array[offset - 1], c 769 tools/testing/selftests/rseq/param_test.c offset = buffer->c[cpu].offset; c 772 tools/testing/selftests/rseq/param_test.c head = buffer->c[cpu].array[offset - 1]; c 773 tools/testing/selftests/rseq/param_test.c buffer->c[cpu].offset = offset - 1; c 826 tools/testing/selftests/rseq/param_test.c buffer.c[i].array = c 827 tools/testing/selftests/rseq/param_test.c malloc(sizeof(*buffer.c[i].array) * CPU_SETSIZE * c 829 
tools/testing/selftests/rseq/param_test.c assert(buffer.c[i].array); c 830 tools/testing/selftests/rseq/param_test.c buffer.c[i].buflen = CPU_SETSIZE * BUFFER_ITEM_PER_CPU; c 846 tools/testing/selftests/rseq/param_test.c buffer.c[i].array[j - 1] = node; c 847 tools/testing/selftests/rseq/param_test.c buffer.c[i].offset++; c 880 tools/testing/selftests/rseq/param_test.c free(buffer.c[i].array); c 906 tools/testing/selftests/rseq/param_test.c offset = RSEQ_READ_ONCE(buffer->c[cpu].offset); c 907 tools/testing/selftests/rseq/param_test.c if (offset == buffer->c[cpu].buflen) c 909 tools/testing/selftests/rseq/param_test.c destptr = (char *)&buffer->c[cpu].array[offset]; c 914 tools/testing/selftests/rseq/param_test.c targetptr_final = &buffer->c[cpu].offset; c 950 tools/testing/selftests/rseq/param_test.c offset = RSEQ_READ_ONCE(buffer->c[cpu].offset); c 954 tools/testing/selftests/rseq/param_test.c srcptr = (char *)&buffer->c[cpu].array[offset - 1]; c 958 tools/testing/selftests/rseq/param_test.c targetptr_final = &buffer->c[cpu].offset; c 983 tools/testing/selftests/rseq/param_test.c offset = buffer->c[cpu].offset; c 986 tools/testing/selftests/rseq/param_test.c memcpy(item, &buffer->c[cpu].array[offset - 1], sizeof(*item)); c 987 tools/testing/selftests/rseq/param_test.c buffer->c[cpu].offset = offset - 1; c 1041 tools/testing/selftests/rseq/param_test.c buffer.c[i].array = c 1042 tools/testing/selftests/rseq/param_test.c malloc(sizeof(*buffer.c[i].array) * CPU_SETSIZE * c 1044 tools/testing/selftests/rseq/param_test.c assert(buffer.c[i].array); c 1045 tools/testing/selftests/rseq/param_test.c buffer.c[i].buflen = CPU_SETSIZE * MEMCPY_BUFFER_ITEM_PER_CPU; c 1056 tools/testing/selftests/rseq/param_test.c buffer.c[i].array[j - 1].data1 = j; c 1057 tools/testing/selftests/rseq/param_test.c buffer.c[i].array[j - 1].data2 = j + 1; c 1058 tools/testing/selftests/rseq/param_test.c buffer.c[i].offset++; c 1092 tools/testing/selftests/rseq/param_test.c free(buffer.c[i].array); c 3246 tools/testing/selftests/seccomp/seccomp_bpf.c char c; c 3291 tools/testing/selftests/seccomp/seccomp_bpf.c EXPECT_EQ(read(sk_pair[0], &c, 1), 1); c 78 tools/testing/selftests/sync/sync_fence.c int a, b, c, d, valid; c 84 tools/testing/selftests/sync/sync_fence.c c = sw_sync_fence_create(timeline, "allocFence", 3); c 88 tools/testing/selftests/sync/sync_fence.c sw_sync_fence_is_valid(c); c 92 tools/testing/selftests/sync/sync_fence.c d = sync_merge("mergeFence", c, d); c 120 tools/testing/selftests/sync/sync_fence.c ASSERT(sync_fence_count_with_status(c, FENCE_STATUS_SIGNALED) == 1, c 127 tools/testing/selftests/sync/sync_fence.c sw_sync_fence_destroy(c); c 634 tools/testing/selftests/vm/userfaultfd.c char c; c 636 tools/testing/selftests/vm/userfaultfd.c if (write(pipefd[cpu*2+1], &c, 1) != 1) { c 915 tools/testing/selftests/vm/userfaultfd.c char c; c 956 tools/testing/selftests/vm/userfaultfd.c if (write(pipefd[1], &c, sizeof(c)) != sizeof(c)) c 975 tools/testing/selftests/vm/userfaultfd.c char c; c 1021 tools/testing/selftests/vm/userfaultfd.c if (write(pipefd[1], &c, sizeof(c)) != sizeof(c)) c 99 tools/testing/selftests/watchdog/watchdog-test.c int c; c 106 tools/testing/selftests/watchdog/watchdog-test.c while ((c = getopt_long(argc, argv, sopts, lopts, NULL)) != -1) { c 107 tools/testing/selftests/watchdog/watchdog-test.c if (c == 'f') c 136 tools/testing/selftests/watchdog/watchdog-test.c while ((c = getopt_long(argc, argv, sopts, lopts, NULL)) != -1) { c 137 tools/testing/selftests/watchdog/watchdog-test.c switch 
(c) { c 227 tools/testing/selftests/x86/protection_keys.c char *c = (void *)dumpme; c 231 tools/testing/selftests/x86/protection_keys.c u64 *ptr = (u64 *)(c + i); c 226 tools/thermal/tmon/tmon.c int id2 = 0, c; c 235 tools/thermal/tmon/tmon.c while ((c = getopt_long(argc, argv, "c:dlht:T:vgz:", opts, &id2)) != -1) { c 236 tools/thermal/tmon/tmon.c switch (c) { c 336 tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c iobuf[i].iocb[j]->u.c.flags |= IOCB_FLAG_RESFD; c 337 tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c iobuf[i].iocb[j]->u.c.resfd = evfd; c 328 tools/usb/ffs-aio-example/simple/device_app/aio_simple.c iocb_in->u.c.flags |= IOCB_FLAG_RESFD; c 329 tools/usb/ffs-aio-example/simple/device_app/aio_simple.c iocb_in->u.c.resfd = evfd; c 342 tools/usb/ffs-aio-example/simple/device_app/aio_simple.c iocb_out->u.c.flags |= IOCB_FLAG_RESFD; c 343 tools/usb/ffs-aio-example/simple/device_app/aio_simple.c iocb_out->u.c.resfd = evfd; c 366 tools/usb/testusb.c int c; c 393 tools/usb/testusb.c while ((c = getopt (argc, argv, "D:aA:c:g:hlns:t:v:")) != EOF) c 394 tools/usb/testusb.c switch (c) { c 107 tools/usb/usbip/libsrc/names.c struct class *c; c 109 tools/usb/usbip/libsrc/names.c c = classes[hashnum(classid)]; c 110 tools/usb/usbip/libsrc/names.c for (; c; c = c->next) c 111 tools/usb/usbip/libsrc/names.c if (c->classid == classid) c 112 tools/usb/usbip/libsrc/names.c return c->name; c 230 tools/usb/usbip/libsrc/names.c struct class *c; c 233 tools/usb/usbip/libsrc/names.c c = classes[h]; c 234 tools/usb/usbip/libsrc/names.c for (; c; c = c->next) c 235 tools/usb/usbip/libsrc/names.c if (c->classid == classid) c 237 tools/usb/usbip/libsrc/names.c c = my_malloc(sizeof(struct class) + strlen(name)); c 238 tools/usb/usbip/libsrc/names.c if (!c) c 240 tools/usb/usbip/libsrc/names.c strcpy(c->name, name); c 241 tools/usb/usbip/libsrc/names.c c->classid = classid; c 242 tools/usb/usbip/libsrc/names.c c->next = classes[h]; c 243 tools/usb/usbip/libsrc/names.c classes[h] = c; c 298 tools/usb/usbip/libsrc/usbip_common.c const char *c, *s, *p; c 313 tools/usb/usbip/libsrc/usbip_common.c c = names_class(class); c 314 tools/usb/usbip/libsrc/usbip_common.c if (!c) c 315 tools/usb/usbip/libsrc/usbip_common.c c = "unknown class"; c 317 tools/usb/usbip/libsrc/usbip_common.c snprintf(buff, size, "%s / %s / %s (%02x/%02x/%02x)", c, s, p, class, subclass, protocol); c 43 tools/usb/usbip/libsrc/vhci_driver.c char *c; c 46 tools/usb/usbip/libsrc/vhci_driver.c c = strchr(value, '\n'); c 47 tools/usb/usbip/libsrc/vhci_driver.c if (!c) c 49 tools/usb/usbip/libsrc/vhci_driver.c c++; c 51 tools/usb/usbip/libsrc/vhci_driver.c while (*c != '\0') { c 58 tools/usb/usbip/libsrc/vhci_driver.c ret = sscanf(c, "%2s %d %d %d %x %u %31s\n", c 98 tools/usb/usbip/libsrc/vhci_driver.c c = strchr(c, '\n'); c 99 tools/usb/usbip/libsrc/vhci_driver.c if (!c) c 101 tools/usb/usbip/libsrc/vhci_driver.c c++; c 141 tools/virtio/linux/kernel.h #define list_for_each_entry(a, b, c) while (0) c 300 tools/virtio/ringtest/main.c long int c; c 327 tools/virtio/ringtest/main.c c = strtol(optarg, &endptr, 0); c 329 tools/virtio/ringtest/main.c assert(c > 0 && c < INT_MAX); c 330 tools/virtio/ringtest/main.c runcycles = c; c 333 tools/virtio/ringtest/main.c c = strtol(optarg, &endptr, 0); c 335 tools/virtio/ringtest/main.c assert(c > 0 && c < INT_MAX); c 336 tools/virtio/ringtest/main.c max_outstanding = c; c 339 tools/virtio/ringtest/main.c c = strtol(optarg, &endptr, 0); c 341 tools/virtio/ringtest/main.c assert(c > 0 
&& c < INT_MAX); c 342 tools/virtio/ringtest/main.c param = c; c 345 tools/virtio/ringtest/main.c c = strtol(optarg, &endptr, 0); c 347 tools/virtio/ringtest/main.c assert(c > 0 && c < INT_MAX); c 348 tools/virtio/ringtest/main.c batch = c; c 1256 tools/vm/page-types.c int c; c 1260 tools/vm/page-types.c while ((c = getopt_long(argc, argv, c 1263 tools/vm/page-types.c switch (c) { c 1383 tools/vm/slabinfo.c int c; c 1389 tools/vm/slabinfo.c while ((c = getopt_long(argc, argv, "aABd::DefhilLnN:oPrsStTUvXz1", c 1391 tools/vm/slabinfo.c switch (c) { c 318 virt/kvm/arm/vgic/vgic-kvm-device.c int c; c 326 virt/kvm/arm/vgic/vgic-kvm-device.c kvm_for_each_vcpu(c, tmp_vcpu, kvm) { c 328 virt/kvm/arm/vgic/vgic-kvm-device.c unlock_vcpus(kvm, c - 1); c 116 virt/kvm/arm/vgic/vgic-mmio-v2.c int c; c 134 virt/kvm/arm/vgic/vgic-mmio-v2.c kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) { c 137 virt/kvm/arm/vgic/vgic-mmio-v2.c if (!(targets & (1U << c))) c 664 virt/kvm/arm/vgic/vgic-mmio-v3.c int c, ret = 0; c 666 virt/kvm/arm/vgic/vgic-mmio-v3.c kvm_for_each_vcpu(c, vcpu, kvm) { c 675 virt/kvm/arm/vgic/vgic-mmio-v3.c for (c--; c >= 0; c--) { c 676 virt/kvm/arm/vgic/vgic-mmio-v3.c vcpu = kvm_get_vcpu(kvm, c); c 892 virt/kvm/arm/vgic/vgic-mmio-v3.c int sgi, c; c 910 virt/kvm/arm/vgic/vgic-mmio-v3.c kvm_for_each_vcpu(c, c_vcpu, kvm) { c 918 virt/kvm/arm/vgic/vgic-mmio-v3.c if (broadcast && c == vcpu_id) c 501 virt/kvm/arm/vgic/vgic-v3.c int c; c 506 virt/kvm/arm/vgic/vgic-v3.c kvm_for_each_vcpu(c, vcpu, kvm) { c 510 virt/kvm/arm/vgic/vgic-v3.c kvm_debug("vcpu %d redistributor base not set\n", c); c 985 virt/kvm/arm/vgic/vgic.c int c; c 991 virt/kvm/arm/vgic/vgic.c kvm_for_each_vcpu(c, vcpu, kvm) { c 123 virt/kvm/kvm_main.c #define KVM_COMPAT(c) .compat_ioctl = (c) c 139 virt/kvm/kvm_main.c #define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl, \
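(Aside: the tools/virtio/ringtest/main.c entries just above repeat one pattern for every numeric option: strtol() into a long, assert the result lies in (0, INT_MAX), then assign it to the target variable. A minimal sketch of that pattern follows; parse_count() is a made-up helper name, not a function from the referenced file.)

#include <assert.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse a positive bounded count from an option argument. */
static int parse_count(const char *arg)
{
	char *endptr;
	long c;

	c = strtol(arg, &endptr, 0);	/* base 0: decimal, 0x..., or octal */
	/* Whole string consumed (slightly stricter than the original pattern). */
	assert(endptr != arg && *endptr == '\0');
	assert(c > 0 && c < INT_MAX);	/* same bounds as the ringtest entries */
	return (int)c;
}

int main(int argc, char **argv)
{
	if (argc > 1)
		printf("parsed: %d\n", parse_count(argv[1]));
	return 0;
}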